// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/string.h>

#include <mtk_eth_soc.h>
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>

#include <pce/cdrt.h>
#include <pce/cls.h>
#include <pce/dipfilter.h>
#include <pce/netsys.h>
#include <pce/pce.h>

#include "tops/internal.h"
#include "tops/mbox.h"
#include "tops/mcu.h"
#include "tops/netsys.h"
#include "tops/protocol/tunnel/gre/gretap.h"
#include "tops/protocol/tunnel/l2tp/l2tpv2.h"
#include "tops/tunnel.h"

#define TOPS_PPE_ENTRY_BUCKETS (64)
#define TOPS_PPE_ENTRY_BUCKETS_BIT (6)

struct tops_tnl {
	/* tunnel types */
	struct tops_tnl_type *offload_tnl_types[__TOPS_TUNNEL_TYPE_MAX];
	u32 offload_tnl_type_num;
	u32 tnl_base_addr;

	/* tunnel table */
	DECLARE_HASHTABLE(ht, CONFIG_TOPS_TNL_MAP_BIT);
	DECLARE_BITMAP(tnl_used, CONFIG_TOPS_TNL_NUM);
	wait_queue_head_t tnl_sync_wait;
	spinlock_t tnl_sync_lock;
	spinlock_t tbl_lock;
	bool has_tnl_to_sync;
	struct task_struct *tnl_sync_thread;
	struct list_head *tnl_sync_pending;
	struct list_head *tnl_sync_submit;
	struct tops_tnl_info *tnl_infos;

	/* dma request */
	struct completion dma_done;
	struct dma_chan *dmachan;

	struct device *dev;
};

static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
						   struct mailbox_msg *msg);

static struct tops_tnl tops_tnl;

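/*
 * The two list heads below are double-buffered: producers append to
 * tnl_sync_submit under tnl_sync_lock, and the sync kthread later swaps
 * the submit and pending pointers so it can drain the pending list
 * without holding the lock across the whole sync.
 */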
static LIST_HEAD(tnl_sync_q1);
static LIST_HEAD(tnl_sync_q2);

struct mailbox_dev tnl_offload_mbox_recv =
	MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, tnl_offload_mbox_cmd_recv);

/* tunnel mailbox communication */
static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
						   struct mailbox_msg *msg)
{
	switch (msg->msg1) {
	case TOPS_TNL_START_ADDR_SYNC:
		tops_tnl.tnl_base_addr = msg->msg2;

		return MBOX_NO_RET_MSG;
	default:
		break;
	}

	return MBOX_NO_RET_MSG;
}

static inline void tnl_flush_ppe_entry(struct foe_entry *entry, u32 tnl_idx)
{
	u32 bind_tnl_idx;

	if (unlikely(!entry))
		return;

	switch (entry->bfib1.pkt_type) {
	case IPV4_HNAPT:
		if (entry->ipv4_hnapt.tport_id != NR_TDMA_TPORT
		    && entry->ipv4_hnapt.tport_id != NR_TDMA_QDMA_TPORT)
			return;

		bind_tnl_idx = entry->ipv4_hnapt.tops_entry - __TOPS_TUNNEL_TYPE_MAX;

		break;
	default:
		return;
	}

	/* unexpected tunnel index */
	if (bind_tnl_idx >= __TOPS_TUNNEL_TYPE_MAX)
		return;

	if (tnl_idx == __TOPS_TUNNEL_TYPE_MAX || tnl_idx == bind_tnl_idx)
		memset(entry, 0, sizeof(*entry));
}
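
/*
 * skb_hnat_tops() uses two encodings: before a tunnel is bound it holds
 * a tops_tunnel_type protocol id (< __TOPS_TUNNEL_TYPE_MAX, consumed by
 * skb_to_tnl_type()), and after binding it holds the tunnel index offset
 * by __TOPS_TUNNEL_TYPE_MAX (see skb_set_tops_tnl_idx(),
 * skb_to_tnl_info() and tnl_flush_ppe_entry() above).
 */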

static inline void skb_set_tops_tnl_idx(struct sk_buff *skb, u32 tnl_idx)
{
	skb_hnat_tops(skb) = tnl_idx + __TOPS_TUNNEL_TYPE_MAX;
}

static inline bool skb_tops_valid(struct sk_buff *skb)
{
	return (skb && skb_hnat_tops(skb) < __TOPS_TUNNEL_TYPE_MAX);
}

static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
{
	enum tops_tunnel_type tnl_proto_type = skb_hnat_tops(skb);
	struct tops_tnl_type *tnl_type;

	if (unlikely(!tnl_proto_type || tnl_proto_type >= __TOPS_TUNNEL_TYPE_MAX))
		return ERR_PTR(-EINVAL);

	tnl_type = tops_tnl.offload_tnl_types[tnl_proto_type];

	return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
}

static inline struct tops_tnl_info *skb_to_tnl_info(struct sk_buff *skb)
{
	u32 tnl_idx = skb_hnat_tops(skb) - __TOPS_TUNNEL_TYPE_MAX;

	if (tnl_idx >= CONFIG_TOPS_TNL_NUM)
		return ERR_PTR(-EINVAL);

	if (!test_bit(tnl_idx, tops_tnl.tnl_used))
		return ERR_PTR(-EACCES);

	return &tops_tnl.tnl_infos[tnl_idx];
}

static inline void skb_mark_unbind(struct sk_buff *skb)
{
	skb_hnat_tops(skb) = 0;
	skb_hnat_is_decap(skb) = 0;
	skb_hnat_alg(skb) = 1;
}

static inline u32 tnl_params_hash(struct tops_tnl_params *tnl_params)
{
	if (!tnl_params)
		return 0;

	/* TODO: check collision possibility? */
	return (tnl_params->params.network.ip.sip ^ tnl_params->params.network.ip.dip);
}

static inline bool tnl_info_decap_is_enable(struct tops_tnl_info *tnl_info)
{
	return tnl_info->cache.flag & TNL_DECAP_ENABLE;
}

static inline void tnl_info_decap_enable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag |= TNL_DECAP_ENABLE;
}

static inline void tnl_info_decap_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag &= ~(TNL_DECAP_ENABLE);
}

static inline bool tnl_info_encap_is_enable(struct tops_tnl_info *tnl_info)
{
	return tnl_info->cache.flag & TNL_ENCAP_ENABLE;
}

static inline void tnl_info_encap_enable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag |= TNL_ENCAP_ENABLE;
}

static inline void tnl_info_encap_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag &= ~(TNL_ENCAP_ENABLE);
}

static inline void tnl_info_sta_updated_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status &= (~TNL_STA_UPDATING);
	tnl_info->status &= (~TNL_STA_INIT);
	tnl_info->status |= TNL_STA_UPDATED;
}

static inline void tnl_info_sta_updated(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_updated_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_updated(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UPDATED;
}

static inline void tnl_info_sta_updating_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_UPDATING;
	tnl_info->status &= (~TNL_STA_QUEUED);
	tnl_info->status &= (~TNL_STA_UPDATED);
}

static inline void tnl_info_sta_updating(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_updating_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_updating(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UPDATING;
}

static inline void tnl_info_sta_queued_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_QUEUED;
	tnl_info->status &= (~TNL_STA_UPDATED);
}

static inline void tnl_info_sta_queued(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_queued_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_queued(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_QUEUED;
}

static inline void tnl_info_sta_init_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status = TNL_STA_INIT;
}

static inline void tnl_info_sta_init(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_init_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_init(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_INIT;
}

static inline void tnl_info_sta_uninit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status = TNL_STA_UNINIT;
}

static inline void tnl_info_sta_uninit(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_uninit_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_uninit(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UNINIT;
}
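
/*
 * Tunnel status lifecycle as driven by the helpers above and the sync
 * kthread below:
 *
 *   TNL_STA_UNINIT -> TNL_STA_INIT (slot allocated)
 *   -> TNL_STA_QUEUED (submitted for sync)
 *   -> TNL_STA_UPDATING (sync in flight)
 *   -> TNL_STA_UPDATED (firmware in sync)
 *
 * TNL_STA_DELETING is OR'ed onto the current state when a tunnel is
 * being torn down.
 */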

static inline void tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);

	list_add_tail(&tnl_info->sync_node, tops_tnl.tnl_sync_submit);

	tops_tnl.has_tnl_to_sync = true;

	spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);

	if (mtk_tops_mcu_alive())
		wake_up_interruptible(&tops_tnl.tnl_sync_wait);
}

static void mtk_tops_tnl_info_cls_update_idx(struct tops_tnl_info *tnl_info)
{
	unsigned long flag;

	tnl_info->tnl_params.cls_entry = tnl_info->tcls->cls->idx;

	spin_lock_irqsave(&tnl_info->lock, flag);
	tnl_info->cache.cls_entry = tnl_info->tcls->cls->idx;
	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static void mtk_tops_tnl_info_cls_entry_unprepare(struct tops_tnl_info *tnl_info,
						  struct tops_tnl_params *tnl_params)
{
	struct tops_cls_entry *tcls = tnl_info->tcls;

	tnl_info->tcls = NULL;

	if (refcount_dec_and_test(&tcls->refcnt)) {
		list_del(&tcls->node);

		if (!tnl_params->cdrt)
			memset(&tcls->cls->cdesc, 0, sizeof(tcls->cls->cdesc));
		else
			/*
			 * recover tport_idx to let matched packets
			 * go through EIP197 only
			 */
			CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, 2);

		mtk_pce_cls_entry_write(tcls->cls);

		mtk_pce_cls_entry_free(tcls->cls);

		devm_kfree(tops_dev, tcls);
	}
}
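
/*
 * A tops_cls_entry may be shared by several tunnels: every tnl_info that
 * points at it holds a reference, and only the final unprepare clears
 * (or, for the CDRT case, restores) the HW CLS descriptor and frees it.
 */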

static struct tops_cls_entry *
mtk_tops_tnl_info_cls_entry_prepare(struct tops_tnl_info *tnl_info,
				    struct tops_tnl_params *tnl_params)
{
	struct tops_cls_entry *tcls;
	int ret;

	tcls = devm_kzalloc(tops_dev, sizeof(struct tops_cls_entry), GFP_KERNEL);
	if (!tcls)
		return ERR_PTR(-ENOMEM);

	if (!tnl_params->cdrt) {
		tcls->cls = mtk_pce_cls_entry_alloc();
		if (IS_ERR(tcls->cls)) {
			ret = PTR_ERR(tcls->cls);
			goto free_tcls;
		}
	} else {
		struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);

		if (IS_ERR(cdrt)) {
			ret = PTR_ERR(cdrt);
			goto free_tcls;
		}
		if (unlikely(!cdrt->cls)) {
			ret = -ENODEV;
			goto free_tcls;
		}

		tcls->cls = cdrt->cls;
	}

	INIT_LIST_HEAD(&tcls->node);
	list_add_tail(&tcls->node, &tnl_info->tnl_type->tcls_head);

	tnl_info->tcls = tcls;
	refcount_set(&tcls->refcnt, 1);

	return tcls;

free_tcls:
	devm_kfree(tops_dev, tcls);

	return ERR_PTR(ret);
}

static int mtk_tops_tnl_info_cls_entry_write(struct tops_tnl_info *tnl_info)
{
	int ret;

	if (!tnl_info->tcls)
		return -EINVAL;

	ret = mtk_pce_cls_entry_write(tnl_info->tcls->cls);
	if (ret)
		return ret;

	tnl_info->tcls->updated = true;

	mtk_tops_tnl_info_cls_update_idx(tnl_info);

	return 0;
}

static int mtk_tops_tnl_info_cls_tear_down(struct tops_tnl_info *tnl_info,
					   struct tops_tnl_params *tnl_params)
{
	mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);

	return 0;
}

/*
 * Check whether the CLS entry is updated for tunnel protocols that only
 * use one CLS HW entry.
 *
 * Since only the tunnel sync task operates on the tcls linked list,
 * it is safe to access it without a lock.
 *
 * Returns true if already updated, false if an update is needed.
 */
static bool mtk_tops_tnl_info_cls_single_is_updated(struct tops_tnl_info *tnl_info,
						    struct tops_tnl_type *tnl_type)
{
	/*
	 * check whether tnl_type has already allocated a tops_cls_entry;
	 * if not, return false to prepare to allocate a new one
	 */
	if (list_empty(&tnl_type->tcls_head))
		return false;

	/*
	 * if tnl_info is not associated with tnl_type's cls entry,
	 * take a reference to the tops_cls_entry
	 */
	if (!tnl_info->tcls) {
		tnl_info->tcls = list_first_entry(&tnl_type->tcls_head,
						  struct tops_cls_entry,
						  node);

		refcount_inc(&tnl_info->tcls->refcnt);
		mtk_tops_tnl_info_cls_update_idx(tnl_info);
	}

	return tnl_info->tcls->updated;
}

static int mtk_tops_tnl_info_cls_single_setup(struct tops_tnl_info *tnl_info,
					      struct tops_tnl_params *tnl_params,
					      struct tops_tnl_type *tnl_type)
{
	struct tops_cls_entry *tcls;
	int ret;

	if (mtk_tops_tnl_info_cls_single_is_updated(tnl_info, tnl_type))
		return 0;

	if (tnl_info->tcls)
		goto cls_entry_write;

	tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
	if (IS_ERR(tcls))
		return PTR_ERR(tcls);

	if (!tnl_params->cdrt) {
		ret = tnl_type->cls_entry_setup(tnl_info, &tcls->cls->cdesc);
		if (ret) {
			TOPS_ERR("tops cls entry setup failed: %d\n", ret);
			goto cls_entry_unprepare;
		}
	} else {
		/*
		 * since CLS is already filled up with the outer protocol rule,
		 * we only update the CLS tport here to let matched packets go
		 * through QDMA and specify the destination port to TOPS
		 */
		CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, NR_EIP197_QDMA_TPORT);
		CLS_DESC_DATA(&tcls->cls->cdesc, fport, PSE_PORT_TDMA);
		CLS_DESC_DATA(&tcls->cls->cdesc, qid, 12);
	}

cls_entry_write:
	ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);

cls_entry_unprepare:
	if (ret)
		mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);

	return ret;
}

static struct tops_cls_entry *
mtk_tops_tnl_info_cls_entry_find(struct tops_tnl_type *tnl_type,
				 struct cls_desc *cdesc)
{
	struct tops_cls_entry *tcls;

	list_for_each_entry(tcls, &tnl_type->tcls_head, node)
		if (!memcmp(&tcls->cls->cdesc, cdesc, sizeof(struct cls_desc)))
			return tcls;

	return NULL;
}

static bool mtk_tops_tnl_info_cls_multi_is_updated(struct tops_tnl_info *tnl_info,
						   struct tops_tnl_type *tnl_type,
						   struct cls_desc *cdesc)
{
	struct tops_cls_entry *tcls;

	if (list_empty(&tnl_type->tcls_head))
		return false;

	if (tnl_info->tcls) {
		if (!memcmp(cdesc, &tnl_info->tcls->cls->cdesc, sizeof(*cdesc)))
			return tnl_info->tcls->updated;

		memcpy(&tnl_info->tcls->cls->cdesc, cdesc, sizeof(*cdesc));
		tnl_info->tcls->updated = false;
		return false;
	}

	tcls = mtk_tops_tnl_info_cls_entry_find(tnl_type, cdesc);
	if (!tcls)
		return false;

	tnl_info->tcls = tcls;
	refcount_inc(&tnl_info->tcls->refcnt);
	mtk_tops_tnl_info_cls_update_idx(tnl_info);

	return tcls->updated;
}

static int mtk_tops_tnl_info_cls_multi_setup(struct tops_tnl_info *tnl_info,
					     struct tops_tnl_params *tnl_params,
					     struct tops_tnl_type *tnl_type)
{
	struct tops_cls_entry *tcls;
	struct cls_desc cdesc;
	int ret;

	if (!tnl_params->cdrt) {
		memset(&cdesc, 0, sizeof(struct cls_desc));

		/* prepare cls_desc from tnl_type */
		ret = tnl_type->cls_entry_setup(tnl_info, &cdesc);
		if (ret) {
			TOPS_ERR("tops cls entry setup failed: %d\n", ret);
			return ret;
		}
	} else {
		struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);

		if (IS_ERR(cdrt)) {
			TOPS_ERR("no CDRT found for cdrt idx: %u\n",
				 tnl_params->cdrt);
			return PTR_ERR(cdrt);
		}

		memcpy(&cdesc, &cdrt->cls->cdesc, sizeof(struct cls_desc));

		CLS_DESC_DATA(&cdesc, tport_idx, 0x7);
	}

	/*
	 * check whether cdesc is already updated: if tnl_info is not
	 * associated with a tcls but we found a tcls whose cls desc content
	 * matches cdesc, tnl_info will be associated with that tcls
	 *
	 * we only go past this condition when
	 * a tcls is not yet updated or
	 * tnl_info is not yet associated with a tcls
	 */
	if (mtk_tops_tnl_info_cls_multi_is_updated(tnl_info, tnl_type, &cdesc))
		return 0;

	/* tcls is not yet updated, update this tcls */
	if (tnl_info->tcls)
		return mtk_tops_tnl_info_cls_entry_write(tnl_info);

	/* create a new tcls entry and associate it with tnl_info */
	tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
	if (IS_ERR(tcls))
		return PTR_ERR(tcls);

	memcpy(&tcls->cls->cdesc, &cdesc, sizeof(struct cls_desc));

	ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);
	if (ret)
		mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);

	return ret;
}

static int mtk_tops_tnl_info_cls_setup(struct tops_tnl_info *tnl_info,
				       struct tops_tnl_params *tnl_params)
{
	struct tops_tnl_type *tnl_type;

	if (tnl_info->tcls && tnl_info->tcls->updated)
		return 0;

	tnl_type = tnl_info->tnl_type;
	if (!tnl_type)
		return -EINVAL;

	if (!tnl_type->use_multi_cls)
		return mtk_tops_tnl_info_cls_single_setup(tnl_info,
							  tnl_params,
							  tnl_type);

	return mtk_tops_tnl_info_cls_multi_setup(tnl_info, tnl_params, tnl_type);
}
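
/*
 * Summary of the two setup paths above: protocols with use_multi_cls get
 * one HW CLS entry per distinct cls_desc (shared via
 * mtk_tops_tnl_info_cls_entry_find()), while single-CLS protocols share
 * the one entry hanging off their tnl_type's tcls_head.
 */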

static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
{
	struct dip_desc dipd;

	memset(&dipd, 0, sizeof(struct dip_desc));

	dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.params.network.ip.sip);
	dipd.tag = DIPFILTER_IPV4;

	return mtk_pce_dipfilter_entry_del(&dipd);
}

static int mtk_tops_tnl_info_dipfilter_setup(struct tops_tnl_info *tnl_info)
{
	struct dip_desc dipd;

	/* setup dipfilter */
	memset(&dipd, 0, sizeof(struct dip_desc));

	dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.params.network.ip.sip);
	dipd.tag = DIPFILTER_IPV4;

	return mtk_pce_dipfilter_entry_add(&dipd);
}

void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	lockdep_assert_held(&tnl_info->lock);

	if (tnl_info_sta_is_queued(tnl_info))
		return;

	tnl_info_submit_no_tnl_lock(tnl_info);

	tnl_info_sta_queued_no_tnl_lock(tnl_info);
}

void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static void mtk_tops_tnl_info_hash_no_lock(struct tops_tnl_info *tnl_info)
{
	lockdep_assert_held(&tops_tnl.tbl_lock);
	lockdep_assert_held(&tnl_info->lock);

	if (hash_hashed(&tnl_info->hlist))
		hash_del(&tnl_info->hlist);

	hash_add(tops_tnl.ht, &tnl_info->hlist, tnl_params_hash(&tnl_info->cache));
}

void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	spin_lock(&tnl_info->lock);

	mtk_tops_tnl_info_hash_no_lock(tnl_info);

	spin_unlock(&tnl_info->lock);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

struct tops_tnl_info *mtk_tops_tnl_info_get_by_idx(u32 tnl_idx)
{
	if (tnl_idx >= CONFIG_TOPS_TNL_NUM)
		return ERR_PTR(-EINVAL);

	if (!test_bit(tnl_idx, tops_tnl.tnl_used))
		return ERR_PTR(-EACCES);

	return &tops_tnl.tnl_infos[tnl_idx];
}

static bool mtk_tops_tnl_info_match(struct tops_tnl_type *tnl_type,
				    struct tops_tnl_info *tnl_info,
				    struct tops_params *target)
{
	struct tops_params *p = &tnl_info->cache.params;
	unsigned long flag = 0;
	bool match;

	spin_lock_irqsave(&tnl_info->lock, flag);

	match = (p->tunnel.type == target->tunnel.type
		 && mtk_tops_params_match(p, target)
		 && tnl_type->tnl_param_match(p, target));

	spin_unlock_irqrestore(&tnl_info->lock, flag);

	return match;
}

struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_type *tnl_type,
					     struct tops_tnl_params *tnl_params)
{
	struct tops_tnl_info *tnl_info;

	lockdep_assert_held(&tops_tnl.tbl_lock);

	if (unlikely(!tnl_params->tops_entry_proto
		     || tnl_params->tops_entry_proto >= __TOPS_TUNNEL_TYPE_MAX))
		return ERR_PTR(-EINVAL);

	hash_for_each_possible(tops_tnl.ht,
			       tnl_info,
			       hlist,
			       tnl_params_hash(tnl_params))
		if (mtk_tops_tnl_info_match(tnl_type, tnl_info, &tnl_params->params))
			return tnl_info;

	return ERR_PTR(-ENODEV);
}

static inline void mtk_tops_tnl_info_preserve(struct tops_tnl_type *tnl_type,
					      struct tops_tnl_params *old,
					      struct tops_tnl_params *new)
{
	new->flag |= old->flag;
	new->cls_entry = old->cls_entry;
	if (old->cdrt)
		new->cdrt = old->cdrt;

	/* we can only get ttl from encapsulation */
	if (new->params.network.ip.ttl == 128 && old->params.network.ip.ttl != 0)
		new->params.network.ip.ttl = old->params.network.ip.ttl;

	if (tnl_type->tnl_param_restore)
		tnl_type->tnl_param_restore(&old->params, &new->params);
}
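
/*
 * Note on the TTL handling above: 128 is treated as the "not learned
 * yet" value (presumably the default filled in on the decap path), so a
 * TTL previously learned from an encapsulated packet is kept instead.
 */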
805
developere5e687d2023-08-08 16:05:33 +0800806/* tnl_info->lock should be held before calling this function */
807static int mtk_tops_tnl_info_setup(struct sk_buff *skb,
developer0fb30d52023-12-04 09:51:36 +0800808 struct tops_tnl_type *tnl_type,
developere5e687d2023-08-08 16:05:33 +0800809 struct tops_tnl_info *tnl_info,
810 struct tops_tnl_params *tnl_params)
811{
developer0fb30d52023-12-04 09:51:36 +0800812 bool has_diff = false;
813
developere5e687d2023-08-08 16:05:33 +0800814 if (unlikely(!skb || !tnl_info || !tnl_params))
815 return -EPERM;
816
817 lockdep_assert_held(&tnl_info->lock);
818
developer0fb30d52023-12-04 09:51:36 +0800819 mtk_tops_tnl_info_preserve(tnl_type, &tnl_info->cache, tnl_params);
developere5e687d2023-08-08 16:05:33 +0800820
developer0fb30d52023-12-04 09:51:36 +0800821 has_diff = memcmp(&tnl_info->cache, tnl_params, sizeof(*tnl_params));
822 if (has_diff) {
823 memcpy(&tnl_info->cache, tnl_params, sizeof(*tnl_params));
developere5e687d2023-08-08 16:05:33 +0800824 mtk_tops_tnl_info_hash_no_lock(tnl_info);
825 }
826
827 if (skb_hnat_is_decap(skb)) {
828 /* the net_device is used to forward pkt to decap'ed inf when Rx */
829 tnl_info->dev = skb->dev;
830 if (!tnl_info_decap_is_enable(tnl_info)) {
developer0fb30d52023-12-04 09:51:36 +0800831 has_diff = true;
developere5e687d2023-08-08 16:05:33 +0800832 tnl_info_decap_enable(tnl_info);
developere5e687d2023-08-08 16:05:33 +0800833 }
834 } else if (skb_hnat_is_encap(skb)) {
835 /* set skb_hnat_tops(skb) to tunnel index for ppe binding */
836 skb_set_tops_tnl_idx(skb, tnl_info->tnl_idx);
837 if (!tnl_info_encap_is_enable(tnl_info)) {
developer0fb30d52023-12-04 09:51:36 +0800838 has_diff = true;
developere5e687d2023-08-08 16:05:33 +0800839 tnl_info_encap_enable(tnl_info);
developere5e687d2023-08-08 16:05:33 +0800840 }
841 }
842
developer0fb30d52023-12-04 09:51:36 +0800843 if (has_diff)
844 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
845
developere5e687d2023-08-08 16:05:33 +0800846 return 0;
847}
848
/* tops_tnl.tbl_lock should be acquired before calling this function */
static struct tops_tnl_info *
mtk_tops_tnl_info_alloc_no_lock(struct tops_tnl_type *tnl_type)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag = 0;
	u32 tnl_idx;

	lockdep_assert_held(&tops_tnl.tbl_lock);

	tnl_idx = find_first_zero_bit(tops_tnl.tnl_used, CONFIG_TOPS_TNL_NUM);
	if (tnl_idx == CONFIG_TOPS_TNL_NUM) {
		TOPS_NOTICE("offload tunnel table full!\n");
		return ERR_PTR(-ENOMEM);
	}

	/* occupy the free tunnel slot */
	tnl_info = &tops_tnl.tnl_infos[tnl_idx];
	memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
	memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));

	/* TODO: maybe spin_lock_bh() is enough? */
	spin_lock_irqsave(&tnl_info->lock, flag);

	if (tnl_info_sta_is_init(tnl_info)) {
		TOPS_ERR("error: fetched an initialized tunnel info\n");

		spin_unlock_irqrestore(&tnl_info->lock, flag);

		return ERR_PTR(-EBADF);
	}
	tnl_info_sta_init_no_tnl_lock(tnl_info);

	tnl_info->tnl_type = tnl_type;

	INIT_HLIST_NODE(&tnl_info->hlist);

	spin_unlock_irqrestore(&tnl_info->lock, flag);

	set_bit(tnl_idx, tops_tnl.tnl_used);

	return tnl_info;
}

struct tops_tnl_info *mtk_tops_tnl_info_alloc(struct tops_tnl_type *tnl_type)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);

	return tnl_info;
}

static void mtk_tops_tnl_info_free_no_lock(struct tops_tnl_info *tnl_info)
{
	if (unlikely(!tnl_info))
		return;

	lockdep_assert_held(&tops_tnl.tbl_lock);
	lockdep_assert_held(&tnl_info->lock);

	hash_del(&tnl_info->hlist);

	tnl_info_sta_uninit_no_tnl_lock(tnl_info);

	clear_bit(tnl_info->tnl_idx, tops_tnl.tnl_used);
}

static void mtk_tops_tnl_info_free(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	spin_lock(&tnl_info->lock);

	mtk_tops_tnl_info_free_no_lock(tnl_info);

	spin_unlock(&tnl_info->lock);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

static void __mtk_tops_tnl_offload_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_DELETING;
	mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
}

static int mtk_tops_tnl_offload(struct sk_buff *skb,
				struct tops_tnl_type *tnl_type,
				struct tops_tnl_params *tnl_params)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	int ret = 0;

	if (unlikely(!tnl_params))
		return -EPERM;

	/* prepare tnl_info */
	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	tnl_info = mtk_tops_tnl_info_find(tnl_type, tnl_params);
	if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) != -ENODEV) {
		/* error */
		ret = PTR_ERR(tnl_info);
		goto err_out;
	} else if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) == -ENODEV) {
		/* not allocated yet */
		tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);
	}

	if (IS_ERR(tnl_info)) {
		ret = PTR_ERR(tnl_info);
		TOPS_DBG("tnl offload alloc tnl_info failed: %d\n", ret);
		goto err_out;
	}

	spin_lock(&tnl_info->lock);
	ret = mtk_tops_tnl_info_setup(skb, tnl_type, tnl_info, tnl_params);
	spin_unlock(&tnl_info->lock);

err_out:
	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);

	return ret;
}

static int mtk_tops_tnl_l2_update(struct sk_buff *skb)
{
	struct tops_tnl_info *tnl_info = skb_to_tnl_info(skb);
	struct tops_tnl_type *tnl_type;
	unsigned long flag;
	int ret;

	if (IS_ERR(tnl_info))
		return PTR_ERR(tnl_info);

	tnl_type = tnl_info->tnl_type;
	if (!tnl_type->tnl_l2_param_update)
		return -ENODEV;

	spin_lock_irqsave(&tnl_info->lock, flag);

	ret = tnl_type->tnl_l2_param_update(skb, &tnl_info->cache.params);
	/* tnl params need to be updated */
	if (ret == 1) {
		mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
		ret = 0;
	}

	spin_unlock_irqrestore(&tnl_info->lock, flag);

	return ret;
}

static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
{
	struct tops_tnl_type *tnl_type;
	struct ethhdr *eth;
	u32 cnt;
	u32 i;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return false;
	}

	/* skb should not carry tops here */
	if (skb_hnat_tops(skb))
		return false;

	eth = eth_hdr(skb);

	/* TODO: currently decap only supports ethernet IPv4 */
	if (ntohs(eth->h_proto) != ETH_P_IP)
		return false;

	/* TODO: might be optimized */
	for (i = TOPS_TUNNEL_GRETAP, cnt = 0;
	     i < __TOPS_TUNNEL_TYPE_MAX && cnt < tops_tnl.offload_tnl_type_num;
	     i++) {
		tnl_type = tops_tnl.offload_tnl_types[i];
		if (unlikely(!tnl_type))
			continue;

		cnt++;
		if (tnl_type->tnl_decap_offloadable
		    && tnl_type->tnl_decap_offloadable(skb)) {
			skb_hnat_tops(skb) = tnl_type->tnl_proto_type;
			return true;
		}
	}

	return false;
}

static int mtk_tops_tnl_decap_offload(struct sk_buff *skb)
{
	struct tops_tnl_params tnl_params;
	struct tops_tnl_type *tnl_type;
	int ret;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return -EAGAIN;
	}

	if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_decap(skb))) {
		skb_mark_unbind(skb);
		return -EINVAL;
	}

	tnl_type = skb_to_tnl_type(skb);
	if (IS_ERR(tnl_type)) {
		skb_mark_unbind(skb);
		return PTR_ERR(tnl_type);
	}

	if (unlikely(!tnl_type->tnl_decap_param_setup || !tnl_type->tnl_param_match)) {
		skb_mark_unbind(skb);
		return -ENODEV;
	}

	memset(&tnl_params, 0, sizeof(struct tops_tnl_params));

	/* push removed ethernet header back first */
	if (tnl_type->has_inner_eth)
		skb_push(skb, sizeof(struct ethhdr));

	ret = mtk_tops_decap_param_setup(skb,
					 &tnl_params.params,
					 tnl_type->tnl_decap_param_setup);

	/* pull ethernet header to restore skb->data to ip start */
	if (tnl_type->has_inner_eth)
		skb_pull(skb, sizeof(struct ethhdr));

	if (unlikely(ret)) {
		skb_mark_unbind(skb);
		return ret;
	}

	tnl_params.tops_entry_proto = tnl_type->tnl_proto_type;
	tnl_params.cdrt = skb_hnat_cdrt(skb);

	ret = mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);

	/*
	 * whether we succeed or fail to offload a decapsulation tunnel,
	 * skb_hnat_tops(skb) must be cleared to avoid mtk_tnl_decap_offload()
	 * getting called again
	 */
	skb_hnat_tops(skb) = 0;
	skb_hnat_is_decap(skb) = 0;

	return ret;
}

static int __mtk_tops_tnl_encap_offload(struct sk_buff *skb)
{
	struct tops_tnl_params tnl_params;
	struct tops_tnl_type *tnl_type;
	int ret;

	tnl_type = skb_to_tnl_type(skb);
	if (IS_ERR(tnl_type))
		return PTR_ERR(tnl_type);

	if (unlikely(!tnl_type->tnl_encap_param_setup || !tnl_type->tnl_param_match))
		return -ENODEV;

	memset(&tnl_params, 0, sizeof(struct tops_tnl_params));

	ret = mtk_tops_encap_param_setup(skb,
					 &tnl_params.params,
					 tnl_type->tnl_encap_param_setup);
	if (unlikely(ret))
		return ret;

	tnl_params.tops_entry_proto = tnl_type->tnl_proto_type;
	tnl_params.cdrt = skb_hnat_cdrt(skb);

	return mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
}

static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
{
	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return -EAGAIN;
	}

	if (!skb_hnat_is_encap(skb))
		return -EPERM;

	if (unlikely(skb_hnat_cdrt(skb)))
		return mtk_tops_tnl_l2_update(skb);

	return __mtk_tops_tnl_encap_offload(skb);
}

static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
{
	if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
		return ERR_PTR(-EINVAL);

	tnl_idx = tnl_idx - TOPS_CRSN_TNL_ID_START;

	return tops_tnl.tnl_infos[tnl_idx].dev;
}

static void mtk_tops_tnl_sync_dma_done(void *param)
{
	/* TODO: check tx status with dmaengine_tx_status()? */
	complete(&tops_tnl.dma_done);
}

static void mtk_tops_tnl_sync_dma_start(void *param)
{
	dma_async_issue_pending(tops_tnl.dmachan);

	wait_for_completion(&tops_tnl.dma_done);
}

static void mtk_tops_tnl_sync_dma_unprepare(struct tops_tnl_info *tnl_info,
					    dma_addr_t *addr)
{
	dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
			 DMA_TO_DEVICE);

	dma_release_channel(tops_tnl.dmachan);
}

static int mtk_tops_tnl_sync_dma_prepare(struct tops_tnl_info *tnl_info,
					 dma_addr_t *addr)
{
	u32 tnl_addr = tops_tnl.tnl_base_addr;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (!tnl_info)
		return -EPERM;

	tnl_addr += tnl_info->tnl_idx * sizeof(struct tops_tnl_params);

	tops_tnl.dmachan = dma_request_slave_channel(tops_dev, "tnl-sync");
	if (!tops_tnl.dmachan) {
		TOPS_ERR("request dma channel failed\n");
		return -ENODEV;
	}

	*addr = dma_map_single(tops_dev,
			       &tnl_info->tnl_params,
			       sizeof(struct tops_tnl_params),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(tops_dev, *addr)) {
		ret = -ENOMEM;
		goto dma_release;
	}

	desc = dmaengine_prep_dma_memcpy(tops_tnl.dmachan,
					 (dma_addr_t)tnl_addr, *addr,
					 sizeof(struct tops_tnl_params),
					 0);
	if (!desc) {
		ret = -EBUSY;
		goto dma_unmap;
	}

	desc->callback = mtk_tops_tnl_sync_dma_done;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto dma_terminate;

	reinit_completion(&tops_tnl.dma_done);

	return ret;

dma_terminate:
	dmaengine_terminate_all(tops_tnl.dmachan);

dma_unmap:
	dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
			 DMA_TO_DEVICE);

dma_release:
	dma_release_channel(tops_tnl.dmachan);

	return ret;
}
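
/*
 * Parameter sync to the firmware runs in three steps: dma_prepare() maps
 * tnl_params and queues a memcpy descriptor targeting the TOPS slot at
 * tnl_base_addr + tnl_idx * sizeof(struct tops_tnl_params),
 * dma_start() (called while the MCU is stalled via mtk_tops_mcu_stall())
 * issues it and waits for dma_done, and dma_unprepare() unmaps the
 * buffer and releases the channel.
 */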

static int __mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
{
	struct mcu_ctrl_cmd mcmd;
	dma_addr_t addr;
	int ret;

	mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
	mcmd.arg[0] = TUNNEL_CTRL_EVENT_DEL;
	mcmd.arg[1] = tnl_info->tnl_idx;
	mcmd.core_mask = CORE_TOPS_MASK;

	ret = mtk_tops_mcu_stall(&mcmd, NULL, NULL);
	if (ret) {
		TOPS_ERR("tnl sync deletion notify mcu failed: %d\n", ret);
		return ret;
	}

	/* there shouldn't be any other reference to tnl_info right now */
	memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
	memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));

	ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
	if (ret) {
		TOPS_ERR("tnl sync deletion prepare dma request failed: %d\n", ret);
		return ret;
	}

	mtk_tops_tnl_sync_dma_start(NULL);

	mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);

	return ret;
}

static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
{
	struct tops_tnl_params tnl_params;
	int ret;

	ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
	if (ret) {
		TOPS_ERR("tnl sync dipfilter tear down failed: %d\n",
			 ret);
		return ret;
	}

	memcpy(&tnl_params, &tnl_info->tnl_params, sizeof(struct tops_tnl_params));
	ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
	if (ret) {
		TOPS_ERR("tnl sync deletion failed: %d\n", ret);
		return ret;
	}

	ret = mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_params);
	if (ret) {
		TOPS_ERR("tnl sync cls tear down failed: %d\n",
			 ret);
		return ret;
	}

	mtk_tops_tnl_info_free(tnl_info);

	return ret;
}

static int __mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
					    bool is_new_tnl)
{
	struct mcu_ctrl_cmd mcmd;
	dma_addr_t addr;
	int ret;

	mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
	mcmd.arg[1] = tnl_info->tnl_idx;
	mcmd.core_mask = CORE_TOPS_MASK;

	if (is_new_tnl)
		mcmd.arg[0] = TUNNEL_CTRL_EVENT_NEW;
	else
		mcmd.arg[0] = TUNNEL_CTRL_EVENT_DIP_UPDATE;

	ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
	if (ret) {
		TOPS_ERR("tnl sync update prepare dma request failed: %d\n", ret);
		return ret;
	}

	ret = mtk_tops_mcu_stall(&mcmd, mtk_tops_tnl_sync_dma_start, NULL);
	if (ret)
		TOPS_ERR("tnl sync update notify mcu failed: %d\n", ret);

	mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);

	return ret;
}

static int mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
					  bool setup_pce, bool is_new_tnl)
{
	int ret;

	if (setup_pce) {
		ret = mtk_tops_tnl_info_cls_setup(tnl_info, &tnl_info->tnl_params);
		if (ret) {
			TOPS_ERR("tnl cls setup failed: %d\n", ret);
			return ret;
		}
	}

	ret = __mtk_tops_tnl_sync_param_update(tnl_info, is_new_tnl);
	if (ret) {
		TOPS_ERR("tnl sync failed: %d\n", ret);
		goto cls_tear_down;
	}

	tnl_info_sta_updated(tnl_info);

	if (setup_pce) {
		ret = mtk_tops_tnl_info_dipfilter_setup(tnl_info);
		if (ret) {
			TOPS_ERR("tnl dipfilter setup failed: %d\n", ret);
			/* TODO: should undo parameter sync */
			return ret;
		}
	}

	return ret;

cls_tear_down:
	mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);

	return ret;
}

static inline int mtk_tops_tnl_sync_param_new(struct tops_tnl_info *tnl_info,
					      bool setup_pce)
{
	return mtk_tops_tnl_sync_param_update(tnl_info, setup_pce, true);
}

static void mtk_tops_tnl_sync_get_pending_queue(void)
{
	struct list_head *tmp = tops_tnl.tnl_sync_submit;
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);

	tops_tnl.tnl_sync_submit = tops_tnl.tnl_sync_pending;
	tops_tnl.tnl_sync_pending = tmp;

	tops_tnl.has_tnl_to_sync = false;

	spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
}
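
/*
 * The swap above is the only point where tnl_sync_lock guards both queue
 * pointers, so submitters only ever contend for a brief pointer swap and
 * never wait for the (potentially sleeping) sync of the drained list.
 */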

static void mtk_tops_tnl_sync_queue_proc(void)
{
	struct tops_tnl_info *tnl_info;
	struct tops_tnl_info *tmp;
	unsigned long flag = 0;
	bool is_decap = false;
	u32 tnl_status = 0;
	int ret;

	list_for_each_entry_safe(tnl_info,
				 tmp,
				 tops_tnl.tnl_sync_pending,
				 sync_node) {
		spin_lock_irqsave(&tnl_info->lock, flag);

		/* tnl update is on the fly, queue tnl to next round */
		if (tnl_info_sta_is_updating(tnl_info)) {
			list_del_init(&tnl_info->sync_node);

			tnl_info_submit_no_tnl_lock(tnl_info);

			goto next;
		}

		/*
		 * if tnl_info is not queued, something is wrong;
		 * just remove that tnl_info from the queue
		 * maybe trigger BUG_ON()?
		 */
		if (!tnl_info_sta_is_queued(tnl_info)) {
			list_del_init(&tnl_info->sync_node);
			goto next;
		}

		is_decap = (!(tnl_info->tnl_params.flag & TNL_DECAP_ENABLE)
			    && tnl_info_decap_is_enable(tnl_info));

		tnl_status = tnl_info->status;
		memcpy(&tnl_info->tnl_params, &tnl_info->cache,
		       sizeof(struct tops_tnl_params));

		list_del_init(&tnl_info->sync_node);

		/*
		 * mark tnl info as updating and release tnl info's spin lock
		 * since it is going to use dma to transfer data
		 * and might go to sleep
		 */
		tnl_info_sta_updating_no_tnl_lock(tnl_info);

		spin_unlock_irqrestore(&tnl_info->lock, flag);

		if (tnl_status & TNL_STA_INIT)
			ret = mtk_tops_tnl_sync_param_new(tnl_info, is_decap);
		else if (tnl_status & TNL_STA_DELETING)
			ret = mtk_tops_tnl_sync_param_delete(tnl_info);
		else
			ret = mtk_tops_tnl_sync_param_update(tnl_info,
							     is_decap,
							     false);

		if (ret)
			TOPS_ERR("sync tunnel parameter failed: %d\n", ret);

		continue;

next:
		spin_unlock_irqrestore(&tnl_info->lock, flag);
	}
}

static int tnl_sync_task(void *data)
{
	while (1) {
		wait_event_interruptible(tops_tnl.tnl_sync_wait,
					 (tops_tnl.has_tnl_to_sync && mtk_tops_mcu_alive())
					 || kthread_should_stop());

		if (kthread_should_stop())
			break;

		mtk_tops_tnl_sync_get_pending_queue();

		mtk_tops_tnl_sync_queue_proc();
	}

	return 0;
}

static void mtk_tops_tnl_info_flush_ppe(struct tops_tnl_info *tnl_info)
{
	struct foe_entry *entry;
	u32 max_entry;
	u32 ppe_id;
	u32 eidx;

	/* tnl info's lock should be held */
	lockdep_assert_held(&tnl_info->lock);

	/* clear all TOPS related PPE entries */
	for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
		max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
		for (eidx = 0; eidx < max_entry; eidx++) {
			entry = hnat_get_foe_entry(ppe_id, eidx);
			if (IS_ERR(entry))
				break;

			if (!entry_hnat_is_bound(entry))
				continue;

			tnl_flush_ppe_entry(entry, tnl_info->tnl_idx);
		}
	}
	hnat_cache_ebl(1);
	/* make sure all data is written to dram PPE table */
	wmb();
}
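
/*
 * Flushing the bound PPE entries sends traffic of the affected tunnel
 * back to the slow path, so no FoE entry keeps steering packets at a
 * tunnel index that is about to be deleted or re-synced.
 */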

void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		spin_lock(&tnl_info->lock);

		if (tnl_info->dev == ndev) {
			mtk_tops_tnl_info_flush_ppe(tnl_info);

			__mtk_tops_tnl_offload_disable(tnl_info);

			spin_unlock(&tnl_info->lock);

			break;
		}

		spin_unlock(&tnl_info->lock);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_flush(void)
{
	struct tops_tnl_info *tnl_info;
	struct foe_entry *entry;
	unsigned long flag;
	u32 max_entry;
	u32 ppe_id;
	u32 eidx;
	u32 bkt;

	/* clear all TOPS related PPE entries */
	for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
		max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
		for (eidx = 0; eidx < max_entry; eidx++) {
			entry = hnat_get_foe_entry(ppe_id, eidx);
			if (IS_ERR(entry))
				break;

			if (!entry_hnat_is_bound(entry))
				continue;

			tnl_flush_ppe_entry(entry, __TOPS_TUNNEL_TYPE_MAX);
		}
	}
	hnat_cache_ebl(1);
	/* make sure all data is written to dram PPE table */
	wmb();

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		/* clear all tunnel's synced parameters, but preserve cache */
		memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
		/*
		 * reset tnl_info status to TNL_STA_INIT state
		 * so that it can be added to TOPS again
		 */
		spin_lock(&tnl_info->lock);

		tnl_info_sta_init_no_tnl_lock(tnl_info);
		list_del_init(&tnl_info->sync_node);

		spin_unlock(&tnl_info->lock);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_recover(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist)
		mtk_tops_tnl_info_submit(tnl_info);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

int mtk_tops_tnl_offload_init(struct platform_device *pdev)
{
	struct tops_tnl_info *tnl_info;
	int ret = 0;
	int i = 0;

	hash_init(tops_tnl.ht);

	tops_tnl.tnl_infos = devm_kzalloc(&pdev->dev,
					  sizeof(struct tops_tnl_info) * CONFIG_TOPS_TNL_NUM,
					  GFP_KERNEL);
	if (!tops_tnl.tnl_infos)
		return -ENOMEM;

	for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
		tnl_info = &tops_tnl.tnl_infos[i];
		tnl_info->tnl_idx = i;
		tnl_info->status = TNL_STA_UNINIT;
		INIT_HLIST_NODE(&tnl_info->hlist);
		INIT_LIST_HEAD(&tnl_info->sync_node);
		spin_lock_init(&tnl_info->lock);
	}

	ret = register_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
	if (ret) {
		TOPS_ERR("tnl offload recv dev register failed: %d\n",
			 ret);
		return ret;
	}

	init_completion(&tops_tnl.dma_done);
	init_waitqueue_head(&tops_tnl.tnl_sync_wait);

	tops_tnl.tnl_sync_thread = kthread_run(tnl_sync_task, NULL,
					       "tnl sync param task");
	if (IS_ERR(tops_tnl.tnl_sync_thread)) {
		TOPS_ERR("tnl sync thread create failed\n");
		ret = -ENOMEM;
		goto unregister_mbox;
	}

	mtk_tnl_encap_offload = mtk_tops_tnl_encap_offload;
	mtk_tnl_decap_offload = mtk_tops_tnl_decap_offload;
	mtk_tnl_decap_offloadable = mtk_tops_tnl_decap_offloadable;
	mtk_get_tnl_dev = mtk_tops_get_tnl_dev;

	tops_tnl.tnl_sync_submit = &tnl_sync_q1;
	tops_tnl.tnl_sync_pending = &tnl_sync_q2;
	spin_lock_init(&tops_tnl.tnl_sync_lock);
	spin_lock_init(&tops_tnl.tbl_lock);

	return 0;

unregister_mbox:
	unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);

	return ret;
}

void mtk_tops_tnl_offload_pce_clean_up(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		mtk_tops_tnl_info_flush_ppe(tnl_info);

		mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);

		mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_deinit(struct platform_device *pdev)
{
	mtk_tnl_encap_offload = NULL;
	mtk_tnl_decap_offload = NULL;
	mtk_tnl_decap_offloadable = NULL;
	mtk_get_tnl_dev = NULL;

	kthread_stop(tops_tnl.tnl_sync_thread);

	mtk_tops_tnl_offload_pce_clean_up();

	unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
}

int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev)
{
	mtk_tops_gretap_init();

	mtk_tops_l2tpv2_init();

	return 0;
}

void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev)
{
	mtk_tops_gretap_deinit();

	mtk_tops_l2tpv2_deinit();
}

struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name)
{
	enum tops_tunnel_type tnl_proto_type = TOPS_TUNNEL_NONE + 1;
	struct tops_tnl_type *tnl_type;

	if (unlikely(!name))
		return ERR_PTR(-EPERM);

	for (; tnl_proto_type < __TOPS_TUNNEL_TYPE_MAX; tnl_proto_type++) {
		tnl_type = tops_tnl.offload_tnl_types[tnl_proto_type];
		if (tnl_type && !strcmp(name, tnl_type->type_name))
			return tnl_type;
	}

	return NULL;
}

int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type)
{
	enum tops_tunnel_type tnl_proto_type;

	if (unlikely(!tnl_type))
		return -EINVAL;

	tnl_proto_type = tnl_type->tnl_proto_type;

	if (unlikely(tnl_proto_type == TOPS_TUNNEL_NONE
		     || tnl_proto_type >= __TOPS_TUNNEL_TYPE_MAX)) {
		TOPS_ERR("invalid tnl_proto_type: %u\n", tnl_proto_type);
		return -EINVAL;
	}

	if (tops_tnl.offload_tnl_types[tnl_proto_type]) {
		TOPS_ERR("offload tnl type is already registered: %u\n",
			 tnl_proto_type);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&tnl_type->tcls_head);
	tops_tnl.offload_tnl_types[tnl_proto_type] = tnl_type;
	tops_tnl.offload_tnl_type_num++;

	return 0;
}

void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type)
{
	enum tops_tunnel_type tnl_proto_type;

	if (unlikely(!tnl_type))
		return;

	tnl_proto_type = tnl_type->tnl_proto_type;

	if (unlikely(tnl_proto_type == TOPS_TUNNEL_NONE
		     || tnl_proto_type >= __TOPS_TUNNEL_TYPE_MAX)) {
		TOPS_ERR("invalid tnl_proto_type: %u\n", tnl_proto_type);
		return;
	}

	if (tops_tnl.offload_tnl_types[tnl_proto_type] != tnl_type) {
		TOPS_ERR("offload tnl type is registered by others\n");
		return;
	}

	tops_tnl.offload_tnl_types[tnl_proto_type] = NULL;
	tops_tnl.offload_tnl_type_num--;
}
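
/*
 * A minimal sketch (not part of this driver) of how a protocol module is
 * expected to plug into the register/unregister API above. The callback
 * and field names follow struct tops_tnl_type as used in this file; the
 * "my_proto" identifiers and the TOPS_TUNNEL_MYPROTO enum value are
 * purely illustrative.
 *
 *	static struct tops_tnl_type my_proto_type = {
 *		.type_name = "my_proto",
 *		.tnl_proto_type = TOPS_TUNNEL_MYPROTO,
 *		.cls_entry_setup = my_proto_cls_entry_setup,
 *		.tnl_decap_param_setup = my_proto_decap_param_setup,
 *		.tnl_encap_param_setup = my_proto_encap_param_setup,
 *		.tnl_param_match = my_proto_param_match,
 *		.tnl_decap_offloadable = my_proto_decap_offloadable,
 *		.use_multi_cls = false,
 *		.has_inner_eth = false,
 *	};
 *
 *	int mtk_tops_my_proto_init(void)
 *	{
 *		return mtk_tops_tnl_type_register(&my_proto_type);
 *	}
 *
 *	void mtk_tops_my_proto_deinit(void)
 *	{
 *		mtk_tops_tnl_type_unregister(&my_proto_type);
 *	}
 */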