1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
6 */
7
8#include <linux/completion.h>
9#include <linux/device.h>
10#include <linux/dmaengine.h>
11#include <linux/dma-mapping.h>
12#include <linux/err.h>
13#include <linux/hashtable.h>
14#include <linux/if_ether.h>
15#include <linux/ip.h>
16#include <linux/kthread.h>
17#include <linux/list.h>
18#include <linux/lockdep.h>
19#include <linux/string.h>
20
21#include <mtk_eth_soc.h>
22#include <mtk_hnat/hnat.h>
23#include <mtk_hnat/nf_hnat_mtk.h>
24
25#include <pce/dipfilter.h>
26#include <pce/pce.h>
27
28#include "internal.h"
29#include "mbox.h"
30#include "mcu.h"
31#include "netsys.h"
32#include "protocol/gre/gretap.h"
33#include "protocol/l2tp/udp_l2tp_data.h"
34#include "tunnel.h"
35
36#define TOPS_PPE_ENTRY_BUCKETS (64)
37#define TOPS_PPE_ENTRY_BUCKETS_BIT (6)
38
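/*
 * driver-wide tunnel offload state: the registered tunnel types, the tunnel
 * info table with its hash index and usage bitmap, the parameter-sync thread
 * with its submit/pending queues, and the DMA channel used to copy tunnel
 * parameters into TOPS memory
 */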
39struct tops_tnl {
40 /* tunnel types */
41 struct tops_tnl_type *offload_tnl_types[__TOPS_ENTRY_MAX];
42 u32 offload_tnl_type_num;
43 u32 tnl_base_addr;
44
45 /* tunnel table */
46 DECLARE_HASHTABLE(ht, CONFIG_TOPS_TNL_MAP_BIT);
47 DECLARE_BITMAP(tnl_used, CONFIG_TOPS_TNL_NUM);
48 wait_queue_head_t tnl_sync_wait;
49 spinlock_t tnl_sync_lock;
50 spinlock_t tbl_lock;
51 bool has_tnl_to_sync;
52 struct task_struct *tnl_sync_thread;
53 struct list_head *tnl_sync_pending;
54 struct list_head *tnl_sync_submit;
55 struct tops_tnl_info *tnl_infos;
56
57 /* dma request */
58 struct completion dma_done;
59 struct dma_chan *dmachan;
60
61 struct device *dev;
62};
63
64static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
65 struct mailbox_msg *msg);
66
67static struct tops_tnl tops_tnl;
68
69static LIST_HEAD(tnl_sync_q1);
70static LIST_HEAD(tnl_sync_q2);
71
72struct mailbox_dev tnl_offload_mbox_recv =
73 MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, tnl_offload_mbox_cmd_recv);
74
75/* tunnel mailbox communication */
76static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
77 struct mailbox_msg *msg)
78{
79 switch (msg->msg1) {
80 case TOPS_TNL_START_ADDR_SYNC:
81 tops_tnl.tnl_base_addr = msg->msg2;
82
83 return MBOX_NO_RET_MSG;
84 default:
85 break;
86 }
87
88 return MBOX_NO_RET_MSG;
89}
90
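/*
 * clear a PPE FOE entry if it is bound to the given TOPS tunnel index;
 * passing tnl_idx == __TOPS_ENTRY_MAX flushes entries bound to any tunnel
 */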
91static inline void tnl_flush_ppe_entry(struct foe_entry *entry, u32 tnl_idx)
92{
93 u32 bind_tnl_idx;
94
95 if (unlikely(!entry))
96 return;
97
98 switch (entry->bfib1.pkt_type) {
99 case IPV4_HNAPT:
100 if (entry->ipv4_hnapt.tport_id != NR_TDMA_TPORT
101 && entry->ipv4_hnapt.tport_id != NR_TDMA_QDMA_TPORT)
102 return;
103
104 bind_tnl_idx = entry->ipv4_hnapt.tops_entry - __TOPS_ENTRY_MAX;
105
106 break;
107 default:
108 return;
109 }
110
111 /* unexpected tunnel index */
112 if (bind_tnl_idx >= __TOPS_ENTRY_MAX)
113 return;
114
115 if (tnl_idx == __TOPS_ENTRY_MAX || tnl_idx == bind_tnl_idx)
116 memset(entry, 0, sizeof(*entry));
117}
118
119static inline void skb_set_tops_tnl_idx(struct sk_buff *skb, u32 tnl_idx)
120{
121 skb_hnat_tops(skb) = tnl_idx + __TOPS_ENTRY_MAX;
122}
123
124static inline bool skb_tops_valid(struct sk_buff *skb)
125{
126 return (skb && skb_hnat_tops(skb) <= __TOPS_ENTRY_MAX);
127}
128
129static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
130{
131 enum tops_entry_type tops_entry = skb_hnat_tops(skb);
132 struct tops_tnl_type *tnl_type;
133
134 if (unlikely(!tops_entry || tops_entry >= __TOPS_ENTRY_MAX))
135 return ERR_PTR(-EINVAL);
136
137 tnl_type = tops_tnl.offload_tnl_types[tops_entry];
138
139 return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
140}
141
142static inline void skb_mark_unbind(struct sk_buff *skb)
143{
144 skb_hnat_tops(skb) = 0;
145 skb_hnat_is_decap(skb) = 0;
146 skb_hnat_alg(skb) = 1;
147}
148
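/* hash a tunnel by the XOR of its outer source and destination IPs */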
149static inline u32 tnl_params_hash(struct tops_tnl_params *tnl_params)
150{
151 if (!tnl_params)
152 return 0;
153
154 /* TODO: check collision possibility? */
155 return (tnl_params->sip ^ tnl_params->dip);
156}
157
158static inline bool tnl_info_decap_is_enable(struct tops_tnl_info *tnl_info)
159{
160 return tnl_info->cache.flag & TNL_DECAP_ENABLE;
161}
162
163static inline void tnl_info_decap_enable(struct tops_tnl_info *tnl_info)
164{
165 tnl_info->cache.flag |= TNL_DECAP_ENABLE;
166}
167
168static inline void tnl_info_decap_disable(struct tops_tnl_info *tnl_info)
169{
170 tnl_info->cache.flag &= ~(TNL_DECAP_ENABLE);
171}
172
173static inline bool tnl_info_encap_is_enable(struct tops_tnl_info *tnl_info)
174{
175 return tnl_info->cache.flag & TNL_ENCAP_ENABLE;
176}
177
178static inline void tnl_info_encap_enable(struct tops_tnl_info *tnl_info)
179{
180 tnl_info->cache.flag |= TNL_ENCAP_ENABLE;
181}
182
183static inline void tnl_info_encap_disable(struct tops_tnl_info *tnl_info)
184{
185 tnl_info->cache.flag &= ~(TNL_ENCAP_ENABLE);
186}
187
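/*
 * tunnel status helpers
 *
 * a tunnel normally moves through TNL_STA_UNINIT -> TNL_STA_INIT ->
 * TNL_STA_QUEUED -> TNL_STA_UPDATING -> TNL_STA_UPDATED (or TNL_STA_DELETING
 * on removal); the *_no_tnl_lock variants expect tnl_info->lock to be held
 */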
188static inline void tnl_info_sta_updated_no_tnl_lock(struct tops_tnl_info *tnl_info)
189{
190 tnl_info->status &= (~TNL_STA_UPDATING);
191 tnl_info->status &= (~TNL_STA_INIT);
192 tnl_info->status |= TNL_STA_UPDATED;
193}
194
195static inline void tnl_info_sta_updated(struct tops_tnl_info *tnl_info)
196{
197 unsigned long flag = 0;
198
199 if (unlikely(!tnl_info))
200 return;
201
202 spin_lock_irqsave(&tnl_info->lock, flag);
203
204 tnl_info_sta_updated_no_tnl_lock(tnl_info);
205
206 spin_unlock_irqrestore(&tnl_info->lock, flag);
207}
208
209static inline bool tnl_info_sta_is_updated(struct tops_tnl_info *tnl_info)
210{
211 return tnl_info->status & TNL_STA_UPDATED;
212}
213
214static inline void tnl_info_sta_updating_no_tnl_lock(struct tops_tnl_info *tnl_info)
215{
216 tnl_info->status |= TNL_STA_UPDATING;
217 tnl_info->status &= (~TNL_STA_QUEUED);
218 tnl_info->status &= (~TNL_STA_UPDATED);
219}
220
221static inline void tnl_info_sta_updating(struct tops_tnl_info *tnl_info)
222{
223 unsigned long flag = 0;
224
225 if (unlikely(!tnl_info))
226 return;
227
228 spin_lock_irqsave(&tnl_info->lock, flag);
229
230 tnl_info_sta_updating_no_tnl_lock(tnl_info);
231
232 spin_unlock_irqrestore(&tnl_info->lock, flag);
233}
234
235static inline bool tnl_info_sta_is_updating(struct tops_tnl_info *tnl_info)
236{
237 return tnl_info->status & TNL_STA_UPDATING;
238}
239
240static inline void tnl_info_sta_queued_no_tnl_lock(struct tops_tnl_info *tnl_info)
241{
242 tnl_info->status |= TNL_STA_QUEUED;
243 tnl_info->status &= (~TNL_STA_UPDATED);
244}
245
246static inline void tnl_info_sta_queued(struct tops_tnl_info *tnl_info)
247{
248 unsigned long flag = 0;
249
250 if (unlikely(!tnl_info))
251 return;
252
253 spin_lock_irqsave(&tnl_info->lock, flag);
254
255 tnl_info_sta_queued_no_tnl_lock(tnl_info);
256
257 spin_unlock_irqrestore(&tnl_info->lock, flag);
258}
259
260static inline bool tnl_info_sta_is_queued(struct tops_tnl_info *tnl_info)
261{
262 return tnl_info->status & TNL_STA_QUEUED;
263}
264
265static inline void tnl_info_sta_init_no_tnl_lock(struct tops_tnl_info *tnl_info)
266{
267 tnl_info->status = TNL_STA_INIT;
268}
269
270static inline void tnl_info_sta_init(struct tops_tnl_info *tnl_info)
271{
272 unsigned long flag = 0;
273
274 if (unlikely(!tnl_info))
275 return;
276
277 spin_lock_irqsave(&tnl_info->lock, flag);
278
279 tnl_info_sta_init_no_tnl_lock(tnl_info);
280
281 spin_unlock_irqrestore(&tnl_info->lock, flag);
282}
283
284static inline bool tnl_info_sta_is_init(struct tops_tnl_info *tnl_info)
285{
286 return tnl_info->status & TNL_STA_INIT;
287}
288
289static inline void tnl_info_sta_uninit_no_tnl_lock(struct tops_tnl_info *tnl_info)
290{
291 tnl_info->status = TNL_STA_UNINIT;
292}
293
294static inline void tnl_info_sta_uninit(struct tops_tnl_info *tnl_info)
295{
296 unsigned long flag = 0;
297
298 if (unlikely(!tnl_info))
299 return;
300
301 spin_lock_irqsave(&tnl_info->lock, flag);
302
303 tnl_info_sta_uninit_no_tnl_lock(tnl_info);
304
305 spin_unlock_irqrestore(&tnl_info->lock, flag);
306}
307
308static inline bool tnl_info_sta_is_uninit(struct tops_tnl_info *tnl_info)
309{
310 return tnl_info->status & TNL_STA_UNINIT;
311}
312
313static inline void tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
314{
315 unsigned long flag = 0;
316
317 spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
318
319 list_add_tail(&tnl_info->sync_node, tops_tnl.tnl_sync_submit);
320
321 tops_tnl.has_tnl_to_sync = true;
322
323 spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
324
325 if (mtk_tops_mcu_alive())
326 wake_up_interruptible(&tops_tnl.tnl_sync_wait);
327}
328
329static void mtk_tops_tnl_info_cls_update_idx(struct tops_tnl_info *tnl_info)
330{
331 unsigned long flag;
332
333 tnl_info->tnl_params.cls_entry = tnl_info->tcls->cls->idx;
334 TOPS_NOTICE("cls entry: %u\n", tnl_info->tcls->cls->idx);
335
336 spin_lock_irqsave(&tnl_info->lock, flag);
337 tnl_info->cache.cls_entry = tnl_info->tcls->cls->idx;
338 spin_unlock_irqrestore(&tnl_info->lock, flag);
339}
340
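/*
 * drop tnl_info's reference to its CLS entry; once the last reference is
 * gone, the HW CLS entry is cleared, written back and freed
 */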
341static void mtk_tops_tnl_info_cls_entry_unprepare(struct tops_tnl_info *tnl_info)
342{
343 struct tops_cls_entry *tcls = tnl_info->tcls;
344
345 pr_notice("cls entry unprepare\n");
346 tnl_info->tcls = NULL;
347
348 if (refcount_dec_and_test(&tcls->refcnt)) {
349 pr_notice("cls entry delete\n");
350 list_del(&tcls->node);
351
352 memset(&tcls->cls->cdesc, 0, sizeof(tcls->cls->cdesc));
353
354 mtk_pce_cls_entry_write(tcls->cls);
355
356 mtk_pce_cls_entry_free(tcls->cls);
357
358 devm_kfree(tops_dev, tcls);
359 }
360}
361
362static struct tops_cls_entry *
363mtk_tops_tnl_info_cls_entry_prepare(struct tops_tnl_info *tnl_info)
364{
365 struct tops_cls_entry *tcls;
366 int ret;
367
368 tcls = devm_kzalloc(tops_dev, sizeof(struct tops_cls_entry), GFP_KERNEL);
369 if (!tcls)
370 return ERR_PTR(-ENOMEM);
371
372 tcls->cls = mtk_pce_cls_entry_alloc();
373 if (IS_ERR(tcls->cls)) {
374 ret = PTR_ERR(tcls->cls);
375 goto free_tcls;
376 }
377
378 INIT_LIST_HEAD(&tcls->node);
379 list_add_tail(&tcls->node, &tnl_info->tnl_type->tcls_head);
380
381 tnl_info->tcls = tcls;
382 refcount_set(&tcls->refcnt, 1);
383
384 return tcls;
385
386free_tcls:
387 devm_kfree(tops_dev, tcls);
388
389 return ERR_PTR(ret);
390}
391
392static int mtk_tops_tnl_info_cls_entry_write(struct tops_tnl_info *tnl_info)
393{
394 int ret;
395
396 if (!tnl_info->tcls)
397 return -EINVAL;
398
399 ret = mtk_pce_cls_entry_write(tnl_info->tcls->cls);
400 if (ret) {
401 mtk_tops_tnl_info_cls_entry_unprepare(tnl_info);
402 return ret;
403 }
404
405 tnl_info->tcls->updated = true;
406
407 mtk_tops_tnl_info_cls_update_idx(tnl_info);
408
409 return 0;
410}
411
412static int mtk_tops_tnl_info_cls_tear_down(struct tops_tnl_info *tnl_info)
413{
414 mtk_tops_tnl_info_cls_entry_unprepare(tnl_info);
415
416 return 0;
417}
418
419/*
420 * check whether the cls entry is already updated, for tunnel protocols
421 * that only use 1 CLS HW entry
422 *
423 * since only the tunnel sync task operates on the tcls linked list,
424 * it is safe to access it without holding a lock
425 *
426 * return true if already updated, false if an update is still needed
427 */
428static bool mtk_tops_tnl_info_cls_single_is_updated(struct tops_tnl_info *tnl_info,
429 struct tops_tnl_type *tnl_type)
430{
431 /*
432 * check whether tnl_type has already allocated a tops_cls_entry;
433 * if not, return false so that a new one will be allocated
434 */
435 if (list_empty(&tnl_type->tcls_head))
436 return false;
437
438 /*
439 * if tnl_info is not yet associated with tnl_type's cls entry,
440 * take a reference to that tops_cls_entry
441 */
442 if (!tnl_info->tcls) {
443 tnl_info->tcls = list_first_entry(&tnl_type->tcls_head,
444 struct tops_cls_entry,
445 node);
446
447 refcount_inc(&tnl_info->tcls->refcnt);
448 mtk_tops_tnl_info_cls_update_idx(tnl_info);
449 }
450
451 return tnl_info->tcls->updated;
452}
453
454static int mtk_tops_tnl_info_cls_single_setup(struct tops_tnl_info *tnl_info,
455 struct tops_tnl_type *tnl_type)
456{
457 struct tops_cls_entry *tcls;
458 int ret;
459
460 if (mtk_tops_tnl_info_cls_single_is_updated(tnl_info, tnl_type))
461 return 0;
462
463 if (tnl_info->tcls)
464 return mtk_tops_tnl_info_cls_entry_write(tnl_info);
465
466 tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info);
467 if (IS_ERR(tcls))
468 return PTR_ERR(tcls);
469
470 ret = tnl_type->cls_entry_setup(tnl_info, &tcls->cls->cdesc);
471 if (ret) {
472 TOPS_ERR("tops cls entry setup failed: %d\n", ret);
473 mtk_tops_tnl_info_cls_entry_unprepare(tnl_info);
474 return ret;
475 }
476
477 return mtk_tops_tnl_info_cls_entry_write(tnl_info);
478}
479
480static struct tops_cls_entry *
481mtk_tops_tnl_info_cls_entry_find(struct tops_tnl_type *tnl_type,
482 struct cls_desc *cdesc)
483{
484 struct tops_cls_entry *tcls;
485
486 list_for_each_entry(tcls, &tnl_type->tcls_head, node)
487 if (!memcmp(&tcls->cls->cdesc, cdesc, sizeof(struct cls_desc)))
488 return tcls;
489
490 return NULL;
491}
492
493static bool mtk_tops_tnl_info_cls_multi_is_updated(struct tops_tnl_info *tnl_info,
494 struct tops_tnl_type *tnl_type,
495 struct cls_desc *cdesc)
496{
497 struct tops_cls_entry *tcls;
498
499 if (list_empty(&tnl_type->tcls_head))
500 return false;
501
502 if (tnl_info->tcls) {
503 if (!memcmp(cdesc, &tnl_info->tcls->cls->cdesc, sizeof(*cdesc)))
504 return tnl_info->tcls->updated;
505
506 memcpy(&tnl_info->tcls->cls->cdesc, cdesc, sizeof(*cdesc));
507 tnl_info->tcls->updated = false;
508 return false;
509 }
510
511 tcls = mtk_tops_tnl_info_cls_entry_find(tnl_type, cdesc);
512 if (!tcls)
513 return false;
514
515 tnl_info->tcls = tcls;
516 refcount_inc(&tnl_info->tcls->refcnt);
517 mtk_tops_tnl_info_cls_update_idx(tnl_info);
518
519 return tcls->updated;
520}
521
522static int mtk_tops_tnl_info_cls_multi_setup(struct tops_tnl_info *tnl_info,
523 struct tops_tnl_type *tnl_type)
524{
525 struct tops_cls_entry *tcls;
526 struct cls_desc cdesc;
527 int ret;
528
529 memset(&cdesc, 0, sizeof(struct cls_desc));
530
531 /* prepare cls_desc from tnl_type */
532 ret = tnl_type->cls_entry_setup(tnl_info, &cdesc);
533 if (ret) {
534 TOPS_ERR("tops cls entry setup failed: %d\n", ret);
535 return ret;
536 }
537
538 /*
539 * check whether cdesc is already updated; if tnl_info is not associated
540 * with a tcls but we find a tcls whose cls desc content equals cdesc,
541 * tnl_info will be associated with that tcls
542 *
543 * we only go past this if condition when
544 * a tcls is not yet updated or
545 * tnl_info is not yet associated with a tcls
546 */
547 if (mtk_tops_tnl_info_cls_multi_is_updated(tnl_info, tnl_type, &cdesc))
548 return 0;
549
550 /* tcls is not yet updated, update this tcls */
551 if (tnl_info->tcls)
552 return mtk_tops_tnl_info_cls_entry_write(tnl_info);
553
554 /* create a new tcls entry and associate with tnl_info */
555 tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info);
556 if (IS_ERR(tcls))
557 return PTR_ERR(tcls);
558
559 memcpy(&tcls->cls->cdesc, &cdesc, sizeof(struct cls_desc));
560
561 return mtk_tops_tnl_info_cls_entry_write(tnl_info);
562}
563
564static int mtk_tops_tnl_info_cls_setup(struct tops_tnl_info *tnl_info)
565{
566 struct tops_tnl_type *tnl_type;
567
568 if (tnl_info->tcls && tnl_info->tcls->updated)
569 return 0;
570
571 tnl_type = tnl_info->tnl_type;
572 if (!tnl_type)
573 return -EINVAL;
574
575 if (!tnl_type->use_multi_cls)
576 return mtk_tops_tnl_info_cls_single_setup(tnl_info, tnl_type);
577
578 return mtk_tops_tnl_info_cls_multi_setup(tnl_info, tnl_type);
579}
580
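/*
 * the PCE DIP filter entries below are keyed on the tunnel's outer source IP
 * (DIPFILTER_IPV4) and are added or removed together with the tunnel
 */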
581static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
582{
583 struct dip_desc dipd;
584
585 memset(&dipd, 0, sizeof(struct dip_desc));
586
587 dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
588 dipd.tag = DIPFILTER_IPV4;
589
590 return mtk_pce_dipfilter_entry_del(&dipd);
591}
592
593static int mtk_tops_tnl_info_dipfilter_setup(struct tops_tnl_info *tnl_info)
594{
595 struct dip_desc dipd;
596
597 /* setup dipfilter */
598 memset(&dipd, 0, sizeof(struct dip_desc));
599
600 dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
601 dipd.tag = DIPFILTER_IPV4;
602
603 return mtk_pce_dipfilter_entry_add(&dipd);
604}
605
606void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
607{
608 lockdep_assert_held(&tnl_info->lock);
609
610 if (tnl_info_sta_is_queued(tnl_info))
611 return;
612
613 tnl_info_submit_no_tnl_lock(tnl_info);
614
615 tnl_info_sta_queued_no_tnl_lock(tnl_info);
616}
617
618void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info)
619{
620 unsigned long flag = 0;
621
622 if (unlikely(!tnl_info))
623 return;
624
625 spin_lock_irqsave(&tnl_info->lock, flag);
626
627 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
628
629 spin_unlock_irqrestore(&tnl_info->lock, flag);
630}
631
632static void mtk_tops_tnl_info_hash_no_lock(struct tops_tnl_info *tnl_info)
633{
634 lockdep_assert_held(&tops_tnl.tbl_lock);
635 lockdep_assert_held(&tnl_info->lock);
636
637 if (hash_hashed(&tnl_info->hlist))
638 hash_del(&tnl_info->hlist);
639
640 hash_add(tops_tnl.ht, &tnl_info->hlist, tnl_params_hash(&tnl_info->cache));
641}
642
643void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info)
644{
645 unsigned long flag = 0;
646
647 if (unlikely(!tnl_info))
648 return;
649
650 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
651
652 spin_lock(&tnl_info->lock);
653
654 mtk_tops_tnl_info_hash_no_lock(tnl_info);
655
656 spin_unlock(&tnl_info->lock);
657
658 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
659}
660
661static bool mtk_tops_tnl_info_match(struct tops_tnl_type *tnl_type,
662 struct tops_tnl_info *tnl_info,
663 struct tops_tnl_params *match_data)
664{
665 unsigned long flag = 0;
666 bool match;
667
668 spin_lock_irqsave(&tnl_info->lock, flag);
669
670 match = tnl_type->tnl_info_match(&tnl_info->cache, match_data);
671
672 spin_unlock_irqrestore(&tnl_info->lock, flag);
673
674 return match;
675}
676
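/*
 * look up a tunnel matching tnl_params through the tunnel type's
 * tnl_info_match() callback; the caller must hold tops_tnl.tbl_lock and
 * ERR_PTR(-ENODEV) is returned when no such tunnel exists yet
 */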
677struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_params *tnl_params)
678{
679 struct tops_tnl_info *tnl_info;
680 struct tops_tnl_type *tnl_type;
681
682 lockdep_assert_held(&tops_tnl.tbl_lock);
683
684 if (unlikely(!tnl_params->tops_entry_proto
685 || tnl_params->tops_entry_proto >= __TOPS_ENTRY_MAX))
686 return ERR_PTR(-EINVAL);
687
688 tnl_type = tops_tnl.offload_tnl_types[tnl_params->tops_entry_proto];
689 if (unlikely(!tnl_type))
690 return ERR_PTR(-EINVAL);
691
692 if (unlikely(!tnl_type->tnl_info_match))
693 return ERR_PTR(-ENXIO);
694
695 hash_for_each_possible(tops_tnl.ht,
696 tnl_info,
697 hlist,
698 tnl_params_hash(tnl_params))
699 if (mtk_tops_tnl_info_match(tnl_type, tnl_info, tnl_params))
700 return tnl_info;
701
702 return ERR_PTR(-ENODEV);
703}
704
705/* tnl_info->lock should be held before calling this function */
706static int mtk_tops_tnl_info_setup(struct sk_buff *skb,
707 struct tops_tnl_info *tnl_info,
708 struct tops_tnl_params *tnl_params)
709{
710 if (unlikely(!skb || !tnl_info || !tnl_params))
711 return -EPERM;
712
713 lockdep_assert_held(&tnl_info->lock);
714
715 tnl_params->flag |= tnl_info->cache.flag;
716 tnl_params->cls_entry = tnl_info->cache.cls_entry;
717
718 if (memcmp(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params))) {
719 memcpy(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params));
720
721 mtk_tops_tnl_info_hash_no_lock(tnl_info);
722 }
723
724 if (skb_hnat_is_decap(skb)) {
725 /* the net_device is used to forward Rx packets to the decapsulated interface */
726 tnl_info->dev = skb->dev;
727 if (!tnl_info_decap_is_enable(tnl_info)) {
728 tnl_info_decap_enable(tnl_info);
729
730 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
731 }
732 } else if (skb_hnat_is_encap(skb)) {
733 /* set skb_hnat_tops(skb) to tunnel index for ppe binding */
734 skb_set_tops_tnl_idx(skb, tnl_info->tnl_idx);
735 if (!tnl_info_encap_is_enable(tnl_info)) {
736 tnl_info_encap_enable(tnl_info);
737
738 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
739 }
740 }
741
742 return 0;
743}
744
745/* tops_tnl.tbl_lock should be acquired before calling this function */
746static struct tops_tnl_info *
747mtk_tops_tnl_info_alloc_no_lock(struct tops_tnl_type *tnl_type)
748{
749 struct tops_tnl_info *tnl_info;
750 unsigned long flag = 0;
751 u32 tnl_idx;
752
753 lockdep_assert_held(&tops_tnl.tbl_lock);
754
755 tnl_idx = find_first_zero_bit(tops_tnl.tnl_used, CONFIG_TOPS_TNL_NUM);
756 if (tnl_idx == CONFIG_TOPS_TNL_NUM) {
757 TOPS_NOTICE("offload tunnel table full!\n");
758 return ERR_PTR(-ENOMEM);
759 }
760
761 /* occupy the free tunnel entry */
762 tnl_info = &tops_tnl.tnl_infos[tnl_idx];
763 memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
764 memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
765
766 /* TODO: maybe spin_lock_bh() is enough? */
767 spin_lock_irqsave(&tnl_info->lock, flag);
768
769 if (tnl_info_sta_is_init(tnl_info)) {
770 TOPS_ERR("error: fetched an initialized tunnel info\n");
771
772 spin_unlock_irqrestore(&tnl_info->lock, flag);
773
774 return ERR_PTR(-EBADF);
775 }
776 tnl_info_sta_init_no_tnl_lock(tnl_info);
777
778 tnl_info->tnl_type = tnl_type;
779
780 INIT_HLIST_NODE(&tnl_info->hlist);
781
782 spin_unlock_irqrestore(&tnl_info->lock, flag);
783
784 set_bit(tnl_idx, tops_tnl.tnl_used);
785
786 return tnl_info;
787}
788
789struct tops_tnl_info *mtk_tops_tnl_info_alloc(struct tops_tnl_type *tnl_type)
790{
791 struct tops_tnl_info *tnl_info;
792 unsigned long flag = 0;
793
794 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
795
796 tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);
797
798 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
799
800 return tnl_info;
801}
802
803static void mtk_tops_tnl_info_free_no_lock(struct tops_tnl_info *tnl_info)
804{
805 if (unlikely(!tnl_info))
806 return;
807
808 lockdep_assert_held(&tops_tnl.tbl_lock);
809 lockdep_assert_held(&tnl_info->lock);
810
811 hash_del(&tnl_info->hlist);
812
813 tnl_info_sta_uninit_no_tnl_lock(tnl_info);
814
815 clear_bit(tnl_info->tnl_idx, tops_tnl.tnl_used);
816}
817
818static void mtk_tops_tnl_info_free(struct tops_tnl_info *tnl_info)
819{
820 unsigned long flag = 0;
821
822 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
823
824 spin_lock(&tnl_info->lock);
825
826 mtk_tops_tnl_info_free_no_lock(tnl_info);
827
828 spin_unlock(&tnl_info->lock);
829
830 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
831}
832
833static void __mtk_tops_tnl_offload_disable(struct tops_tnl_info *tnl_info)
834{
835 tnl_info->status |= TNL_STA_DELETING;
836 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
837}
838
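/*
 * find or allocate the tops_tnl_info matching tnl_params and program it from
 * the skb: decap records the ingress net_device and enables decap offload,
 * encap tags the skb with the tunnel index so the PPE entry can bind to it
 */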
839static int mtk_tops_tnl_offload(struct sk_buff *skb,
840 struct tops_tnl_type *tnl_type,
841 struct tops_tnl_params *tnl_params)
842{
843 struct tops_tnl_info *tnl_info;
844 unsigned long flag;
845 int ret = 0;
846
847 if (unlikely(!tnl_params))
848 return -EPERM;
849
850 /* prepare tnl_info */
851 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
852
853 tnl_info = mtk_tops_tnl_info_find(tnl_params);
854 if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) != -ENODEV) {
855 /* error */
856 ret = PTR_ERR(tnl_info);
857 goto err_out;
858 } else if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) == -ENODEV) {
859 /* not allocate yet */
860 tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);
861 }
862
863 if (IS_ERR(tnl_info)) {
864 ret = PTR_ERR(tnl_info);
865 TOPS_DBG("tnl offload alloc tnl_info failed: %d\n", ret);
866 goto err_out;
867 }
868
869 spin_lock(&tnl_info->lock);
870 ret = mtk_tops_tnl_info_setup(skb, tnl_info, tnl_params);
871 spin_unlock(&tnl_info->lock);
872
873err_out:
874 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
875
876 return ret;
877}
878
879static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
880{
881 struct tops_tnl_type *tnl_type;
882 struct ethhdr *eth;
883 u32 cnt;
884 u32 i;
885
886 if (unlikely(!mtk_tops_mcu_alive())) {
887 skb_mark_unbind(skb);
888 return false;
889 }
890
891 /* skb should not carry tops here */
892 if (skb_hnat_tops(skb))
893 return false;
894
895 eth = eth_hdr(skb);
896
897 /* TODO: currently decap only supports ethernet IPv4 */
898 if (ntohs(eth->h_proto) != ETH_P_IP)
899 return false;
900
901 /* TODO: this loop could be optimized */
902 for (i = TOPS_ENTRY_GRETAP, cnt = 0;
903 i < __TOPS_ENTRY_MAX && cnt < tops_tnl.offload_tnl_type_num;
904 i++) {
905 tnl_type = tops_tnl.offload_tnl_types[i];
906 if (unlikely(!tnl_type))
907 continue;
908
909 cnt++;
910 if (tnl_type->tnl_decap_offloadable
911 && tnl_type->tnl_decap_offloadable(skb)) {
912 skb_hnat_tops(skb) = tnl_type->tops_entry;
913 return true;
914 }
915 }
916
917 return false;
918}
919
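/*
 * offload a decapsulation flow: let the tunnel type parse the outer headers
 * into tops_tnl_params (temporarily pushing the removed inner ethernet header
 * back when needed) and hand the result over to mtk_tops_tnl_offload()
 */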
920static int mtk_tops_tnl_decap_offload(struct sk_buff *skb)
921{
922 struct tops_tnl_params tnl_params;
923 struct tops_tnl_type *tnl_type;
924 int ret;
925
926 if (unlikely(!mtk_tops_mcu_alive())) {
927 skb_mark_unbind(skb);
928 return -EAGAIN;
929 }
930
931 if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_decap(skb))) {
932 skb_mark_unbind(skb);
933 return -EINVAL;
934 }
935
936 tnl_type = skb_to_tnl_type(skb);
937 if (IS_ERR(tnl_type)) {
938 skb_mark_unbind(skb);
939 return PTR_ERR(tnl_type);
940 }
941
942 if (unlikely(!tnl_type->tnl_decap_param_setup)) {
943 skb_mark_unbind(skb);
944 return -ENODEV;
945 }
946
947 memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
948
949 /* push removed ethernet header back first */
950 if (tnl_type->has_inner_eth)
951 skb_push(skb, sizeof(struct ethhdr));
952
953 ret = tnl_type->tnl_decap_param_setup(skb, &tnl_params);
954
955 /* pull ethernet header to restore skb->data to ip start */
956 if (tnl_type->has_inner_eth)
957 skb_pull(skb, sizeof(struct ethhdr));
958
959 if (unlikely(ret)) {
960 skb_mark_unbind(skb);
961 return ret;
962 }
963
964 tnl_params.tops_entry_proto = tnl_type->tops_entry;
965
966 ret = mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
967
968 /*
969 * whether offloading the decapsulation tunnel succeeds or fails,
970 * skb_hnat_tops(skb) must be cleared to avoid mtk_tnl_decap_offload()
971 * getting called again
972 */
973 skb_hnat_tops(skb) = 0;
974 skb_hnat_is_decap(skb) = 0;
975
976 return ret;
977}
978
979static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
980{
981 struct tops_tnl_params tnl_params;
982 struct tops_tnl_type *tnl_type;
983 int ret;
984
985 if (unlikely(!mtk_tops_mcu_alive())) {
986 skb_mark_unbind(skb);
987 return -EAGAIN;
988 }
989
990 if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_encap(skb)))
991 return -EPERM;
992
993 tnl_type = skb_to_tnl_type(skb);
994 if (IS_ERR(tnl_type))
995 return PTR_ERR(tnl_type);
996
997 if (unlikely(!tnl_type->tnl_encap_param_setup))
998 return -ENODEV;
999
1000 memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
1001
1002 ret = tnl_type->tnl_encap_param_setup(skb, &tnl_params);
1003 if (unlikely(ret))
1004 return ret;
1005 tnl_params.tops_entry_proto = tnl_type->tops_entry;
1006
1007 return mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
1008}
1009
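/* map a CRSN tunnel id reported by hardware to the tunnel's recorded net_device */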
1010static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
1011{
1012 if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
1013 return ERR_PTR(-EINVAL);
1014
1015 tnl_idx = tnl_idx - TOPS_CRSN_TNL_ID_START;
1016
1017 return tops_tnl.tnl_infos[tnl_idx].dev;
1018}
1019
1020static void mtk_tops_tnl_sync_dma_done(void *param)
1021{
1022 /* TODO: check tx status with dmaengine_tx_status()? */
1023 complete(&tops_tnl.dma_done);
1024}
1025
1026static void mtk_tops_tnl_sync_dma_start(void *param)
1027{
1028 dma_async_issue_pending(tops_tnl.dmachan);
1029
1030 wait_for_completion(&tops_tnl.dma_done);
1031}
1032
1033static void mtk_tops_tnl_sync_dma_unprepare(struct tops_tnl_info *tnl_info,
1034 dma_addr_t *addr)
1035{
1036 dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
1037 DMA_TO_DEVICE);
1038
1039 dma_release_channel(tops_tnl.dmachan);
1040}
1041
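/*
 * map tnl_info->tnl_params for DMA and prepare a memcpy descriptor targeting
 * this tunnel's slot in TOPS memory (tnl_base_addr + tnl_idx * param size);
 * the transfer itself is issued later by mtk_tops_tnl_sync_dma_start()
 */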
1042static int mtk_tops_tnl_sync_dma_prepare(struct tops_tnl_info *tnl_info,
1043 dma_addr_t *addr)
1044{
1045 u32 tnl_addr = tops_tnl.tnl_base_addr;
1046 struct dma_async_tx_descriptor *desc;
1047 dma_cookie_t cookie;
1048 int ret;
1049
1050 if (!tnl_info)
1051 return -EPERM;
1052
1053 tnl_addr += tnl_info->tnl_idx * sizeof(struct tops_tnl_params);
1054
1055 tops_tnl.dmachan = dma_request_slave_channel(tops_dev, "tnl-sync");
1056 if (!tops_tnl.dmachan) {
1057 TOPS_ERR("request dma channel failed\n");
1058 return -ENODEV;
1059 }
1060
1061 *addr = dma_map_single(tops_dev,
1062 &tnl_info->tnl_params,
1063 sizeof(struct tops_tnl_params),
1064 DMA_TO_DEVICE);
1065 if (dma_mapping_error(tops_dev, *addr)) {
1066 ret = -ENOMEM;
1067 goto dma_release;
1068 }
1069
1070 desc = dmaengine_prep_dma_memcpy(tops_tnl.dmachan,
1071 (dma_addr_t)tnl_addr, *addr,
1072 sizeof(struct tops_tnl_params),
1073 0);
1074 if (!desc) {
1075 ret = -EBUSY;
1076 goto dma_unmap;
1077 }
1078
1079 desc->callback = mtk_tops_tnl_sync_dma_done;
1080
1081 cookie = dmaengine_submit(desc);
1082 ret = dma_submit_error(cookie);
1083 if (ret)
1084 goto dma_terminate;
1085
1086 reinit_completion(&tops_tnl.dma_done);
1087
1088 return ret;
1089
1090dma_terminate:
1091 dmaengine_terminate_all(tops_tnl.dmachan);
1092
1093dma_unmap:
1094 dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
1095 DMA_TO_DEVICE);
1096
1097dma_release:
1098 dma_release_channel(tops_tnl.dmachan);
1099
1100 return ret;
1101}
1102
1103static int __mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
1104{
1105 struct mcu_ctrl_cmd mcmd;
1106 dma_addr_t addr;
1107 int ret;
1108
1109 mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
1110 mcmd.arg[0] = TUNNEL_CTRL_EVENT_DEL;
1111 mcmd.arg[1] = tnl_info->tnl_idx;
1112 mcmd.core_mask = CORE_TOPS_MASK;
1113
1114 ret = mtk_tops_mcu_stall(&mcmd, NULL, NULL);
1115 if (ret) {
1116 TOPS_ERR("tnl sync deletion notify mcu failed: %d\n", ret);
1117 return ret;
1118 }
1119
1120 /* there shouldn't be any other reference to tnl_info right now */
1121 memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
1122 memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
1123
1124 ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
1125 if (ret) {
1126 TOPS_ERR("tnl sync deletion prepare dma request failed: %d\n", ret);
1127 return ret;
1128 }
1129
1130 mtk_tops_tnl_sync_dma_start(NULL);
1131
1132 mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
1133
1134 return ret;
1135}
1136
1137static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
1138{
1139 int ret;
1140
1141 ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
1142 if (ret) {
1143 TOPS_ERR("tnl sync dipfilter tear down failed: %d\n",
1144 ret);
1145 return ret;
1146 }
1147
1148 ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
1149 if (ret) {
1150 TOPS_ERR("tnl sync deletion failed: %d\n", ret);
1151 return ret;
1152 }
1153
1154 ret = mtk_tops_tnl_info_cls_tear_down(tnl_info);
1155 if (ret) {
1156 TOPS_ERR("tnl sync cls tear down failed: %d\n",
1157 ret);
1158 return ret;
1159 }
1160
1161 mtk_tops_tnl_info_free(tnl_info);
1162
1163 return ret;
1164}
1165
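/*
 * push new or updated tunnel parameters to the TOPS cores: stall them with a
 * TUNNEL_CTRL_EVENT_NEW or TUNNEL_CTRL_EVENT_DIP_UPDATE event and run the
 * parameter DMA as part of the stall sequence
 */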
1166static int __mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
1167 bool is_new_tnl)
1168{
1169 struct mcu_ctrl_cmd mcmd;
1170 dma_addr_t addr;
1171 int ret;
1172
1173 mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
1174 mcmd.arg[1] = tnl_info->tnl_idx;
1175 mcmd.core_mask = CORE_TOPS_MASK;
1176
1177 if (is_new_tnl)
1178 mcmd.arg[0] = TUNNEL_CTRL_EVENT_NEW;
1179 else
1180 mcmd.arg[0] = TUNNEL_CTRL_EVENT_DIP_UPDATE;
1181
1182 ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
1183 if (ret) {
1184 TOPS_ERR("tnl sync update prepare dma request failed: %d\n", ret);
1185 return ret;
1186 }
1187
1188 ret = mtk_tops_mcu_stall(&mcmd, mtk_tops_tnl_sync_dma_start, NULL);
1189 if (ret)
1190 TOPS_ERR("tnl sync update notify mcu failed: %d\n", ret);
1191
1192 mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
1193
1194 return ret;
1195}
1196
1197static int mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
1198 bool setup_pce, bool is_new_tnl)
1199{
1200 int ret;
1201
1202 if (setup_pce) {
1203 ret = mtk_tops_tnl_info_cls_setup(tnl_info);
1204 if (ret) {
1205 TOPS_ERR("tnl cls setup failed: %d\n", ret);
1206 return ret;
1207 }
1208 }
1209
1210 ret = __mtk_tops_tnl_sync_param_update(tnl_info, is_new_tnl);
1211 if (ret) {
1212 TOPS_ERR("tnl sync failed: %d\n", ret);
1213 goto cls_tear_down;
1214 }
1215
1216 tnl_info_sta_updated(tnl_info);
1217
1218 if (setup_pce) {
1219 ret = mtk_tops_tnl_info_dipfilter_setup(tnl_info);
1220 if (ret) {
1221 TOPS_ERR("tnl dipfilter setup failed: %d\n", ret);
1222 /* TODO: should undo parameter sync */
1223 return ret;
1224 }
1225 }
1226
1227 return ret;
1228
1229cls_tear_down:
1230 mtk_tops_tnl_info_cls_tear_down(tnl_info);
1231
1232 return ret;
1233}
1234
1235static inline int mtk_tops_tnl_sync_param_new(struct tops_tnl_info *tnl_info,
1236 bool setup_pce)
1237{
1238 return mtk_tops_tnl_sync_param_update(tnl_info, setup_pce, true);
1239}
1240
1241static void mtk_tops_tnl_sync_get_pending_queue(void)
1242{
1243 struct list_head *tmp = tops_tnl.tnl_sync_submit;
1244 unsigned long flag = 0;
1245
1246 spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
1247
1248 tops_tnl.tnl_sync_submit = tops_tnl.tnl_sync_pending;
1249 tops_tnl.tnl_sync_pending = tmp;
1250
1251 tops_tnl.has_tnl_to_sync = false;
1252
1253 spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
1254}
1255
1256static void mtk_tops_tnl_sync_queue_proc(void)
1257{
1258 struct tops_tnl_info *tnl_info;
1259 struct tops_tnl_info *tmp;
1260 unsigned long flag = 0;
1261 bool is_decap = false;
1262 u32 tnl_status = 0;
1263 int ret;
1264
1265 list_for_each_entry_safe(tnl_info,
1266 tmp,
1267 tops_tnl.tnl_sync_pending,
1268 sync_node) {
1269 spin_lock_irqsave(&tnl_info->lock, flag);
1270
1271 /* tnl update is on the fly, queue tnl to next round */
1272 if (tnl_info_sta_is_updating(tnl_info)) {
1273 list_del_init(&tnl_info->sync_node);
1274
1275 tnl_info_submit_no_tnl_lock(tnl_info);
1276
1277 goto next;
1278 }
1279
1280 /*
1281 * if tnl_info is not queued, something is wrong;
1282 * just remove that tnl_info from the queue
1283 * (maybe trigger a BUG_ON()?)
1284 */
1285 if (!tnl_info_sta_is_queued(tnl_info)) {
1286 list_del_init(&tnl_info->sync_node);
1287 goto next;
1288 }
1289
1290 is_decap = (!(tnl_info->tnl_params.flag & TNL_DECAP_ENABLE)
1291 && tnl_info_decap_is_enable(tnl_info));
1292
1293 tnl_status = tnl_info->status;
1294 memcpy(&tnl_info->tnl_params, &tnl_info->cache,
1295 sizeof(struct tops_tnl_params));
1296
1297 list_del_init(&tnl_info->sync_node);
1298
1299 /*
1300 * mark tnl_info as updating and release its spin lock,
1301 * since the code below uses dma to transfer data
1302 * and might sleep
1303 */
1304 tnl_info_sta_updating_no_tnl_lock(tnl_info);
1305
1306 spin_unlock_irqrestore(&tnl_info->lock, flag);
1307
1308 if (tnl_status & TNL_STA_INIT)
1309 ret = mtk_tops_tnl_sync_param_new(tnl_info, is_decap);
1310 else if (tnl_status & TNL_STA_DELETING)
1311 ret = mtk_tops_tnl_sync_param_delete(tnl_info);
1312 else
1313 ret = mtk_tops_tnl_sync_param_update(tnl_info,
1314 is_decap,
1315 false);
1316
1317 if (ret)
1318 TOPS_ERR("sync tunnel parameter failed: %d\n", ret);
1319
1320 continue;
1321
1322next:
1323 spin_unlock_irqrestore(&tnl_info->lock, flag);
1324 }
1325}
1326
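/*
 * tunnel sync kthread: wait until tunnels are queued and the MCU is alive,
 * swap the submit/pending queues, then sync every pending tunnel
 */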
1327static int tnl_sync_task(void *data)
1328{
1329 while (1) {
1330 wait_event_interruptible(tops_tnl.tnl_sync_wait,
1331 (tops_tnl.has_tnl_to_sync && mtk_tops_mcu_alive())
1332 || kthread_should_stop());
1333
1334 if (kthread_should_stop())
1335 break;
1336
1337 mtk_tops_tnl_sync_get_pending_queue();
1338
1339 mtk_tops_tnl_sync_queue_proc();
1340 }
1341
1342 return 0;
1343}
1344
1345static void mtk_tops_tnl_info_flush_ppe(struct tops_tnl_info *tnl_info)
1346{
1347 struct foe_entry *entry;
1348 u32 max_entry;
1349 u32 ppe_id;
1350 u32 eidx;
1351
1352 /* tnl info's lock should be held */
1353 lockdep_assert_held(&tnl_info->lock);
1354
1355 /* clear all TOPS related PPE entries */
1356 for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
1357 max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
1358 for (eidx = 0; eidx < max_entry; eidx++) {
1359 entry = hnat_get_foe_entry(ppe_id, eidx);
1360 if (IS_ERR(entry))
1361 break;
1362
1363 if (!entry_hnat_is_bound(entry))
1364 continue;
1365
1366 tnl_flush_ppe_entry(entry, tnl_info->tnl_idx);
1367 }
1368 }
1369 hnat_cache_ebl(1);
1370 /* make sure all data is written to dram PPE table */
1371 wmb();
1372}
1373
1374void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev)
1375{
1376 struct tops_tnl_info *tnl_info;
1377 unsigned long flag;
1378 u32 bkt;
1379
1380 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1381
1382 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
1383 spin_lock(&tnl_info->lock);
1384
1385 if (tnl_info->dev == ndev) {
1386 mtk_tops_tnl_info_flush_ppe(tnl_info);
1387
1388 __mtk_tops_tnl_offload_disable(tnl_info);
1389
1390 spin_unlock(&tnl_info->lock);
1391
1392 break;
1393 }
1394
1395 spin_unlock(&tnl_info->lock);
1396 }
1397
1398 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1399}
1400
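/*
 * flush every TOPS-bound PPE entry and reset all tunnels to TNL_STA_INIT;
 * synced parameters are cleared but the cached parameters are preserved
 */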
1401void mtk_tops_tnl_offload_flush(void)
1402{
1403 struct tops_tnl_info *tnl_info;
1404 struct foe_entry *entry;
1405 unsigned long flag;
1406 u32 max_entry;
1407 u32 ppe_id;
1408 u32 eidx;
1409 u32 bkt;
1410
1411 /* clear all TOPS related PPE entries */
1412 for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
1413 max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
1414 for (eidx = 0; eidx < max_entry; eidx++) {
1415 entry = hnat_get_foe_entry(ppe_id, eidx);
1416 if (IS_ERR(entry))
1417 break;
1418
1419 if (!entry_hnat_is_bound(entry))
1420 continue;
1421
1422 tnl_flush_ppe_entry(entry, __TOPS_ENTRY_MAX);
1423 }
1424 }
1425 hnat_cache_ebl(1);
1426 /* make sure all data is written to dram PPE table */
1427 wmb();
1428
1429 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1430
1431 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
1432 /* clear all tunnel's synced parameters, but preserve cache */
1433 memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
1434 /*
1435 * reset the tnl_info status to TNL_STA_INIT
1436 * so that the tunnel can be added to TOPS again
1437 */
1438 spin_lock(&tnl_info->lock);
1439
1440 tnl_info_sta_init_no_tnl_lock(tnl_info);
1441 list_del_init(&tnl_info->sync_node);
1442
1443 spin_unlock(&tnl_info->lock);
1444 }
1445
1446 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1447}
1448
1449void mtk_tops_tnl_offload_recover(void)
1450{
1451 struct tops_tnl_info *tnl_info;
1452 unsigned long flag;
1453 u32 bkt;
1454
1455 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1456
1457 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist)
1458 mtk_tops_tnl_info_submit(tnl_info);
1459
1460 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1461}
1462
1463int mtk_tops_tnl_offload_init(struct platform_device *pdev)
1464{
1465 struct tops_tnl_info *tnl_info;
1466 int ret = 0;
1467 int i = 0;
1468
1469 hash_init(tops_tnl.ht);
1470
1471 tops_tnl.tnl_infos = devm_kzalloc(&pdev->dev,
1472 sizeof(struct tops_tnl_info) * CONFIG_TOPS_TNL_NUM,
1473 GFP_KERNEL);
1474 if (!tops_tnl.tnl_infos)
1475 return -ENOMEM;
1476
1477 for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
1478 tnl_info = &tops_tnl.tnl_infos[i];
1479 tnl_info->tnl_idx = i;
1480 tnl_info->status = TNL_STA_UNINIT;
1481 INIT_HLIST_NODE(&tnl_info->hlist);
1482 INIT_LIST_HEAD(&tnl_info->sync_node);
1483 spin_lock_init(&tnl_info->lock);
1484 }
1485
1486 ret = register_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
1487 if (ret) {
1488 TOPS_ERR("tnl offload recv dev register failed: %d\n",
1489 ret);
1490 return ret;
1491 }
1492
1493 init_completion(&tops_tnl.dma_done);
1494 init_waitqueue_head(&tops_tnl.tnl_sync_wait);
1495
1496 tops_tnl.tnl_sync_thread = kthread_run(tnl_sync_task, NULL,
1497 "tnl sync param task");
1498 if (IS_ERR(tops_tnl.tnl_sync_thread)) {
1499 TOPS_ERR("tnl sync thread create failed\n");
1500 ret = -ENOMEM;
1501 goto unregister_mbox;
1502 }
1503
1504 mtk_tnl_encap_offload = mtk_tops_tnl_encap_offload;
1505 mtk_tnl_decap_offload = mtk_tops_tnl_decap_offload;
1506 mtk_tnl_decap_offloadable = mtk_tops_tnl_decap_offloadable;
1507 mtk_get_tnl_dev = mtk_tops_get_tnl_dev;
1508
1509 tops_tnl.tnl_sync_submit = &tnl_sync_q1;
1510 tops_tnl.tnl_sync_pending = &tnl_sync_q2;
1511 spin_lock_init(&tops_tnl.tnl_sync_lock);
1512 spin_lock_init(&tops_tnl.tbl_lock);
1513
1514 return 0;
1515
1516unregister_mbox:
1517 unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
1518
1519 return ret;
1520}
1521
1522void mtk_tops_tnl_offload_pce_clean_up(void)
1523{
1524 struct tops_tnl_info *tnl_info;
1525 unsigned long flag;
1526 u32 bkt;
1527
1528 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1529
1530 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
1531 mtk_tops_tnl_info_flush_ppe(tnl_info);
1532
1533 mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
1534
1535 mtk_tops_tnl_info_cls_tear_down(tnl_info);
1536 }
1537
1538 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1539}
1540
1541void mtk_tops_tnl_offload_deinit(struct platform_device *pdev)
1542{
1543 mtk_tnl_encap_offload = NULL;
1544 mtk_tnl_decap_offload = NULL;
1545 mtk_tnl_decap_offloadable = NULL;
1546 mtk_get_tnl_dev = NULL;
1547
1548 kthread_stop(tops_tnl.tnl_sync_thread);
1549
1550 mtk_tops_tnl_offload_pce_clean_up();
1551
1552 unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
1553}
1554
1555int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev)
1556{
1557 mtk_tops_gretap_init();
1558
1559 mtk_tops_udp_l2tp_data_init();
1560
1561 return 0;
1562}
1563
1564void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev)
1565{
1566 mtk_tops_gretap_deinit();
1567
1568 mtk_tops_udp_l2tp_data_deinit();
1569}
1570
1571struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name)
1572{
1573 enum tops_entry_type tops_entry = TOPS_ENTRY_NONE + 1;
1574 struct tops_tnl_type *tnl_type;
1575
1576 if (unlikely(!name))
1577 return ERR_PTR(-EPERM);
1578
1579 for (; tops_entry < __TOPS_ENTRY_MAX; tops_entry++) {
1580 tnl_type = tops_tnl.offload_tnl_types[tops_entry];
1581 if (tnl_type && !strcmp(name, tnl_type->type_name))
1582 return tnl_type;
1583 }
1584
1585 return ERR_PTR(-ENODEV);
1586}
1587
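/*
 * register a tunnel protocol with the tunnel offload core
 *
 * a minimal usage sketch follows; the field names are the ones this file
 * dereferences on struct tops_tnl_type, while the gretap_* callbacks are
 * purely illustrative placeholders:
 *
 *	static struct tops_tnl_type gretap_type = {
 *		.type_name = "gretap",
 *		.tops_entry = TOPS_ENTRY_GRETAP,
 *		.has_inner_eth = true,
 *		.tnl_decap_offloadable = gretap_decap_offloadable,
 *		.tnl_decap_param_setup = gretap_decap_param_setup,
 *		.tnl_encap_param_setup = gretap_encap_param_setup,
 *		.tnl_info_match = gretap_tnl_info_match,
 *		.cls_entry_setup = gretap_cls_entry_setup,
 *	};
 *
 *	err = mtk_tops_tnl_type_register(&gretap_type);
 */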
1588int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type)
1589{
1590 enum tops_entry_type tops_entry = tnl_type->tops_entry;
1591
1592 if (unlikely(tops_entry == TOPS_ENTRY_NONE
1593 || tops_entry >= __TOPS_ENTRY_MAX)) {
1594 TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
1595 return -EINVAL;
1596 }
1597
1598 if (unlikely(!tnl_type))
1599 return -EINVAL;
1600
1601 if (tops_tnl.offload_tnl_types[tops_entry]) {
1602 TOPS_ERR("offload tnl type is already registered: %u\n", tops_entry);
1603 return -EBUSY;
1604 }
1605
1606 INIT_LIST_HEAD(&tnl_type->tcls_head);
1607 tops_tnl.offload_tnl_types[tops_entry] = tnl_type;
1608 tops_tnl.offload_tnl_type_num++;
1609
1610 return 0;
1611}
1612
1613void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type)
1614{
1615 enum tops_entry_type tops_entry = tnl_type->tops_entry;
1616
1617 if (unlikely(tops_entry == TOPS_ENTRY_NONE
1618 || tops_entry >= __TOPS_ENTRY_MAX)) {
1619 TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
1620 return;
1621 }
1622
1623 if (unlikely(!tnl_type))
1624 return;
1625
1626 if (tops_tnl.offload_tnl_types[tops_entry] != tnl_type) {
1627 TOPS_ERR("offload tnl type is registered by others\n");
1628 return;
1629 }
1630
1631 tops_tnl.offload_tnl_types[tops_entry] = NULL;
1632 tops_tnl.offload_tnl_type_num--;
1633}