blob: bd37faa14ef2ebdfc849a9458f317e711925aefc [file] [log] [blame]
developere5e687d2023-08-08 16:05:33 +08001// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
4 *
5 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
6 */
7
8#include <linux/completion.h>
9#include <linux/device.h>
10#include <linux/dmaengine.h>
11#include <linux/dma-mapping.h>
12#include <linux/err.h>
13#include <linux/hashtable.h>
14#include <linux/if_ether.h>
15#include <linux/ip.h>
16#include <linux/kthread.h>
17#include <linux/list.h>
18#include <linux/lockdep.h>
19#include <linux/string.h>
20
21#include <mtk_eth_soc.h>
22#include <mtk_hnat/hnat.h>
23#include <mtk_hnat/nf_hnat_mtk.h>
24
developer84f378f2023-08-24 18:26:50 +080025#include <pce/cdrt.h>
26#include <pce/cls.h>
developere5e687d2023-08-08 16:05:33 +080027#include <pce/dipfilter.h>
28#include <pce/pce.h>
29
30#include "internal.h"
31#include "mbox.h"
32#include "mcu.h"
33#include "netsys.h"
34#include "protocol/gre/gretap.h"
35#include "protocol/l2tp/udp_l2tp_data.h"
36#include "tunnel.h"
37
38#define TOPS_PPE_ENTRY_BUCKETS (64)
39#define TOPS_PPE_ENTRY_BUCKETS_BIT (6)
40
41struct tops_tnl {
42 /* tunnel types */
43 struct tops_tnl_type *offload_tnl_types[__TOPS_ENTRY_MAX];
44 u32 offload_tnl_type_num;
45 u32 tnl_base_addr;
46
47 /* tunnel table */
48 DECLARE_HASHTABLE(ht, CONFIG_TOPS_TNL_MAP_BIT);
49 DECLARE_BITMAP(tnl_used, CONFIG_TOPS_TNL_NUM);
50 wait_queue_head_t tnl_sync_wait;
51 spinlock_t tnl_sync_lock;
52 spinlock_t tbl_lock;
53 bool has_tnl_to_sync;
54 struct task_struct *tnl_sync_thread;
55 struct list_head *tnl_sync_pending;
56 struct list_head *tnl_sync_submit;
57 struct tops_tnl_info *tnl_infos;
58
59 /* dma request */
60 struct completion dma_done;
61 struct dma_chan *dmachan;
62
63 struct device *dev;
64};
65
66static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
67 struct mailbox_msg *msg);
68
69static struct tops_tnl tops_tnl;
70
71static LIST_HEAD(tnl_sync_q1);
72static LIST_HEAD(tnl_sync_q2);
73
74struct mailbox_dev tnl_offload_mbox_recv =
75 MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, tnl_offload_mbox_cmd_recv);
76
77/* tunnel mailbox communication */
78static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
79 struct mailbox_msg *msg)
80{
81 switch (msg->msg1) {
82 case TOPS_TNL_START_ADDR_SYNC:
83 tops_tnl.tnl_base_addr = msg->msg2;
84
85 return MBOX_NO_RET_MSG;
86 default:
87 break;
88 }
89
90 return MBOX_NO_RET_MSG;
91}
92
93static inline void tnl_flush_ppe_entry(struct foe_entry *entry, u32 tnl_idx)
94{
95 u32 bind_tnl_idx;
96
97 if (unlikely(!entry))
98 return;
99
100 switch (entry->bfib1.pkt_type) {
101 case IPV4_HNAPT:
102 if (entry->ipv4_hnapt.tport_id != NR_TDMA_TPORT
103 && entry->ipv4_hnapt.tport_id != NR_TDMA_QDMA_TPORT)
104 return;
105
106 bind_tnl_idx = entry->ipv4_hnapt.tops_entry - __TOPS_ENTRY_MAX;
107
108 break;
109 default:
110 return;
111 }
112
113 /* unexpected tunnel index */
114 if (bind_tnl_idx >= __TOPS_ENTRY_MAX)
115 return;
116
117 if (tnl_idx == __TOPS_ENTRY_MAX || tnl_idx == bind_tnl_idx)
118 memset(entry, 0, sizeof(*entry));
119}
120
121static inline void skb_set_tops_tnl_idx(struct sk_buff *skb, u32 tnl_idx)
122{
123 skb_hnat_tops(skb) = tnl_idx + __TOPS_ENTRY_MAX;
124}
125
126static inline bool skb_tops_valid(struct sk_buff *skb)
127{
developer84f378f2023-08-24 18:26:50 +0800128 return (skb && skb_hnat_tops(skb) < __TOPS_ENTRY_MAX);
developere5e687d2023-08-08 16:05:33 +0800129}
130
131static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
132{
133 enum tops_entry_type tops_entry = skb_hnat_tops(skb);
134 struct tops_tnl_type *tnl_type;
135
136 if (unlikely(!tops_entry || tops_entry >= __TOPS_ENTRY_MAX))
137 return ERR_PTR(-EINVAL);
138
139 tnl_type = tops_tnl.offload_tnl_types[tops_entry];
140
141 return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
142}
143
developer84f378f2023-08-24 18:26:50 +0800144static inline struct tops_tnl_info *skb_to_tnl_info(struct sk_buff *skb)
145{
146 u32 tnl_idx = skb_hnat_tops(skb) - __TOPS_ENTRY_MAX;
147
148 if (tnl_idx >= CONFIG_TOPS_TNL_NUM)
149 return ERR_PTR(-EINVAL);
150
151 if (!test_bit(tnl_idx, tops_tnl.tnl_used))
152 return ERR_PTR(-EACCES);
153
154 return &tops_tnl.tnl_infos[tnl_idx];
155}
156
developere5e687d2023-08-08 16:05:33 +0800157static inline void skb_mark_unbind(struct sk_buff *skb)
158{
159 skb_hnat_tops(skb) = 0;
160 skb_hnat_is_decap(skb) = 0;
161 skb_hnat_alg(skb) = 1;
162}
163
164static inline u32 tnl_params_hash(struct tops_tnl_params *tnl_params)
165{
166 if (!tnl_params)
167 return 0;
168
169 /* TODO: check collision possibility? */
170 return (tnl_params->sip ^ tnl_params->dip);
171}
172
173static inline bool tnl_info_decap_is_enable(struct tops_tnl_info *tnl_info)
174{
175 return tnl_info->cache.flag & TNL_DECAP_ENABLE;
176}
177
178static inline void tnl_info_decap_enable(struct tops_tnl_info *tnl_info)
179{
180 tnl_info->cache.flag |= TNL_DECAP_ENABLE;
181}
182
183static inline void tnl_info_decap_disable(struct tops_tnl_info *tnl_info)
184{
185 tnl_info->cache.flag &= ~(TNL_DECAP_ENABLE);
186}
187
188static inline bool tnl_info_encap_is_enable(struct tops_tnl_info *tnl_info)
189{
190 return tnl_info->cache.flag & TNL_ENCAP_ENABLE;
191}
192
193static inline void tnl_info_encap_enable(struct tops_tnl_info *tnl_info)
194{
195 tnl_info->cache.flag |= TNL_ENCAP_ENABLE;
196}
197
198static inline void tnl_info_encap_disable(struct tops_tnl_info *tnl_info)
199{
200 tnl_info->cache.flag &= ~(TNL_ENCAP_ENABLE);
201}
202
203static inline void tnl_info_sta_updated_no_tnl_lock(struct tops_tnl_info *tnl_info)
204{
205 tnl_info->status &= (~TNL_STA_UPDATING);
206 tnl_info->status &= (~TNL_STA_INIT);
207 tnl_info->status |= TNL_STA_UPDATED;
208}
209
210static inline void tnl_info_sta_updated(struct tops_tnl_info *tnl_info)
211{
212 unsigned long flag = 0;
213
214 if (unlikely(!tnl_info))
215 return;
216
217 spin_lock_irqsave(&tnl_info->lock, flag);
218
219 tnl_info_sta_updated_no_tnl_lock(tnl_info);
220
221 spin_unlock_irqrestore(&tnl_info->lock, flag);
222}
223
224static inline bool tnl_info_sta_is_updated(struct tops_tnl_info *tnl_info)
225{
226 return tnl_info->status & TNL_STA_UPDATED;
227}
228
229static inline void tnl_info_sta_updating_no_tnl_lock(struct tops_tnl_info *tnl_info)
230{
231 tnl_info->status |= TNL_STA_UPDATING;
232 tnl_info->status &= (~TNL_STA_QUEUED);
233 tnl_info->status &= (~TNL_STA_UPDATED);
234}
235
236static inline void tnl_info_sta_updating(struct tops_tnl_info *tnl_info)
237{
238 unsigned long flag = 0;
239
240 if (unlikely(!tnl_info))
241 return;
242
243 spin_lock_irqsave(&tnl_info->lock, flag);
244
245 tnl_info_sta_updating_no_tnl_lock(tnl_info);
246
247 spin_unlock_irqrestore(&tnl_info->lock, flag);
248}
249
250static inline bool tnl_info_sta_is_updating(struct tops_tnl_info *tnl_info)
251{
252 return tnl_info->status & TNL_STA_UPDATING;
253}
254
255static inline void tnl_info_sta_queued_no_tnl_lock(struct tops_tnl_info *tnl_info)
256{
257 tnl_info->status |= TNL_STA_QUEUED;
258 tnl_info->status &= (~TNL_STA_UPDATED);
259}
260
261static inline void tnl_info_sta_queued(struct tops_tnl_info *tnl_info)
262{
263 unsigned long flag = 0;
264
265 if (unlikely(!tnl_info))
266 return;
267
268 spin_lock_irqsave(&tnl_info->lock, flag);
269
270 tnl_info_sta_queued_no_tnl_lock(tnl_info);
271
272 spin_unlock_irqrestore(&tnl_info->lock, flag);
273}
274
275static inline bool tnl_info_sta_is_queued(struct tops_tnl_info *tnl_info)
276{
277 return tnl_info->status & TNL_STA_QUEUED;
278}
279
280static inline void tnl_info_sta_init_no_tnl_lock(struct tops_tnl_info *tnl_info)
281{
282 tnl_info->status = TNL_STA_INIT;
283}
284
285static inline void tnl_info_sta_init(struct tops_tnl_info *tnl_info)
286{
287 unsigned long flag = 0;
288
289 if (unlikely(!tnl_info))
290 return;
291
292 spin_lock_irqsave(&tnl_info->lock, flag);
293
294 tnl_info_sta_init_no_tnl_lock(tnl_info);
295
296 spin_unlock_irqrestore(&tnl_info->lock, flag);
297}
298
299static inline bool tnl_info_sta_is_init(struct tops_tnl_info *tnl_info)
300{
301 return tnl_info->status & TNL_STA_INIT;
302}
303
304static inline void tnl_info_sta_uninit_no_tnl_lock(struct tops_tnl_info *tnl_info)
305{
306 tnl_info->status = TNL_STA_UNINIT;
307}
308
309static inline void tnl_info_sta_uninit(struct tops_tnl_info *tnl_info)
310{
311 unsigned long flag = 0;
312
313 if (unlikely(!tnl_info))
314 return;
315
316 spin_lock_irqsave(&tnl_info->lock, flag);
317
318 tnl_info_sta_uninit_no_tnl_lock(tnl_info);
319
320 spin_unlock_irqrestore(&tnl_info->lock, flag);
321}
322
323static inline bool tnl_info_sta_is_uninit(struct tops_tnl_info *tnl_info)
324{
325 return tnl_info->status & TNL_STA_UNINIT;
326}
327
328static inline void tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
329{
330 unsigned long flag = 0;
331
332 spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
333
334 list_add_tail(&tnl_info->sync_node, tops_tnl.tnl_sync_submit);
335
336 tops_tnl.has_tnl_to_sync = true;
337
338 spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
339
340 if (mtk_tops_mcu_alive())
341 wake_up_interruptible(&tops_tnl.tnl_sync_wait);
342}
343
developer15ee46c2023-08-24 16:35:34 +0800344static void mtk_tops_tnl_info_cls_update_idx(struct tops_tnl_info *tnl_info)
345{
346 unsigned long flag;
347
348 tnl_info->tnl_params.cls_entry = tnl_info->tcls->cls->idx;
developer15ee46c2023-08-24 16:35:34 +0800349
350 spin_lock_irqsave(&tnl_info->lock, flag);
351 tnl_info->cache.cls_entry = tnl_info->tcls->cls->idx;
352 spin_unlock_irqrestore(&tnl_info->lock, flag);
353}
354
developer84f378f2023-08-24 18:26:50 +0800355static void mtk_tops_tnl_info_cls_entry_unprepare(struct tops_tnl_info *tnl_info,
356 struct tops_tnl_params *tnl_params)
developer15ee46c2023-08-24 16:35:34 +0800357{
358 struct tops_cls_entry *tcls = tnl_info->tcls;
359
developer15ee46c2023-08-24 16:35:34 +0800360 tnl_info->tcls = NULL;
361
362 if (refcount_dec_and_test(&tcls->refcnt)) {
developer15ee46c2023-08-24 16:35:34 +0800363 list_del(&tcls->node);
364
developer84f378f2023-08-24 18:26:50 +0800365 if (!tnl_params->cdrt)
366 memset(&tcls->cls->cdesc, 0, sizeof(tcls->cls->cdesc));
367 else
368 /*
369 * recover tport_ix to let match packets to
370 * go through EIP197 only
371 */
372 CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, 2);
developer15ee46c2023-08-24 16:35:34 +0800373
374 mtk_pce_cls_entry_write(tcls->cls);
375
376 mtk_pce_cls_entry_free(tcls->cls);
377
378 devm_kfree(tops_dev, tcls);
379 }
380}
381
382static struct tops_cls_entry *
developer84f378f2023-08-24 18:26:50 +0800383mtk_tops_tnl_info_cls_entry_prepare(struct tops_tnl_info *tnl_info,
384 struct tops_tnl_params *tnl_params)
developer15ee46c2023-08-24 16:35:34 +0800385{
386 struct tops_cls_entry *tcls;
387 int ret;
388
389 tcls = devm_kzalloc(tops_dev, sizeof(struct tops_cls_entry), GFP_KERNEL);
390 if (!tcls)
391 return ERR_PTR(-ENOMEM);
392
developer84f378f2023-08-24 18:26:50 +0800393 if (!tnl_params->cdrt) {
394 tcls->cls = mtk_pce_cls_entry_alloc();
395 if (IS_ERR(tcls->cls)) {
396 ret = PTR_ERR(tcls->cls);
397 goto free_tcls;
398 }
399 } else {
400 struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);
401
402 if (IS_ERR(cdrt)) {
403 ret = PTR_ERR(cdrt);
404 goto free_tcls;
405 }
406
407 tcls->cls = cdrt->cls;
developer15ee46c2023-08-24 16:35:34 +0800408 }
409
410 INIT_LIST_HEAD(&tcls->node);
411 list_add_tail(&tnl_info->tnl_type->tcls_head, &tcls->node);
412
413 tnl_info->tcls = tcls;
414 refcount_set(&tcls->refcnt, 1);
415
416 return tcls;
417
418free_tcls:
419 devm_kfree(tops_dev, tcls);
420
421 return ERR_PTR(ret);
422}
423
424static int mtk_tops_tnl_info_cls_entry_write(struct tops_tnl_info *tnl_info)
425{
426 int ret;
427
428 if (!tnl_info->tcls)
429 return -EINVAL;
430
431 ret = mtk_pce_cls_entry_write(tnl_info->tcls->cls);
developer84f378f2023-08-24 18:26:50 +0800432 if (ret)
developer15ee46c2023-08-24 16:35:34 +0800433 return ret;
developer15ee46c2023-08-24 16:35:34 +0800434
435 tnl_info->tcls->updated = true;
436
437 mtk_tops_tnl_info_cls_update_idx(tnl_info);
438
439 return 0;
440}
441
developer84f378f2023-08-24 18:26:50 +0800442static int mtk_tops_tnl_info_cls_tear_down(struct tops_tnl_info *tnl_info,
443 struct tops_tnl_params *tnl_params)
developer15ee46c2023-08-24 16:35:34 +0800444{
developer84f378f2023-08-24 18:26:50 +0800445 mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
developer15ee46c2023-08-24 16:35:34 +0800446
447 return 0;
448}
449
450/*
451 * check cls entry is updated for tunnel protocols that only use 1 CLS HW entry
452 *
453 * since only tunnel sync task will operate on tcls linked list,
454 * it is safe to access without lock
455 *
456 * return true on updated
457 * return false on need update
458 */
459static bool mtk_tops_tnl_info_cls_single_is_updated(struct tops_tnl_info *tnl_info,
460 struct tops_tnl_type *tnl_type)
461{
462 /*
463 * check tnl_type has already allocate a tops_cls_entry
464 * if not, return false to prepare to allocate a new one
465 */
466 if (list_empty(&tnl_type->tcls_head))
467 return false;
468
469 /*
470 * if tnl_info is not associate to tnl_type's cls entry,
471 * make a reference to tops_cls_entry
472 */
473 if (!tnl_info->tcls) {
474 tnl_info->tcls = list_first_entry(&tnl_type->tcls_head,
475 struct tops_cls_entry,
476 node);
477
478 refcount_inc(&tnl_info->tcls->refcnt);
479 mtk_tops_tnl_info_cls_update_idx(tnl_info);
480 }
481
482 return tnl_info->tcls->updated;
483}
484
485static int mtk_tops_tnl_info_cls_single_setup(struct tops_tnl_info *tnl_info,
developer84f378f2023-08-24 18:26:50 +0800486 struct tops_tnl_params *tnl_params,
developer15ee46c2023-08-24 16:35:34 +0800487 struct tops_tnl_type *tnl_type)
488{
489 struct tops_cls_entry *tcls;
490 int ret;
491
492 if (mtk_tops_tnl_info_cls_single_is_updated(tnl_info, tnl_type))
493 return 0;
494
495 if (tnl_info->tcls)
developer84f378f2023-08-24 18:26:50 +0800496 goto cls_entry_write;
developer15ee46c2023-08-24 16:35:34 +0800497
developer84f378f2023-08-24 18:26:50 +0800498 tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
developer15ee46c2023-08-24 16:35:34 +0800499 if (IS_ERR(tcls))
500 return PTR_ERR(tcls);
501
developer84f378f2023-08-24 18:26:50 +0800502 if (!tnl_params->cdrt) {
503 ret = tnl_type->cls_entry_setup(tnl_info, &tcls->cls->cdesc);
504 if (ret) {
505 TOPS_ERR("tops cls entry setup failed: %d\n", ret);
506 goto cls_entry_unprepare;
507 }
508 } else {
509 /*
510 * since CLS is already filled up with outer protocol rule
511 * we only update CLS tport here to let matched packet stop by TOPS
512 */
513 CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, 0x7);
developer15ee46c2023-08-24 16:35:34 +0800514 }
515
developer84f378f2023-08-24 18:26:50 +0800516cls_entry_write:
517 ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);
518
519cls_entry_unprepare:
520 if (ret)
521 mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
522
523 return ret;
developer15ee46c2023-08-24 16:35:34 +0800524}
525
526static struct tops_cls_entry *
527mtk_tops_tnl_info_cls_entry_find(struct tops_tnl_type *tnl_type,
528 struct cls_desc *cdesc)
529{
530 struct tops_cls_entry *tcls;
531
532 list_for_each_entry(tcls, &tnl_type->tcls_head, node)
533 if (!memcmp(&tcls->cls->cdesc, cdesc, sizeof(struct cls_desc)))
534 return tcls;
535
536 return NULL;
537}
538
developer84f378f2023-08-24 18:26:50 +0800539static bool mtk_tops_tnl_info_cls_multi_is_updated(struct tops_tnl_info *tnl_info,
developer15ee46c2023-08-24 16:35:34 +0800540 struct tops_tnl_type *tnl_type,
541 struct cls_desc *cdesc)
542{
543 struct tops_cls_entry *tcls;
544
545 if (list_empty(&tnl_type->tcls_head))
546 return false;
547
548 if (tnl_info->tcls) {
549 if (!memcmp(cdesc, &tnl_info->tcls->cls->cdesc, sizeof(*cdesc)))
550 return tnl_info->tcls->updated;
551
552 memcpy(&tnl_info->tcls->cls->cdesc, cdesc, sizeof(*cdesc));
553 tnl_info->tcls->updated = false;
554 return false;
555 }
556
557 tcls = mtk_tops_tnl_info_cls_entry_find(tnl_type, cdesc);
558 if (!tcls)
559 return false;
560
561 tnl_info->tcls = tcls;
562 refcount_inc(&tnl_info->tcls->refcnt);
563 mtk_tops_tnl_info_cls_update_idx(tnl_info);
564
565 return tcls->updated;
566}
567
568static int mtk_tops_tnl_info_cls_multi_setup(struct tops_tnl_info *tnl_info,
developer84f378f2023-08-24 18:26:50 +0800569 struct tops_tnl_params *tnl_params,
developer15ee46c2023-08-24 16:35:34 +0800570 struct tops_tnl_type *tnl_type)
571{
572 struct tops_cls_entry *tcls;
573 struct cls_desc cdesc;
developer84f378f2023-08-24 18:26:50 +0800574
developer15ee46c2023-08-24 16:35:34 +0800575 int ret;
576
developer84f378f2023-08-24 18:26:50 +0800577 if (!tnl_params->cdrt) {
578 memset(&cdesc, 0, sizeof(struct cls_desc));
developer15ee46c2023-08-24 16:35:34 +0800579
developer84f378f2023-08-24 18:26:50 +0800580 /* prepare cls_desc from tnl_type */
581 ret = tnl_type->cls_entry_setup(tnl_info, &cdesc);
582 if (ret) {
583 TOPS_ERR("tops cls entry setup failed: %d\n", ret);
584 return ret;
585 }
586 } else {
587 struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);
588
589 if (IS_ERR(cdrt)) {
590 TOPS_ERR("no cdrt idx: %u related CDRT found\n",
591 tnl_params->cdrt);
592 return ret;
593 }
594
595 memcpy(&cdesc, &cdrt->cls->cdesc, sizeof(struct cls_desc));
596
597 CLS_DESC_DATA(&cdesc, tport_idx, 0x7);
developer15ee46c2023-08-24 16:35:34 +0800598 }
599
600 /*
601 * check cdesc is already updated, if tnl_info is not associate with a
602 * tcls but we found a tcls has the same cls desc content as cdesc
603 * tnl_info will setup an association with that tcls
604 *
605 * we only go further to this if condition when
606 * a tcls is not yet updated or
607 * tnl_info is not yet associated to a tcls
608 */
developer84f378f2023-08-24 18:26:50 +0800609 if (mtk_tops_tnl_info_cls_multi_is_updated(tnl_info, tnl_type, &cdesc))
developer15ee46c2023-08-24 16:35:34 +0800610 return 0;
611
612 /* tcls is not yet updated, update this tcls */
613 if (tnl_info->tcls)
614 return mtk_tops_tnl_info_cls_entry_write(tnl_info);
615
616 /* create a new tcls entry and associate with tnl_info */
developer84f378f2023-08-24 18:26:50 +0800617 tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
developer15ee46c2023-08-24 16:35:34 +0800618 if (IS_ERR(tcls))
619 return PTR_ERR(tcls);
620
621 memcpy(&tcls->cls->cdesc, &cdesc, sizeof(struct cls_desc));
622
developer84f378f2023-08-24 18:26:50 +0800623 ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);
624 if (ret)
625 mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
626
627 return ret;
developer15ee46c2023-08-24 16:35:34 +0800628}
629
developer84f378f2023-08-24 18:26:50 +0800630static int mtk_tops_tnl_info_cls_setup(struct tops_tnl_info *tnl_info,
631 struct tops_tnl_params *tnl_params)
developer15ee46c2023-08-24 16:35:34 +0800632{
633 struct tops_tnl_type *tnl_type;
634
635 if (tnl_info->tcls && tnl_info->tcls->updated)
636 return 0;
637
638 tnl_type = tnl_info->tnl_type;
639 if (!tnl_type)
640 return -EINVAL;
641
642 if (!tnl_type->use_multi_cls)
developer84f378f2023-08-24 18:26:50 +0800643 return mtk_tops_tnl_info_cls_single_setup(tnl_info,
644 tnl_params,
645 tnl_type);
developer15ee46c2023-08-24 16:35:34 +0800646
developer84f378f2023-08-24 18:26:50 +0800647 return mtk_tops_tnl_info_cls_multi_setup(tnl_info, tnl_params, tnl_type);
developer15ee46c2023-08-24 16:35:34 +0800648}
649
developere5e687d2023-08-08 16:05:33 +0800650static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
651{
652 struct dip_desc dipd;
653
654 memset(&dipd, 0, sizeof(struct dip_desc));
655
656 dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
657 dipd.tag = DIPFILTER_IPV4;
658
659 return mtk_pce_dipfilter_entry_del(&dipd);
660}
661
662static int mtk_tops_tnl_info_dipfilter_setup(struct tops_tnl_info *tnl_info)
663{
664 struct dip_desc dipd;
665
666 /* setup dipfilter */
667 memset(&dipd, 0, sizeof(struct dip_desc));
668
669 dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
670 dipd.tag = DIPFILTER_IPV4;
671
672 return mtk_pce_dipfilter_entry_add(&dipd);
673}
674
675void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
676{
677 lockdep_assert_held(&tnl_info->lock);
678
679 if (tnl_info_sta_is_queued(tnl_info))
680 return;
681
682 tnl_info_submit_no_tnl_lock(tnl_info);
683
684 tnl_info_sta_queued_no_tnl_lock(tnl_info);
685}
686
687void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info)
688{
689 unsigned long flag = 0;
690
691 if (unlikely(!tnl_info))
692 return;
693
694 spin_lock_irqsave(&tnl_info->lock, flag);
695
696 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
697
698 spin_unlock_irqrestore(&tnl_info->lock, flag);
699}
700
701static void mtk_tops_tnl_info_hash_no_lock(struct tops_tnl_info *tnl_info)
702{
703 lockdep_assert_held(&tops_tnl.tbl_lock);
704 lockdep_assert_held(&tnl_info->lock);
705
706 if (hash_hashed(&tnl_info->hlist))
707 hash_del(&tnl_info->hlist);
708
709 hash_add(tops_tnl.ht, &tnl_info->hlist, tnl_params_hash(&tnl_info->cache));
710}
711
712void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info)
713{
714 unsigned long flag = 0;
715
716 if (unlikely(!tnl_info))
717 return;
718
719 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
720
721 spin_lock(&tnl_info->lock);
722
723 mtk_tops_tnl_info_hash_no_lock(tnl_info);
724
725 spin_unlock(&tnl_info->lock);
726
727 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
728}
729
730static bool mtk_tops_tnl_info_match(struct tops_tnl_type *tnl_type,
731 struct tops_tnl_info *tnl_info,
732 struct tops_tnl_params *match_data)
733{
734 unsigned long flag = 0;
735 bool match;
736
737 spin_lock_irqsave(&tnl_info->lock, flag);
738
739 match = tnl_type->tnl_info_match(&tnl_info->cache, match_data);
740
741 spin_unlock_irqrestore(&tnl_info->lock, flag);
742
743 return match;
744}
745
746struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_params *tnl_params)
747{
748 struct tops_tnl_info *tnl_info;
749 struct tops_tnl_type *tnl_type;
750
751 lockdep_assert_held(&tops_tnl.tbl_lock);
752
753 if (unlikely(!tnl_params->tops_entry_proto
754 || tnl_params->tops_entry_proto >= __TOPS_ENTRY_MAX))
755 return ERR_PTR(-EINVAL);
756
757 tnl_type = tops_tnl.offload_tnl_types[tnl_params->tops_entry_proto];
758 if (unlikely(!tnl_type))
759 return ERR_PTR(-EINVAL);
760
761 if (unlikely(!tnl_type->tnl_info_match))
762 return ERR_PTR(-ENXIO);
763
764 hash_for_each_possible(tops_tnl.ht,
765 tnl_info,
766 hlist,
767 tnl_params_hash(tnl_params))
768 if (mtk_tops_tnl_info_match(tnl_type, tnl_info, tnl_params))
769 return tnl_info;
770
771 return ERR_PTR(-ENODEV);
772}
773
774/* tnl_info->lock should be held before calling this function */
775static int mtk_tops_tnl_info_setup(struct sk_buff *skb,
776 struct tops_tnl_info *tnl_info,
777 struct tops_tnl_params *tnl_params)
778{
779 if (unlikely(!skb || !tnl_info || !tnl_params))
780 return -EPERM;
781
782 lockdep_assert_held(&tnl_info->lock);
783
developer84f378f2023-08-24 18:26:50 +0800784 /* manually preserve essential data among encapsulation and decapsulation */
developere5e687d2023-08-08 16:05:33 +0800785 tnl_params->flag |= tnl_info->cache.flag;
developer15ee46c2023-08-24 16:35:34 +0800786 tnl_params->cls_entry = tnl_info->cache.cls_entry;
developer84f378f2023-08-24 18:26:50 +0800787 if (tnl_info->cache.cdrt)
788 tnl_params->cdrt = tnl_info->cache.cdrt;
developere5e687d2023-08-08 16:05:33 +0800789
790 if (memcmp(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params))) {
791 memcpy(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params));
792
793 mtk_tops_tnl_info_hash_no_lock(tnl_info);
794 }
795
796 if (skb_hnat_is_decap(skb)) {
797 /* the net_device is used to forward pkt to decap'ed inf when Rx */
798 tnl_info->dev = skb->dev;
799 if (!tnl_info_decap_is_enable(tnl_info)) {
800 tnl_info_decap_enable(tnl_info);
801
802 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
803 }
804 } else if (skb_hnat_is_encap(skb)) {
805 /* set skb_hnat_tops(skb) to tunnel index for ppe binding */
806 skb_set_tops_tnl_idx(skb, tnl_info->tnl_idx);
807 if (!tnl_info_encap_is_enable(tnl_info)) {
808 tnl_info_encap_enable(tnl_info);
809
810 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
811 }
812 }
813
814 return 0;
815}
816
817/* tops_tnl.tbl_lock should be acquired before calling this functions */
developer15ee46c2023-08-24 16:35:34 +0800818static struct tops_tnl_info *
819mtk_tops_tnl_info_alloc_no_lock(struct tops_tnl_type *tnl_type)
developere5e687d2023-08-08 16:05:33 +0800820{
821 struct tops_tnl_info *tnl_info;
822 unsigned long flag = 0;
823 u32 tnl_idx;
824
825 lockdep_assert_held(&tops_tnl.tbl_lock);
826
827 tnl_idx = find_first_zero_bit(tops_tnl.tnl_used, CONFIG_TOPS_TNL_NUM);
828 if (tnl_idx == CONFIG_TOPS_TNL_NUM) {
829 TOPS_NOTICE("offload tunnel table full!\n");
830 return ERR_PTR(-ENOMEM);
831 }
832
833 /* occupy used tunnel */
834 tnl_info = &tops_tnl.tnl_infos[tnl_idx];
835 memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
836 memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
837
838 /* TODO: maybe spin_lock_bh() is enough? */
839 spin_lock_irqsave(&tnl_info->lock, flag);
840
841 if (tnl_info_sta_is_init(tnl_info)) {
842 TOPS_ERR("error: fetched an initialized tunnel info\n");
843
844 spin_unlock_irqrestore(&tnl_info->lock, flag);
845
846 return ERR_PTR(-EBADF);
847 }
848 tnl_info_sta_init_no_tnl_lock(tnl_info);
849
developer15ee46c2023-08-24 16:35:34 +0800850 tnl_info->tnl_type = tnl_type;
851
developere5e687d2023-08-08 16:05:33 +0800852 INIT_HLIST_NODE(&tnl_info->hlist);
853
854 spin_unlock_irqrestore(&tnl_info->lock, flag);
855
856 set_bit(tnl_idx, tops_tnl.tnl_used);
857
858 return tnl_info;
859}
860
developer15ee46c2023-08-24 16:35:34 +0800861struct tops_tnl_info *mtk_tops_tnl_info_alloc(struct tops_tnl_type *tnl_type)
developere5e687d2023-08-08 16:05:33 +0800862{
863 struct tops_tnl_info *tnl_info;
864 unsigned long flag = 0;
865
866 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
867
developer15ee46c2023-08-24 16:35:34 +0800868 tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);
developere5e687d2023-08-08 16:05:33 +0800869
870 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
871
872 return tnl_info;
873}
874
875static void mtk_tops_tnl_info_free_no_lock(struct tops_tnl_info *tnl_info)
876{
877 if (unlikely(!tnl_info))
878 return;
879
880 lockdep_assert_held(&tops_tnl.tbl_lock);
881 lockdep_assert_held(&tnl_info->lock);
882
883 hash_del(&tnl_info->hlist);
884
885 tnl_info_sta_uninit_no_tnl_lock(tnl_info);
886
887 clear_bit(tnl_info->tnl_idx, tops_tnl.tnl_used);
888}
889
890static void mtk_tops_tnl_info_free(struct tops_tnl_info *tnl_info)
891{
892 unsigned long flag = 0;
893
894 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
895
896 spin_lock(&tnl_info->lock);
897
898 mtk_tops_tnl_info_free_no_lock(tnl_info);
899
900 spin_unlock(&tnl_info->lock);
901
902 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
903}
904
905static void __mtk_tops_tnl_offload_disable(struct tops_tnl_info *tnl_info)
906{
907 tnl_info->status |= TNL_STA_DELETING;
908 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
909}
910
911static int mtk_tops_tnl_offload(struct sk_buff *skb,
developer15ee46c2023-08-24 16:35:34 +0800912 struct tops_tnl_type *tnl_type,
developere5e687d2023-08-08 16:05:33 +0800913 struct tops_tnl_params *tnl_params)
914{
915 struct tops_tnl_info *tnl_info;
916 unsigned long flag;
917 int ret = 0;
918
919 if (unlikely(!tnl_params))
920 return -EPERM;
921
922 /* prepare tnl_info */
923 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
924
925 tnl_info = mtk_tops_tnl_info_find(tnl_params);
926 if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) != -ENODEV) {
927 /* error */
928 ret = PTR_ERR(tnl_info);
929 goto err_out;
930 } else if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) == -ENODEV) {
931 /* not allocate yet */
developer15ee46c2023-08-24 16:35:34 +0800932 tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);
developere5e687d2023-08-08 16:05:33 +0800933 }
934
935 if (IS_ERR(tnl_info)) {
936 ret = PTR_ERR(tnl_info);
937 TOPS_DBG("tnl offload alloc tnl_info failed: %d\n", ret);
938 goto err_out;
939 }
940
941 spin_lock(&tnl_info->lock);
942 ret = mtk_tops_tnl_info_setup(skb, tnl_info, tnl_params);
developere5e687d2023-08-08 16:05:33 +0800943 spin_unlock(&tnl_info->lock);
944
945err_out:
946 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
947
948 return ret;
949}
950
developer84f378f2023-08-24 18:26:50 +0800951static int mtk_tops_tnl_l2_update(struct sk_buff *skb)
952{
953 struct tops_tnl_info *tnl_info = skb_to_tnl_info(skb);
954 struct tops_tnl_type *tnl_type;
955 unsigned long flag;
956 int ret;
957
958 if (IS_ERR(tnl_info))
959 return PTR_ERR(tnl_info);
960
961 tnl_type = tnl_info->tnl_type;
962 if (!tnl_type->tnl_l2_param_update)
963 return -ENODEV;
964
965 spin_lock_irqsave(&tnl_info->lock, flag);
966
967 ret = tnl_type->tnl_l2_param_update(skb, &tnl_info->cache);
968 /* tnl params need to be updated */
969 if (ret == 1) {
970 mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
971 ret = 0;
972 }
973
974 spin_unlock_irqrestore(&tnl_info->lock, flag);
975
976 return ret;
977}
978
developere5e687d2023-08-08 16:05:33 +0800979static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
980{
981 struct tops_tnl_type *tnl_type;
982 struct ethhdr *eth;
983 u32 cnt;
984 u32 i;
985
986 if (unlikely(!mtk_tops_mcu_alive())) {
987 skb_mark_unbind(skb);
988 return -EAGAIN;
989 }
990
991 /* skb should not carry tops here */
992 if (skb_hnat_tops(skb))
993 return false;
994
995 eth = eth_hdr(skb);
996
997 /* TODO: currently decap only support ethernet IPv4 */
998 if (ntohs(eth->h_proto) != ETH_P_IP)
999 return false;
1000
1001 /* TODO: may can be optimized */
1002 for (i = TOPS_ENTRY_GRETAP, cnt = 0;
1003 i < __TOPS_ENTRY_MAX && cnt < tops_tnl.offload_tnl_type_num;
1004 i++) {
1005 tnl_type = tops_tnl.offload_tnl_types[i];
1006 if (unlikely(!tnl_type))
1007 continue;
1008
1009 cnt++;
1010 if (tnl_type->tnl_decap_offloadable
1011 && tnl_type->tnl_decap_offloadable(skb)) {
1012 skb_hnat_tops(skb) = tnl_type->tops_entry;
1013 return true;
1014 }
1015 }
1016
1017 return false;
1018}
1019
1020static int mtk_tops_tnl_decap_offload(struct sk_buff *skb)
1021{
1022 struct tops_tnl_params tnl_params;
1023 struct tops_tnl_type *tnl_type;
1024 int ret;
1025
1026 if (unlikely(!mtk_tops_mcu_alive())) {
1027 skb_mark_unbind(skb);
1028 return -EAGAIN;
1029 }
1030
1031 if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_decap(skb))) {
1032 skb_mark_unbind(skb);
1033 return -EINVAL;
1034 }
1035
1036 tnl_type = skb_to_tnl_type(skb);
1037 if (IS_ERR(tnl_type)) {
1038 skb_mark_unbind(skb);
1039 return PTR_ERR(tnl_type);
1040 }
1041
1042 if (unlikely(!tnl_type->tnl_decap_param_setup)) {
1043 skb_mark_unbind(skb);
1044 return -ENODEV;
1045 }
1046
1047 memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
1048
1049 /* push removed ethernet header back first */
1050 if (tnl_type->has_inner_eth)
1051 skb_push(skb, sizeof(struct ethhdr));
1052
1053 ret = tnl_type->tnl_decap_param_setup(skb, &tnl_params);
1054
1055 /* pull ethernet header to restore skb->data to ip start */
1056 if (tnl_type->has_inner_eth)
1057 skb_pull(skb, sizeof(struct ethhdr));
1058
1059 if (unlikely(ret)) {
1060 skb_mark_unbind(skb);
1061 return ret;
1062 }
1063
1064 tnl_params.tops_entry_proto = tnl_type->tops_entry;
developer84f378f2023-08-24 18:26:50 +08001065 tnl_params.cdrt = skb_hnat_cdrt(skb);
developere5e687d2023-08-08 16:05:33 +08001066
developer15ee46c2023-08-24 16:35:34 +08001067 ret = mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
developere5e687d2023-08-08 16:05:33 +08001068
1069 /*
1070 * whether success or fail to offload a decapsulation tunnel
1071 * skb_hnat_tops(skb) must be cleared to avoid mtk_tnl_decap_offload() get
1072 * called again
1073 */
1074 skb_hnat_tops(skb) = 0;
1075 skb_hnat_is_decap(skb) = 0;
1076
1077 return ret;
1078}
1079
developer84f378f2023-08-24 18:26:50 +08001080static int __mtk_tops_tnl_encap_offload(struct sk_buff *skb)
developere5e687d2023-08-08 16:05:33 +08001081{
1082 struct tops_tnl_params tnl_params;
1083 struct tops_tnl_type *tnl_type;
1084 int ret;
1085
developere5e687d2023-08-08 16:05:33 +08001086 tnl_type = skb_to_tnl_type(skb);
1087 if (IS_ERR(tnl_type))
1088 return PTR_ERR(tnl_type);
1089
1090 if (unlikely(!tnl_type->tnl_encap_param_setup))
1091 return -ENODEV;
1092
1093 memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
1094
1095 ret = tnl_type->tnl_encap_param_setup(skb, &tnl_params);
1096 if (unlikely(ret))
1097 return ret;
1098 tnl_params.tops_entry_proto = tnl_type->tops_entry;
developer84f378f2023-08-24 18:26:50 +08001099 tnl_params.cdrt = skb_hnat_cdrt(skb);
developere5e687d2023-08-08 16:05:33 +08001100
developer15ee46c2023-08-24 16:35:34 +08001101 return mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
developere5e687d2023-08-08 16:05:33 +08001102}
1103
developer84f378f2023-08-24 18:26:50 +08001104static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
1105{
1106 if (unlikely(!mtk_tops_mcu_alive())) {
1107 skb_mark_unbind(skb);
1108 return -EAGAIN;
1109 }
1110
1111 if (!skb_hnat_is_encap(skb))
1112 return -EPERM;
1113
1114 if (unlikely(skb_hnat_cdrt(skb)))
1115 return mtk_tops_tnl_l2_update(skb);
1116
1117 return __mtk_tops_tnl_encap_offload(skb);
1118}
1119
developere5e687d2023-08-08 16:05:33 +08001120static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
1121{
1122 if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
1123 return ERR_PTR(-EINVAL);
1124
1125 tnl_idx = tnl_idx - TOPS_CRSN_TNL_ID_START;
1126
1127 return tops_tnl.tnl_infos[tnl_idx].dev;
1128}
1129
1130static void mtk_tops_tnl_sync_dma_done(void *param)
1131{
1132 /* TODO: check tx status with dmaengine_tx_status()? */
1133 complete(&tops_tnl.dma_done);
1134}
1135
1136static void mtk_tops_tnl_sync_dma_start(void *param)
1137{
1138 dma_async_issue_pending(tops_tnl.dmachan);
1139
1140 wait_for_completion(&tops_tnl.dma_done);
1141}
1142
1143static void mtk_tops_tnl_sync_dma_unprepare(struct tops_tnl_info *tnl_info,
1144 dma_addr_t *addr)
1145{
1146 dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
1147 DMA_TO_DEVICE);
1148
1149 dma_release_channel(tops_tnl.dmachan);
1150}
1151
1152static int mtk_tops_tnl_sync_dma_prepare(struct tops_tnl_info *tnl_info,
1153 dma_addr_t *addr)
1154{
1155 u32 tnl_addr = tops_tnl.tnl_base_addr;
1156 struct dma_async_tx_descriptor *desc;
1157 dma_cookie_t cookie;
1158 int ret;
1159
1160 if (!tnl_info)
1161 return -EPERM;
1162
1163 tnl_addr += tnl_info->tnl_idx * sizeof(struct tops_tnl_params);
1164
1165 tops_tnl.dmachan = dma_request_slave_channel(tops_dev, "tnl-sync");
1166 if (!tops_tnl.dmachan) {
1167 TOPS_ERR("request dma channel failed\n");
1168 return -ENODEV;
1169 }
1170
1171 *addr = dma_map_single(tops_dev,
1172 &tnl_info->tnl_params,
1173 sizeof(struct tops_tnl_params),
1174 DMA_TO_DEVICE);
1175 if (dma_mapping_error(tops_dev, *addr)) {
1176 ret = -ENOMEM;
1177 goto dma_release;
1178 }
1179
1180 desc = dmaengine_prep_dma_memcpy(tops_tnl.dmachan,
1181 (dma_addr_t)tnl_addr, *addr,
1182 sizeof(struct tops_tnl_params),
1183 0);
1184 if (!desc) {
1185 ret = -EBUSY;
1186 goto dma_unmap;
1187 }
1188
1189 desc->callback = mtk_tops_tnl_sync_dma_done;
1190
1191 cookie = dmaengine_submit(desc);
1192 ret = dma_submit_error(cookie);
1193 if (ret)
1194 goto dma_terminate;
1195
1196 reinit_completion(&tops_tnl.dma_done);
1197
1198 return ret;
1199
1200dma_terminate:
1201 dmaengine_terminate_all(tops_tnl.dmachan);
1202
1203dma_unmap:
1204 dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
1205 DMA_TO_DEVICE);
1206
1207dma_release:
1208 dma_release_channel(tops_tnl.dmachan);
1209
1210 return ret;
1211}
1212
1213static int __mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
1214{
1215 struct mcu_ctrl_cmd mcmd;
1216 dma_addr_t addr;
1217 int ret;
1218
1219 mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
1220 mcmd.arg[0] = TUNNEL_CTRL_EVENT_DEL;
1221 mcmd.arg[1] = tnl_info->tnl_idx;
1222 mcmd.core_mask = CORE_TOPS_MASK;
1223
1224 ret = mtk_tops_mcu_stall(&mcmd, NULL, NULL);
1225 if (ret) {
1226 TOPS_ERR("tnl sync deletion notify mcu failed: %d\n", ret);
1227 return ret;
1228 }
1229
1230 /* there shouldn't be any other reference to tnl_info right now */
1231 memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
1232 memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
1233
1234 ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
1235 if (ret) {
1236 TOPS_ERR("tnl sync deletion prepare dma request failed: %d\n", ret);
1237 return ret;
1238 }
1239
1240 mtk_tops_tnl_sync_dma_start(NULL);
1241
1242 mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
1243
1244 return ret;
1245}
1246
1247static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
1248{
developer84f378f2023-08-24 18:26:50 +08001249 struct tops_tnl_params tnl_params;
developere5e687d2023-08-08 16:05:33 +08001250 int ret;
1251
1252 ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
1253 if (ret) {
1254 TOPS_ERR("tnl sync dipfitler tear down failed: %d\n",
1255 ret);
1256 return ret;
1257 }
1258
developer84f378f2023-08-24 18:26:50 +08001259 memcpy(&tnl_params, &tnl_info->tnl_params, sizeof(struct tops_tnl_params));
developere5e687d2023-08-08 16:05:33 +08001260 ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
1261 if (ret) {
1262 TOPS_ERR("tnl sync deletion failed: %d\n", ret);
1263 return ret;
1264 }
1265
developer84f378f2023-08-24 18:26:50 +08001266 ret = mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_params);
developer15ee46c2023-08-24 16:35:34 +08001267 if (ret) {
1268 TOPS_ERR("tnl sync cls tear down faild: %d\n",
1269 ret);
1270 return ret;
1271 }
1272
developere5e687d2023-08-08 16:05:33 +08001273 mtk_tops_tnl_info_free(tnl_info);
1274
1275 return ret;
1276}
1277
1278static int __mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
1279 bool is_new_tnl)
1280{
1281 struct mcu_ctrl_cmd mcmd;
1282 dma_addr_t addr;
1283 int ret;
1284
1285 mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
1286 mcmd.arg[1] = tnl_info->tnl_idx;
1287 mcmd.core_mask = CORE_TOPS_MASK;
1288
1289 if (is_new_tnl)
1290 mcmd.arg[0] = TUNNEL_CTRL_EVENT_NEW;
1291 else
1292 mcmd.arg[0] = TUNNEL_CTRL_EVENT_DIP_UPDATE;
1293
1294 ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
1295 if (ret) {
1296 TOPS_ERR("tnl sync update prepare dma request failed: %d\n", ret);
1297 return ret;
1298 }
1299
1300 ret = mtk_tops_mcu_stall(&mcmd, mtk_tops_tnl_sync_dma_start, NULL);
1301 if (ret)
1302 TOPS_ERR("tnl sync update notify mcu failed: %d\n", ret);
1303
1304 mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
1305
1306 return ret;
1307}
1308
1309static int mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
1310 bool setup_pce, bool is_new_tnl)
1311{
1312 int ret;
1313
developer15ee46c2023-08-24 16:35:34 +08001314 if (setup_pce) {
developer84f378f2023-08-24 18:26:50 +08001315 ret = mtk_tops_tnl_info_cls_setup(tnl_info, &tnl_info->tnl_params);
developer15ee46c2023-08-24 16:35:34 +08001316 if (ret) {
1317 TOPS_ERR("tnl cls setup failed: %d\n", ret);
1318 return ret;
1319 }
1320 }
1321
developere5e687d2023-08-08 16:05:33 +08001322 ret = __mtk_tops_tnl_sync_param_update(tnl_info, is_new_tnl);
1323 if (ret) {
1324 TOPS_ERR("tnl sync failed: %d\n", ret);
developer15ee46c2023-08-24 16:35:34 +08001325 goto cls_tear_down;
developere5e687d2023-08-08 16:05:33 +08001326 }
1327
1328 tnl_info_sta_updated(tnl_info);
1329
1330 if (setup_pce) {
1331 ret = mtk_tops_tnl_info_dipfilter_setup(tnl_info);
1332 if (ret) {
1333 TOPS_ERR("tnl dipfilter setup failed: %d\n", ret);
1334 /* TODO: should undo parameter sync */
1335 return ret;
1336 }
1337 }
1338
1339 return ret;
developer15ee46c2023-08-24 16:35:34 +08001340
1341cls_tear_down:
developer84f378f2023-08-24 18:26:50 +08001342 mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);
developer15ee46c2023-08-24 16:35:34 +08001343
1344 return ret;
developere5e687d2023-08-08 16:05:33 +08001345}
1346
1347static inline int mtk_tops_tnl_sync_param_new(struct tops_tnl_info *tnl_info,
1348 bool setup_pce)
1349{
1350 return mtk_tops_tnl_sync_param_update(tnl_info, setup_pce, true);
1351}
1352
1353static void mtk_tops_tnl_sync_get_pending_queue(void)
1354{
1355 struct list_head *tmp = tops_tnl.tnl_sync_submit;
1356 unsigned long flag = 0;
1357
1358 spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
1359
1360 tops_tnl.tnl_sync_submit = tops_tnl.tnl_sync_pending;
1361 tops_tnl.tnl_sync_pending = tmp;
1362
1363 tops_tnl.has_tnl_to_sync = false;
1364
1365 spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
1366}
1367
1368static void mtk_tops_tnl_sync_queue_proc(void)
1369{
1370 struct tops_tnl_info *tnl_info;
1371 struct tops_tnl_info *tmp;
1372 unsigned long flag = 0;
1373 bool is_decap = false;
1374 u32 tnl_status = 0;
1375 int ret;
1376
1377 list_for_each_entry_safe(tnl_info,
1378 tmp,
1379 tops_tnl.tnl_sync_pending,
1380 sync_node) {
1381 spin_lock_irqsave(&tnl_info->lock, flag);
1382
1383 /* tnl update is on the fly, queue tnl to next round */
1384 if (tnl_info_sta_is_updating(tnl_info)) {
1385 list_del_init(&tnl_info->sync_node);
1386
1387 tnl_info_submit_no_tnl_lock(tnl_info);
1388
1389 goto next;
1390 }
1391
1392 /*
1393 * if tnl_info is not queued, something wrong
1394 * just remove that tnl_info from the queue
1395 * maybe trigger BUG_ON()?
1396 */
1397 if (!tnl_info_sta_is_queued(tnl_info)) {
1398 list_del_init(&tnl_info->sync_node);
1399 goto next;
1400 }
1401
1402 is_decap = (!(tnl_info->tnl_params.flag & TNL_DECAP_ENABLE)
1403 && tnl_info_decap_is_enable(tnl_info));
1404
1405 tnl_status = tnl_info->status;
1406 memcpy(&tnl_info->tnl_params, &tnl_info->cache,
1407 sizeof(struct tops_tnl_params));
1408
1409 list_del_init(&tnl_info->sync_node);
1410
1411 /*
1412 * mark tnl info to updating and release tnl info's spin lock
1413 * since it is going to use dma to transfer data
1414 * and might going to sleep
1415 */
1416 tnl_info_sta_updating_no_tnl_lock(tnl_info);
1417
1418 spin_unlock_irqrestore(&tnl_info->lock, flag);
1419
1420 if (tnl_status & TNL_STA_INIT)
1421 ret = mtk_tops_tnl_sync_param_new(tnl_info, is_decap);
1422 else if (tnl_status & TNL_STA_DELETING)
1423 ret = mtk_tops_tnl_sync_param_delete(tnl_info);
1424 else
1425 ret = mtk_tops_tnl_sync_param_update(tnl_info,
1426 is_decap,
1427 false);
1428
1429 if (ret)
1430 TOPS_ERR("sync tunnel parameter failed: %d\n", ret);
1431
1432 continue;
1433
1434next:
1435 spin_unlock_irqrestore(&tnl_info->lock, flag);
1436 }
1437}
1438
1439static int tnl_sync_task(void *data)
1440{
1441 while (1) {
1442 wait_event_interruptible(tops_tnl.tnl_sync_wait,
1443 (tops_tnl.has_tnl_to_sync && mtk_tops_mcu_alive())
1444 || kthread_should_stop());
1445
1446 if (kthread_should_stop())
1447 break;
1448
1449 mtk_tops_tnl_sync_get_pending_queue();
1450
1451 mtk_tops_tnl_sync_queue_proc();
1452 }
1453
1454 return 0;
1455}
1456
1457static void mtk_tops_tnl_info_flush_ppe(struct tops_tnl_info *tnl_info)
1458{
1459 struct foe_entry *entry;
1460 u32 max_entry;
1461 u32 ppe_id;
1462 u32 eidx;
1463
1464 /* tnl info's lock should be held */
1465 lockdep_assert_held(&tnl_info->lock);
1466
1467 /* clear all TOPS related PPE entries */
1468 for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
1469 max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
1470 for (eidx = 0; eidx < max_entry; eidx++) {
1471 entry = hnat_get_foe_entry(ppe_id, eidx);
1472 if (IS_ERR(entry))
1473 break;
1474
1475 if (!entry_hnat_is_bound(entry))
1476 continue;
1477
1478 tnl_flush_ppe_entry(entry, tnl_info->tnl_idx);
1479 }
1480 }
1481 hnat_cache_ebl(1);
1482 /* make sure all data is written to dram PPE table */
1483 wmb();
1484}
1485
1486void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev)
1487{
1488 struct tops_tnl_info *tnl_info;
1489 unsigned long flag;
1490 u32 bkt;
1491
1492 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1493
1494 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
1495 spin_lock(&tnl_info->lock);
1496
1497 if (tnl_info->dev == ndev) {
1498 mtk_tops_tnl_info_flush_ppe(tnl_info);
1499
1500 __mtk_tops_tnl_offload_disable(tnl_info);
1501
1502 spin_unlock(&tnl_info->lock);
1503
1504 break;
1505 }
1506
1507 spin_unlock(&tnl_info->lock);
1508 }
1509
1510 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1511}
1512
1513void mtk_tops_tnl_offload_flush(void)
1514{
1515 struct tops_tnl_info *tnl_info;
1516 struct foe_entry *entry;
1517 unsigned long flag;
1518 u32 max_entry;
1519 u32 ppe_id;
1520 u32 eidx;
1521 u32 bkt;
1522
1523 /* clear all TOPS related PPE entries */
1524 for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
1525 max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
1526 for (eidx = 0; eidx < max_entry; eidx++) {
1527 entry = hnat_get_foe_entry(ppe_id, eidx);
1528 if (IS_ERR(entry))
1529 break;
1530
1531 if (!entry_hnat_is_bound(entry))
1532 continue;
1533
1534 tnl_flush_ppe_entry(entry, __TOPS_ENTRY_MAX);
1535 }
1536 }
1537 hnat_cache_ebl(1);
1538 /* make sure all data is written to dram PPE table */
1539 wmb();
1540
1541 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1542
1543 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
1544 /* clear all tunnel's synced parameters, but preserve cache */
1545 memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
1546 /*
1547 * make tnl_info status to TNL_INIT state
1548 * so that it can be added to TOPS again
1549 */
1550 spin_lock(&tnl_info->lock);
1551
1552 tnl_info_sta_init_no_tnl_lock(tnl_info);
1553 list_del_init(&tnl_info->sync_node);
1554
1555 spin_unlock(&tnl_info->lock);
1556 }
1557
1558 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1559}
1560
1561void mtk_tops_tnl_offload_recover(void)
1562{
1563 struct tops_tnl_info *tnl_info;
1564 unsigned long flag;
1565 u32 bkt;
1566
1567 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1568
1569 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist)
1570 mtk_tops_tnl_info_submit(tnl_info);
1571
1572 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1573}
1574
1575int mtk_tops_tnl_offload_init(struct platform_device *pdev)
1576{
1577 struct tops_tnl_info *tnl_info;
1578 int ret = 0;
1579 int i = 0;
1580
1581 hash_init(tops_tnl.ht);
1582
1583 tops_tnl.tnl_infos = devm_kzalloc(&pdev->dev,
1584 sizeof(struct tops_tnl_info) * CONFIG_TOPS_TNL_NUM,
1585 GFP_KERNEL);
1586 if (!tops_tnl.tnl_infos)
1587 return -ENOMEM;
1588
1589 for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
1590 tnl_info = &tops_tnl.tnl_infos[i];
1591 tnl_info->tnl_idx = i;
1592 tnl_info->status = TNL_STA_UNINIT;
1593 INIT_HLIST_NODE(&tnl_info->hlist);
1594 INIT_LIST_HEAD(&tnl_info->sync_node);
1595 spin_lock_init(&tnl_info->lock);
1596 }
1597
1598 ret = register_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
1599 if (ret) {
1600 TOPS_ERR("tnl offload recv dev register failed: %d\n",
1601 ret);
1602 return ret;
1603 }
1604
1605 init_completion(&tops_tnl.dma_done);
1606 init_waitqueue_head(&tops_tnl.tnl_sync_wait);
1607
1608 tops_tnl.tnl_sync_thread = kthread_run(tnl_sync_task, NULL,
1609 "tnl sync param task");
1610 if (IS_ERR(tops_tnl.tnl_sync_thread)) {
1611 TOPS_ERR("tnl sync thread create failed\n");
1612 ret = -ENOMEM;
1613 goto unregister_mbox;
1614 }
1615
1616 mtk_tnl_encap_offload = mtk_tops_tnl_encap_offload;
1617 mtk_tnl_decap_offload = mtk_tops_tnl_decap_offload;
1618 mtk_tnl_decap_offloadable = mtk_tops_tnl_decap_offloadable;
1619 mtk_get_tnl_dev = mtk_tops_get_tnl_dev;
1620
1621 tops_tnl.tnl_sync_submit = &tnl_sync_q1;
1622 tops_tnl.tnl_sync_pending = &tnl_sync_q2;
1623 spin_lock_init(&tops_tnl.tnl_sync_lock);
1624 spin_lock_init(&tops_tnl.tbl_lock);
1625
1626 return 0;
1627
1628unregister_mbox:
1629 unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
1630
1631 return ret;
1632}
1633
1634void mtk_tops_tnl_offload_pce_clean_up(void)
1635{
1636 struct tops_tnl_info *tnl_info;
1637 unsigned long flag;
1638 u32 bkt;
1639
1640 spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
1641
1642 hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
1643 mtk_tops_tnl_info_flush_ppe(tnl_info);
1644
1645 mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
developer15ee46c2023-08-24 16:35:34 +08001646
developer84f378f2023-08-24 18:26:50 +08001647 mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);
developere5e687d2023-08-08 16:05:33 +08001648 }
1649
1650 spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
1651}
1652
1653void mtk_tops_tnl_offload_deinit(struct platform_device *pdev)
1654{
1655 mtk_tnl_encap_offload = NULL;
1656 mtk_tnl_decap_offload = NULL;
1657 mtk_tnl_decap_offloadable = NULL;
1658 mtk_get_tnl_dev = NULL;
1659
1660 kthread_stop(tops_tnl.tnl_sync_thread);
1661
1662 mtk_tops_tnl_offload_pce_clean_up();
1663
1664 unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
1665}
1666
1667int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev)
1668{
1669 mtk_tops_gretap_init();
1670
1671 mtk_tops_udp_l2tp_data_init();
1672
1673 return 0;
1674}
1675
1676void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev)
1677{
1678 mtk_tops_gretap_deinit();
1679
1680 mtk_tops_udp_l2tp_data_deinit();
1681}
1682
1683struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name)
1684{
1685 enum tops_entry_type tops_entry = TOPS_ENTRY_NONE + 1;
1686 struct tops_tnl_type *tnl_type;
1687
1688 if (unlikely(!name))
1689 return ERR_PTR(-EPERM);
1690
1691 for (; tops_entry < __TOPS_ENTRY_MAX; tops_entry++) {
1692 tnl_type = tops_tnl.offload_tnl_types[tops_entry];
1693 if (tnl_type && !strcmp(name, tnl_type->type_name))
1694 break;
1695 }
1696
1697 return tnl_type;
1698}
1699
1700int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type)
1701{
1702 enum tops_entry_type tops_entry = tnl_type->tops_entry;
1703
1704 if (unlikely(tops_entry == TOPS_ENTRY_NONE
1705 || tops_entry >= __TOPS_ENTRY_MAX)) {
1706 TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
1707 return -EINVAL;
1708 }
1709
1710 if (unlikely(!tnl_type))
1711 return -EINVAL;
1712
1713 if (tops_tnl.offload_tnl_types[tops_entry]) {
1714 TOPS_ERR("offload tnl type is already registered: %u\n", tops_entry);
1715 return -EBUSY;
1716 }
1717
developer15ee46c2023-08-24 16:35:34 +08001718 INIT_LIST_HEAD(&tnl_type->tcls_head);
developere5e687d2023-08-08 16:05:33 +08001719 tops_tnl.offload_tnl_types[tops_entry] = tnl_type;
1720 tops_tnl.offload_tnl_type_num++;
1721
1722 return 0;
1723}
1724
1725void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type)
1726{
1727 enum tops_entry_type tops_entry = tnl_type->tops_entry;
1728
1729 if (unlikely(tops_entry == TOPS_ENTRY_NONE
1730 || tops_entry >= __TOPS_ENTRY_MAX)) {
1731 TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
1732 return;
1733 }
1734
1735 if (unlikely(!tnl_type))
1736 return;
1737
1738 if (tops_tnl.offload_tnl_types[tops_entry] != tnl_type) {
1739 TOPS_ERR("offload tnl type is registered by others\n");
1740 return;
1741 }
1742
1743 tops_tnl.offload_tnl_types[tops_entry] = NULL;
1744 tops_tnl.offload_tnl_type_num--;
1745}