// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/string.h>

#include <mtk_eth_soc.h>
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>

#include <pce/dipfilter.h>
#include <pce/pce.h>

#include "internal.h"
#include "mbox.h"
#include "mcu.h"
#include "netsys.h"
#include "protocol/gre/gretap.h"
#include "protocol/l2tp/udp_l2tp_data.h"
#include "tunnel.h"

#define TOPS_PPE_ENTRY_BUCKETS (64)
#define TOPS_PPE_ENTRY_BUCKETS_BIT (6)

struct tops_tnl {
	/* tunnel types */
	struct tops_tnl_type *offload_tnl_types[__TOPS_ENTRY_MAX];
	u32 offload_tnl_type_num;
	u32 tnl_base_addr;

	/* tunnel table */
	DECLARE_HASHTABLE(ht, CONFIG_TOPS_TNL_MAP_BIT);
	DECLARE_BITMAP(tnl_used, CONFIG_TOPS_TNL_NUM);
	wait_queue_head_t tnl_sync_wait;
	spinlock_t tnl_sync_lock;
	spinlock_t tbl_lock;
	bool has_tnl_to_sync;
	struct task_struct *tnl_sync_thread;
	struct list_head *tnl_sync_pending;
	struct list_head *tnl_sync_submit;
	struct tops_tnl_info *tnl_infos;

	/* dma request */
	struct completion dma_done;
	struct dma_chan *dmachan;

	struct device *dev;
};

static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
						   struct mailbox_msg *msg);

static struct tops_tnl tops_tnl;

static LIST_HEAD(tnl_sync_q1);
static LIST_HEAD(tnl_sync_q2);
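/*
 * tnl_sync_q1 and tnl_sync_q2 form a double buffer: tunnels are queued on
 * tops_tnl.tnl_sync_submit under tnl_sync_lock, and the sync thread swaps
 * the two lists so it can walk the pending queue without holding the lock.
 */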

struct mailbox_dev tnl_offload_mbox_recv =
	MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, tnl_offload_mbox_cmd_recv);

/* tunnel mailbox communication */
static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
						   struct mailbox_msg *msg)
{
	switch (msg->msg1) {
	case TOPS_TNL_START_ADDR_SYNC:
		tops_tnl.tnl_base_addr = msg->msg2;

		return MBOX_NO_RET_MSG;
	default:
		break;
	}

	return MBOX_NO_RET_MSG;
}

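/*
 * Clear a PPE FOE entry that is bound to a TOPS tunnel. Passing
 * tnl_idx == __TOPS_ENTRY_MAX flushes entries of every tunnel; otherwise
 * only entries bound to the given tunnel index are cleared.
 */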
static inline void tnl_flush_ppe_entry(struct foe_entry *entry, u32 tnl_idx)
{
	u32 bind_tnl_idx;

	if (unlikely(!entry))
		return;

	switch (entry->bfib1.pkt_type) {
	case IPV4_HNAPT:
		if (entry->ipv4_hnapt.tport_id != NR_TDMA_TPORT
		    && entry->ipv4_hnapt.tport_id != NR_TDMA_QDMA_TPORT)
			return;

		bind_tnl_idx = entry->ipv4_hnapt.tops_entry - __TOPS_ENTRY_MAX;

		break;
	default:
		return;
	}

	/* unexpected tunnel index */
	if (bind_tnl_idx >= __TOPS_ENTRY_MAX)
		return;

	if (tnl_idx == __TOPS_ENTRY_MAX || tnl_idx == bind_tnl_idx)
		memset(entry, 0, sizeof(*entry));
}

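/*
 * A bound tunnel is tagged in skb_hnat_tops() as (tnl_idx + __TOPS_ENTRY_MAX)
 * so that it can be distinguished from a plain protocol entry type, which is
 * always smaller than __TOPS_ENTRY_MAX.
 */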
static inline void skb_set_tops_tnl_idx(struct sk_buff *skb, u32 tnl_idx)
{
	skb_hnat_tops(skb) = tnl_idx + __TOPS_ENTRY_MAX;
}

static inline bool skb_tops_valid(struct sk_buff *skb)
{
	return (skb
		&& skb_hnat_tops(skb) >= 0
		&& skb_hnat_tops(skb) <= __TOPS_ENTRY_MAX);
}

static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
{
	enum tops_entry_type tops_entry = skb_hnat_tops(skb);
	struct tops_tnl_type *tnl_type;

	if (unlikely(!tops_entry || tops_entry >= __TOPS_ENTRY_MAX))
		return ERR_PTR(-EINVAL);

	tnl_type = tops_tnl.offload_tnl_types[tops_entry];

	return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
}

static inline void skb_mark_unbind(struct sk_buff *skb)
{
	skb_hnat_tops(skb) = 0;
	skb_hnat_is_decap(skb) = 0;
	skb_hnat_alg(skb) = 1;
}

static inline u32 tnl_params_hash(struct tops_tnl_params *tnl_params)
{
	if (!tnl_params)
		return 0;

	/* TODO: check collision possibility? */
	return (tnl_params->sip ^ tnl_params->dip);
}

static inline bool tnl_info_decap_is_enable(struct tops_tnl_info *tnl_info)
{
	return tnl_info->cache.flag & TNL_DECAP_ENABLE;
}

static inline void tnl_info_decap_enable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag |= TNL_DECAP_ENABLE;
}

static inline void tnl_info_decap_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag &= ~(TNL_DECAP_ENABLE);
}

static inline bool tnl_info_encap_is_enable(struct tops_tnl_info *tnl_info)
{
	return tnl_info->cache.flag & TNL_ENCAP_ENABLE;
}

static inline void tnl_info_encap_enable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag |= TNL_ENCAP_ENABLE;
}

static inline void tnl_info_encap_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag &= ~(TNL_ENCAP_ENABLE);
}

static inline void tnl_info_sta_updated_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status &= (~TNL_STA_UPDATING);
	tnl_info->status &= (~TNL_STA_INIT);
	tnl_info->status |= TNL_STA_UPDATED;
}

static inline void tnl_info_sta_updated(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_updated_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_updated(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UPDATED;
}

static inline void tnl_info_sta_updating_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_UPDATING;
	tnl_info->status &= (~TNL_STA_QUEUED);
	tnl_info->status &= (~TNL_STA_UPDATED);
}

static inline void tnl_info_sta_updating(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_updating_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_updating(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UPDATING;
}

static inline void tnl_info_sta_queued_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_QUEUED;
	tnl_info->status &= (~TNL_STA_UPDATED);
}

static inline void tnl_info_sta_queued(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_queued_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_queued(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_QUEUED;
}

static inline void tnl_info_sta_init_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status = TNL_STA_INIT;
}

static inline void tnl_info_sta_init(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_init_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_init(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_INIT;
}

static inline void tnl_info_sta_uninit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status = TNL_STA_UNINIT;
}

static inline void tnl_info_sta_uninit(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_uninit_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_uninit(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UNINIT;
}

static inline void tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);

	list_add_tail(&tnl_info->sync_node, tops_tnl.tnl_sync_submit);

	tops_tnl.has_tnl_to_sync = true;

	spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);

	if (mtk_tops_mcu_alive())
		wake_up_interruptible(&tops_tnl.tnl_sync_wait);
}

static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
{
	struct dip_desc dipd;

	memset(&dipd, 0, sizeof(struct dip_desc));

	dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
	dipd.tag = DIPFILTER_IPV4;

	return mtk_pce_dipfilter_entry_del(&dipd);
}

static int mtk_tops_tnl_info_dipfilter_setup(struct tops_tnl_info *tnl_info)
{
	struct dip_desc dipd;

	/* setup dipfilter */
	memset(&dipd, 0, sizeof(struct dip_desc));

	dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
	dipd.tag = DIPFILTER_IPV4;

	return mtk_pce_dipfilter_entry_add(&dipd);
}

void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	lockdep_assert_held(&tnl_info->lock);

	if (tnl_info_sta_is_queued(tnl_info))
		return;

	tnl_info_submit_no_tnl_lock(tnl_info);

	tnl_info_sta_queued_no_tnl_lock(tnl_info);
}

void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static void mtk_tops_tnl_info_hash_no_lock(struct tops_tnl_info *tnl_info)
{
	lockdep_assert_held(&tops_tnl.tbl_lock);
	lockdep_assert_held(&tnl_info->lock);

	if (hash_hashed(&tnl_info->hlist))
		hash_del(&tnl_info->hlist);

	hash_add(tops_tnl.ht, &tnl_info->hlist, tnl_params_hash(&tnl_info->cache));
}

void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	spin_lock(&tnl_info->lock);

	mtk_tops_tnl_info_hash_no_lock(tnl_info);

	spin_unlock(&tnl_info->lock);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

static bool mtk_tops_tnl_info_match(struct tops_tnl_type *tnl_type,
				    struct tops_tnl_info *tnl_info,
				    struct tops_tnl_params *match_data)
{
	unsigned long flag = 0;
	bool match;

	spin_lock_irqsave(&tnl_info->lock, flag);

	match = tnl_type->tnl_info_match(&tnl_info->cache, match_data);

	spin_unlock_irqrestore(&tnl_info->lock, flag);

	return match;
}

struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_params *tnl_params)
{
	struct tops_tnl_info *tnl_info;
	struct tops_tnl_type *tnl_type;

	lockdep_assert_held(&tops_tnl.tbl_lock);

	if (unlikely(!tnl_params->tops_entry_proto
		     || tnl_params->tops_entry_proto >= __TOPS_ENTRY_MAX))
		return ERR_PTR(-EINVAL);

	tnl_type = tops_tnl.offload_tnl_types[tnl_params->tops_entry_proto];
	if (unlikely(!tnl_type))
		return ERR_PTR(-EINVAL);

	if (unlikely(!tnl_type->tnl_info_match))
		return ERR_PTR(-ENXIO);

	hash_for_each_possible(tops_tnl.ht,
			       tnl_info,
			       hlist,
			       tnl_params_hash(tnl_params))
		if (mtk_tops_tnl_info_match(tnl_type, tnl_info, tnl_params))
			return tnl_info;

	return ERR_PTR(-ENODEV);
}

/* tnl_info->lock should be held before calling this function */
static int mtk_tops_tnl_info_setup(struct sk_buff *skb,
				   struct tops_tnl_info *tnl_info,
				   struct tops_tnl_params *tnl_params)
{
	if (unlikely(!skb || !tnl_info || !tnl_params))
		return -EPERM;

	lockdep_assert_held(&tnl_info->lock);

	tnl_params->flag |= tnl_info->cache.flag;

	if (memcmp(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params))) {
		memcpy(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params));

		mtk_tops_tnl_info_hash_no_lock(tnl_info);
	}

	if (skb_hnat_is_decap(skb)) {
		/* the net_device is used to forward the Rx packet to the decapsulated interface */
		tnl_info->dev = skb->dev;
		if (!tnl_info_decap_is_enable(tnl_info)) {
			tnl_info_decap_enable(tnl_info);

			mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
		}
	} else if (skb_hnat_is_encap(skb)) {
		/* set skb_hnat_tops(skb) to tunnel index for ppe binding */
		skb_set_tops_tnl_idx(skb, tnl_info->tnl_idx);
		if (!tnl_info_encap_is_enable(tnl_info)) {
			tnl_info_encap_enable(tnl_info);

			mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
		}
	}

	return 0;
}

/* tops_tnl.tbl_lock should be acquired before calling this function */
static struct tops_tnl_info *mtk_tops_tnl_info_alloc_no_lock(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag = 0;
	u32 tnl_idx;

	lockdep_assert_held(&tops_tnl.tbl_lock);

	tnl_idx = find_first_zero_bit(tops_tnl.tnl_used, CONFIG_TOPS_TNL_NUM);
	if (tnl_idx == CONFIG_TOPS_TNL_NUM) {
		TOPS_NOTICE("offload tunnel table full!\n");
		return ERR_PTR(-ENOMEM);
	}

	/* occupy the free tunnel entry */
	tnl_info = &tops_tnl.tnl_infos[tnl_idx];
	memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
	memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));

	/* TODO: maybe spin_lock_bh() is enough? */
	spin_lock_irqsave(&tnl_info->lock, flag);

	if (tnl_info_sta_is_init(tnl_info)) {
		TOPS_ERR("error: fetched an initialized tunnel info\n");

		spin_unlock_irqrestore(&tnl_info->lock, flag);

		return ERR_PTR(-EBADF);
	}
	tnl_info_sta_init_no_tnl_lock(tnl_info);

	INIT_HLIST_NODE(&tnl_info->hlist);

	spin_unlock_irqrestore(&tnl_info->lock, flag);

	set_bit(tnl_idx, tops_tnl.tnl_used);

	return tnl_info;
}

struct tops_tnl_info *mtk_tops_tnl_info_alloc(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	tnl_info = mtk_tops_tnl_info_alloc_no_lock();

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);

	return tnl_info;
}

static void mtk_tops_tnl_info_free_no_lock(struct tops_tnl_info *tnl_info)
{
	if (unlikely(!tnl_info))
		return;

	lockdep_assert_held(&tops_tnl.tbl_lock);
	lockdep_assert_held(&tnl_info->lock);

	hash_del(&tnl_info->hlist);

	tnl_info_sta_uninit_no_tnl_lock(tnl_info);

	clear_bit(tnl_info->tnl_idx, tops_tnl.tnl_used);
}

static void mtk_tops_tnl_info_free(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	spin_lock(&tnl_info->lock);

	mtk_tops_tnl_info_free_no_lock(tnl_info);

	spin_unlock(&tnl_info->lock);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

static void __mtk_tops_tnl_offload_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_DELETING;
	mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
}

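/*
 * Look up the tunnel info matching tnl_params under tbl_lock, allocate a new
 * entry if none exists yet, then update it from the skb and queue it for
 * parameter synchronization when needed.
 */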
static int mtk_tops_tnl_offload(struct sk_buff *skb,
				struct tops_tnl_params *tnl_params)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	int ret = 0;

	if (unlikely(!tnl_params))
		return -EPERM;

	/* prepare tnl_info */
	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	tnl_info = mtk_tops_tnl_info_find(tnl_params);
	if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) != -ENODEV) {
		/* error */
		ret = PTR_ERR(tnl_info);
		goto err_out;
	} else if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) == -ENODEV) {
		/* not allocated yet */
		tnl_info = mtk_tops_tnl_info_alloc_no_lock();
	}

	if (IS_ERR(tnl_info)) {
		ret = PTR_ERR(tnl_info);
		TOPS_DBG("tnl offload alloc tnl_info failed: %d\n", ret);
		goto err_out;
	}

	spin_lock(&tnl_info->lock);
	ret = mtk_tops_tnl_info_setup(skb, tnl_info, tnl_params);

	spin_unlock(&tnl_info->lock);

err_out:
	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);

	return ret;
}


static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
{
	struct tops_tnl_type *tnl_type;
	struct ethhdr *eth;
	u32 cnt;
	u32 i;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return false;
	}

	/* the skb should not carry a TOPS entry at this point */
	if (skb_hnat_tops(skb))
		return false;

	eth = eth_hdr(skb);

	/* TODO: currently decap only supports IPv4 over Ethernet */
	if (ntohs(eth->h_proto) != ETH_P_IP)
		return false;

	/* TODO: could be optimized */
	for (i = TOPS_ENTRY_GRETAP, cnt = 0;
	     i < __TOPS_ENTRY_MAX && cnt < tops_tnl.offload_tnl_type_num;
	     i++) {
		tnl_type = tops_tnl.offload_tnl_types[i];
		if (unlikely(!tnl_type))
			continue;

		cnt++;
		if (tnl_type->tnl_decap_offloadable
		    && tnl_type->tnl_decap_offloadable(skb)) {
			skb_hnat_tops(skb) = tnl_type->tops_entry;
			return true;
		}
	}

	return false;
}

static int mtk_tops_tnl_decap_offload(struct sk_buff *skb)
{
	struct tops_tnl_params tnl_params;
	struct tops_tnl_type *tnl_type;
	int ret;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return -EAGAIN;
	}

	if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_decap(skb))) {
		skb_mark_unbind(skb);
		return -EINVAL;
	}

	tnl_type = skb_to_tnl_type(skb);
	if (IS_ERR(tnl_type)) {
		skb_mark_unbind(skb);
		return PTR_ERR(tnl_type);
	}

	if (unlikely(!tnl_type->tnl_decap_param_setup)) {
		skb_mark_unbind(skb);
		return -ENODEV;
	}

	memset(&tnl_params, 0, sizeof(struct tops_tnl_params));

	/* push the removed ethernet header back first */
	if (tnl_type->has_inner_eth)
		skb_push(skb, sizeof(struct ethhdr));

	ret = tnl_type->tnl_decap_param_setup(skb, &tnl_params);

	/* pull the ethernet header again to restore skb->data to the IP start */
	if (tnl_type->has_inner_eth)
		skb_pull(skb, sizeof(struct ethhdr));

	if (unlikely(ret)) {
		skb_mark_unbind(skb);
		return ret;
	}

	tnl_params.tops_entry_proto = tnl_type->tops_entry;

	ret = mtk_tops_tnl_offload(skb, &tnl_params);

	/*
	 * Whether offloading the decapsulation tunnel succeeds or fails,
	 * skb_hnat_tops(skb) must be cleared to avoid mtk_tnl_decap_offload()
	 * getting called again.
	 */
	skb_hnat_tops(skb) = 0;
	skb_hnat_is_decap(skb) = 0;

	return ret;
}

static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
{
	struct tops_tnl_params tnl_params;
	struct tops_tnl_type *tnl_type;
	int ret;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return -EAGAIN;
	}

	if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_encap(skb)))
		return -EPERM;

	tnl_type = skb_to_tnl_type(skb);
	if (IS_ERR(tnl_type))
		return PTR_ERR(tnl_type);

	if (unlikely(!tnl_type->tnl_encap_param_setup))
		return -ENODEV;

	memset(&tnl_params, 0, sizeof(struct tops_tnl_params));

	ret = tnl_type->tnl_encap_param_setup(skb, &tnl_params);
	if (unlikely(ret))
		return ret;
	tnl_params.tops_entry_proto = tnl_type->tops_entry;

	return mtk_tops_tnl_offload(skb, &tnl_params);
}

static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
{
	if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
		return ERR_PTR(-EINVAL);

	tnl_idx = tnl_idx - TOPS_CRSN_TNL_ID_START;

	return tops_tnl.tnl_infos[tnl_idx].dev;
}

static void mtk_tops_tnl_sync_dma_done(void *param)
{
	/* TODO: check tx status with dmaengine_tx_status()? */
	complete(&tops_tnl.dma_done);
}

static void mtk_tops_tnl_sync_dma_start(void *param)
{
	dma_async_issue_pending(tops_tnl.dmachan);

	wait_for_completion(&tops_tnl.dma_done);
}

static void mtk_tops_tnl_sync_dma_unprepare(struct tops_tnl_info *tnl_info,
					    dma_addr_t *addr)
{
	dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
			 DMA_TO_DEVICE);

	dma_release_channel(tops_tnl.dmachan);
}

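/*
 * Prepare a DMA memcpy that copies this tunnel's tops_tnl_params into the
 * tunnel parameter table advertised by the TOPS firmware through the
 * TOPS_TNL_START_ADDR_SYNC mailbox message, at the slot selected by tnl_idx.
 */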
static int mtk_tops_tnl_sync_dma_prepare(struct tops_tnl_info *tnl_info,
					 dma_addr_t *addr)
{
	u32 tnl_addr = tops_tnl.tnl_base_addr;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (!tnl_info)
		return -EPERM;

	tnl_addr += tnl_info->tnl_idx * sizeof(struct tops_tnl_params);

	tops_tnl.dmachan = dma_request_slave_channel(tops_dev, "tnl-sync");
	if (!tops_tnl.dmachan) {
		TOPS_ERR("request dma channel failed\n");
		return -ENODEV;
	}

	*addr = dma_map_single(tops_dev,
			       &tnl_info->tnl_params,
			       sizeof(struct tops_tnl_params),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(tops_dev, *addr)) {
		ret = -ENOMEM;
		goto dma_release;
	}

	desc = dmaengine_prep_dma_memcpy(tops_tnl.dmachan,
					 (dma_addr_t)tnl_addr, *addr,
					 sizeof(struct tops_tnl_params),
					 0);
	if (!desc) {
		ret = -EBUSY;
		goto dma_unmap;
	}

	desc->callback = mtk_tops_tnl_sync_dma_done;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto dma_terminate;

	reinit_completion(&tops_tnl.dma_done);

	return ret;

dma_terminate:
	dmaengine_terminate_all(tops_tnl.dmachan);

dma_unmap:
	dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
			 DMA_TO_DEVICE);

dma_release:
	dma_release_channel(tops_tnl.dmachan);

	return ret;
}

static int __mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
{
	struct mcu_ctrl_cmd mcmd;
	dma_addr_t addr;
	int ret;

	mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
	mcmd.arg[0] = TUNNEL_CTRL_EVENT_DEL;
	mcmd.arg[1] = tnl_info->tnl_idx;
	mcmd.core_mask = CORE_TOPS_MASK;

	ret = mtk_tops_mcu_stall(&mcmd, NULL, NULL);
	if (ret) {
		TOPS_ERR("tnl sync deletion notify mcu failed: %d\n", ret);
		return ret;
	}

	/* there shouldn't be any other reference to tnl_info right now */
	memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
	memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));

	ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
	if (ret) {
		TOPS_ERR("tnl sync deletion prepare dma request failed: %d\n", ret);
		return ret;
	}

	mtk_tops_tnl_sync_dma_start(NULL);

	mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);

	return ret;
}

static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
{
	int ret;

	ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
	if (ret) {
		TOPS_ERR("tnl sync dipfilter tear down failed: %d\n",
			 ret);
		return ret;
	}

	ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
	if (ret) {
		TOPS_ERR("tnl sync deletion failed: %d\n", ret);
		return ret;
	}

	mtk_tops_tnl_info_free(tnl_info);

	return ret;
}

static int __mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
					    bool is_new_tnl)
{
	struct mcu_ctrl_cmd mcmd;
	dma_addr_t addr;
	int ret;

	mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
	mcmd.arg[1] = tnl_info->tnl_idx;
	mcmd.core_mask = CORE_TOPS_MASK;

	if (is_new_tnl)
		mcmd.arg[0] = TUNNEL_CTRL_EVENT_NEW;
	else
		mcmd.arg[0] = TUNNEL_CTRL_EVENT_DIP_UPDATE;

	ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
	if (ret) {
		TOPS_ERR("tnl sync update prepare dma request failed: %d\n", ret);
		return ret;
	}

	ret = mtk_tops_mcu_stall(&mcmd, mtk_tops_tnl_sync_dma_start, NULL);
	if (ret)
		TOPS_ERR("tnl sync update notify mcu failed: %d\n", ret);

	mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);

	return ret;
}

static int mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
					  bool setup_pce, bool is_new_tnl)
{
	int ret;

	ret = __mtk_tops_tnl_sync_param_update(tnl_info, is_new_tnl);
	if (ret) {
		TOPS_ERR("tnl sync failed: %d\n", ret);
		return ret;
	}

	tnl_info_sta_updated(tnl_info);

	if (setup_pce) {
		ret = mtk_tops_tnl_info_dipfilter_setup(tnl_info);
		if (ret) {
			TOPS_ERR("tnl dipfilter setup failed: %d\n", ret);
			/* TODO: should undo parameter sync */
			return ret;
		}
	}

	return ret;
}

static inline int mtk_tops_tnl_sync_param_new(struct tops_tnl_info *tnl_info,
					      bool setup_pce)
{
	return mtk_tops_tnl_sync_param_update(tnl_info, setup_pce, true);
}

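/*
 * Swap the submit and pending queues under tnl_sync_lock so that the sync
 * thread can process the pending list while new submissions keep going to
 * the other list.
 */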
static void mtk_tops_tnl_sync_get_pending_queue(void)
{
	struct list_head *tmp = tops_tnl.tnl_sync_submit;
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);

	tops_tnl.tnl_sync_submit = tops_tnl.tnl_sync_pending;
	tops_tnl.tnl_sync_pending = tmp;

	tops_tnl.has_tnl_to_sync = false;

	spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
}

static void mtk_tops_tnl_sync_queue_proc(void)
{
	struct tops_tnl_info *tnl_info;
	struct tops_tnl_info *tmp;
	unsigned long flag = 0;
	bool is_decap = false;
	u32 tnl_status = 0;
	int ret;

	list_for_each_entry_safe(tnl_info,
				 tmp,
				 tops_tnl.tnl_sync_pending,
				 sync_node) {
		spin_lock_irqsave(&tnl_info->lock, flag);

		/* tnl update is on the fly, queue tnl to next round */
		if (tnl_info_sta_is_updating(tnl_info)) {
			list_del_init(&tnl_info->sync_node);

			tnl_info_submit_no_tnl_lock(tnl_info);

			goto next;
		}

		/*
		 * if tnl_info is not queued, something went wrong;
		 * just remove that tnl_info from the queue.
		 * Maybe trigger BUG_ON()?
		 */
		if (!tnl_info_sta_is_queued(tnl_info)) {
			list_del_init(&tnl_info->sync_node);
			goto next;
		}

		is_decap = (!(tnl_info->tnl_params.flag & TNL_DECAP_ENABLE)
			    && tnl_info_decap_is_enable(tnl_info));

		tnl_status = tnl_info->status;
		memcpy(&tnl_info->tnl_params, &tnl_info->cache,
		       sizeof(struct tops_tnl_params));

		list_del_init(&tnl_info->sync_node);

		/*
		 * mark tnl_info as updating and release its spin lock
		 * since we are going to use DMA to transfer data
		 * and might sleep
		 */
		tnl_info_sta_updating_no_tnl_lock(tnl_info);

		spin_unlock_irqrestore(&tnl_info->lock, flag);

		if (tnl_status & TNL_STA_INIT)
			ret = mtk_tops_tnl_sync_param_new(tnl_info, is_decap);
		else if (tnl_status & TNL_STA_DELETING)
			ret = mtk_tops_tnl_sync_param_delete(tnl_info);
		else
			ret = mtk_tops_tnl_sync_param_update(tnl_info,
							     is_decap,
							     false);

		if (ret)
			TOPS_ERR("sync tunnel parameter failed: %d\n", ret);

		continue;

next:
		spin_unlock_irqrestore(&tnl_info->lock, flag);
	}
}

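/*
 * Kernel thread that waits until there are queued tunnels and the MCU is
 * alive, then swaps in the pending queue and syncs each tunnel's parameters.
 */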
static int tnl_sync_task(void *data)
{
	while (1) {
		wait_event_interruptible(tops_tnl.tnl_sync_wait,
					 (tops_tnl.has_tnl_to_sync && mtk_tops_mcu_alive())
					 || kthread_should_stop());

		if (kthread_should_stop())
			break;

		mtk_tops_tnl_sync_get_pending_queue();

		mtk_tops_tnl_sync_queue_proc();
	}

	return 0;
}

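/*
 * Walk every PPE table and clear all bound FOE entries referencing this
 * tunnel, then kick the HNAT cache and issue a write barrier so the cleared
 * entries reach the DRAM PPE table.
 */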
static void mtk_tops_tnl_info_flush_ppe(struct tops_tnl_info *tnl_info)
{
	struct foe_entry *entry;
	u32 max_entry;
	u32 ppe_id;
	u32 eidx;

	/* tnl info's lock should be held */
	lockdep_assert_held(&tnl_info->lock);

	/* clear all TOPS related PPE entries */
	for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
		max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
		for (eidx = 0; eidx < max_entry; eidx++) {
			entry = hnat_get_foe_entry(ppe_id, eidx);
			if (IS_ERR(entry))
				break;

			if (!entry_hnat_is_bound(entry))
				continue;

			tnl_flush_ppe_entry(entry, tnl_info->tnl_idx);
		}
	}
	hnat_cache_ebl(1);
	/* make sure all data is written to dram PPE table */
	wmb();
}

void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		spin_lock(&tnl_info->lock);

		if (tnl_info->dev == ndev) {
			mtk_tops_tnl_info_flush_ppe(tnl_info);

			__mtk_tops_tnl_offload_disable(tnl_info);

			spin_unlock(&tnl_info->lock);

			break;
		}

		spin_unlock(&tnl_info->lock);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_flush(void)
{
	struct tops_tnl_info *tnl_info;
	struct foe_entry *entry;
	unsigned long flag;
	u32 max_entry;
	u32 ppe_id;
	u32 eidx;
	u32 bkt;

	/* clear all TOPS related PPE entries */
	for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
		max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
		for (eidx = 0; eidx < max_entry; eidx++) {
			entry = hnat_get_foe_entry(ppe_id, eidx);
			if (IS_ERR(entry))
				break;

			if (!entry_hnat_is_bound(entry))
				continue;

			tnl_flush_ppe_entry(entry, __TOPS_ENTRY_MAX);
		}
	}
	hnat_cache_ebl(1);
	/* make sure all data is written to dram PPE table */
	wmb();

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		/* clear each tunnel's synced parameters, but preserve the cache */
		memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
		/*
		 * reset tnl_info status to TNL_STA_INIT
		 * so that it can be added to TOPS again
		 */
		spin_lock(&tnl_info->lock);

		tnl_info_sta_init_no_tnl_lock(tnl_info);
		list_del_init(&tnl_info->sync_node);

		spin_unlock(&tnl_info->lock);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_recover(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist)
		mtk_tops_tnl_info_submit(tnl_info);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

int mtk_tops_tnl_offload_init(struct platform_device *pdev)
{
	struct tops_tnl_info *tnl_info;
	int ret = 0;
	int i = 0;

	hash_init(tops_tnl.ht);

	tops_tnl.tnl_infos = devm_kzalloc(&pdev->dev,
					  sizeof(struct tops_tnl_info) * CONFIG_TOPS_TNL_NUM,
					  GFP_KERNEL);
	if (!tops_tnl.tnl_infos)
		return -ENOMEM;

	for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
		tnl_info = &tops_tnl.tnl_infos[i];
		tnl_info->tnl_idx = i;
		tnl_info->status = TNL_STA_UNINIT;
		INIT_HLIST_NODE(&tnl_info->hlist);
		INIT_LIST_HEAD(&tnl_info->sync_node);
		spin_lock_init(&tnl_info->lock);
	}

	ret = register_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
	if (ret) {
		TOPS_ERR("tnl offload recv dev register failed: %d\n",
			 ret);
		return ret;
	}

	init_completion(&tops_tnl.dma_done);
	init_waitqueue_head(&tops_tnl.tnl_sync_wait);

	tops_tnl.tnl_sync_thread = kthread_run(tnl_sync_task, NULL,
					       "tnl sync param task");
	if (IS_ERR(tops_tnl.tnl_sync_thread)) {
		TOPS_ERR("tnl sync thread create failed\n");
		ret = -ENOMEM;
		goto unregister_mbox;
	}

	mtk_tnl_encap_offload = mtk_tops_tnl_encap_offload;
	mtk_tnl_decap_offload = mtk_tops_tnl_decap_offload;
	mtk_tnl_decap_offloadable = mtk_tops_tnl_decap_offloadable;
	mtk_get_tnl_dev = mtk_tops_get_tnl_dev;

	tops_tnl.tnl_sync_submit = &tnl_sync_q1;
	tops_tnl.tnl_sync_pending = &tnl_sync_q2;
	spin_lock_init(&tops_tnl.tnl_sync_lock);
	spin_lock_init(&tops_tnl.tbl_lock);

	return 0;

unregister_mbox:
	unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);

	return ret;
}

void mtk_tops_tnl_offload_pce_clean_up(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		mtk_tops_tnl_info_flush_ppe(tnl_info);

		mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_deinit(struct platform_device *pdev)
{
	mtk_tnl_encap_offload = NULL;
	mtk_tnl_decap_offload = NULL;
	mtk_tnl_decap_offloadable = NULL;
	mtk_get_tnl_dev = NULL;

	kthread_stop(tops_tnl.tnl_sync_thread);

	mtk_tops_tnl_offload_pce_clean_up();

	unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
}

int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev)
{
	mtk_tops_gretap_init();

	mtk_tops_udp_l2tp_data_init();

	return 0;
}

void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev)
{
	mtk_tops_gretap_deinit();

	mtk_tops_udp_l2tp_data_deinit();
}

struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name)
{
	enum tops_entry_type tops_entry = TOPS_ENTRY_NONE + 1;
	struct tops_tnl_type *tnl_type;

	if (unlikely(!name))
		return ERR_PTR(-EPERM);

	for (; tops_entry < __TOPS_ENTRY_MAX; tops_entry++) {
		tnl_type = tops_tnl.offload_tnl_types[tops_entry];
		if (tnl_type && !strcmp(name, tnl_type->type_name))
			return tnl_type;
	}

	return ERR_PTR(-ENODEV);
}

int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type)
{
	enum tops_entry_type tops_entry;

	if (unlikely(!tnl_type))
		return -EINVAL;

	tops_entry = tnl_type->tops_entry;
	if (unlikely(tops_entry == TOPS_ENTRY_NONE
		     || tops_entry >= __TOPS_ENTRY_MAX)) {
		TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
		return -EINVAL;
	}

	if (tops_tnl.offload_tnl_types[tops_entry]) {
		TOPS_ERR("offload tnl type is already registered: %u\n", tops_entry);
		return -EBUSY;
	}

	tops_tnl.offload_tnl_types[tops_entry] = tnl_type;
	tops_tnl.offload_tnl_type_num++;

	return 0;
}

void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type)
{
	enum tops_entry_type tops_entry;

	if (unlikely(!tnl_type))
		return;

	tops_entry = tnl_type->tops_entry;
	if (unlikely(tops_entry == TOPS_ENTRY_NONE
		     || tops_entry >= __TOPS_ENTRY_MAX)) {
		TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
		return;
	}

	if (tops_tnl.offload_tnl_types[tops_entry] != tnl_type) {
		TOPS_ERR("offload tnl type is registered by others\n");
		return;
	}

	tops_tnl.offload_tnl_types[tops_entry] = NULL;
	tops_tnl.offload_tnl_type_num--;
}