// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/string.h>

#include <mtk_eth_soc.h>
#include <mtk_hnat/hnat.h>
#include <mtk_hnat/nf_hnat_mtk.h>

#include <pce/dipfilter.h>
#include <pce/pce.h>

#include "internal.h"
#include "mbox.h"
#include "mcu.h"
#include "netsys.h"
#include "protocol/gre/gretap.h"
#include "protocol/l2tp/udp_l2tp_data.h"
#include "tunnel.h"

#define TOPS_PPE_ENTRY_BUCKETS		(64)
#define TOPS_PPE_ENTRY_BUCKETS_BIT	(6)

struct tops_tnl {
	/* tunnel types */
	struct tops_tnl_type *offload_tnl_types[__TOPS_ENTRY_MAX];
	u32 offload_tnl_type_num;
	u32 tnl_base_addr;

	/* tunnel table */
	DECLARE_HASHTABLE(ht, CONFIG_TOPS_TNL_MAP_BIT);
	DECLARE_BITMAP(tnl_used, CONFIG_TOPS_TNL_NUM);
	wait_queue_head_t tnl_sync_wait;
	spinlock_t tnl_sync_lock;
	spinlock_t tbl_lock;
	bool has_tnl_to_sync;
	struct task_struct *tnl_sync_thread;
	struct list_head *tnl_sync_pending;
	struct list_head *tnl_sync_submit;
	struct tops_tnl_info *tnl_infos;

	/* dma request */
	struct completion dma_done;
	struct dma_chan *dmachan;

	struct device *dev;
};

static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
						   struct mailbox_msg *msg);

static struct tops_tnl tops_tnl;

static LIST_HEAD(tnl_sync_q1);
static LIST_HEAD(tnl_sync_q2);

struct mailbox_dev tnl_offload_mbox_recv =
	MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, tnl_offload_mbox_cmd_recv);

/* tunnel mailbox communication */
static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
						   struct mailbox_msg *msg)
{
	switch (msg->msg1) {
	case TOPS_TNL_START_ADDR_SYNC:
		tops_tnl.tnl_base_addr = msg->msg2;

		return MBOX_NO_RET_MSG;
	default:
		break;
	}

	return MBOX_NO_RET_MSG;
}

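/*
 * Clear a PPE FOE entry that is bound to a TOPS tunnel. Only IPv4 HNAPT
 * entries whose tport is one of the TDMA ports are considered; passing
 * tnl_idx == __TOPS_ENTRY_MAX flushes entries of every tunnel.
 */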
static inline void tnl_flush_ppe_entry(struct foe_entry *entry, u32 tnl_idx)
{
	u32 bind_tnl_idx;

	if (unlikely(!entry))
		return;

	switch (entry->bfib1.pkt_type) {
	case IPV4_HNAPT:
		if (entry->ipv4_hnapt.tport_id != NR_TDMA_TPORT
		    && entry->ipv4_hnapt.tport_id != NR_TDMA_QDMA_TPORT)
			return;

		bind_tnl_idx = entry->ipv4_hnapt.tops_entry - __TOPS_ENTRY_MAX;

		break;
	default:
		return;
	}

	/* unexpected tunnel index */
	if (bind_tnl_idx >= __TOPS_ENTRY_MAX)
		return;

	if (tnl_idx == __TOPS_ENTRY_MAX || tnl_idx == bind_tnl_idx)
		memset(entry, 0, sizeof(*entry));
}

static inline void skb_set_tops_tnl_idx(struct sk_buff *skb, u32 tnl_idx)
{
	skb_hnat_tops(skb) = tnl_idx + __TOPS_ENTRY_MAX;
}

static inline bool skb_tops_valid(struct sk_buff *skb)
{
	return (skb && skb_hnat_tops(skb) <= __TOPS_ENTRY_MAX);
}

static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
{
	enum tops_entry_type tops_entry = skb_hnat_tops(skb);
	struct tops_tnl_type *tnl_type;

	if (unlikely(!tops_entry || tops_entry >= __TOPS_ENTRY_MAX))
		return ERR_PTR(-EINVAL);

	tnl_type = tops_tnl.offload_tnl_types[tops_entry];

	return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
}

static inline void skb_mark_unbind(struct sk_buff *skb)
{
	skb_hnat_tops(skb) = 0;
	skb_hnat_is_decap(skb) = 0;
	skb_hnat_alg(skb) = 1;
}

static inline u32 tnl_params_hash(struct tops_tnl_params *tnl_params)
{
	if (!tnl_params)
		return 0;

	/* TODO: check collision possibility? */
	return (tnl_params->sip ^ tnl_params->dip);
}

static inline bool tnl_info_decap_is_enable(struct tops_tnl_info *tnl_info)
{
	return tnl_info->cache.flag & TNL_DECAP_ENABLE;
}

static inline void tnl_info_decap_enable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag |= TNL_DECAP_ENABLE;
}

static inline void tnl_info_decap_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag &= ~(TNL_DECAP_ENABLE);
}

static inline bool tnl_info_encap_is_enable(struct tops_tnl_info *tnl_info)
{
	return tnl_info->cache.flag & TNL_ENCAP_ENABLE;
}

static inline void tnl_info_encap_enable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag |= TNL_ENCAP_ENABLE;
}

static inline void tnl_info_encap_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->cache.flag &= ~(TNL_ENCAP_ENABLE);
}

static inline void tnl_info_sta_updated_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status &= (~TNL_STA_UPDATING);
	tnl_info->status &= (~TNL_STA_INIT);
	tnl_info->status |= TNL_STA_UPDATED;
}

static inline void tnl_info_sta_updated(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_updated_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_updated(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UPDATED;
}

static inline void tnl_info_sta_updating_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_UPDATING;
	tnl_info->status &= (~TNL_STA_QUEUED);
	tnl_info->status &= (~TNL_STA_UPDATED);
}

static inline void tnl_info_sta_updating(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_updating_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_updating(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UPDATING;
}

static inline void tnl_info_sta_queued_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_QUEUED;
	tnl_info->status &= (~TNL_STA_UPDATED);
}

static inline void tnl_info_sta_queued(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_queued_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_queued(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_QUEUED;
}

static inline void tnl_info_sta_init_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status = TNL_STA_INIT;
}

static inline void tnl_info_sta_init(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_init_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_init(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_INIT;
}

static inline void tnl_info_sta_uninit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	tnl_info->status = TNL_STA_UNINIT;
}

static inline void tnl_info_sta_uninit(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	tnl_info_sta_uninit_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static inline bool tnl_info_sta_is_uninit(struct tops_tnl_info *tnl_info)
{
	return tnl_info->status & TNL_STA_UNINIT;
}

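/*
 * Queue a tunnel for parameter synchronization: add it to the submit list
 * under tnl_sync_lock and, if the MCU is up, wake the tnl sync kthread.
 */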
static inline void tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);

	list_add_tail(&tnl_info->sync_node, tops_tnl.tnl_sync_submit);

	tops_tnl.has_tnl_to_sync = true;

	spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);

	if (mtk_tops_mcu_alive())
		wake_up_interruptible(&tops_tnl.tnl_sync_wait);
}

static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
{
	struct dip_desc dipd;

	memset(&dipd, 0, sizeof(struct dip_desc));

	dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
	dipd.tag = DIPFILTER_IPV4;

	return mtk_pce_dipfilter_entry_del(&dipd);
}

static int mtk_tops_tnl_info_dipfilter_setup(struct tops_tnl_info *tnl_info)
{
	struct dip_desc dipd;

	/* setup dipfilter */
	memset(&dipd, 0, sizeof(struct dip_desc));

	dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
	dipd.tag = DIPFILTER_IPV4;

	return mtk_pce_dipfilter_entry_add(&dipd);
}

void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
{
	lockdep_assert_held(&tnl_info->lock);

	if (tnl_info_sta_is_queued(tnl_info))
		return;

	tnl_info_submit_no_tnl_lock(tnl_info);

	tnl_info_sta_queued_no_tnl_lock(tnl_info);
}

void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tnl_info->lock, flag);

	mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);

	spin_unlock_irqrestore(&tnl_info->lock, flag);
}

static void mtk_tops_tnl_info_hash_no_lock(struct tops_tnl_info *tnl_info)
{
	lockdep_assert_held(&tops_tnl.tbl_lock);
	lockdep_assert_held(&tnl_info->lock);

	if (hash_hashed(&tnl_info->hlist))
		hash_del(&tnl_info->hlist);

	hash_add(tops_tnl.ht, &tnl_info->hlist, tnl_params_hash(&tnl_info->cache));
}

void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	if (unlikely(!tnl_info))
		return;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	spin_lock(&tnl_info->lock);

	mtk_tops_tnl_info_hash_no_lock(tnl_info);

	spin_unlock(&tnl_info->lock);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

static bool mtk_tops_tnl_info_match(struct tops_tnl_type *tnl_type,
				    struct tops_tnl_info *tnl_info,
				    struct tops_tnl_params *match_data)
{
	unsigned long flag = 0;
	bool match;

	spin_lock_irqsave(&tnl_info->lock, flag);

	match = tnl_type->tnl_info_match(&tnl_info->cache, match_data);

	spin_unlock_irqrestore(&tnl_info->lock, flag);

	return match;
}

struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_params *tnl_params)
{
	struct tops_tnl_info *tnl_info;
	struct tops_tnl_type *tnl_type;

	lockdep_assert_held(&tops_tnl.tbl_lock);

	if (unlikely(!tnl_params->tops_entry_proto
		     || tnl_params->tops_entry_proto >= __TOPS_ENTRY_MAX))
		return ERR_PTR(-EINVAL);

	tnl_type = tops_tnl.offload_tnl_types[tnl_params->tops_entry_proto];
	if (unlikely(!tnl_type))
		return ERR_PTR(-EINVAL);

	if (unlikely(!tnl_type->tnl_info_match))
		return ERR_PTR(-ENXIO);

	hash_for_each_possible(tops_tnl.ht,
			       tnl_info,
			       hlist,
			       tnl_params_hash(tnl_params))
		if (mtk_tops_tnl_info_match(tnl_type, tnl_info, tnl_params))
			return tnl_info;

	return ERR_PTR(-ENODEV);
}

/* tnl_info->lock should be held before calling this function */
static int mtk_tops_tnl_info_setup(struct sk_buff *skb,
				   struct tops_tnl_info *tnl_info,
				   struct tops_tnl_params *tnl_params)
{
	if (unlikely(!skb || !tnl_info || !tnl_params))
		return -EPERM;

	lockdep_assert_held(&tnl_info->lock);

	tnl_params->flag |= tnl_info->cache.flag;

	if (memcmp(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params))) {
		memcpy(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params));

		mtk_tops_tnl_info_hash_no_lock(tnl_info);
	}

	if (skb_hnat_is_decap(skb)) {
		/* the net_device is used to forward pkt to decap'ed inf when Rx */
		tnl_info->dev = skb->dev;
		if (!tnl_info_decap_is_enable(tnl_info)) {
			tnl_info_decap_enable(tnl_info);

			mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
		}
	} else if (skb_hnat_is_encap(skb)) {
		/* set skb_hnat_tops(skb) to tunnel index for ppe binding */
		skb_set_tops_tnl_idx(skb, tnl_info->tnl_idx);
		if (!tnl_info_encap_is_enable(tnl_info)) {
			tnl_info_encap_enable(tnl_info);

			mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
		}
	}

	return 0;
}

/* tops_tnl.tbl_lock should be acquired before calling this function */
static struct tops_tnl_info *mtk_tops_tnl_info_alloc_no_lock(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag = 0;
	u32 tnl_idx;

	lockdep_assert_held(&tops_tnl.tbl_lock);

	tnl_idx = find_first_zero_bit(tops_tnl.tnl_used, CONFIG_TOPS_TNL_NUM);
	if (tnl_idx == CONFIG_TOPS_TNL_NUM) {
		TOPS_NOTICE("offload tunnel table full!\n");
		return ERR_PTR(-ENOMEM);
	}

	/* occupy used tunnel */
	tnl_info = &tops_tnl.tnl_infos[tnl_idx];
	memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
	memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));

	/* TODO: maybe spin_lock_bh() is enough? */
	spin_lock_irqsave(&tnl_info->lock, flag);

	if (tnl_info_sta_is_init(tnl_info)) {
		TOPS_ERR("error: fetched an initialized tunnel info\n");

		spin_unlock_irqrestore(&tnl_info->lock, flag);

		return ERR_PTR(-EBADF);
	}
	tnl_info_sta_init_no_tnl_lock(tnl_info);

	INIT_HLIST_NODE(&tnl_info->hlist);

	spin_unlock_irqrestore(&tnl_info->lock, flag);

	set_bit(tnl_idx, tops_tnl.tnl_used);

	return tnl_info;
}

struct tops_tnl_info *mtk_tops_tnl_info_alloc(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	tnl_info = mtk_tops_tnl_info_alloc_no_lock();

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);

	return tnl_info;
}

static void mtk_tops_tnl_info_free_no_lock(struct tops_tnl_info *tnl_info)
{
	if (unlikely(!tnl_info))
		return;

	lockdep_assert_held(&tops_tnl.tbl_lock);
	lockdep_assert_held(&tnl_info->lock);

	hash_del(&tnl_info->hlist);

	tnl_info_sta_uninit_no_tnl_lock(tnl_info);

	clear_bit(tnl_info->tnl_idx, tops_tnl.tnl_used);
}

static void mtk_tops_tnl_info_free(struct tops_tnl_info *tnl_info)
{
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	spin_lock(&tnl_info->lock);

	mtk_tops_tnl_info_free_no_lock(tnl_info);

	spin_unlock(&tnl_info->lock);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

static void __mtk_tops_tnl_offload_disable(struct tops_tnl_info *tnl_info)
{
	tnl_info->status |= TNL_STA_DELETING;
	mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
}

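/*
 * Look up (or allocate) the tops_tnl_info matching the parsed tunnel
 * parameters under tbl_lock, then refresh its cached parameters and
 * encap/decap state from this skb.
 */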
static int mtk_tops_tnl_offload(struct sk_buff *skb,
				struct tops_tnl_params *tnl_params)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	int ret = 0;

	if (unlikely(!tnl_params))
		return -EPERM;

	/* prepare tnl_info */
	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	tnl_info = mtk_tops_tnl_info_find(tnl_params);
	if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) != -ENODEV) {
		/* error */
		ret = PTR_ERR(tnl_info);
		goto err_out;
	} else if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) == -ENODEV) {
		/* not allocated yet */
		tnl_info = mtk_tops_tnl_info_alloc_no_lock();
	}

	if (IS_ERR(tnl_info)) {
		ret = PTR_ERR(tnl_info);
		TOPS_DBG("tnl offload alloc tnl_info failed: %d\n", ret);
		goto err_out;
	}

	spin_lock(&tnl_info->lock);
	ret = mtk_tops_tnl_info_setup(skb, tnl_info, tnl_params);

	spin_unlock(&tnl_info->lock);

err_out:
	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);

	return ret;
}

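/*
 * Ask every registered tunnel type whether this received packet belongs to
 * one of its tunnels. On a match, record the matching tops_entry in the skb
 * so the decap offload path can pick it up later.
 */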
static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
{
	struct tops_tnl_type *tnl_type;
	struct ethhdr *eth;
	u32 cnt;
	u32 i;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		/* bool return: -EAGAIN would read as true, so report not offloadable */
		return false;
	}

	/* skb should not carry tops here */
	if (skb_hnat_tops(skb))
		return false;

	eth = eth_hdr(skb);

	/* TODO: currently decap only supports ethernet IPv4 */
	if (ntohs(eth->h_proto) != ETH_P_IP)
		return false;

	/* TODO: may be optimized */
	for (i = TOPS_ENTRY_GRETAP, cnt = 0;
	     i < __TOPS_ENTRY_MAX && cnt < tops_tnl.offload_tnl_type_num;
	     i++) {
		tnl_type = tops_tnl.offload_tnl_types[i];
		if (unlikely(!tnl_type))
			continue;

		cnt++;
		if (tnl_type->tnl_decap_offloadable
		    && tnl_type->tnl_decap_offloadable(skb)) {
			skb_hnat_tops(skb) = tnl_type->tops_entry;
			return true;
		}
	}

	return false;
}

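/*
 * Decapsulation offload entry point exposed through the mtk_tnl_decap_offload
 * hook: parse the outer tunnel header into tops_tnl_params and hand it to
 * mtk_tops_tnl_offload(). skb_hnat_tops() is always cleared afterwards.
 */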
static int mtk_tops_tnl_decap_offload(struct sk_buff *skb)
{
	struct tops_tnl_params tnl_params;
	struct tops_tnl_type *tnl_type;
	int ret;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return -EAGAIN;
	}

	if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_decap(skb))) {
		skb_mark_unbind(skb);
		return -EINVAL;
	}

	tnl_type = skb_to_tnl_type(skb);
	if (IS_ERR(tnl_type)) {
		skb_mark_unbind(skb);
		return PTR_ERR(tnl_type);
	}

	if (unlikely(!tnl_type->tnl_decap_param_setup)) {
		skb_mark_unbind(skb);
		return -ENODEV;
	}

	memset(&tnl_params, 0, sizeof(struct tops_tnl_params));

	/* push removed ethernet header back first */
	if (tnl_type->has_inner_eth)
		skb_push(skb, sizeof(struct ethhdr));

	ret = tnl_type->tnl_decap_param_setup(skb, &tnl_params);

	/* pull ethernet header to restore skb->data to ip start */
	if (tnl_type->has_inner_eth)
		skb_pull(skb, sizeof(struct ethhdr));

	if (unlikely(ret)) {
		skb_mark_unbind(skb);
		return ret;
	}

	tnl_params.tops_entry_proto = tnl_type->tops_entry;

	ret = mtk_tops_tnl_offload(skb, &tnl_params);

	/*
	 * whether offloading the decapsulation tunnel succeeds or fails,
	 * skb_hnat_tops(skb) must be cleared to avoid mtk_tnl_decap_offload()
	 * getting called again
	 */
	skb_hnat_tops(skb) = 0;
	skb_hnat_is_decap(skb) = 0;

	return ret;
}

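/*
 * Encapsulation offload entry point exposed through the mtk_tnl_encap_offload
 * hook: build tops_tnl_params from the skb and pass them to
 * mtk_tops_tnl_offload().
 */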
static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
{
	struct tops_tnl_params tnl_params;
	struct tops_tnl_type *tnl_type;
	int ret;

	if (unlikely(!mtk_tops_mcu_alive())) {
		skb_mark_unbind(skb);
		return -EAGAIN;
	}

	if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_encap(skb)))
		return -EPERM;

	tnl_type = skb_to_tnl_type(skb);
	if (IS_ERR(tnl_type))
		return PTR_ERR(tnl_type);

	if (unlikely(!tnl_type->tnl_encap_param_setup))
		return -ENODEV;

	memset(&tnl_params, 0, sizeof(struct tops_tnl_params));

	ret = tnl_type->tnl_encap_param_setup(skb, &tnl_params);
	if (unlikely(ret))
		return ret;
	tnl_params.tops_entry_proto = tnl_type->tops_entry;

	return mtk_tops_tnl_offload(skb, &tnl_params);
}

static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
{
	if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
		return ERR_PTR(-EINVAL);

	tnl_idx = tnl_idx - TOPS_CRSN_TNL_ID_START;

	return tops_tnl.tnl_infos[tnl_idx].dev;
}

static void mtk_tops_tnl_sync_dma_done(void *param)
{
	/* TODO: check tx status with dmaengine_tx_status()? */
	complete(&tops_tnl.dma_done);
}

static void mtk_tops_tnl_sync_dma_start(void *param)
{
	dma_async_issue_pending(tops_tnl.dmachan);

	wait_for_completion(&tops_tnl.dma_done);
}

static void mtk_tops_tnl_sync_dma_unprepare(struct tops_tnl_info *tnl_info,
					    dma_addr_t *addr)
{
	dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
			 DMA_TO_DEVICE);

	dma_release_channel(tops_tnl.dmachan);
}

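/*
 * Set up a dmaengine memcpy from tnl_info->tnl_params to this tunnel's slot
 * in the tunnel table whose base address was synced from the MCU
 * (tnl_base_addr + tnl_idx * sizeof(struct tops_tnl_params)). The transfer
 * itself is kicked off later by mtk_tops_tnl_sync_dma_start().
 */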
static int mtk_tops_tnl_sync_dma_prepare(struct tops_tnl_info *tnl_info,
					 dma_addr_t *addr)
{
	u32 tnl_addr = tops_tnl.tnl_base_addr;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	if (!tnl_info)
		return -EPERM;

	tnl_addr += tnl_info->tnl_idx * sizeof(struct tops_tnl_params);

	tops_tnl.dmachan = dma_request_slave_channel(tops_dev, "tnl-sync");
	if (!tops_tnl.dmachan) {
		TOPS_ERR("request dma channel failed\n");
		return -ENODEV;
	}

	*addr = dma_map_single(tops_dev,
			       &tnl_info->tnl_params,
			       sizeof(struct tops_tnl_params),
			       DMA_TO_DEVICE);
	if (dma_mapping_error(tops_dev, *addr)) {
		ret = -ENOMEM;
		goto dma_release;
	}

	desc = dmaengine_prep_dma_memcpy(tops_tnl.dmachan,
					 (dma_addr_t)tnl_addr, *addr,
					 sizeof(struct tops_tnl_params),
					 0);
	if (!desc) {
		ret = -EBUSY;
		goto dma_unmap;
	}

	desc->callback = mtk_tops_tnl_sync_dma_done;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		goto dma_terminate;

	reinit_completion(&tops_tnl.dma_done);

	return ret;

dma_terminate:
	dmaengine_terminate_all(tops_tnl.dmachan);

dma_unmap:
	dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
			 DMA_TO_DEVICE);

dma_release:
	dma_release_channel(tops_tnl.dmachan);

	return ret;
}

static int __mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
{
	struct mcu_ctrl_cmd mcmd;
	dma_addr_t addr;
	int ret;

	mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
	mcmd.arg[0] = TUNNEL_CTRL_EVENT_DEL;
	mcmd.arg[1] = tnl_info->tnl_idx;
	mcmd.core_mask = CORE_TOPS_MASK;

	ret = mtk_tops_mcu_stall(&mcmd, NULL, NULL);
	if (ret) {
		TOPS_ERR("tnl sync deletion notify mcu failed: %d\n", ret);
		return ret;
	}

	/* there shouldn't be any other reference to tnl_info right now */
	memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
	memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));

	ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
	if (ret) {
		TOPS_ERR("tnl sync deletion prepare dma request failed: %d\n", ret);
		return ret;
	}

	mtk_tops_tnl_sync_dma_start(NULL);

	mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);

	return ret;
}

static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
{
	int ret;

	ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
	if (ret) {
		TOPS_ERR("tnl sync dipfilter tear down failed: %d\n",
			 ret);
		return ret;
	}

	ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
	if (ret) {
		TOPS_ERR("tnl sync deletion failed: %d\n", ret);
		return ret;
	}

	mtk_tops_tnl_info_free(tnl_info);

	return ret;
}

static int __mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
					    bool is_new_tnl)
{
	struct mcu_ctrl_cmd mcmd;
	dma_addr_t addr;
	int ret;

	mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
	mcmd.arg[1] = tnl_info->tnl_idx;
	mcmd.core_mask = CORE_TOPS_MASK;

	if (is_new_tnl)
		mcmd.arg[0] = TUNNEL_CTRL_EVENT_NEW;
	else
		mcmd.arg[0] = TUNNEL_CTRL_EVENT_DIP_UPDATE;

	ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
	if (ret) {
		TOPS_ERR("tnl sync update prepare dma request failed: %d\n", ret);
		return ret;
	}

	ret = mtk_tops_mcu_stall(&mcmd, mtk_tops_tnl_sync_dma_start, NULL);
	if (ret)
		TOPS_ERR("tnl sync update notify mcu failed: %d\n", ret);

	mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);

	return ret;
}

static int mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
					  bool setup_pce, bool is_new_tnl)
{
	int ret;

	ret = __mtk_tops_tnl_sync_param_update(tnl_info, is_new_tnl);
	if (ret) {
		TOPS_ERR("tnl sync failed: %d\n", ret);
		return ret;
	}

	tnl_info_sta_updated(tnl_info);

	if (setup_pce) {
		ret = mtk_tops_tnl_info_dipfilter_setup(tnl_info);
		if (ret) {
			TOPS_ERR("tnl dipfilter setup failed: %d\n", ret);
			/* TODO: should undo parameter sync */
			return ret;
		}
	}

	return ret;
}

static inline int mtk_tops_tnl_sync_param_new(struct tops_tnl_info *tnl_info,
					      bool setup_pce)
{
	return mtk_tops_tnl_sync_param_update(tnl_info, setup_pce, true);
}

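/*
 * Swap the submit and pending queue heads under tnl_sync_lock so the sync
 * kthread can walk a stable pending list while new requests keep
 * accumulating on the other queue.
 */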
static void mtk_tops_tnl_sync_get_pending_queue(void)
{
	struct list_head *tmp = tops_tnl.tnl_sync_submit;
	unsigned long flag = 0;

	spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);

	tops_tnl.tnl_sync_submit = tops_tnl.tnl_sync_pending;
	tops_tnl.tnl_sync_pending = tmp;

	tops_tnl.has_tnl_to_sync = false;

	spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
}

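/*
 * Walk the pending queue and push each tunnel's cached parameters to the
 * offload engine: new tunnels, updates and deletions are dispatched
 * according to the status bits captured under the tunnel's lock.
 */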
static void mtk_tops_tnl_sync_queue_proc(void)
{
	struct tops_tnl_info *tnl_info;
	struct tops_tnl_info *tmp;
	unsigned long flag = 0;
	bool is_decap = false;
	u32 tnl_status = 0;
	int ret;

	list_for_each_entry_safe(tnl_info,
				 tmp,
				 tops_tnl.tnl_sync_pending,
				 sync_node) {
		spin_lock_irqsave(&tnl_info->lock, flag);

		/* tnl update is on the fly, queue tnl to next round */
		if (tnl_info_sta_is_updating(tnl_info)) {
			list_del_init(&tnl_info->sync_node);

			tnl_info_submit_no_tnl_lock(tnl_info);

			goto next;
		}

		/*
		 * if tnl_info is not queued, something is wrong;
		 * just remove that tnl_info from the queue
		 * maybe trigger BUG_ON()?
		 */
		if (!tnl_info_sta_is_queued(tnl_info)) {
			list_del_init(&tnl_info->sync_node);
			goto next;
		}

		is_decap = (!(tnl_info->tnl_params.flag & TNL_DECAP_ENABLE)
			    && tnl_info_decap_is_enable(tnl_info));

		tnl_status = tnl_info->status;
		memcpy(&tnl_info->tnl_params, &tnl_info->cache,
		       sizeof(struct tops_tnl_params));

		list_del_init(&tnl_info->sync_node);

		/*
		 * mark tnl info as updating and release tnl info's spin lock
		 * since it is going to use dma to transfer data
		 * and might go to sleep
		 */
		tnl_info_sta_updating_no_tnl_lock(tnl_info);

		spin_unlock_irqrestore(&tnl_info->lock, flag);

		if (tnl_status & TNL_STA_INIT)
			ret = mtk_tops_tnl_sync_param_new(tnl_info, is_decap);
		else if (tnl_status & TNL_STA_DELETING)
			ret = mtk_tops_tnl_sync_param_delete(tnl_info);
		else
			ret = mtk_tops_tnl_sync_param_update(tnl_info,
							     is_decap,
							     false);

		if (ret)
			TOPS_ERR("sync tunnel parameter failed: %d\n", ret);

		continue;

next:
		spin_unlock_irqrestore(&tnl_info->lock, flag);
	}
}

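/*
 * Sync kthread body: sleep until there is something to sync and the MCU is
 * alive, then swap the queues and process the pending list.
 */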
static int tnl_sync_task(void *data)
{
	while (1) {
		wait_event_interruptible(tops_tnl.tnl_sync_wait,
					 (tops_tnl.has_tnl_to_sync && mtk_tops_mcu_alive())
					 || kthread_should_stop());

		if (kthread_should_stop())
			break;

		mtk_tops_tnl_sync_get_pending_queue();

		mtk_tops_tnl_sync_queue_proc();
	}

	return 0;
}

static void mtk_tops_tnl_info_flush_ppe(struct tops_tnl_info *tnl_info)
{
	struct foe_entry *entry;
	u32 max_entry;
	u32 ppe_id;
	u32 eidx;

	/* tnl info's lock should be held */
	lockdep_assert_held(&tnl_info->lock);

	/* clear all TOPS related PPE entries */
	for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
		max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
		for (eidx = 0; eidx < max_entry; eidx++) {
			entry = hnat_get_foe_entry(ppe_id, eidx);
			if (IS_ERR(entry))
				break;

			if (!entry_hnat_is_bound(entry))
				continue;

			tnl_flush_ppe_entry(entry, tnl_info->tnl_idx);
		}
	}
	hnat_cache_ebl(1);
	/* make sure all data is written to dram PPE table */
	wmb();
}

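/*
 * Called when a tunnel net_device goes down: flush its bound PPE entries and
 * queue the corresponding tunnel for deletion by the sync kthread.
 */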
void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		spin_lock(&tnl_info->lock);

		if (tnl_info->dev == ndev) {
			mtk_tops_tnl_info_flush_ppe(tnl_info);

			__mtk_tops_tnl_offload_disable(tnl_info);

			spin_unlock(&tnl_info->lock);

			break;
		}

		spin_unlock(&tnl_info->lock);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_flush(void)
{
	struct tops_tnl_info *tnl_info;
	struct foe_entry *entry;
	unsigned long flag;
	u32 max_entry;
	u32 ppe_id;
	u32 eidx;
	u32 bkt;

	/* clear all TOPS related PPE entries */
	for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
		max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
		for (eidx = 0; eidx < max_entry; eidx++) {
			entry = hnat_get_foe_entry(ppe_id, eidx);
			if (IS_ERR(entry))
				break;

			if (!entry_hnat_is_bound(entry))
				continue;

			tnl_flush_ppe_entry(entry, __TOPS_ENTRY_MAX);
		}
	}
	hnat_cache_ebl(1);
	/* make sure all data is written to dram PPE table */
	wmb();

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		/* clear all tunnel's synced parameters, but preserve cache */
		memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
		/*
		 * reset tnl_info status to the TNL_STA_INIT state
		 * so that it can be added to TOPS again
		 */
		spin_lock(&tnl_info->lock);

		tnl_info_sta_init_no_tnl_lock(tnl_info);
		list_del_init(&tnl_info->sync_node);

		spin_unlock(&tnl_info->lock);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_recover(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist)
		mtk_tops_tnl_info_submit(tnl_info);

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

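/*
 * Module init: allocate the per-tunnel info table, register the mailbox
 * receiver, start the sync kthread and install the HNAT offload hooks.
 */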
int mtk_tops_tnl_offload_init(struct platform_device *pdev)
{
	struct tops_tnl_info *tnl_info;
	int ret = 0;
	int i = 0;

	hash_init(tops_tnl.ht);

	tops_tnl.tnl_infos = devm_kzalloc(&pdev->dev,
					  sizeof(struct tops_tnl_info) * CONFIG_TOPS_TNL_NUM,
					  GFP_KERNEL);
	if (!tops_tnl.tnl_infos)
		return -ENOMEM;

	for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
		tnl_info = &tops_tnl.tnl_infos[i];
		tnl_info->tnl_idx = i;
		tnl_info->status = TNL_STA_UNINIT;
		INIT_HLIST_NODE(&tnl_info->hlist);
		INIT_LIST_HEAD(&tnl_info->sync_node);
		spin_lock_init(&tnl_info->lock);
	}

	ret = register_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
	if (ret) {
		TOPS_ERR("tnl offload recv dev register failed: %d\n",
			 ret);
		return ret;
	}

	init_completion(&tops_tnl.dma_done);
	init_waitqueue_head(&tops_tnl.tnl_sync_wait);

	tops_tnl.tnl_sync_thread = kthread_run(tnl_sync_task, NULL,
					       "tnl sync param task");
	if (IS_ERR(tops_tnl.tnl_sync_thread)) {
		TOPS_ERR("tnl sync thread create failed\n");
		ret = -ENOMEM;
		goto unregister_mbox;
	}

	mtk_tnl_encap_offload = mtk_tops_tnl_encap_offload;
	mtk_tnl_decap_offload = mtk_tops_tnl_decap_offload;
	mtk_tnl_decap_offloadable = mtk_tops_tnl_decap_offloadable;
	mtk_get_tnl_dev = mtk_tops_get_tnl_dev;

	tops_tnl.tnl_sync_submit = &tnl_sync_q1;
	tops_tnl.tnl_sync_pending = &tnl_sync_q2;
	spin_lock_init(&tops_tnl.tnl_sync_lock);
	spin_lock_init(&tops_tnl.tbl_lock);

	return 0;

unregister_mbox:
	unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);

	return ret;
}

void mtk_tops_tnl_offload_pce_clean_up(void)
{
	struct tops_tnl_info *tnl_info;
	unsigned long flag;
	u32 bkt;

	spin_lock_irqsave(&tops_tnl.tbl_lock, flag);

	hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
		mtk_tops_tnl_info_flush_ppe(tnl_info);

		mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
	}

	spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
}

void mtk_tops_tnl_offload_deinit(struct platform_device *pdev)
{
	mtk_tnl_encap_offload = NULL;
	mtk_tnl_decap_offload = NULL;
	mtk_tnl_decap_offloadable = NULL;
	mtk_get_tnl_dev = NULL;

	kthread_stop(tops_tnl.tnl_sync_thread);

	mtk_tops_tnl_offload_pce_clean_up();

	unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
}

int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev)
{
	mtk_tops_gretap_init();

	mtk_tops_udp_l2tp_data_init();

	return 0;
}

void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev)
{
	mtk_tops_gretap_deinit();

	mtk_tops_udp_l2tp_data_deinit();
}

struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name)
{
	enum tops_entry_type tops_entry = TOPS_ENTRY_NONE + 1;
	struct tops_tnl_type *tnl_type;

	if (unlikely(!name))
		return ERR_PTR(-EPERM);

	for (; tops_entry < __TOPS_ENTRY_MAX; tops_entry++) {
		tnl_type = tops_tnl.offload_tnl_types[tops_entry];
		if (tnl_type && !strcmp(name, tnl_type->type_name))
			break;
	}

	return tnl_type;
}

int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type)
{
	enum tops_entry_type tops_entry;

	/* validate tnl_type before dereferencing it */
	if (unlikely(!tnl_type))
		return -EINVAL;

	tops_entry = tnl_type->tops_entry;

	if (unlikely(tops_entry == TOPS_ENTRY_NONE
		     || tops_entry >= __TOPS_ENTRY_MAX)) {
		TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
		return -EINVAL;
	}

	if (tops_tnl.offload_tnl_types[tops_entry]) {
		TOPS_ERR("offload tnl type is already registered: %u\n", tops_entry);
		return -EBUSY;
	}

	tops_tnl.offload_tnl_types[tops_entry] = tnl_type;
	tops_tnl.offload_tnl_type_num++;

	return 0;
}

void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type)
{
	enum tops_entry_type tops_entry;

	/* validate tnl_type before dereferencing it */
	if (unlikely(!tnl_type))
		return;

	tops_entry = tnl_type->tops_entry;

	if (unlikely(tops_entry == TOPS_ENTRY_NONE
		     || tops_entry >= __TOPS_ENTRY_MAX)) {
		TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
		return;
	}

	if (tops_tnl.offload_tnl_types[tops_entry] != tnl_type) {
		TOPS_ERR("offload tnl type is registered by others\n");
		return;
	}

	tops_tnl.offload_tnl_types[tops_entry] = NULL;
	tops_tnl.offload_tnl_type_num--;
}