--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -245,6 +245,9 @@ static const char * const mtk_clks_source_name[] = {
	"top_netsys_warp_sel",
};
+struct net_device *(*mtk_get_tnl_dev)(int tnl_idx) = NULL;
+EXPORT_SYMBOL(mtk_get_tnl_dev);
+
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
	__raw_writel(val, eth->base + reg);
@@ -2186,6 +2189,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
	u64 addr64 = 0;
	u8 *data, *new_data;
	struct mtk_rx_dma_v2 *rxd, trxd;
+	int tnl_idx = 0;
	int done = 0;
	if (unlikely(!ring))
@@ -2229,11 +2233,20 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
				0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
		}
-		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-			     !eth->netdev[mac]))
-			goto release_desc;
+		tnl_idx = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
+		if (mtk_get_tnl_dev && tnl_idx) {
+			netdev = mtk_get_tnl_dev(tnl_idx);
+			if (unlikely(IS_ERR(netdev)))
+				netdev = NULL;
+		}
-		netdev = eth->netdev[mac];
+		if (!netdev) {
+			if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+				     !eth->netdev[mac]))
+				goto release_desc;
+
+			netdev = eth->netdev[mac];
+		}
		if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
			goto release_desc;
@@ -2318,6 +2331,8 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
		skb_hnat_alg(skb) = 0;
		skb_hnat_filled(skb) = 0;
		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+		skb_hnat_set_tops(skb, 0);
+		skb_hnat_set_is_decap(skb, 0);
		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
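/*
 * Illustrative sketch, not part of the patch above: one plausible shape for
 * the mtk_get_tnl_dev() hook that mtk_poll_rx() consults. It would live in
 * an external TOPS module; MY_TOPS_MAX_TNL and my_tops_tnl_netdev[] are
 * hypothetical names used only for this example.
 */
#include <linux/err.h>
#include <linux/netdevice.h>

#define MY_TOPS_MAX_TNL		32	/* assumed size of the tunnel table */

static struct net_device *my_tops_tnl_netdev[MY_TOPS_MAX_TNL];

static struct net_device *my_tops_get_tnl_dev(int tnl_idx)
{
	/* tnl_idx comes from RX_DMA_GET_TOPS_CRSN(); 0 means "no tunnel" */
	if (tnl_idx <= 0 || tnl_idx >= MY_TOPS_MAX_TNL ||
	    !my_tops_tnl_netdev[tnl_idx])
		return ERR_PTR(-ENODEV);

	return my_tops_tnl_netdev[tnl_idx];
}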
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -43,6 +43,12 @@ void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
EXPORT_SYMBOL(ppe_dev_register_hook);
void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
EXPORT_SYMBOL(ppe_dev_unregister_hook);
+int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_encap_offload);
+int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offload);
+bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
static void hnat_sma_build_entry(struct timer_list *t)
{
@@ -53,6 +59,16 @@ static void hnat_sma_build_entry(struct timer_list *t)
		      SMA, SMA_FWD_CPU_BUILD_ENTRY);
}
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
+{
+	if (index == 0x7fff || index >= hnat_priv->foe_etry_num
+	    || ppe_id >= CFG_PPE_NUM)
+		return ERR_PTR(-EINVAL);
+
+	return &hnat_priv->foe_table_cpu[ppe_id][index];
+}
+EXPORT_SYMBOL(hnat_get_foe_entry);
+
void hnat_cache_ebl(int enable)
{
	int i;
@@ -63,6 +79,7 @@ void hnat_cache_ebl(int enable)
		cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
	}
}
+EXPORT_SYMBOL(hnat_cache_ebl);
static void hnat_reset_timestamp(struct timer_list *t)
{
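/*
 * Illustrative sketch, not part of the patch above: how an external module
 * might consume the two symbols exported here. The error-handling pattern
 * around hnat_get_foe_entry() is the point; disabling the PPE cache while
 * an entry is edited is an assumption for illustration, not a requirement
 * stated by the patch.
 */
#include <linux/err.h>

static int my_tops_update_foe(u32 ppe_id, u32 index)
{
	struct foe_entry *foe;

	foe = hnat_get_foe_entry(ppe_id, index);
	if (IS_ERR(foe))
		return PTR_ERR(foe);

	hnat_cache_ebl(0);	/* turn the PPE cache off while editing */
	/* ... modify *foe for the tunnel flow here ... */
	hnat_cache_ebl(1);	/* turn it back on when done */

	return 0;
}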
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -1133,6 +1133,8 @@ enum FoeIpAct {
#define NR_WDMA1_PORT 9
#define NR_WDMA2_PORT 13
#define NR_GMAC3_PORT 15
+#define NR_TDMA_TPORT 4
+#define NR_TDMA_QDMA_TPORT 5
#define LAN_DEV_NAME hnat_priv->lan
#define LAN2_DEV_NAME hnat_priv->lan2
#define IS_WAN(dev) \
@@ -1256,6 +1258,8 @@ static inline bool hnat_dsa_is_enable(struct mtk_hnat *priv)
}
#endif
+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
+
void hnat_deinit_debugfs(struct mtk_hnat *h);
int hnat_init_debugfs(struct mtk_hnat *h);
int hnat_register_nf_hooks(void);
@@ -1272,6 +1276,9 @@ extern int qos_ul_toggle;
extern int hook_toggle;
extern int mape_toggle;
extern int qos_toggle;
+extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
+extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
+extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
int ext_if_add(struct extdev_entry *ext_entry);
int ext_if_del(struct extdev_entry *ext_entry);
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -726,10 +726,14 @@ static unsigned int is_ppe_support_type(struct sk_buff *skb)
	case ETH_P_IP:
		iph = ip_hdr(skb);
-		/* do not accelerate non tcp/udp traffic */
-		if ((iph->protocol == IPPROTO_TCP) ||
+		if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
+			/* tunnel protocol is offloadable */
+			skb_hnat_set_is_decap(skb, 1);
+			return 1;
+		} else if ((iph->protocol == IPPROTO_TCP) ||
		    (iph->protocol == IPPROTO_UDP) ||
		    (iph->protocol == IPPROTO_IPV6)) {
+			/* do not accelerate non tcp/udp traffic */
			return 1;
		}
@@ -846,6 +850,13 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv, struct sk_buff *skb,
	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
+	    && is_magic_tag_valid(skb)
+	    && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
+	    && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
+		return NF_ACCEPT;
+	}
+
	/*
	 * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
	 * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
@@ -921,6 +932,13 @@ mtk_hnat_br_nf_local_in(void *priv, struct sk_buff *skb,
	hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+	if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
+	    && is_magic_tag_valid(skb)
+	    && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
+	    && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
+		return NF_ACCEPT;
+	}
+
	pre_routing_print(skb, state->in, state->out, __func__);
	if (unlikely(debug_level >= 7)) {
@@ -1073,9 +1091,22 @@ static unsigned int hnat_ipv4_get_nexthop(struct sk_buff *skb,
		return -1;
	}
+	/*
+	 * If this is a tunnel packet whose outer header is about to be
+	 * constructed, update the outer MAC header pointer before the
+	 * outer MAC is filled in, or the inner MAC may be corrupted.
+	 */
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
+		skb_push(skb, sizeof(struct ethhdr));
+		skb_reset_mac_header(skb);
+	}
+
	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
+
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+		skb_pull(skb, sizeof(struct ethhdr));
+
	rcu_read_unlock_bh();
@@ -1201,6 +1232,81 @@ static struct ethhdr *get_ipv6_ipip_ethhdr(struct sk_buff *skb,
	return eth;
}
+static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
+						struct foe_entry *entry)
+{
+	if (unlikely(!skb || !entry))
+		return;
+
+	memcpy(entry,
+	       &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
+	       sizeof(*entry));
+
+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
+	entry->bfib1.mc = 0;
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3) */
+	entry->bfib1.ka = 0;
+	entry->bfib1.vlan_layer = 0;
+	entry->bfib1.psn = 0;
+	entry->bfib1.vpm = 0;
+	entry->bfib1.ps = 0;
+}
+
+static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
+{
+	u32 cfg;
+	u32 max_man = 0;
+	u32 max_exp = 0;
+	const struct mtk_mac *mac;
+
+	if (!dev)
+		return;
+	mac = netdev_priv(dev);
+
+	switch (mac->speed) {
+	case SPEED_100:
+	case SPEED_1000:
+	case SPEED_2500:
+	case SPEED_5000:
+	case SPEED_10000:
+		max_man = mac->speed / SPEED_100;
+		max_exp = 5;
+		break;
+	default:
+		return;
+	}
+
+	cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN;
+	cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) |
+	       (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) |
+	       (max_man << QTX_SCH_MAX_RATE_MAN_OFFSET) |
+	       (max_exp << QTX_SCH_MAX_RATE_EXP_OFFSET) |
+	       (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET);
+	writel(cfg, hnat_priv->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
+}
+
+static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
+						  struct foe_entry *entry,
+						  const struct net_device *dev)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
+		/*
+		 * If skb_hnat_tops(skb) is set up for encapsulation,
+		 * fill in the HNAT tport and tops_entry for tunnel
+		 * encapsulation offloading.
+		 */
+		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
+		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
+	} else {
+		return;
+	}
+
+	entry->ipv4_hnapt.iblk2.qid = 12; /* offload engine uses QID 12 */
+	hnat_qos_tnl(12, dev); /* set rate limit to line rate */
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+}
+
static unsigned int skb_to_hnat_info(struct sk_buff *skb,
				     const struct net_device *dev,
				     struct foe_entry *foe,
@@ -1237,6 +1343,11 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
	if (whnat && is_hnat_pre_filled(foe))
		return 0;
+	if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
+		hnat_get_filled_unbind_entry(skb, &entry);
+		goto hnat_entry_bind;
+	}
+
	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
	entry.bfib1.state = foe->udib1.state;
@@ -1679,6 +1790,10 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
	/* Fill Layer2 Info.*/
	entry = ppe_fill_L2_info(eth, entry, hw_path);
+	if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
+		goto hnat_entry_skip_bind;
+
+hnat_entry_bind:
	/* Fill Info Blk*/
	entry = ppe_fill_info_blk(eth, entry, hw_path);
@@ -1879,7 +1994,20 @@ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
		entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
	}
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, &entry, dev);
+#endif
+
+hnat_entry_skip_bind:
	wmb();
+
+	/*
+	 * Final check before writing the BIND info: if this entry is
+	 * already bound, do not modify it now.
+	 */
+	if (entry_hnat_is_bound(foe))
+		return 0;
+
	memcpy(foe, &entry, sizeof(entry));
	/*reset statistic for this entry*/
	if (hnat_priv->data->per_flow_accounting &&
@@ -1951,6 +2079,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
	switch ((int)entry.bfib1.pkt_type) {
	case IPV4_HNAPT:
	case IPV4_HNAT:
+		/*
+		 * Skip if the packet is an encap tunnel packet, otherwise
+		 * the inner MAC header may be corrupted.
+		 */
+		if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+			break;
		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
		break;
@@ -2112,6 +2246,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *skb, int gmac_no)
		entry.ipv6_5t_route.iblk2.dp = gmac_no;
	}
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+	hnat_fill_offload_engine_entry(skb, &entry, NULL);
+#endif
+
	entry.bfib1.ttl = 1;
	entry.bfib1.state = BIND;
@@ -2187,6 +2325,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *skb)
	}
	skb_hnat_alg(skb) = 0;
+	skb_hnat_set_tops(skb, 0);
	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
@@ -2636,6 +2775,7 @@ static unsigned int mtk_hnat_nf_post_routing(
	struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
						.virt_dev = (struct net_device*)out };
	const struct net_device *arp_dev = out;
+	bool is_virt_dev = false;
	if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
		return 0;
@@ -2652,10 +2792,29 @@ static unsigned int mtk_hnat_nf_post_routing(
	if (out->netdev_ops->ndo_flow_offload_check) {
		out->netdev_ops->ndo_flow_offload_check(&hw_path);
+
		out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
+		if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload) {
+			if (ntohs(skb->protocol) == ETH_P_IP
+			    && ip_hdr(skb)->protocol == IPPROTO_TCP) {
+				skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
+			} else {
+				/*
+				 * Protocols other than IPv4 TCP are not yet
+				 * supported for tunnel offload.
+				 */
+				skb_hnat_alg(skb) = 1;
+				return 0;
+			}
+		}
	}
	if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
+		is_virt_dev = true;
+
+	if (is_virt_dev
+	    && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
+		 && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
		return 0;
	trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
@@ -2675,9 +2834,18 @@ static unsigned int mtk_hnat_nf_post_routing(
		if (fn && !mtk_hnat_accel_type(skb))
			break;
-		if (fn && fn(skb, arp_dev, &hw_path))
+		if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
			break;
+		/* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
+		if (skb_hnat_tops(skb)) {
+			if (skb_hnat_is_encap(skb) && !is_virt_dev
+			    && mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
+				break;
+			if (skb_hnat_is_decap(skb))
+				break;
+		}
+
		spin_lock(&hnat_priv->entry_lock);
		skb_to_hnat_info(skb, out, entry, &hw_path);
		spin_unlock(&hnat_priv->entry_lock);
@@ -2951,7 +3119,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, struct sk_buff *skb,
	if (iph->protocol == IPPROTO_IPV6) {
		entry->udib1.pkt_type = IPV6_6RD;
		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
-	} else {
+	} else if (!skb_hnat_tops(skb)) {
		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
	}
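/*
 * Illustrative sketch, not part of the patch above: a minimal shape for the
 * mtk_tnl_decap_offloadable() hook that is_ppe_support_type() consults. A
 * real TOPS module would match against its configured tunnels; the bare
 * IPv4 GRE check below is an assumption used only for this example.
 */
#include <linux/in.h>
#include <net/ip.h>

static bool my_tops_decap_offloadable(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	/* only non-fragmented IPv4 GRE outer headers in this toy check */
	return iph->version == 4 && iph->protocol == IPPROTO_GRE &&
	       !ip_is_fragment(iph);
}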
--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -44,7 +44,9 @@ struct hnat_desc {
	u32 is_sp : 1;
	u32 hf : 1;
	u32 amsdu : 1;
-	u32 resv3 : 19;
+	u32 tops : 6;
+	u32 is_decap : 1;
+	u32 resv3 : 12;
	u32 magic_tag_protect : 16;
} __packed;
#elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
@@ -91,6 +93,19 @@ struct hnat_desc {
	((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
#define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
+#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
+#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
+#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
+#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
+#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
+#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
+#define skb_hnat_tops(skb) (0)
+#define skb_hnat_is_decap(skb) (0)
+#define skb_hnat_is_encap(skb) (0)
+#define skb_hnat_set_tops(skb, tops)
+#define skb_hnat_set_is_decap(skb, is_decap)
+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
#define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
#define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
#define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
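/*
 * Illustrative note, not part of the patch above: mtk_hnat_nf_post_routing()
 * stores hw_path.tnl_type + 1 in the 6-bit tops field so that zero keeps
 * meaning "no tunnel offload". A consumer could recover the enum value with
 * a helper like this (the helper name is hypothetical).
 */
static inline int skb_hnat_tnl_type(struct sk_buff *skb)
{
	return skb_hnat_tops(skb) ? (int)skb_hnat_tops(skb) - 1 : -1;
}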
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -98,10 +98,22 @@ struct flow_offload {
#define FLOW_OFFLOAD_PATH_6RD BIT(5)
#define FLOW_OFFLOAD_PATH_TNL BIT(6)
+enum flow_offload_tnl {
+	FLOW_OFFLOAD_TNL_GRETAP,
+	FLOW_OFFLOAD_TNL_PPTP,
+	FLOW_OFFLOAD_TNL_IP_L2TP,
+	FLOW_OFFLOAD_TNL_UDP_L2TP_CTRL,
+	FLOW_OFFLOAD_TNL_UDP_L2TP_DATA,
+	FLOW_OFFLOAD_VXLAN,
+	FLOW_OFFLOAD_NATT,
+	__FLOW_OFFLOAD_MAX,
+};
+
struct flow_offload_hw_path {
	struct net_device *dev;
	struct net_device *virt_dev;
	u32 flags;
+	u32 tnl_type;
	u8 eth_src[ETH_ALEN];
	u8 eth_dest[ETH_ALEN];
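/*
 * Illustrative sketch, not part of the patch above: how a tunnel device's
 * ->ndo_flow_offload_check() might fill in the new fields so that
 * mtk_hnat_nf_post_routing() can pick the tunnel type up. The gretap
 * example and the my_gretap_real_dev() helper are assumptions used only
 * for this sketch.
 */
static int my_gretap_flow_offload_check(struct flow_offload_hw_path *path)
{
	/* hypothetical helper: report the underlay device the tunnel uses */
	path->dev = my_gretap_real_dev(path->dev);

	path->flags |= FLOW_OFFLOAD_PATH_TNL;
	path->tnl_type = FLOW_OFFLOAD_TNL_GRETAP;

	return 0;
}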
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1904,6 +1904,9 @@ extern const struct of_device_id of_mtk_match[];
extern u32 mtk_hwlro_stats_ebl;
extern u32 dbg_show_level;
+/* tunnel offload related */
+extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
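/*
 * Illustrative sketch, not part of the patch above: a hypothetical
 * out-of-tree TOPS module wiring up every hook this series exports and
 * tearing them down on unload. The my_tops_* handlers are assumed to be
 * that module's own implementations of the hook signatures declared in
 * hnat.h and mtk_eth_soc.h.
 */
#include <linux/module.h>

static int __init my_tops_init(void)
{
	mtk_tnl_encap_offload = my_tops_encap_offload;
	mtk_tnl_decap_offload = my_tops_decap_offload;
	mtk_tnl_decap_offloadable = my_tops_decap_offloadable;
	mtk_get_tnl_dev = my_tops_get_tnl_dev;

	return 0;
}

static void __exit my_tops_exit(void)
{
	/* clear in reverse order; every caller NULL-checks the pointers */
	mtk_get_tnl_dev = NULL;
	mtk_tnl_decap_offloadable = NULL;
	mtk_tnl_decap_offload = NULL;
	mtk_tnl_encap_offload = NULL;
}

module_init(my_tops_init);
module_exit(my_tops_exit);
MODULE_LICENSE("GPL");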