[][openwrt][mt7988][tops][remove tops]
[Description]
Remove the TOPS driver and related kernel patches.
The current release policy for TOPS is distribution via tarball.
As a result, the public version of the TOPS driver and the related
kernel patches have been removed.
[Release-log]
N/A
Change-Id: I02c37ddb0822652b975a25e66f414614fa0d89b3
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/8932728
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
deleted file mode 100644
index 6c26c22..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4100-mtk-tunnel-offload-support.patch
+++ /dev/null
@@ -1,488 +0,0 @@
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -245,6 +245,9 @@ static const char * const mtk_clks_sourc
- "top_netsys_warp_sel",
- };
-
-+struct net_device *(*mtk_get_tnl_dev)(int tnl_idx) = NULL;
-+EXPORT_SYMBOL(mtk_get_tnl_dev);
-+
- void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
- {
- __raw_writel(val, eth->base + reg);
-@@ -2168,6 +2171,7 @@ static int mtk_poll_rx(struct napi_struc
- u64 addr64 = 0;
- u8 *data, *new_data;
- struct mtk_rx_dma_v2 *rxd, trxd;
-+ int tnl_idx = 0;
- int done = 0;
-
- if (unlikely(!ring))
-@@ -2205,11 +2209,20 @@ static int mtk_poll_rx(struct napi_struc
- 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
- }
-
-- if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-- !eth->netdev[mac]))
-- goto release_desc;
-+ tnl_idx = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
-+ if (mtk_get_tnl_dev && tnl_idx) {
-+ netdev = mtk_get_tnl_dev(tnl_idx);
-+ if (unlikely(IS_ERR(netdev)))
-+ netdev = NULL;
-+ }
-
-- netdev = eth->netdev[mac];
-+ if (!netdev) {
-+ if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-+ !eth->netdev[mac]))
-+ goto release_desc;
-+
-+ netdev = eth->netdev[mac];
-+ }
-
- if (unlikely(test_bit(MTK_RESETTING, ð->state)))
- goto release_desc;
-@@ -2294,6 +2307,8 @@ static int mtk_poll_rx(struct napi_struc
- skb_hnat_alg(skb) = 0;
- skb_hnat_filled(skb) = 0;
- skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
-+ skb_hnat_set_tops(skb, 0);
-+ skb_hnat_set_is_decap(skb, 0);
-
- if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
- trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1915,6 +1915,9 @@ extern const struct of_device_id of_mtk_
- extern u32 mtk_hwlro_stats_ebl;
- extern u32 dbg_show_level;
-
-+/* tunnel offload related */
-+extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);
-+
- /* read the hardware status register */
- void mtk_stats_update_mac(struct mtk_mac *mac);
-
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
-@@ -43,6 +43,12 @@ void (*ppe_dev_register_hook)(struct net
- EXPORT_SYMBOL(ppe_dev_register_hook);
- void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
- EXPORT_SYMBOL(ppe_dev_unregister_hook);
-+int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
-+EXPORT_SYMBOL(mtk_tnl_encap_offload);
-+int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
-+EXPORT_SYMBOL(mtk_tnl_decap_offload);
-+bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
-+EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
-
- static void hnat_sma_build_entry(struct timer_list *t)
- {
-@@ -53,6 +59,16 @@ static void hnat_sma_build_entry(struct
- SMA, SMA_FWD_CPU_BUILD_ENTRY);
- }
-
-+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
-+{
-+ if (index == 0x7fff || index >= hnat_priv->foe_etry_num
-+ || ppe_id >= CFG_PPE_NUM)
-+ return ERR_PTR(-EINVAL);
-+
-+ return &hnat_priv->foe_table_cpu[ppe_id][index];
-+}
-+EXPORT_SYMBOL(hnat_get_foe_entry);
-+
- void hnat_cache_ebl(int enable)
- {
- int i;
-@@ -63,6 +79,7 @@ void hnat_cache_ebl(int enable)
- cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
- }
- }
-+EXPORT_SYMBOL(hnat_cache_ebl);
-
- static void hnat_reset_timestamp(struct timer_list *t)
- {
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-@@ -1140,6 +1140,8 @@ enum FoeIpAct {
- #define NR_WDMA1_PORT 9
- #define NR_WDMA2_PORT 13
- #define NR_GMAC3_PORT 15
-+#define NR_TDMA_TPORT 4
-+#define NR_TDMA_QDMA_TPORT 5
- #define LAN_DEV_NAME hnat_priv->lan
- #define LAN2_DEV_NAME hnat_priv->lan2
- #define IS_WAN(dev) \
-@@ -1269,6 +1271,8 @@ static inline bool hnat_dsa_is_enable(st
- }
- #endif
-
-+struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
-+
- void hnat_deinit_debugfs(struct mtk_hnat *h);
- int hnat_init_debugfs(struct mtk_hnat *h);
- int hnat_register_nf_hooks(void);
-@@ -1285,6 +1289,9 @@ extern int qos_ul_toggle;
- extern int hook_toggle;
- extern int mape_toggle;
- extern int qos_toggle;
-+extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
-+extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
-+extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
-
- int ext_if_add(struct extdev_entry *ext_entry);
- int ext_if_del(struct extdev_entry *ext_entry);
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-@@ -728,10 +728,14 @@ static unsigned int is_ppe_support_type(
- case ETH_P_IP:
- iph = ip_hdr(skb);
-
-- /* do not accelerate non tcp/udp traffic */
-- if ((iph->protocol == IPPROTO_TCP) ||
-+ if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
-+ /* tunnel protocol is offloadable */
-+ skb_hnat_set_is_decap(skb, 1);
-+ return 1;
-+ } else if ((iph->protocol == IPPROTO_TCP) ||
- (iph->protocol == IPPROTO_UDP) ||
- (iph->protocol == IPPROTO_IPV6)) {
-+ /* do not accelerate non tcp/udp traffic */
- return 1;
- }
-
-@@ -848,6 +852,13 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv,
-
- hnat_set_head_frags(state, skb, -1, hnat_set_iif);
-
-+ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
-+ && is_magic_tag_valid(skb)
-+ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
-+ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
-+ return NF_ACCEPT;
-+ }
-+
- /*
- * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
- * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
-@@ -923,6 +934,13 @@ mtk_hnat_br_nf_local_in(void *priv, stru
-
- hnat_set_head_frags(state, skb, -1, hnat_set_iif);
-
-+ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
-+ && is_magic_tag_valid(skb)
-+ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
-+ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
-+ return NF_ACCEPT;
-+ }
-+
- pre_routing_print(skb, state->in, state->out, __func__);
-
- if (unlikely(debug_level >= 7)) {
-@@ -1075,8 +1093,22 @@ static unsigned int hnat_ipv4_get_nextho
- return -1;
- }
-
-+ /*
-+ * if this packet is a tunnel packet and is about to construct
-+ * outer header, we must update its outer mac header pointer
-+ * before filling outer mac or it may screw up inner mac
-+ */
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
-+ skb_push(skb, sizeof(struct ethhdr));
-+ skb_reset_mac_header(skb);
-+ }
-+
- memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
- memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
-+ eth_hdr(skb)->h_proto = htons(ETH_P_IP);
-+
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
-+ skb_pull(skb, sizeof(struct ethhdr));
-
- rcu_read_unlock_bh();
-
-@@ -1203,6 +1235,81 @@ static struct ethhdr *get_ipv6_ipip_ethh
- return eth;
- }
-
-+static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
-+ struct foe_entry *entry)
-+{
-+ if (unlikely(!skb || !entry))
-+ return;
-+
-+ memcpy(entry,
-+ &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
-+ sizeof(*entry));
-+
-+#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ entry->bfib1.mc = 0;
-+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3) */
-+ entry->bfib1.ka = 0;
-+ entry->bfib1.vlan_layer = 0;
-+ entry->bfib1.psn = 0;
-+ entry->bfib1.vpm = 0;
-+ entry->bfib1.ps = 0;
-+}
-+
-+static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
-+{
-+ u32 cfg;
-+ u32 max_man = 0;
-+ u32 max_exp = 0;
-+ const struct mtk_mac *mac;
-+
-+ if (!dev)
-+ return;
-+ mac = netdev_priv(dev);
-+
-+ switch (mac->speed) {
-+ case SPEED_100:
-+ case SPEED_1000:
-+ case SPEED_2500:
-+ case SPEED_5000:
-+ case SPEED_10000:
-+ max_man = mac->speed / SPEED_100;
-+ max_exp = 5;
-+ break;
-+ default:
-+ return;
-+ }
-+
-+ cfg = QTX_SCH_MIN_RATE_EN | QTX_SCH_MAX_RATE_EN;
-+ cfg |= (1 << QTX_SCH_MIN_RATE_MAN_OFFSET) |
-+ (4 << QTX_SCH_MIN_RATE_EXP_OFFSET) |
-+ (max_man << QTX_SCH_MAX_RATE_MAN_OFFSET) |
-+ (max_exp << QTX_SCH_MAX_RATE_EXP_OFFSET) |
-+ (4 << QTX_SCH_MAX_RATE_WGHT_OFFSET);
-+ writel(cfg, hnat_priv->fe_base + QTX_SCH(id % NUM_OF_Q_PER_PAGE));
-+}
-+
-+static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
-+ struct foe_entry *entry,
-+ const struct net_device *dev)
-+{
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
-+ /*
-+ * if skb_hnat_tops(skb) is setup for encapsulation,
-+ * we fill in hnat tport and tops_entry for tunnel encapsulation
-+ * offloading
-+ */
-+ entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
-+ entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
-+ } else {
-+ return;
-+ }
-+
-+ entry->ipv4_hnapt.iblk2.qid = 12; /* offload engine use QID 12 */
-+ hnat_qos_tnl(12, dev); /* set rate limit to line rate */
-+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
-+}
-+
- static unsigned int skb_to_hnat_info(struct sk_buff *skb,
- const struct net_device *dev,
- struct foe_entry *foe,
-@@ -1240,6 +1347,11 @@ static unsigned int skb_to_hnat_info(str
- if (whnat && is_hnat_pre_filled(foe))
- return 0;
-
-+ if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
-+ hnat_get_filled_unbind_entry(skb, &entry);
-+ goto hnat_entry_bind;
-+ }
-+
- entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
- entry.bfib1.state = foe->udib1.state;
-
-@@ -1683,6 +1795,10 @@ static unsigned int skb_to_hnat_info(str
- /* Fill Layer2 Info.*/
- entry = ppe_fill_L2_info(eth, entry, hw_path);
-
-+ if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
-+ goto hnat_entry_skip_bind;
-+
-+hnat_entry_bind:
- /* Fill Info Blk*/
- entry = ppe_fill_info_blk(eth, entry, hw_path);
-
-@@ -1881,7 +1997,20 @@ static unsigned int skb_to_hnat_info(str
- entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
- }
-
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ hnat_fill_offload_engine_entry(skb, &entry, dev);
-+#endif
-+
-+hnat_entry_skip_bind:
- wmb();
-+
-+ /*
-+ * final check before we write BIND info.
-+ * If this entry is already bound, we should not modify it right now
-+ */
-+ if (entry_hnat_is_bound(foe))
-+ return 0;
-+
- memcpy(foe, &entry, sizeof(entry));
- /*reset statistic for this entry*/
- if (hnat_priv->data->per_flow_accounting &&
-@@ -1953,6 +2082,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
- switch ((int)entry.bfib1.pkt_type) {
- case IPV4_HNAPT:
- case IPV4_HNAT:
-+ /*
-+ * skip if packet is an encap tnl packet or it may
-+ * screw up inner mac header
-+ */
-+ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
-+ break;
- entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
- entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4]));
- break;
-@@ -2144,6 +2279,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
- entry.ipv6_5t_route.iblk2.dp = gmac_no;
- }
-
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+ hnat_fill_offload_engine_entry(skb, &entry, NULL);
-+#endif
-+
- entry.bfib1.ttl = 1;
- entry.bfib1.state = BIND;
- if (IS_IPV4_GRP(&entry))
-@@ -2219,6 +2358,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
- }
-
- skb_hnat_alg(skb) = 0;
-+ skb_hnat_set_tops(skb, 0);
- skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
-
- if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
-@@ -2672,6 +2812,7 @@ static unsigned int mtk_hnat_nf_post_rou
- struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
- .virt_dev = (struct net_device*)out };
- const struct net_device *arp_dev = out;
-+ bool is_virt_dev = false;
-
- if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
- return 0;
-@@ -2691,10 +2832,29 @@ static unsigned int mtk_hnat_nf_post_rou
-
- if (out->netdev_ops->ndo_flow_offload_check) {
- out->netdev_ops->ndo_flow_offload_check(&hw_path);
-+
- out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
-+ if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload) {
-+ if (ntohs(skb->protocol) == ETH_P_IP
-+ && ip_hdr(skb)->protocol == IPPROTO_TCP) {
-+ skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
-+ } else {
-+ /*
-+ * we are not support protocols other than IPv4 TCP
-+ * for tunnel protocol offload yet
-+ */
-+ skb_hnat_alg(skb) = 1;
-+ return 0;
-+ }
-+ }
- }
-
- if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
-+ is_virt_dev = true;
-+
-+ if (is_virt_dev
-+ && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
-+ && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
- return 0;
-
- trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
-@@ -2714,9 +2874,18 @@ static unsigned int mtk_hnat_nf_post_rou
- if (fn && !mtk_hnat_accel_type(skb))
- break;
-
-- if (fn && fn(skb, arp_dev, &hw_path))
-+ if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
- break;
-
-+ /* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
-+ if (skb_hnat_tops(skb)) {
-+ if (skb_hnat_is_encap(skb) && !is_virt_dev
-+ && mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
-+ break;
-+ if (skb_hnat_is_decap(skb))
-+ break;
-+ }
-+
- spin_lock(&hnat_priv->entry_lock);
- skb_to_hnat_info(skb, out, entry, &hw_path);
- spin_unlock(&hnat_priv->entry_lock);
-@@ -2989,7 +3158,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
- if (iph->protocol == IPPROTO_IPV6) {
- entry->udib1.pkt_type = IPV6_6RD;
- hnat_set_head_frags(state, skb, 0, hnat_set_alg);
-- } else {
-+ } else if (!skb_hnat_tops(skb)) {
- hnat_set_head_frags(state, skb, 1, hnat_set_alg);
- }
-
---- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-@@ -44,7 +44,9 @@ struct hnat_desc {
- u32 is_sp : 1;
- u32 hf : 1;
- u32 amsdu : 1;
-- u32 resv3 : 19;
-+ u32 tops : 6;
-+ u32 is_decap : 1;
-+ u32 resv3 : 12;
- u32 magic_tag_protect : 16;
- } __packed;
- #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
-@@ -91,6 +93,19 @@ struct hnat_desc {
- ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
-
- #define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
-+#if defined(CONFIG_MEDIATEK_NETSYS_V3)
-+#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
-+#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
-+#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
-+#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
-+#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
-+#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
-+#define skb_hnat_tops(skb) (0)
-+#define skb_hnat_is_decap(skb) (0)
-+#define skb_hnat_is_encap(skb) (0)
-+#define skb_hnat_set_tops(skb, tops)
-+#define skb_hnat_set_is_decap(skb, is_decap)
-+#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
- #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
- #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
- #define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
---- a/include/net/netfilter/nf_flow_table.h
-+++ b/include/net/netfilter/nf_flow_table.h
-@@ -98,10 +98,21 @@ struct flow_offload {
- #define FLOW_OFFLOAD_PATH_6RD BIT(5)
- #define FLOW_OFFLOAD_PATH_TNL BIT(6)
-
-+enum flow_offload_tnl {
-+ FLOW_OFFLOAD_TNL_GRETAP,
-+ FLOW_OFFLOAD_TNL_PPTP,
-+ FLOW_OFFLOAD_TNL_L2TP_V2,
-+ FLOW_OFFLOAD_TNL_L2TP_V3,
-+ FLOW_OFFLOAD_VXLAN,
-+ FLOW_OFFLOAD_NATT,
-+ __FLOW_OFFLOAD_MAX,
-+};
-+
- struct flow_offload_hw_path {
- struct net_device *dev;
- struct net_device *virt_dev;
- u32 flags;
-+ u32 tnl_type;
-
- u8 eth_src[ETH_ALEN];
- u8 eth_dest[ETH_ALEN];
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4101-mtk-tunnel-network-service-error-recover-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4101-mtk-tunnel-network-service-error-recover-support.patch
deleted file mode 100644
index a2579c0..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4101-mtk-tunnel-network-service-error-recover-support.patch
+++ /dev/null
@@ -1,49 +0,0 @@
---- a/drivers/net/ethernet/mediatek/mtk_eth_reset.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_reset.c
-@@ -635,6 +635,9 @@ static int mtk_eth_netdevice_event(struc
- unsigned long event, void *ptr)
- {
- switch (event) {
-+ case MTK_TOPS_DUMP_DONE:
-+ complete(&wait_tops_done);
-+ break;
- case MTK_WIFI_RESET_DONE:
- case MTK_FE_STOP_TRAFFIC_DONE:
- pr_info("%s rcv done event:%lx\n", __func__, event);
---- a/drivers/net/ethernet/mediatek/mtk_eth_reset.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_reset.h
-@@ -13,6 +13,7 @@
- #define MTK_WIFI_RESET_DONE 0x2002
- #define MTK_WIFI_CHIP_ONLINE 0x2003
- #define MTK_WIFI_CHIP_OFFLINE 0x2004
-+#define MTK_TOPS_DUMP_DONE 0x3001
- #define MTK_FE_RESET_NAT_DONE 0x4001
-
- #define MTK_FE_STOP_TRAFFIC (0x2005)
-@@ -67,6 +68,7 @@ enum mtk_reset_event_id {
-
- extern struct notifier_block mtk_eth_netdevice_nb __read_mostly;
- extern struct completion wait_ser_done;
-+extern struct completion wait_tops_done;
- extern char* mtk_reset_event_name[32];
- extern atomic_t reset_lock;
- extern struct completion wait_nat_done;
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -38,6 +38,7 @@ atomic_t force = ATOMIC_INIT(0);
- module_param_named(msg_level, mtk_msg_level, int, 0);
- MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
- DECLARE_COMPLETION(wait_ser_done);
-+DECLARE_COMPLETION(wait_tops_done);
-
- #define MTK_ETHTOOL_STAT(x) { #x, \
- offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
-@@ -4178,6 +4179,8 @@ static void mtk_pending_work(struct work
- }
- pr_warn("wait for MTK_FE_START_RESET\n");
- }
-+ if (!try_wait_for_completion(&wait_tops_done))
-+ pr_warn("wait for MTK_TOPS_DUMP_DONE\n");
- rtnl_lock();
- break;
- }
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4103-mtk-tunnel-crypto-offload-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4103-mtk-tunnel-crypto-offload-support.patch
deleted file mode 100644
index b96f080..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4103-mtk-tunnel-crypto-offload-support.patch
+++ /dev/null
@@ -1,202 +0,0 @@
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
-@@ -1144,6 +1144,8 @@ enum FoeIpAct {
- #define NR_EIP197_QDMA_TPORT 3
- #define NR_TDMA_TPORT 4
- #define NR_TDMA_QDMA_TPORT 5
-+#define NR_TDMA_EIP197_TPORT 8
-+#define NR_TDMA_EIP197_QDMA_TPORT 9
- #define LAN_DEV_NAME hnat_priv->lan
- #define LAN2_DEV_NAME hnat_priv->lan2
- #define IS_WAN(dev) \
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-@@ -1101,7 +1101,8 @@ static unsigned int hnat_ipv4_get_nextho
- * outer header, we must update its outer mac header pointer
- * before filling outer mac or it may screw up inner mac
- */
-- if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
-+ if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
-+ || (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))) {
- skb_push(skb, sizeof(struct ethhdr));
- skb_reset_mac_header(skb);
- }
-@@ -1110,7 +1111,8 @@ static unsigned int hnat_ipv4_get_nextho
- memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
- eth_hdr(skb)->h_proto = htons(ETH_P_IP);
-
-- if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
-+ if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
-+ || (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)))
- skb_pull(skb, sizeof(struct ethhdr));
-
- rcu_read_unlock_bh();
-@@ -1258,6 +1260,38 @@ static inline void hnat_get_filled_unbin
- entry->bfib1.ps = 0;
- }
-
-+/*
-+ * check offload engine data is prepared
-+ * return 0 for packets not related to offload engine
-+ * return positive value for offload engine prepared data done
-+ * return negative value for data is still constructing
-+ */
-+static inline int hnat_offload_engine_done(struct sk_buff *skb,
-+ struct flow_offload_hw_path *hw_path)
-+{
-+ struct dst_entry *dst = skb_dst(skb);
-+
-+ if ((skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL))) {
-+ /* tunnel encap'ed */
-+ if (dst && dst_xfrm(dst))
-+ /*
-+ * skb not ready to bind since it is still needs
-+ * to be encrypted
-+ */
-+ return -1;
-+
-+ /* nothing need to be done further for this skb */
-+ return 1;
-+ }
-+
-+ if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb) && dst && !dst_xfrm(dst))
-+ /* crypto encrypted done */
-+ return 1;
-+
-+ /* no need for tunnel encapsulation or crypto encryption */
-+ return 0;
-+}
-+
- static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
- {
- u32 cfg;
-@@ -1302,9 +1336,15 @@ static inline void hnat_fill_offload_eng
- * we fill in hnat tport and tops_entry for tunnel encapsulation
- * offloading
- */
-- entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
-+ if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
-+ entry->ipv4_hnapt.tport_id = NR_TDMA_EIP197_QDMA_TPORT;
-+ entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
-+ } else {
-+ entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
-+ }
- entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
-- } else if (skb_hnat_cdrt(skb)) {
-+
-+ } else if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
- entry->ipv4_hnapt.tport_id = NR_EIP197_QDMA_TPORT;
- entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
- } else {
-@@ -1405,6 +1445,7 @@ static unsigned int skb_to_hnat_info(str
- u32 port_id = 0;
- u32 payload_len = 0;
- int mape = 0;
-+ int ret;
- struct mtk_mac *mac = netdev_priv(dev);
-
- ct = nf_ct_get(skb, &ctinfo);
-@@ -1422,9 +1463,12 @@ static unsigned int skb_to_hnat_info(str
- if (whnat && is_hnat_pre_filled(foe))
- return 0;
-
-- if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
-+ ret = hnat_offload_engine_done(skb, hw_path);
-+ if (ret == 1) {
- hnat_get_filled_unbind_entry(skb, &entry);
- goto hnat_entry_bind;
-+ } else if (ret == -1) {
-+ return 0;
- }
-
- entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
-@@ -1870,7 +1914,9 @@ static unsigned int skb_to_hnat_info(str
- /* Fill Layer2 Info.*/
- entry = ppe_fill_L2_info(eth, entry, hw_path);
-
-- if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
-+ if ((skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
-+ || (!skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)
-+ && skb_dst(skb) && dst_xfrm(skb_dst(skb))))
- goto hnat_entry_skip_bind;
-
- hnat_entry_bind:
-@@ -2435,6 +2481,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
- skb_hnat_alg(skb) = 0;
- skb_hnat_set_tops(skb, 0);
- skb_hnat_set_cdrt(skb, 0);
-+ skb_hnat_set_is_decrypt(skb, 0);
- skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
-
- if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
-@@ -3244,7 +3291,8 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
- entry->udib1.pkt_type = IPV6_6RD;
- hnat_set_head_frags(state, skb, 0, hnat_set_alg);
- } else if (is_magic_tag_valid(skb)
-- && (skb_hnat_cdrt(skb) || skb_hnat_tops(skb))) {
-+ && ((skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))
-+ || skb_hnat_tops(skb))) {
- hnat_set_head_frags(state, skb, 0, hnat_set_alg);
- } else {
- hnat_set_head_frags(state, skb, 1, hnat_set_alg);
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -2324,10 +2324,11 @@ static int mtk_poll_rx(struct napi_struc
-
- skb_hnat_alg(skb) = 0;
- skb_hnat_filled(skb) = 0;
-- skb_hnat_set_cdrt(skb, 0);
-+ skb_hnat_set_cdrt(skb, RX_DMA_GET_CDRT(trxd.rxd7));
- skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
- skb_hnat_set_tops(skb, 0);
- skb_hnat_set_is_decap(skb, 0);
-+ skb_hnat_set_is_decrypt(skb, (skb_hnat_cdrt(skb) ? 1 : 0));
-
- if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
- trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
---- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -656,6 +656,9 @@
- #define RX_DMA_GET_AGG_CNT_V2(_x) (((_x) >> 16) & 0xff)
- #define RX_DMA_GET_TOPS_CRSN(_x) (((_x) >> 24) & 0xff)
-
-+/* PDMA V2 descriptor rxd7 */
-+#define RX_DMA_GET_CDRT(_x) (((_x) >> 8) & 0xff)
-+
- /* PHY Polling and SMI Master Control registers */
- #define MTK_PPSC 0x10000
- #define PPSC_MDC_CFG GENMASK(29, 24)
---- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
-@@ -47,7 +47,8 @@ struct hnat_desc {
- u32 tops : 6;
- u32 is_decap : 1;
- u32 cdrt : 8;
-- u32 resv3 : 4;
-+ u32 is_decrypt : 1;
-+ u32 resv3 : 3;
- u32 magic_tag_protect : 16;
- } __packed;
- #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
-@@ -101,7 +102,10 @@ struct hnat_desc {
- #define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
- #define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
- #define skb_hnat_cdrt(skb) (((struct hnat_desc *)((skb)->head))->cdrt)
-+#define skb_hnat_is_decrypt(skb) (((struct hnat_desc *)((skb)->head))->is_decrypt)
-+#define skb_hnat_is_encrypt(skb) (!skb_hnat_is_decrypt(skb))
- #define skb_hnat_set_cdrt(skb, cdrt) ((skb_hnat_cdrt(skb)) = (cdrt))
-+#define skb_hnat_set_is_decrypt(skb, is_dec) ((skb_hnat_is_decrypt(skb)) = is_dec)
- #else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
- #define skb_hnat_tops(skb) (0)
- #define skb_hnat_is_decap(skb) (0)
-@@ -109,7 +113,10 @@ struct hnat_desc {
- #define skb_hnat_set_tops(skb, tops)
- #define skb_hnat_set_is_decap(skb, is_decap)
- #define skb_hnat_cdrt(skb) (0)
-+#define skb_hnat_is_decrypt(skb) (0)
-+#define skb_hnat_is_encrypt(skb) (0)
- #define skb_hnat_set_cdrt(skb, cdrt)
-+#define skb_hnat_set_is_decrypt(skb, is_dec)
- #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
- #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
- #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4500-mtk-gre-offload-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4500-mtk-gre-offload-support.patch
deleted file mode 100644
index 3833fa0..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4500-mtk-gre-offload-support.patch
+++ /dev/null
@@ -1,45 +0,0 @@
---- a/net/ipv4/ip_gre.c
-+++ b/net/ipv4/ip_gre.c
-@@ -39,6 +39,7 @@
- #include <net/inet_ecn.h>
- #include <net/xfrm.h>
- #include <net/net_namespace.h>
-+#include <net/netfilter/nf_flow_table.h>
- #include <net/netns/generic.h>
- #include <net/rtnetlink.h>
- #include <net/gre.h>
-@@ -901,6 +902,24 @@ static int ipgre_close(struct net_device
- }
- #endif
-
-+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
-+static int gre_dev_flow_offload_check(struct flow_offload_hw_path *path)
-+{
-+ struct net_device *dev = path->dev;
-+ struct ip_tunnel *tunnel = netdev_priv(dev);
-+
-+ if (path->flags & FLOW_OFFLOAD_PATH_TNL)
-+ return -EEXIST;
-+
-+ path->flags |= FLOW_OFFLOAD_PATH_TNL;
-+ path->tnl_type = FLOW_OFFLOAD_TNL_GRETAP;
-+ path->virt_dev = dev;
-+ path->dev = tunnel->dev;
-+
-+ return 0;
-+}
-+#endif /* CONFIG_NF_FLOW_TABLE */
-+
- static const struct net_device_ops ipgre_netdev_ops = {
- .ndo_init = ipgre_tunnel_init,
- .ndo_uninit = ip_tunnel_uninit,
-@@ -1264,6 +1283,9 @@ static const struct net_device_ops gre_t
- .ndo_get_stats64 = ip_tunnel_get_stats64,
- .ndo_get_iflink = ip_tunnel_get_iflink,
- .ndo_fill_metadata_dst = gre_fill_metadata_dst,
-+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
-+ .ndo_flow_offload_check = gre_dev_flow_offload_check,
-+#endif
- };
-
- static int erspan_tunnel_init(struct net_device *dev)
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4500-mtk-ppp-offload-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4500-mtk-ppp-offload-support.patch
deleted file mode 100644
index 1409f9f..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4500-mtk-ppp-offload-support.patch
+++ /dev/null
@@ -1,65 +0,0 @@
---- a/drivers/net/ppp/ppp_generic.c
-+++ b/drivers/net/ppp/ppp_generic.c
-@@ -296,6 +296,9 @@ static void unit_put(struct idr *p, int
- static void *unit_find(struct idr *p, int n);
- static void ppp_setup(struct net_device *dev);
-
-+struct sock *ppp_netdev_get_sock(struct net_device *dev);
-+EXPORT_SYMBOL(ppp_netdev_get_sock);
-+
- static const struct net_device_ops ppp_netdev_ops;
-
- static struct class *ppp_class;
-@@ -1660,6 +1663,40 @@ ppp_send_frame(struct ppp *ppp, struct s
- ++ppp->dev->stats.tx_errors;
- }
-
-+struct sock *ppp_netdev_get_sock(struct net_device *dev)
-+{
-+ struct list_head *list;
-+ struct channel *pch;
-+ struct ppp *ppp;
-+ struct sock *sk;
-+
-+ if (!dev)
-+ return ERR_PTR(-EINVAL);
-+
-+ ppp = netdev_priv(dev);
-+
-+ list = &ppp->channels;
-+ if (list_empty(list))
-+ /* nowhere to send the packet */
-+ return ERR_PTR(-EINVAL);
-+
-+ if (ppp->flags & SC_MULTILINK)
-+ /* not doing multilink: send it down the first channel */
-+ return ERR_PTR(-EPERM);
-+
-+ list = list->next;
-+ pch = list_entry(list, struct channel, clist);
-+
-+ spin_lock(&pch->downl);
-+ if (pch->chan)
-+ sk = (struct sock *)pch->chan->private;
-+ else
-+ sk = ERR_PTR(-EINVAL);
-+ spin_unlock(&pch->downl);
-+
-+ return sk;
-+}
-+
- /*
- * Try to send the frame in xmit_pending.
- * The caller should have the xmit path locked.
---- a/include/linux/ppp_channel.h
-+++ b/include/linux/ppp_channel.h
-@@ -75,6 +75,9 @@ extern int ppp_unit_number(struct ppp_ch
- /* Get the device name associated with a channel, or NULL if none */
- extern char *ppp_dev_name(struct ppp_channel *);
-
-+/* Get the socket structure of a given ppp netdev */
-+extern struct sock *ppp_netdev_get_sock(struct net_device *dev);
-+
- /*
- * SMP locking notes:
- * The channel code must ensure that when it calls ppp_unregister_channel,
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4501-mtk-l2tp-offload-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4501-mtk-l2tp-offload-support.patch
deleted file mode 100644
index 5e16ba8..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4501-mtk-l2tp-offload-support.patch
+++ /dev/null
@@ -1,35 +0,0 @@
---- a/net/l2tp/l2tp_core.c
-+++ b/net/l2tp/l2tp_core.c
-@@ -1068,6 +1068,10 @@ int l2tp_xmit_skb(struct l2tp_session *s
- int udp_len;
- int ret = NET_XMIT_SUCCESS;
-
-+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
-+ skb_reset_inner_headers(skb);
-+#endif
-+
- /* Check that there's enough headroom in the skb to insert IP,
- * UDP and L2TP headers. If not enough, expand it to
- * make room. Adjust truesize.
---- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
-@@ -865,7 +865,8 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv,
- * and it's L2TP flow, then do not bind.
- */
- if (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
-- && skb->dev->netdev_ops->ndo_flow_offload_check) {
-+ && skb->dev->netdev_ops->ndo_flow_offload_check
-+ && !mtk_tnl_decap_offload) {
- skb->dev->netdev_ops->ndo_flow_offload_check(&hw_path);
-
- if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)
---- a/net/l2tp/l2tp_ppp.c
-+++ b/net/l2tp/l2tp_ppp.c
-@@ -356,6 +356,7 @@ static int l2tp_ppp_flow_offload_check(s
- return -EINVAL;
-
- path->flags |= FLOW_OFFLOAD_PATH_TNL;
-+ path->tnl_type = FLOW_OFFLOAD_TNL_L2TP_V2;
-
- return 0;
- }
diff --git a/21.02/files/target/linux/mediatek/patches-5.4/999-4501-mtk-pptp-offload-support.patch b/21.02/files/target/linux/mediatek/patches-5.4/999-4501-mtk-pptp-offload-support.patch
deleted file mode 100644
index ec218e1..0000000
--- a/21.02/files/target/linux/mediatek/patches-5.4/999-4501-mtk-pptp-offload-support.patch
+++ /dev/null
@@ -1,100 +0,0 @@
---- a/drivers/net/ppp/pptp.c
-+++ b/drivers/net/ppp/pptp.c
-@@ -33,6 +33,7 @@
- #include <net/route.h>
- #include <net/gre.h>
- #include <net/pptp.h>
-+#include <net/netfilter/nf_flow_table.h>
-
- #include <linux/uaccess.h>
-
-@@ -40,6 +41,9 @@
-
- #define MAX_CALLID 65535
-
-+int (*mtk_pptp_seq_next)(u16 call_id, u32 *val) = NULL;
-+EXPORT_SYMBOL(mtk_pptp_seq_next);
-+
- static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
- static struct pppox_sock __rcu **callid_sock;
-
-@@ -128,6 +132,26 @@ static void del_chan(struct pppox_sock *
- spin_unlock(&chan_lock);
- }
-
-+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
-+static int pptp_flow_offload_check(struct ppp_channel *chan,
-+ struct flow_offload_hw_path *path)
-+{
-+ struct sock *sk = (struct sock *)chan->private;
-+ struct pppox_sock *po = pppox_sk(sk);
-+
-+ if (path->flags & FLOW_OFFLOAD_PATH_TNL)
-+ return -EEXIST;
-+
-+ if (sk_pppox(po)->sk_state & PPPOX_DEAD)
-+ return -EINVAL;
-+
-+ path->flags |= FLOW_OFFLOAD_PATH_TNL;
-+ path->tnl_type = FLOW_OFFLOAD_TNL_PPTP;
-+
-+ return 0;
-+}
-+#endif /* IS_ENABLED(CONFIG_NF_FLOW_TABLE) */
-+
- static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
- {
- struct sock *sk = (struct sock *) chan->private;
-@@ -140,6 +164,7 @@ static int pptp_xmit(struct ppp_channel
- int islcp;
- int len;
- unsigned char *data;
-+ u32 seq_sent_hw;
- __u32 seq_recv;
-
-
-@@ -204,7 +229,14 @@ static int pptp_xmit(struct ppp_channel
- hdr->gre_hd.protocol = GRE_PROTO_PPP;
- hdr->call_id = htons(opt->dst_addr.call_id);
-
-- hdr->seq = htonl(++opt->seq_sent);
-+ if (mtk_pptp_seq_next && !mtk_pptp_seq_next(opt->dst_addr.call_id,
-+ &seq_sent_hw)) {
-+ opt->seq_sent = seq_sent_hw;
-+ hdr->seq = htonl(opt->seq_sent);
-+ } else {
-+ hdr->seq = htonl(++opt->seq_sent);
-+ }
-+
- if (opt->ack_sent != seq_recv) {
- /* send ack with this message */
- hdr->gre_hd.flags |= GRE_ACK;
-@@ -598,6 +630,9 @@ static int pptp_ppp_ioctl(struct ppp_cha
- static const struct ppp_channel_ops pptp_chan_ops = {
- .start_xmit = pptp_xmit,
- .ioctl = pptp_ppp_ioctl,
-+#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
-+ .flow_offload_check = pptp_flow_offload_check,
-+#endif /* IS_ENABLED(CONFIG_NF_FLOW_TABLE) */
- };
-
- static struct proto pptp_sk_proto __read_mostly = {
---- a/include/net/pptp.h
-+++ b/include/net/pptp.h
-@@ -2,6 +2,8 @@
- #ifndef _NET_PPTP_H
- #define _NET_PPTP_H
-
-+#include <net/gre.h>
-+
- #define PPP_LCP_ECHOREQ 0x09
- #define PPP_LCP_ECHOREP 0x0A
- #define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
-@@ -20,5 +22,7 @@ struct pptp_gre_header {
- __be32 ack;
- } __packed;
-
-+/* symbol exported from linux kernel driver/net/ppp/pptp.c */
-+extern int (*mtk_pptp_seq_next)(uint16_t call_id, uint32_t *val);
-
- #endif
diff --git a/feed/kernel/tops/Config-protocols.in b/feed/kernel/tops/Config-protocols.in
deleted file mode 100644
index a226263..0000000
--- a/feed/kernel/tops/Config-protocols.in
+++ /dev/null
@@ -1,36 +0,0 @@
-menu "TOPS Offload Tunnel Protocols Configuration"
-
-config MTK_TOPS_GRE
- bool
- default n
-
-config MTK_TOPS_GRETAP
- bool "Mediatek TOPS L2oGRE HW Offload"
- default y
- select MTK_TOPS_GRE
- select PACKAGE_kmod-gre
- help
- select y for L2oGRE HW offload by tunnel offload processing system
-
-config MTK_TOPS_PPTP
- bool "Mediatek TOPS PPTP HW Offload"
- default y
- select PACKAGE_ppp-mod-pptp
- select PACKAGE_pptpd
- help
- select y for PPTP HW offload by tunnel offload processing system
-
-config MTK_TOPS_L2TP
- bool
- default n
-
-config MTK_TOPS_L2TP_V2
- bool "Mediatek TOPS L2TPv2 HW Offload"
- default y
- select MTK_TOPS_L2TP
- select PACKAGE_kmod-l2tp
- select PACKAGE_xl2tpd
- help
- select y for L2TPv2 offload by tunnel offload processing system
-
-endmenu
diff --git a/feed/kernel/tops/Config.in b/feed/kernel/tops/Config.in
deleted file mode 100644
index c4e14cf..0000000
--- a/feed/kernel/tops/Config.in
+++ /dev/null
@@ -1,58 +0,0 @@
-menu "TOPS Configurations"
- depends on PACKAGE_kmod-tops
-
-choice
- prompt "TOPS Build Target"
- default MTK_TOPS_RELEASE_TARGET
- help
- select TOPS build target. Either release or developing target
-
-config MTK_TOPS_RELEASE_TARGET
- bool "MTK TOPS Release Target Build"
- select PACKAGE_kmod-tops-release
- select PACKAGE_tops-rebb-fw-release
-
-config MTK_TOPS_DEV_TARGET
- bool "MTK TOPS Developing Target Build"
- select PACKAGE_kmod-tops-dev
- select PACKAGE_tops-rebb-fw-dev
-endchoice
-
-choice
- prompt "TOPS Tunnel Count"
- default TOPS_TNL_32
- help
- Determine number of TOPS tunnel
-
-config TOPS_TNL_32
- bool "32 TOPS Tunnel"
-
-endchoice
-
-source "$(SOURCE)/Config-protocols.in"
-
-config TOPS_TNL_NUM
- int
- default 32 if TOPS_TNL_32
- help
- Configuration for TOPS tunnel count. This value should be
- 2 ^ TOPS_TNL_MAP_BIT.
-
-config TOPS_TNL_MAP_BIT
- int
- default 5 if TOPS_TNL_32
- help
- Configuration for TOPS tunnel map bit. This value should be the log
- of TOPS_TNL_NUM.
-
-config TOPS_TNL_TYPE_NUM
- int
- default 32 if TARGET_mediatek_mt7988
-
-config MTK_TOPS_SECURE_FW
- bool "TOPS Secure Firmware Load"
- default n
- help
- Enable TOPS secure firmware load
-
-endmenu
diff --git a/feed/kernel/tops/Makefile b/feed/kernel/tops/Makefile
deleted file mode 100644
index 2c7e0a9..0000000
--- a/feed/kernel/tops/Makefile
+++ /dev/null
@@ -1,118 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
-#
-# Copyright (C) 2023 Mediatek Inc. All Rights Reserved.
-# Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
-#
-
-include $(TOPDIR)/rules.mk
-include $(INCLUDE_DIR)/kernel.mk
-
-PKG_NAME:=tops
-PKG_RELEASE:=1
-
-include $(INCLUDE_DIR)/package.mk
-include $(INCLUDE_DIR)/package-defaults.mk
-
-define Build/Prepare
- mkdir -p $(PKG_BUILD_DIR)
- $(CP) ./firmware $(PKG_BUILD_DIR)/firmware
- $(CP) ./src/* $(PKG_BUILD_DIR)/
-endef
-
-define Package/tops-rebb-fw-release
- TITLE:=Mediatek Tunnel Offload Processor System ReBB Release Firmware
- SECTION:=firmware
- CATEGORY:=Firmware
- DEPENDS:= \
- @MTK_TOPS_RELEASE_TARGET \
- kmod-tops-release
-endef
-
-define Package/tops-rebb-fw-release/description
- Support for Mediatek Tunnel Offload Processor System ReBB release firmware.
- The firmware offload and accerlerate APMCU's tunnel protocols traffic.
- Available offload tunnel include L2oGRE, L2TP.
-endef
-
-define Package/tops-rebb-fw-release/install
- $(INSTALL_DIR) $(1)/lib/firmware/mediatek
- $(CP) \
- $(PKG_BUILD_DIR)/firmware/rebb/mt7988_mgmt/tops-mgmt.img \
- $(PKG_BUILD_DIR)/firmware/rebb/mt7988_offload/tops-offload.img \
- $(1)/lib/firmware/mediatek
-endef
-
-define KernelPackage/tops-release
- CATEGORY:=MTK Properties
- SUBMENU:=Drivers
- TITLE:= MTK Tunnel Offload Processor System Kernel Driver
- FILES+=$(PKG_BUILD_DIR)/tops.ko
- KCONFIG:=
- DEPENDS:= \
- @MTK_TOPS_RELEASE_TARGET \
- kmod-mediatek_hnat \
- kmod-tops \
- +kmod-pce \
- +@KERNEL_RELAY
-ifeq ($(CONFIG_MTK_TOPS_PPTP), y)
- DEPENDS+= +kmod-pptp
-endif
-endef
-
-define KernelPackage/tops-release/description
- Support for MTK Tunnel Offload Processor System Released Driver. The driver
- controls the TOPS system to reduce the loading of tunnel protocol processing
- in APMCU.
-endef
-
-define KernelPackage/tops-release-autoload
- CATEGORY:=MTK Properties
- SUBMENU:=Drivers
- TITLE:= MTK Tunnel Offload Processor System Auto Load
- AUTOLOAD:=$(call AutoLoad,51,tops)
- KCONFIG:=
- DEPENDS:= \
- kmod-tops-release \
- +kmod-pce-autoload
-endef
-
-define KernelPackage/tops-release-autoload/description
- Support for MTK Tunnel Offload Processor System auto load on system
- boot process.
-endef
-
-define KernelPackage/tops
- CATEGORY:=MTK Properties
- TITLE:=Mediatek Tunnel Offload Processor System
- SUBMENU:=Drivers
- DEFAULT:=y
- DEPENDS:= \
- @TARGET_mediatek_mt7988
-endef
-
-define KernelPackage/tops/config
- source "$(SOURCE)/Config.in"
-endef
-
-define KernelPackage/tops/description
- Support for MTK Tunnel Offload Processor System. This system reduces the
- loading of APMCU's tunnel protocol overhead and improve tunnel protocol's
- throughput.
-endef
-
-define Build/Compile
- $(MAKE) -C "$(LINUX_DIR)" \
- $(KERNEL_MAKE_FLAGS) \
- M="$(PKG_BUILD_DIR)" \
- EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
- $(EXTRA_KCONFIG) \
- modules
-endef
-
-include tops.mk
-include $(filter-out tops.mk,$(wildcard *.mk))
-
-$(eval $(call BuildPackage,tops-rebb-fw-release))
-$(eval $(call KernelPackage,tops))
-$(eval $(call KernelPackage,tops-release))
-$(eval $(call KernelPackage,tops-release-autoload))
diff --git a/feed/kernel/tops/firmware/rebb/mt7988_mgmt/tops-mgmt.img b/feed/kernel/tops/firmware/rebb/mt7988_mgmt/tops-mgmt.img
deleted file mode 100644
index 5833d10..0000000
--- a/feed/kernel/tops/firmware/rebb/mt7988_mgmt/tops-mgmt.img
+++ /dev/null
Binary files differ
diff --git a/feed/kernel/tops/firmware/rebb/mt7988_offload/tops-offload.img b/feed/kernel/tops/firmware/rebb/mt7988_offload/tops-offload.img
deleted file mode 100644
index c57c7da..0000000
--- a/feed/kernel/tops/firmware/rebb/mt7988_offload/tops-offload.img
+++ /dev/null
Binary files differ
diff --git a/feed/kernel/tops/src/Kconfig b/feed/kernel/tops/src/Kconfig
deleted file mode 100644
index eafa48e..0000000
--- a/feed/kernel/tops/src/Kconfig
+++ /dev/null
@@ -1,158 +0,0 @@
-config MTK_TOPS_SUPPORT
- bool "Mediatek Tunnel Offload Processor System Support"
- help
- Support for Mediatek Tunnel Offload Processor System which
- offloads tunnel protocols such as GRE, VxLAN, L2TP, PPTP etc. from
- host CPU. The TOPS system cooperate with Mediatek HNAT HW and
- Mediatek PCE HW to offload specific tunnel procotol.
-
-config MTK_TOPS_GRE
- depends on MTK_TOPS_SUPPORT
- bool
- help
- Support for GRE offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading GRE related encapulation
- and decapulation to NPU.
-
-config MTK_TOPS_GRETAP
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_GRE
- bool "Mediatek TOPS L2oGRE Offload Support"
- help
- Support for L2oGRE offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading L2oGRE encapulation and
- decapulation to NPU.
-
-config MTK_TOPS_PPTP
- depends on MTK_TOPS_SUPPORT
- bool "Mediatek TOPS PPTP Offload Support"
- help
- Support for PPTP offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading PPTP encapulation and
- decapulation to NPU.
-
-config MTK_TOPS_L2TP
- depends on MTK_TOPS_SUPPORT
- bool
- help
- Support for L2TP offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading L2TP related encapulation
- and decapulation to NPU.
-
-config MTK_TOPS_IP_L2TP
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_L2TP
- bool "Mediatek TOPS IP L2TP Offload Support"
- help
- Support for IP L2TP offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading IP L2TP encapulation and
- decapulation to NPU.
-
-config MTK_TOPS_UDP_L2TP_CTRL
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_L2TP
- bool "Mediatek TOPS UDP L2TP Control Offload Support"
- help
- Support for UDP L2TP control offload to Mediatek Network Processing
- Unit. Alleviate host CPU's loading by offloading UDP L2TP control
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_UDP_L2TP_DATA
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_L2TP
- bool "Mediatek TOPS UDP L2TP Data Offload Support"
- help
- Support for UDP L2TP data offload to Mediatek Network Processing
- Unit. Alleviate host CPU's loading by offloading UDP L2TP data
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_VXLAN
- depends on MTK_TOPS_SUPPORT
- bool "Mediatek TOPS VxLAN Offload Support"
- help
- Support for VxLAN offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading VxLAN encapulation and
- decapulation to NPU.
-
-config MTK_TOPS_NATT
- depends on MTK_TOPS_SUPPORT
- bool "Mediatek TOPS NAT Traversal Offload Support"
- help
- Support for NATT offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading NATT encapulation and
- decapulation to NPU.
-
-config MTK_TOPS_CAPWAP
- depends on MTK_TOPS_SUPPORT
- bool
- help
- Support for CAPWAP offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading CAPWAP related
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_CAPWAP_CTRL
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_CAPWAP
- bool "Mediatek TOPS CAPWAP Control Offload Support"
- help
- Support for CAPWAP control offload to Mediatek Network Processing
- Unit. Alleviate host CPU's loading by offloading CAPWAP control
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_CAPWAP_DATA
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_CAPWAP
- bool "Mediatek TOPS CAPWAP Data Offload Support"
- help
- Support for CAPWAP data offload to Mediatek Network Processing
- Unit. Alleviate host CPU's loading by offloading CAPWAP data
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_CAPWAP_DTLS
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_CAPWAP
- bool "Mediatek TOPS CAPWAP DTLS Offload Support"
- help
- Support for CAPWAP DTLS offload to Mediatek Network Processing
- Unit. Alleviate host CPU's loading by offloading CAPWAP DTLS
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_IPSEC
- depends on MTK_TOPS_SUPPORT
- bool
- help
- Support for IPSEC offload to Mediatek Network Processing Unit.
- Alleviate host CPU's loading by offloading IPSEC related
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_IPSEC_ESP
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_IPSEC
- bool "Mediatek TOPS IPSec ESP Offload Support"
- help
- Support for IPSec ESP offload to Mediatek Network Processing
- Unit. Alleviate host CPU's loading by offloading IPSec ESP
- encapulation and decapulation to NPU.
-
-config MTK_TOPS_IPSEC_AH
- depends on MTK_TOPS_SUPPORT
- select MTK_TOPS_IPSEC
- bool "Mediatek TOPS IPSec AH Offload Support"
- help
- Support for IPSec AH offload to Mediatek Network Processing
- Unit. Alleviate host CPU's loading by offloading IPSec AH
- encapulation and decapulation to NPU.
-
-config TOPS_TNL_NUM
- int "Mediatek TOPS Tunnel Count"
- depends on MTK_TOPS_SUPPORT
- help
- Configuration for tunnel count for Tunnel Offload Processing
- System. This value should be 2 ^ TOPS_TNL_MAP_BIT.
-
-config TOPS_TNL_MAP_BIT
- int "Mediatek TOPS Tunnel Map Bit"
- depends on MTK_TOPS_SUPPORT
- help
- Configuration for tunnel map bit for Tunnel Offload Processing
- System. This value is log of TOPS_TNL_NUM.
diff --git a/feed/kernel/tops/src/Makefile b/feed/kernel/tops/src/Makefile
deleted file mode 100644
index 2ad73cd..0000000
--- a/feed/kernel/tops/src/Makefile
+++ /dev/null
@@ -1,44 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
-#
-# Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
-#
-# Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
-#
-
-obj-$(CONFIG_MTK_TOPS_SUPPORT) += tops.o
-
-ccflags-y += -I$(src)/inc
-ccflags-y += -I$(src)/protocol/inc
-
-tops-y += ctrl.o
-tops-y += debugfs.o
-tops-y += firmware.o
-tops-y += init.o
-tops-y += hpdma.o
-tops-y += hwspinlock.o
-tops-y += mbox.o
-tops-y += mcu.o
-tops-y += misc.o
-tops-y += netsys.o
-tops-y += net-event.o
-tops-y += tops_params.o
-tops-y += tnl_offload.o
-tops-y += ser.o
-tops-y += tdma.o
-tops-y += trm-fs.o
-tops-y += trm-mcu.o
-tops-y += trm-debugfs.o
-tops-y += trm.o
-tops-y += wdt.o
-tops-y += seq_gen.o
-
-tops-y += protocol/mac/eth.o
-tops-y += protocol/mac/ppp.o
-tops-y += protocol/network/ip.o
-tops-y += protocol/transport/udp.o
-tops-$(CONFIG_MTK_TOPS_GRETAP) += protocol/tunnel/gre/gretap.o
-tops-$(CONFIG_MTK_TOPS_L2TP_V2) += protocol/tunnel/l2tp/l2tpv2.o
-tops-$(CONFIG_MTK_TOPS_PPTP) += protocol/tunnel/pptp/pptp.o
-tops-$(CONFIG_MTK_TOPS_PPTP) += protocol/tunnel/pptp/pptp_seq.o
-
-include $(wildcard $(src)/*.mk)
diff --git a/feed/kernel/tops/src/ctrl.c b/feed/kernel/tops/src/ctrl.c
deleted file mode 100644
index df41715..0000000
--- a/feed/kernel/tops/src/ctrl.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/device.h>
-
-#include "tops/firmware.h"
-#include "tops/internal.h"
-#include "tops/mcu.h"
-#include "tops/trm.h"
-#include "tops/tunnel.h"
-#include "tops/wdt.h"
-
-static int mtk_tops_trm_fetch_setting(const char *buf,
- int *ofs,
- char *name,
- u32 *offset,
- u32 *size,
- u8 *enable)
-{
- int nchar = 0;
- int ret = 0;
-
- ret = sscanf(buf + *ofs, "%31s %x %x %hhx %n",
- name, offset, size, enable, &nchar);
- if (ret != 4)
- return -EPERM;
-
- *ofs += nchar;
-
- return nchar;
-}
-
-static ssize_t mtk_tops_trm_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- char name[TRM_CONFIG_NAME_MAX_LEN] = { 0 };
- char cmd[21] = { 0 };
- int nchar = 0;
- int ret = 0;
- u32 offset;
- u8 enable;
- u32 size;
-
- ret = sscanf(buf, "%20s %n", cmd, &nchar);
- if (ret != 1)
- return -EPERM;
-
- if (!strcmp(cmd, "trm_dump")) {
- ret = mtk_trm_dump(TRM_RSN_NULL);
- if (ret)
- return ret;
- } else if (!strcmp(cmd, "trm_cfg_setup")) {
- ret = mtk_tops_trm_fetch_setting(buf, &nchar,
- name, &offset, &size, &enable);
- if (ret < 0)
- return ret;
-
- ret = mtk_trm_cfg_setup(name, offset, size, enable);
- if (ret)
- return ret;
- }
-
- return count;
-}
-
-static ssize_t mtk_tops_wdt_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- char cmd[21] = {0};
- u32 core = 0;
- u32 i;
- int ret;
-
- ret = sscanf(buf, "%20s %x", cmd, &core);
- if (ret != 2)
- return -EPERM;
-
- core &= CORE_TOPS_MASK;
- if (!strcmp(cmd, "WDT_TO")) {
- for (i = 0; i < CORE_TOPS_NUM; i++) {
- if (core & 0x1)
- mtk_tops_wdt_trigger_timeout(i);
- core >>= 1;
- }
- } else {
- return -EINVAL;
- }
-
- return count;
-}
-
-static DEVICE_ATTR_WO(mtk_tops_trm);
-static DEVICE_ATTR_WO(mtk_tops_wdt);
-
-static struct attribute *mtk_tops_attributes[] = {
- &dev_attr_mtk_tops_trm.attr,
- &dev_attr_mtk_tops_wdt.attr,
- NULL,
-};
-
-static const struct attribute_group mtk_tops_attr_group = {
- .name = "mtk_tops",
- .attrs = mtk_tops_attributes,
-};
-
-int mtk_tops_ctrl_init(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = sysfs_create_group(&pdev->dev.kobj, &mtk_tops_attr_group);
- if (ret) {
- TOPS_ERR("create sysfs failed\n");
- return ret;
- }
-
- return ret;
-}
-
-void mtk_tops_ctrl_deinit(struct platform_device *pdev)
-{
- sysfs_remove_group(&pdev->dev.kobj, &mtk_tops_attr_group);
-}
diff --git a/feed/kernel/tops/src/debugfs.c b/feed/kernel/tops/src/debugfs.c
deleted file mode 100644
index d2c40cf..0000000
--- a/feed/kernel/tops/src/debugfs.c
+++ /dev/null
@@ -1,213 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 Mediatek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/debugfs.h>
-#include <linux/inet.h>
-#include <linux/uaccess.h>
-
-#include "tops/debugfs.h"
-#include "tops/firmware.h"
-#include "tops/internal.h"
-#include "tops/mcu.h"
-#include "tops/trm.h"
-#include "tops/tunnel.h"
-#include "tops/wdt.h"
-
-static const char *tops_role_name[__TOPS_ROLE_TYPE_MAX] = {
- [TOPS_ROLE_TYPE_MGMT] = "tops-mgmt",
- [TOPS_ROLE_TYPE_CLUSTER] = "tops-offload",
-};
-
-static struct dentry *tops_fw_debugfs_root;
-
-static int tops_fw_info_show(struct seq_file *s, void *private)
-{
- enum tops_role_type rtype;
- struct tm tm = {0};
- const char *value;
- const char *prop;
- u32 nattr;
- u32 i;
-
- for (rtype = TOPS_ROLE_TYPE_MGMT; rtype < __TOPS_ROLE_TYPE_MAX; rtype++) {
- mtk_tops_fw_get_built_date(rtype, &tm);
-
- seq_printf(s, "%s FW information:\n", tops_role_name[rtype]);
- seq_printf(s, "Git revision:\t%llx\n",
- mtk_tops_fw_get_git_commit_id(rtype));
- seq_printf(s, "Build date:\t%04ld/%02d/%02d %02d:%02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
-
- nattr = mtk_tops_fw_attr_get_num(rtype);
-
- for (i = 0; i < nattr; i++) {
- prop = mtk_tops_fw_attr_get_property(rtype, i);
- if (!prop)
- continue;
-
- value = mtk_tops_fw_attr_get_value(rtype, prop);
-
- seq_printf(s, "%s:\t%s\n", prop, value);
- }
- seq_puts(s, "\n");
- }
-
- return 0;
-}
-
-static int tops_tnl_show(struct seq_file *s, void *private)
-{
- struct tops_tnl_info *tnl_info;
- struct tops_tnl_params *tnl_params;
- u32 i;
-
- for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
- tnl_info = mtk_tops_tnl_info_get_by_idx(i);
- if (IS_ERR(tnl_info))
- /* tunnel not enabled */
- continue;
-
- tnl_params = &tnl_info->tnl_params;
- if (!tnl_info->tnl_type || !tnl_info->tnl_type->tnl_param_dump)
- continue;
-
- seq_printf(s, "Tunnel Index: %02u\n", i);
-
- mtk_tops_mac_param_dump(s, &tnl_params->params);
-
- mtk_tops_network_param_dump(s, &tnl_params->params);
-
- mtk_tops_transport_param_dump(s, &tnl_params->params);
-
- tnl_info->tnl_type->tnl_param_dump(s, &tnl_params->params);
-
- seq_printf(s, "\tTOPS Entry: %02u CLS Entry: %02u CDRT: %02u Flag: 0x%x\n",
- tnl_params->tops_entry_proto,
- tnl_params->cls_entry,
- tnl_params->cdrt,
- tnl_params->flag);
- }
-
- return 0;
-}
-
-static int tops_tnl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, tops_tnl_show, file->private_data);
-}
-
-static int tops_tnl_add_new_tnl(const char *buf)
-{
- struct tops_tnl_params tnl_params;
- struct tops_params *params;
- struct tops_tnl_info *tnl_info;
- struct tops_tnl_type *tnl_type;
- char proto[DEBUG_PROTO_LEN];
- int ofs = 0;
- int ret = 0;
-
- memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
- memset(proto, 0, sizeof(proto));
-
- params = &tnl_params.params;
-
- ret = mtk_tops_debug_param_setup(buf, &ofs, params);
- if (ret)
- return ret;
-
- ret = mtk_tops_debug_param_proto_peek(buf, ofs, proto);
- if (ret < 0)
- return ret;
-
- ofs += ret;
-
- tnl_type = mtk_tops_tnl_type_get_by_name(proto);
- if (!tnl_type || !tnl_type->tnl_debug_param_setup)
- return -ENODEV;
-
- ret = tnl_type->tnl_debug_param_setup(buf, &ofs, params);
- if (ret < 0)
- return ret;
-
- tnl_params.flag |= TNL_DECAP_ENABLE;
- tnl_params.flag |= TNL_ENCAP_ENABLE;
- tnl_params.tops_entry_proto = tnl_type->tnl_proto_type;
-
- tnl_info = mtk_tops_tnl_info_alloc(tnl_type);
- if (IS_ERR(tnl_info))
- return -ENOMEM;
-
- tnl_info->flag |= TNL_INFO_DEBUG;
- memcpy(&tnl_info->cache, &tnl_params, sizeof(struct tops_tnl_params));
-
- mtk_tops_tnl_info_hash(tnl_info);
-
- mtk_tops_tnl_info_submit(tnl_info);
-
- return 0;
-}
-
-static ssize_t tops_tnl_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *data)
-{
- char cmd[21] = {0};
- char buf[512];
- int nchar = 0;
- int ret = 0;
-
- if (count > sizeof(buf))
- return -ENOMEM;
-
- if (copy_from_user(buf, buffer, count))
- return -EFAULT;
-
- buf[count] = '\0';
-
- ret = sscanf(buf, "%20s %n", cmd, &nchar);
-
- if (ret != 1)
- return -EPERM;
-
- if (!strcmp(cmd, "NEW_TNL")) {
- ret = tops_tnl_add_new_tnl(buf + nchar);
- if (ret)
- return ret;
- } else {
- return -EINVAL;
- }
-
- return count;
-}
-
-DEFINE_SHOW_ATTRIBUTE(tops_fw_info);
-
-static const struct file_operations tops_tnl_fops = {
- .open = tops_tnl_open,
- .read = seq_read,
- .write = tops_tnl_write,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-int mtk_tops_debugfs_init(struct platform_device *pdev)
-{
- tops_fw_debugfs_root = debugfs_create_dir("fw", tops_debugfs_root);
-
- debugfs_create_file("firmware_info", 0400, tops_fw_debugfs_root, NULL,
- &tops_fw_info_fops);
-
- debugfs_create_file("tunnel", 0444, tops_fw_debugfs_root, NULL,
- &tops_tnl_fops);
-
- return 0;
-}
-
-void mtk_tops_debugfs_deinit(struct platform_device *pdev)
-{
- debugfs_remove_recursive(tops_fw_debugfs_root);
-}
diff --git a/feed/kernel/tops/src/firmware.c b/feed/kernel/tops/src/firmware.c
deleted file mode 100644
index 438cbda..0000000
--- a/feed/kernel/tops/src/firmware.c
+++ /dev/null
@@ -1,798 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/arm-smccc.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/firmware.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include "tops/firmware.h"
-#include "tops/internal.h"
-#include "tops/mcu.h"
-
-#define TOPS_MGMT_IMG "mediatek/tops-mgmt.img"
-#define TOPS_OFFLOAD_IMG "mediatek/tops-offload.img"
-
-#define MTK_SIP_TOPS_LOAD 0xC2000560
-
-#define PAYLOAD_ALIGNMENT (32)
-
-#define TOPS_ITCM_BOOT_ADDR (0x40020000)
-#define TOPS_DTCM_BOOT_ADDR (0x40000000)
-#define TOPS_L2SRAM_BOOT_ADDR (0x4E100000)
-#define TOPS_DEFAULT_BOOT_ADDR (TOPS_ITCM_BOOT_ADDR)
-
-#define TOPS_FW_MAGIC (0x53504f54)
-#define TOPS_FW_HDR_VER (1)
-#define FW_HLEN (sizeof(struct tops_fw_header))
-#define FW_DATA(fw) ((fw)->data)
-#define FW_ROLE(fw) ((fw)->hdr.role)
-#define FW_PART_HLEN(fw) ((fw)->hdr.part_hdr_len)
-#define FW_PART_HDR(fw, idx) (FW_DATA(fw) + FW_PART_HLEN(fw) * (idx))
-#define FW_NUM_PARTS(fw) ((fw)->hdr.num_parts)
-#define FW_GIT_ID(fw) ((fw)->hdr.git_commit_id)
-#define FW_BUILD_TS(fw) ((fw)->hdr.build_ts)
-
-#define FW_PART_LOAD_ADDR_OVERRIDE (BIT(0))
-#define FW_PART_BOOT_OVERRIDE (BIT(1))
-
-enum tops_part_type {
- TOPS_PART_TYPE_IRAM0,
- TOPS_PART_TYPE_DRAM0,
- TOPS_PART_TYPE_L2SRAM,
- TOPS_PART_TYPE_METADATA,
-
- __TOPS_PART_TYPE_MAX,
-};
-
-enum tops_plat_id {
- TOPS_PLAT_MT7988,
-
- __TOPS_PLAT_MAX,
-};
-
-struct tops_boot_config {
- enum tops_part_type boot_type;
- u32 boot_addr;
-};
-
-struct tops_fw_plat {
- enum tops_plat_id plat;
- u16 id;
-};
-
-struct tops_fw_header {
- u32 magic;
- u8 hdr_ver;
- u8 api_ver;
- u16 hdr_len;
- u32 hdr_crc;
- u16 plat_id;
- u16 flags;
- u64 git_commit_id;
- u32 build_ts;
- u8 role;
- u8 signing_type;
- u8 num_parts;
- u8 part_hdr_len;
- u32 part_hdr_crc;
- u32 payload_len;
- u32 payload_crc;
- u32 sign_body_len;
-} __aligned(4);
-
-struct tops_fw_part_hdr {
- u8 part_type;
- u8 resv;
- u16 flags;
- u32 size;
- u32 value[2];
-} __aligned(4);
-
-struct tops_fw_part {
- const struct tops_fw_part_hdr *hdr[__TOPS_PART_TYPE_MAX];
- const void *payload[__TOPS_PART_TYPE_MAX];
-};
-
-struct tops_fw {
- struct tops_fw_header hdr;
- u8 data[0];
-};
-
-struct tops_fw_attr {
- char *property;
- char *value;
-};
-
-struct tops_fw_info {
- struct tops_fw_attr *attrs;
- u64 git_commit_id;
- u32 build_ts;
- u32 nattr;
-};
-
-struct npu {
- void __iomem *base;
- struct device *dev;
- struct tops_fw_info fw_info[__TOPS_ROLE_TYPE_MAX];
-};
-
-#if !defined(CONFIG_MTK_TOPS_SECURE_FW)
-static struct tops_boot_config tops_boot_configs[] = {
- { .boot_type = TOPS_PART_TYPE_IRAM0, .boot_addr = TOPS_ITCM_BOOT_ADDR },
- { .boot_type = TOPS_PART_TYPE_DRAM0, .boot_addr = TOPS_DTCM_BOOT_ADDR },
- { .boot_type = TOPS_PART_TYPE_L2SRAM, .boot_addr = TOPS_L2SRAM_BOOT_ADDR},
-};
-
-static struct tops_fw_plat tops_plats[] = {
- { .plat = TOPS_PLAT_MT7988, .id = 0x7988 },
-};
-#endif /* !defined(CONFIG_MTK_TOPS_SECURE_FW) */
-
-static struct npu npu;
-
-static inline void npu_write(u32 reg, u32 val)
-{
- writel(val, npu.base + reg);
-}
-
-static inline void npu_set(u32 reg, u32 mask)
-{
- setbits(npu.base + reg, mask);
-}
-
-static inline void npu_clr(u32 reg, u32 mask)
-{
- clrbits(npu.base + reg, mask);
-}
-
-static inline void npu_rmw(u32 reg, u32 mask, u32 val)
-{
- clrsetbits(npu.base + reg, mask, val);
-}
-
-static inline u32 npu_read(u32 reg)
-{
- return readl(npu.base + reg);
-}
-
-u64 mtk_tops_fw_get_git_commit_id(enum tops_role_type rtype)
-{
- if (rtype >= __TOPS_ROLE_TYPE_MAX)
- return 0;
-
- return npu.fw_info[rtype].git_commit_id;
-}
-
-void mtk_tops_fw_get_built_date(enum tops_role_type rtype, struct tm *tm)
-{
- if (rtype >= __TOPS_ROLE_TYPE_MAX)
- return;
-
- time64_to_tm(npu.fw_info[rtype].build_ts, 0, tm);
-}
-
-u32 mtk_tops_fw_attr_get_num(enum tops_role_type rtype)
-{
- if (rtype >= __TOPS_ROLE_TYPE_MAX)
- return 0;
-
- return npu.fw_info[rtype].nattr;
-}
-
-const char *mtk_tops_fw_attr_get_property(enum tops_role_type rtype, u32 idx)
-{
- if (rtype >= __TOPS_ROLE_TYPE_MAX || idx >= npu.fw_info[rtype].nattr)
- return NULL;
-
- return npu.fw_info[rtype].attrs[idx].property;
-}
-
-const char *mtk_tops_fw_attr_get_value(enum tops_role_type rtype,
- const char *property)
-{
- u32 plen = strlen(property);
- u32 nattr;
- u32 i;
-
- if (rtype >= __TOPS_ROLE_TYPE_MAX)
- return NULL;
-
- nattr = npu.fw_info[rtype].nattr;
- for (i = 0; i < nattr; i++) {
- if (!strncmp(property, npu.fw_info[rtype].attrs[i].property, plen))
- return npu.fw_info[rtype].attrs[i].value;
- }
-
- return NULL;
-}
-
-static bool mtk_tops_fw_support_plat(const struct tops_fw_header *fw_hdr)
-{
- u32 i;
-
- for (i = 0; i < __TOPS_PLAT_MAX; i++)
-		if (le16_to_cpu(fw_hdr->plat_id) == tops_plats[i].id)
- return true;
-
- return false;
-}
-
-static int mtk_tops_fw_valid_hdr(const struct tops_fw *tfw, uint32_t fw_size)
-{
- const struct tops_fw_header *fw_hdr = &tfw->hdr;
- u32 total_size;
- u32 ph_len;
-
- if (fw_size < FW_HLEN) {
-		TOPS_ERR("firmware image is smaller than the TOPS fw header\n");
- return -EINVAL;
- }
-
- if (le32_to_cpu(fw_hdr->magic) != TOPS_FW_MAGIC) {
- TOPS_ERR("not a tops fw!\n");
- return -EBADF;
- }
-
- if (le16_to_cpu(fw_hdr->hdr_ver) != TOPS_FW_HDR_VER) {
-		TOPS_ERR("unsupported tops fw header version: %u\n",
- le16_to_cpu(fw_hdr->hdr_ver));
- return -EBADF;
- }
-
- if (le16_to_cpu(fw_hdr->hdr_len) != sizeof(struct tops_fw_header)) {
- TOPS_ERR("tops fw header length mismatch\n");
- return -EBADF;
- }
-
- if (fw_hdr->part_hdr_len != sizeof(struct tops_fw_part_hdr)) {
-		TOPS_ERR("unsupported tops fw part header length: %u\n",
- fw_hdr->part_hdr_len);
- return -EBADF;
- }
-
- if (!mtk_tops_fw_support_plat(fw_hdr)) {
-		TOPS_ERR("unsupported tops platform fw: %u\n",
- le16_to_cpu(fw_hdr->plat_id));
- return -EBADF;
- }
-
- if (fw_hdr->role >= __TOPS_ROLE_TYPE_MAX) {
-		TOPS_ERR("unsupported tops role: %u\n", fw_hdr->role);
- return -EBADF;
- }
-
- if (fw_hdr->num_parts > __TOPS_PART_TYPE_MAX) {
- TOPS_ERR("number of parts exceeds tops' support: %u\n",
- fw_hdr->num_parts);
- return -EBADF;
- }
-
- ph_len = fw_hdr->part_hdr_len * fw_hdr->num_parts;
- total_size = fw_hdr->hdr_len + ph_len + fw_hdr->payload_len;
-
- if (total_size > fw_size) {
- TOPS_ERR("firmware incomplete\n");
- return -EBADF;
- }
-
- return 0;
-}
-
-static int mtk_tops_fw_init_part_data(const struct tops_fw *fw,
- struct tops_fw_part *part)
-{
- const struct tops_fw_part_hdr *phdr;
- uint32_t part_off = FW_PART_HLEN(fw) * FW_NUM_PARTS(fw);
- int ret = 0;
- u8 i;
-
- for (i = 0; i < FW_NUM_PARTS(fw); i++) {
- /* get part hdr */
- phdr = (struct tops_fw_part_hdr *)FW_PART_HDR(fw, i);
- if (phdr->part_type >= __TOPS_PART_TYPE_MAX) {
- TOPS_ERR("unknown part type: %u\n", phdr->part_type);
- return -EBADF;
- }
-
- part->hdr[phdr->part_type] = phdr;
-
- /* get part payload */
- part->payload[phdr->part_type] = FW_DATA(fw) + part_off;
-
- part_off += ALIGN(le32_to_cpu(phdr->size), PAYLOAD_ALIGNMENT);
- }
-
- return ret;
-}
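/*
 * Layout of a TOPS firmware image as implied by mtk_tops_fw_valid_hdr()
 * and mtk_tops_fw_init_part_data() above (an informational sketch; the
 * helper below is hypothetical and not part of the original driver):
 *
 *   struct tops_fw_header         hdr_len bytes
 *   part headers [0..num_parts)   part_hdr_len bytes each
 *   part payloads [0..num_parts)  each padded to PAYLOAD_ALIGNMENT (32 B)
 */
static inline u32 tops_fw_payload_ofs(const struct tops_fw *fw, u8 idx)
{
	u32 ofs = FW_PART_HLEN(fw) * FW_NUM_PARTS(fw);
	const struct tops_fw_part_hdr *phdr;
	u8 i;

	for (i = 0; i < idx; i++) {
		phdr = (const struct tops_fw_part_hdr *)FW_PART_HDR(fw, i);
		ofs += ALIGN(le32_to_cpu(phdr->size), PAYLOAD_ALIGNMENT);
	}

	/* offset is relative to FW_DATA(fw), i.e. past the main header */
	return ofs;
}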
-
-#if defined(CONFIG_MTK_TOPS_SECURE_FW)
-static int mtk_tops_fw_smc(u32 smc_id,
- u64 x1,
- u64 x2,
- u64 x3,
- u64 x4,
- struct arm_smccc_res *res)
-{
- if (!res)
- return -EINVAL;
-
- arm_smccc_smc(smc_id, x1, x2, x3, x4, 0, 0, 0, res);
-
- return res->a0;
-}
-
-static int __mtk_tops_fw_bring_up_core(const void *fw, u32 fw_size)
-{
- struct arm_smccc_res res = {0};
- dma_addr_t fw_paddr;
- void *fw_vaddr;
- u32 order = 0;
- u32 psize;
- int ret;
-
- psize = (fw_size / PAGE_SIZE) + 1;
- while ((1 << order) < psize)
- order++;
-
-	fw_vaddr = (void *)__get_free_pages(GFP_KERNEL, order);
- if (!fw_vaddr)
- return -ENOMEM;
-
- memcpy(fw_vaddr, fw, fw_size);
-
-	fw_paddr = dma_map_single(tops_dev, fw_vaddr, fw_size, DMA_TO_DEVICE);
- if (dma_mapping_error(tops_dev, fw_paddr)) {
- ret = -ENOMEM;
- goto dma_map_err;
- }
- /* make sure firmware data is written and mapped to buffer */
- wmb();
-
- ret = mtk_tops_fw_smc(MTK_SIP_TOPS_LOAD, 0, fw_paddr, fw_size, 0, &res);
- if (ret)
- TOPS_ERR("tops secure firmware load failed: %d\n", ret);
-
- dma_unmap_single(tops_dev, fw_paddr, fw_size, DMA_TO_DEVICE);
-
-dma_map_err:
-	free_pages((unsigned long)fw_vaddr, order);
-
- return ret;
-}
-#else /* !defined(CONFIG_MTK_TOPS_SECURE_FW) */
-static u32 mtk_tops_fw_get_boot_addr(struct tops_fw_part *part)
-{
- const struct tops_fw_part_hdr *hdr = NULL;
- u32 boot_addr = TOPS_DEFAULT_BOOT_ADDR;
- u32 i;
-
- for (i = TOPS_PART_TYPE_IRAM0; i < TOPS_PART_TYPE_METADATA; i++) {
- hdr = part->hdr[i];
-
- if (le16_to_cpu(hdr->flags) & FW_PART_BOOT_OVERRIDE) {
- boot_addr = tops_boot_configs[i].boot_addr;
-
- if (le16_to_cpu(hdr->flags) & FW_PART_LOAD_ADDR_OVERRIDE)
- boot_addr = le32_to_cpu(hdr->value[0]);
- }
- }
-
- return boot_addr;
-}
-
-static void __mtk_tops_fw_load_data(const struct tops_fw_part_hdr *phdr,
- const void *payload,
- u32 addr)
-{
- int ofs;
-
- for (ofs = 0; ofs < le32_to_cpu(phdr->size); ofs += 0x4)
- npu_write(addr + ofs, *(u32 *)(payload + ofs));
-}
-
-static int mtk_tops_fw_load_core_mgmt(struct tops_fw_part *part)
-{
- if (!part)
- return -ENODEV;
-
- __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_IRAM0],
- part->payload[TOPS_PART_TYPE_IRAM0],
- TOP_CORE_M_ITCM);
-
- __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_DRAM0],
- part->payload[TOPS_PART_TYPE_DRAM0],
- TOP_CORE_M_DTCM);
-
- __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_L2SRAM],
- part->payload[TOPS_PART_TYPE_L2SRAM],
- TOP_L2SRAM);
-
- return 0;
-}
-
-static int mtk_tops_fw_bring_up_core_mgmt(struct tops_fw_part *part)
-{
- int ret = 0;
-
- /* setup boot address */
- npu_write(TOP_CORE_M_RESET_VECTOR, mtk_tops_fw_get_boot_addr(part));
-
- /* de-assert core reset */
- npu_write(TOP_CORE_NPU_SW_RST, 0);
-
- /* enable run stall */
- npu_write(TOP_CORE_NPU_CTRL, 0x1);
-
- /* enable ext bootup sel */
- npu_write(TOP_CORE_M_STAT_VECTOR_SEL, 0x1);
-
- /* toggle reset */
- npu_write(TOP_CORE_NPU_SW_RST, 0x1);
- npu_write(TOP_CORE_NPU_SW_RST, 0x0);
-
- /* load firmware */
- ret = mtk_tops_fw_load_core_mgmt(part);
- if (ret) {
- TOPS_ERR("load core mgmt fw failed: %d\n", ret);
- return ret;
- }
-
- /* release run stall */
- npu_write(TOP_CORE_NPU_CTRL, 0);
-
- return ret;
-}
-
-static int mtk_tops_fw_load_core_offload(struct tops_fw_part *part,
- enum core_id core)
-{
- if (!part)
- return -ENODEV;
-
- if (core >= CORE_OFFLOAD_NUM)
- return -EPERM;
-
- __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_IRAM0],
- part->payload[TOPS_PART_TYPE_IRAM0],
- CLUST_CORE_X_ITCM(core));
-
- __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_DRAM0],
- part->payload[TOPS_PART_TYPE_DRAM0],
- CLUST_CORE_X_DTCM(core));
-
- return 0;
-}
-
-static int __mtk_tops_fw_bring_up_core_offload(struct tops_fw_part *part,
- enum core_id core)
-{
- int ret = 0;
-
- /* setup boot address */
- npu_write(CLUST_CORE_X_RESET_VECTOR(core),
- mtk_tops_fw_get_boot_addr(part));
-
- /* de-assert core reset */
- npu_write(CLUST_CORE_NPU_SW_RST(core), 0);
-
- /* enable run stall */
- npu_write(CLUST_CORE_NPU_CTRL(core), 0x1);
-
- /* enable ext bootup sel */
- npu_write(CLUST_CORE_X_STAT_VECTOR_SEL(core), 0x1);
-
- /* toggle reset */
- npu_write(CLUST_CORE_NPU_SW_RST(core), 0x1);
- npu_write(CLUST_CORE_NPU_SW_RST(core), 0x0);
-
- /* load firmware */
- ret = mtk_tops_fw_load_core_offload(part, core);
- if (ret) {
- TOPS_ERR("load core offload fw failed: %d\n", ret);
- return ret;
- }
-
- /* release run stall */
- npu_write(CLUST_CORE_NPU_CTRL(core), 0);
-
- return ret;
-}
-
-static int mtk_tops_fw_bring_up_core_offload(struct tops_fw_part *part)
-{
- int ret = 0;
- u32 i = 0;
-
- __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_L2SRAM],
- part->payload[TOPS_PART_TYPE_L2SRAM],
- CLUST_L2SRAM);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- ret = __mtk_tops_fw_bring_up_core_offload(part, i);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int __mtk_tops_fw_bring_up_core(const struct tops_fw *tfw,
- struct tops_fw_part *part)
-{
- int ret = 0;
-
- if (!tfw || !part)
- return -EINVAL;
-
- /* bring up core by role */
- switch (FW_ROLE(tfw)) {
- case TOPS_ROLE_TYPE_MGMT:
- ret = mtk_tops_fw_bring_up_core_mgmt(part);
-
- break;
- case TOPS_ROLE_TYPE_CLUSTER:
- ret = mtk_tops_fw_bring_up_core_offload(part);
-
- break;
- default:
-		TOPS_ERR("unsupported tops fw role\n");
-
- return -EBADF;
- }
-
- return ret;
-}
-#endif /* defined(CONFIG_MTK_TOPS_SECURE_FW) */
-
-static int mtk_tops_fw_get_info(const struct tops_fw *tfw, struct tops_fw_part *part)
-{
- const struct tops_fw_part_hdr *phdr;
- const u8 *payload;
- struct tops_fw_info *fw_info;
- struct tops_fw_attr *attrs;
- u32 kofs, klen, vofs, vlen;
- u32 meta_len;
- u32 ofs = 0;
- u32 nattr;
- int i;
-
- if (!tfw || !part)
- return -EINVAL;
-
- if (FW_ROLE(tfw) >= __TOPS_ROLE_TYPE_MAX)
- return -EINVAL;
-
- phdr = part->hdr[TOPS_PART_TYPE_METADATA];
- payload = part->payload[TOPS_PART_TYPE_METADATA];
- if (!phdr || !payload)
- return 0;
-
- meta_len = le32_to_cpu(phdr->size);
- if (!meta_len)
- return 0;
-
- fw_info = &npu.fw_info[FW_ROLE(tfw)];
- fw_info->nattr = nattr = le32_to_cpu(*((u32 *)payload));
- ofs += 0x4;
-
-	fw_info->attrs = devm_kcalloc(tops_dev,
-				      nattr,
-				      sizeof(struct tops_fw_attr),
-				      GFP_KERNEL);
- if (!fw_info->attrs) {
- fw_info->nattr = 0;
- return -ENOMEM;
- }
- attrs = fw_info->attrs;
-
- for (i = 0; i < nattr; i++) {
- struct tops_fw_attr *attr = &attrs[i];
-
- /* get property offset */
- if (ofs + (i * 2) * 0x4 >= meta_len)
- break;
- kofs = le32_to_cpu(*((u32 *)(payload + ofs + (i * 2) * 0x4)));
-
- /* get value offset */
- if (ofs + (i * 2 + 1) * 0x4 >= meta_len)
- break;
- vofs = le32_to_cpu(*((u32 *)(payload + ofs + (i * 2 + 1) * 0x4)));
-
- klen = strlen(payload + kofs);
- vlen = strlen(payload + vofs);
- if (!kofs || !vofs || !klen || !vlen) {
- TOPS_ERR("invalid attribute property value pair, kofs: %u, klen: %u, vofs: %u, vlen: %u\n",
- kofs, klen, vofs, vlen);
- break;
- }
-
- attr->property = devm_kzalloc(tops_dev,
- sizeof(char) * klen + 1,
- GFP_KERNEL);
- if (!attr->property)
- goto err_out;
-
- attr->value = devm_kzalloc(tops_dev,
- sizeof(char) * vlen + 1,
- GFP_KERNEL);
- if (!attr->value) {
- devm_kfree(tops_dev, attr->property);
- goto err_out;
- }
-
- strncpy(attr->property, payload + kofs, klen);
- strncpy(attr->value, payload + vofs, vlen);
- }
-
- fw_info->git_commit_id = le64_to_cpu(FW_GIT_ID(tfw));
- fw_info->build_ts = le32_to_cpu(FW_BUILD_TS(tfw));
-
- return 0;
-
-err_out:
- fw_info->git_commit_id = 0;
- fw_info->build_ts = 0;
-
- for (i = i - 1; i >= 0; i--) {
- devm_kfree(tops_dev, attrs[i].property);
- devm_kfree(tops_dev, attrs[i].value);
- }
-
- devm_kfree(tops_dev, attrs);
-
- return -ENOMEM;
-}
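/*
 * Metadata part layout as consumed by mtk_tops_fw_get_info() above
 * (descriptive sketch only; no such struct exists in the driver):
 *
 *	__le32 nattr;		   number of property/value pairs
 *	__le32 ofs[2 * nattr];	   byte offsets into this payload,
 *				   ofs[2i] -> property, ofs[2i + 1] -> value
 *	char   strings[];	   NUL-terminated strings addressed above
 *
 * A zero offset or an empty string terminates the parse early.
 */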
-
-static void mtk_tops_fw_put_info(void)
-{
- enum tops_role_type rtype;
- struct tops_fw_attr *attrs;
- u32 nattr;
- u32 i;
-
- for (rtype = TOPS_ROLE_TYPE_MGMT; rtype < __TOPS_ROLE_TYPE_MAX; rtype++) {
- nattr = npu.fw_info[rtype].nattr;
- attrs = npu.fw_info[rtype].attrs;
-
- npu.fw_info[rtype].git_commit_id = 0;
- npu.fw_info[rtype].build_ts = 0;
-
- if (!nattr)
- continue;
-
- for (i = 0; i < nattr; i++) {
- devm_kfree(tops_dev, attrs[i].property);
- devm_kfree(tops_dev, attrs[i].value);
- }
-
- devm_kfree(tops_dev, attrs);
-
- npu.fw_info[rtype].nattr = 0;
- npu.fw_info[rtype].attrs = NULL;
- }
-}
-
-int mtk_tops_fw_bring_up_core(const char *fw_path)
-{
- const struct firmware *fw;
- const struct tops_fw *tfw;
- struct tops_fw_part part;
- struct tm tm = {0};
- int ret;
-
- ret = request_firmware(&fw, fw_path, tops_dev);
- if (ret) {
- TOPS_ERR("request %s firmware failed\n", fw_path);
- return ret;
- }
-
- tfw = (const void *)fw->data;
-
- ret = mtk_tops_fw_valid_hdr(tfw, fw->size);
- if (ret) {
-		TOPS_ERR("validate fw image %s failed: %d\n", fw_path, ret);
- goto err_out;
- }
-
- ret = mtk_tops_fw_init_part_data(tfw, &part);
- if (ret) {
- TOPS_ERR("init fw part data failed: %d\n", ret);
- goto err_out;
- }
-
- ret = mtk_tops_fw_get_info(tfw, &part);
- if (ret) {
- TOPS_ERR("meta data initialize failed: %d\n", ret);
- goto err_out;
- }
-
- ret = __mtk_tops_fw_bring_up_core(tfw, &part);
- if (ret) {
- TOPS_ERR("bring up core %s failed\n", fw_path);
- mtk_tops_fw_put_info();
- goto err_out;
- }
-
- mtk_tops_fw_get_built_date(FW_ROLE(tfw), &tm);
-
- TOPS_NOTICE("TOPS Load Firmware: %s\n", fw_path);
- TOPS_NOTICE("\tFirmware version:\t%s\n",
- mtk_tops_fw_attr_get_value(FW_ROLE(tfw), "version"));
- TOPS_NOTICE("\tGit revision:\t\t%llx\n",
- mtk_tops_fw_get_git_commit_id(FW_ROLE(tfw)));
- TOPS_NOTICE("\tBuilt date:\t\t%04ld/%02d/%02d %02d:%02d:%02d\n",
- tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
- tm.tm_hour, tm.tm_min, tm.tm_sec);
-
-err_out:
- release_firmware(fw);
-
- return ret;
-}
-#if defined(CONFIG_MTK_TOPS_EVALUATION)
-EXPORT_SYMBOL(mtk_tops_fw_bring_up_core);
-#endif /* defined(CONFIG_MTK_TOPS_EVALUATION) */
-
-int mtk_tops_fw_bring_up_default_cores(void)
-{
- int ret;
-
- ret = mtk_tops_fw_bring_up_core(TOPS_MGMT_IMG);
- if (ret)
- return ret;
-
- ret = mtk_tops_fw_bring_up_core(TOPS_OFFLOAD_IMG);
-
- return ret;
-}
-
-#if defined(CONFIG_MTK_TOPS_CORE_DEBUG)
-static void mtk_tops_fw_enable_core_debug(void)
-{
- u32 i;
-
- npu_write(TOP_CORE_DBG_CTRL, 0x3F);
- npu_write(CLUST_CORE_DBG_CTRL, 0x1F);
-
- npu_write(TOP_CORE_OCD_CTRL, 0x1);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++)
- npu_write(CLUST_CORE_OCD_CTRL(i), 0x1);
-}
-#endif /* defined(CONFIG_MTK_TOPS_CORE_DEBUG) */
-
-void mtk_tops_fw_clean_up(void)
-{
- mtk_tops_fw_put_info();
-}
-
-int mtk_tops_fw_init(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
- if (!res)
- return -ENXIO;
-
- npu.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!npu.base)
- return -ENOMEM;
-
-/* TODO: move to somewhere else */
-#if defined(CONFIG_MTK_TOPS_CORE_DEBUG)
-	mtk_tops_fw_enable_core_debug();
-#endif /* defined(CONFIG_MTK_TOPS_CORE_DEBUG) */
-
- return 0;
-}
diff --git a/feed/kernel/tops/src/hpdma.c b/feed/kernel/tops/src/hpdma.c
deleted file mode 100644
index 4bcae0f..0000000
--- a/feed/kernel/tops/src/hpdma.c
+++ /dev/null
@@ -1,939 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/io.h>
-#include <linux/lockdep.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_dma.h>
-#include <linux/wait.h>
-#include <linux/workqueue.h>
-
-#include <virt-dma.h>
-
-#include "tops/hpdma.h"
-#include "tops/hwspinlock.h"
-#include "tops/internal.h"
-#include "tops/mbox.h"
-#include "tops/mcu.h"
-
-#define HPDMA_CHAN_NUM (4)
-
-#define MTK_HPDMA_ALIGN_SIZE (DMAENGINE_ALIGN_16_BYTES)
-#define MTK_HPDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
-
-struct hpdma_dev;
-struct hpdma_vchan;
-struct hpdma_vdesc;
-struct hpdma_init_data;
-
-typedef struct hpdma_dev *(*hpdma_init_func_t)(struct platform_device *pdev,
- const struct hpdma_init_data *data);
-typedef void (*tx_pending_desc_t)(struct hpdma_dev *hpdma,
- struct hpdma_vchan *hchan,
- struct hpdma_vdesc *hdesc);
-typedef struct dma_chan *(*of_dma_xlate_func_t)(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma);
-
-struct hpdma_vdesc {
- struct virt_dma_desc vdesc;
- dma_addr_t src;
- dma_addr_t dst;
- u32 total_num;
- u32 axsize;
- size_t len;
-};
-
-struct hpdma_vchan {
- struct virt_dma_chan vchan;
- struct work_struct tx_work;
- struct hpdma_vdesc *issued_desc;
- wait_queue_head_t stop_wait;
- bool busy;
- bool terminating;
- u8 pchan_id;
-};
-
-struct hpdma_ops {
- int (*vchan_init)(struct hpdma_dev *hpdma, struct dma_device *ddev);
- void (*vchan_deinit)(struct hpdma_dev *hpdma);
- int (*mbox_init)(struct platform_device *pdev, struct hpdma_dev *hpdma);
- void (*mbox_deinit)(struct platform_device *pdev, struct hpdma_dev *hpdma);
- tx_pending_desc_t tx_pending_desc;
- of_dma_xlate_func_t of_dma_xlate;
-};
-
-struct hpdma_init_data {
- struct hpdma_ops ops;
- hpdma_init_func_t init;
- mbox_handler_func_t mbox_handler;
- enum hwspinlock_group hwspinlock_grp;
- u32 trigger_start_slot; /* permission to start dma transfer */
- u32 ch_base_slot; /* permission to occupy a physical channel */
-};
-
-struct hpdma_dev {
- struct dma_device ddev;
- struct hpdma_ops ops;
- struct hpdma_vchan *hvchans;
- struct hpdma_vchan *issued_chan;
- spinlock_t lock; /* prevent inter-process racing hwspinlock */
- void __iomem *base;
- enum hwspinlock_group hwspinlock_grp;
- u32 trigger_start_slot; /* permission to start dma transfer */
- u32 ch_base_slot; /* permission to occupy a physical channel */
-};
-
-struct top_hpdma_dev {
- struct mailbox_dev mdev;
- struct hpdma_dev hpdma;
-};
-
-struct clust_hpdma_dev {
- struct mailbox_dev mdev[CORE_MAX];
- struct hpdma_dev hpdma;
-};
-
-static inline void hpdma_write(struct hpdma_dev *hpdma, u32 reg, u32 val)
-{
- writel(val, hpdma->base + reg);
-}
-
-static inline void hpdma_set(struct hpdma_dev *hpdma, u32 reg, u32 mask)
-{
- setbits(hpdma->base + reg, mask);
-}
-
-static inline void hpdma_clr(struct hpdma_dev *hpdma, u32 reg, u32 mask)
-{
- clrbits(hpdma->base + reg, mask);
-}
-
-static inline void hpdma_rmw(struct hpdma_dev *hpdma, u32 reg, u32 mask, u32 val)
-{
- clrsetbits(hpdma->base + reg, mask, val);
-}
-
-static inline u32 hpdma_read(struct hpdma_dev *hpdma, u32 reg)
-{
- return readl(hpdma->base + reg);
-}
-
-struct hpdma_dev *chan_to_hpdma_dev(struct dma_chan *chan)
-{
- return container_of(chan->device, struct hpdma_dev, ddev);
-}
-
-struct hpdma_vchan *chan_to_hpdma_vchan(struct dma_chan *chan)
-{
- return container_of(chan, struct hpdma_vchan, vchan.chan);
-}
-
-struct hpdma_vdesc *vdesc_to_hpdma_vdesc(struct virt_dma_desc *vdesc)
-{
- return container_of(vdesc, struct hpdma_vdesc, vdesc);
-}
-
-static inline void __mtk_hpdma_vchan_deinit(struct virt_dma_chan *vchan)
-{
- list_del(&vchan->chan.device_node);
- tasklet_kill(&vchan->task);
-}
-
-static inline int mtk_hpdma_prepare_transfer(struct hpdma_dev *hpdma)
-{
- /*
-	 * released when the hpdma transfer is done; this prevents other
-	 * APMCU processes from contending for the hw spinlock meanwhile.
-	 * since this lock will not be contended in interrupt context,
-	 * it's safe to hold it without disabling irqs
- */
- spin_lock(&hpdma->lock);
-
-	/* no issued channel is expected at this point */
- if (!hpdma->issued_chan)
- return 0;
-
- dev_err(hpdma->ddev.dev,
- "hpdma issued_chan is not empty when transfer started");
-
- WARN_ON(1);
-
- spin_unlock(&hpdma->lock);
-
- return -1;
-}
-
-static inline void mtk_hpdma_unprepare_transfer(struct hpdma_dev *hpdma)
-{
- spin_unlock(&hpdma->lock);
-}
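/*
 * Lock lifetime for hpdma->lock, condensed from the tx_pending_desc
 * callbacks further below (illustrative only, hence the #if 0): the lock
 * is taken in mtk_hpdma_prepare_transfer() and, once a transfer has been
 * started, is only released in the mailbox completion handler
 * mtk_hpdma_ap_recv_mbox_msg(); failure paths release it via
 * mtk_hpdma_unprepare_transfer().
 */
#if 0
	if (mtk_hpdma_prepare_transfer(hpdma))
		return;				/* lock already dropped on error */

	/* ... take hwspinlock slots and program the physical channel ... */

	if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
		return;				/* lock released later by the mbox handler */

	mtk_hpdma_unprepare_transfer(hpdma);	/* start failed: drop the lock here */
#endif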
-
-static inline int mtk_hpdma_start_transfer(struct hpdma_dev *hpdma,
- struct hpdma_vchan *hvchan,
- struct hpdma_vdesc *hvdesc)
-{
- /* occupy hpdma start permission */
- mtk_tops_hwspin_lock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);
-
-	/* let the terminate flow know that the HW is about to start */
- hvchan->busy = true;
-
- list_del(&hvdesc->vdesc.node);
-
- /* set vdesc to current channel's pending transfer */
- hvchan->issued_desc = hvdesc;
- hpdma->issued_chan = hvchan;
-
- /* last chance to abort the transfer if channel is terminating */
- if (unlikely(hvchan->terminating))
- goto terminate_transfer;
-
- /* trigger dma start */
- hpdma_set(hpdma, TOPS_HPDMA_X_START(hvchan->pchan_id), HPDMA_START);
-
- return 0;
-
-terminate_transfer:
- hvchan->busy = false;
-
- hpdma->issued_chan = NULL;
-
- mtk_tops_hwspin_unlock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);
-
- return -1;
-}
-
-/* set up a channel's parameters before it acquires permission to start the transfer */
-static inline void mtk_hpdma_config_pchan(struct hpdma_dev *hpdma,
- struct hpdma_vchan *hvchan,
- struct hpdma_vdesc *hvdesc)
-{
- /* update axsize */
- hpdma_rmw(hpdma,
- TOPS_HPDMA_X_CTRL(hvchan->pchan_id),
- HPDMA_AXSIZE_MASK,
- FIELD_PREP(HPDMA_AXSIZE_MASK, hvdesc->axsize));
-
- /* update total num */
- hpdma_rmw(hpdma,
- TOPS_HPDMA_X_NUM(hvchan->pchan_id),
- HPDMA_TOTALNUM_MASK,
- FIELD_PREP(HPDMA_TOTALNUM_MASK, hvdesc->total_num));
-
- /* set src addr */
- hpdma_write(hpdma, TOPS_HPDMA_X_SRC(hvchan->pchan_id), hvdesc->src);
-
- /* set dst addr */
- hpdma_write(hpdma, TOPS_HPDMA_X_DST(hvchan->pchan_id), hvdesc->dst);
-}
-
-/*
- * TODO: in general we should allocate buffers for dma transmission here,
- * but hpdma currently has nothing to allocate per channel,
- * so we may not need this at all
- */
-static int mtk_hpdma_alloc_chan_resources(struct dma_chan *chan)
-{
- return 0;
-}
-
-/* TODO: we may not need this right now */
-static void mtk_hpdma_free_chan_resources(struct dma_chan *chan)
-{
- /* stop all transmission, we have nothing to free for each channel */
- dmaengine_terminate_sync(chan);
-}
-
-static void mtk_hpdma_issue_vchan_pending(struct hpdma_dev *hpdma,
- struct hpdma_vchan *hvchan)
-{
- struct virt_dma_desc *vdesc;
-
-	/* vchan's lock needs to be held since its list will be modified */
- lockdep_assert_held(&hvchan->vchan.lock);
-
-	/* if there is a pending transfer on the fly, we should wait until it is done */
- if (unlikely(hvchan->issued_desc))
- return;
-
- /* fetch next desc to process */
- vdesc = vchan_next_desc(&hvchan->vchan);
- if (unlikely(!vdesc))
- return;
-
- /* start to transfer a pending descriptor */
- hpdma->ops.tx_pending_desc(hpdma, hvchan, vdesc_to_hpdma_vdesc(vdesc));
-}
-
-static void mtk_hpdma_issue_pending(struct dma_chan *chan)
-{
- struct hpdma_dev *hpdma = chan_to_hpdma_dev(chan);
- struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);
- unsigned long flag;
-
- spin_lock_irqsave(&hvchan->vchan.lock, flag);
-
- if (vchan_issue_pending(&hvchan->vchan))
- mtk_hpdma_issue_vchan_pending(hpdma, hvchan);
-
- spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
-}
-
-/*
- * since hpdma cannot report how many chunks are left to transfer,
- * we can only report whether the current desc has completed
- */
-static enum dma_status mtk_hpdma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *tx_state)
-{
- return dma_cookie_status(chan, cookie, tx_state);
-}
-
-/* optimize the hpdma parameters to get maximum throughput */
-static int mtk_hpdma_config_desc(struct hpdma_vdesc *hvdesc)
-{
- hvdesc->axsize = 4;
-
- /*
-	 * the total transfer length = (1 << axsize) * total_num, where the
-	 * bus width (1 << axsize) can be 1, 2, 4, 8 or 16 bytes;
-	 * pick the largest width that evenly divides the requested length
- */
- while (hvdesc->axsize > 0 && hvdesc->len % (0x1 << hvdesc->axsize))
- hvdesc->axsize--;
-
- hvdesc->total_num = hvdesc->len / (0x1 << hvdesc->axsize);
-
- return 0;
-}
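/*
 * Worked example for mtk_hpdma_config_desc() above: len = 100 bytes gives
 * 100 % 16 != 0 and 100 % 8 != 0 but 100 % 4 == 0, so axsize ends up as 2
 * (4-byte beats) and total_num = 100 / 4 = 25; len = 96 keeps axsize = 4
 * (16-byte beats) with total_num = 6.
 */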
-
-static struct dma_async_tx_descriptor *mtk_hpdma_prep_dma_memcpy(struct dma_chan *chan,
- dma_addr_t dst,
- dma_addr_t src,
- size_t len,
- unsigned long flags)
-{
- struct hpdma_vdesc *hvdesc;
- int ret = 0;
-
- if (!len)
- return ERR_PTR(-EPERM);
-
- if (dst > 0xFFFFFFFF || src > 0xFFFFFFFF)
- return ERR_PTR(-EINVAL);
-
- hvdesc = kzalloc(sizeof(struct hpdma_vdesc), GFP_NOWAIT);
- if (!hvdesc)
- return ERR_PTR(-ENOMEM);
-
- hvdesc->src = src;
- hvdesc->dst = dst;
- hvdesc->len = len;
-
- ret = mtk_hpdma_config_desc(hvdesc);
- if (ret) {
- kfree(hvdesc);
- return ERR_PTR(ret);
- }
-
- return vchan_tx_prep(to_virt_chan(chan), &hvdesc->vdesc, flags);
-}
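/*
 * A minimal client-side sketch of driving this engine through the generic
 * dmaengine API (the requesting device and channel name "tnl-sync" are
 * assumptions; the real in-driver requester is not part of this file).
 */
#if 0
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "tnl-sync");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	desc = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len, 0);
	if (!desc || IS_ERR(desc))
		goto release;

	cookie = dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* poll; completion granularity is per descriptor, see tx_status above */
	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_COMPLETE)
		cpu_relax();

release:
	dma_release_channel(chan);
#endif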
-
-static void mtk_hpdma_terminate_all_inactive_desc(struct dma_chan *chan)
-{
- struct virt_dma_chan *vchan = to_virt_chan(chan);
- unsigned long flag;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&vchan->lock, flag);
-
- list_splice_tail_init(&vchan->desc_allocated, &head);
- list_splice_tail_init(&vchan->desc_submitted, &head);
- list_splice_tail_init(&vchan->desc_issued, &head);
-
- spin_unlock_irqrestore(&vchan->lock, flag);
-
- vchan_dma_desc_free_list(vchan, &head);
-}
-
-static int mtk_hpdma_terminate_all(struct dma_chan *chan)
-{
- struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);
-
- hvchan->terminating = true;
-
- /* first terminate all inactive descriptors */
- mtk_hpdma_terminate_all_inactive_desc(chan);
-
- if (!hvchan->issued_desc)
- goto out;
-
-	/* if there is a desc on the fly, we must wait until it is done */
- wait_event_interruptible(hvchan->stop_wait, !hvchan->busy);
-
- vchan_terminate_vdesc(&hvchan->issued_desc->vdesc);
-
- hvchan->issued_desc = NULL;
-
- vchan_synchronize(&hvchan->vchan);
-
-out:
- hvchan->terminating = false;
-
- return 0;
-}
-
-static void mtk_hpdma_vdesc_free(struct virt_dma_desc *vdesc)
-{
- kfree(container_of(vdesc, struct hpdma_vdesc, vdesc));
-}
-
-static void mtk_hpdma_tx_work(struct work_struct *work)
-{
- struct hpdma_vchan *hvchan = container_of(work, struct hpdma_vchan, tx_work);
- struct hpdma_dev *hpdma = chan_to_hpdma_dev(&hvchan->vchan.chan);
- unsigned long flag;
-
- if (unlikely(!vchan_next_desc(&hvchan->vchan)))
- return;
-
- spin_lock_irqsave(&hvchan->vchan.lock, flag);
-
- mtk_hpdma_issue_vchan_pending(hpdma, hvchan);
-
- spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
-}
-
-static int mtk_hpdma_provider_init(struct platform_device *pdev,
- struct hpdma_dev *hpdma)
-{
- struct dma_device *ddev = &hpdma->ddev;
- int ret = 0;
-
- dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
- dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
-
- ddev->dev = &pdev->dev;
- ddev->directions = BIT(DMA_MEM_TO_MEM);
- ddev->copy_align = MTK_HPDMA_ALIGN_SIZE;
- ddev->src_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
- ddev->dst_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
- ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
-
- ddev->device_alloc_chan_resources = mtk_hpdma_alloc_chan_resources;
- ddev->device_free_chan_resources = mtk_hpdma_free_chan_resources;
- ddev->device_issue_pending = mtk_hpdma_issue_pending;
- ddev->device_tx_status = mtk_hpdma_tx_status;
- ddev->device_prep_dma_memcpy = mtk_hpdma_prep_dma_memcpy;
- ddev->device_terminate_all = mtk_hpdma_terminate_all;
-
- INIT_LIST_HEAD(&ddev->channels);
-
- ret = hpdma->ops.vchan_init(hpdma, ddev);
- if (ret)
- return ret;
-
- ret = dma_async_device_register(ddev);
- if (ret) {
- dev_err(&pdev->dev, "register async dma device failed: %d\n", ret);
- return ret;
- }
-
- ret = of_dma_controller_register(pdev->dev.of_node,
- hpdma->ops.of_dma_xlate,
- ddev);
- if (ret) {
- dev_err(&pdev->dev, "register dma controller failed: %d\n", ret);
- goto unregister_async_dev;
- }
-
- return ret;
-
-unregister_async_dev:
- dma_async_device_unregister(ddev);
-
- return ret;
-}
-
-static int mtk_hpdma_probe(struct platform_device *pdev)
-{
- const struct hpdma_init_data *init_data;
- struct hpdma_dev *hpdma;
- struct resource *res;
- int ret = 0;
-
- init_data = of_device_get_match_data(&pdev->dev);
- if (!init_data) {
- dev_err(&pdev->dev, "hpdma init data not exist\n");
- return -ENODEV;
- }
-
- hpdma = init_data->init(pdev, init_data);
- if (IS_ERR(hpdma)) {
- dev_err(&pdev->dev, "hpdma init failed: %ld\n", PTR_ERR(hpdma));
- return PTR_ERR(hpdma);
- }
-
- memcpy(&hpdma->ops, &init_data->ops, sizeof(struct hpdma_ops));
- hpdma->hwspinlock_grp = init_data->hwspinlock_grp;
- hpdma->trigger_start_slot = init_data->trigger_start_slot;
- hpdma->ch_base_slot = init_data->ch_base_slot;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
- if (!res)
- return -ENXIO;
-
- hpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!hpdma->base)
- return -ENOMEM;
-
- /*
-	 * since hpdma does not signal the APMCU directly,
-	 * the TOPS mailbox is used to notify us when a transfer is done
- */
- ret = hpdma->ops.mbox_init(pdev, hpdma);
- if (ret)
- return ret;
-
- ret = mtk_hpdma_provider_init(pdev, hpdma);
- if (ret)
- goto unregister_mbox;
-
- spin_lock_init(&hpdma->lock);
-
- platform_set_drvdata(pdev, hpdma);
-
- dev_info(hpdma->ddev.dev, "hpdma init done\n");
-
- return ret;
-
-unregister_mbox:
- hpdma->ops.mbox_deinit(pdev, hpdma);
-
- return ret;
-}
-
-static int mtk_hpdma_remove(struct platform_device *pdev)
-{
- struct hpdma_dev *hpdma = platform_get_drvdata(pdev);
-
- if (!hpdma)
- return 0;
-
- hpdma->ops.vchan_deinit(hpdma);
-
- hpdma->ops.mbox_deinit(pdev, hpdma);
-
- dma_async_device_unregister(&hpdma->ddev);
-
- of_dma_controller_free(pdev->dev.of_node);
-
- return 0;
-}
-
-static struct dma_chan *mtk_clust_hpdma_of_xlate(struct of_phandle_args *dma_spec,
- struct of_dma *ofdma)
-{
- struct dma_device *ddev = ofdma->of_dma_data;
- struct hpdma_dev *hpdma;
- u32 id;
-
- if (!ddev || dma_spec->args_count != 2)
- return ERR_PTR(-EINVAL);
-
- hpdma = container_of(ddev, struct hpdma_dev, ddev);
- id = dma_spec->args[0] * CORE_OFFLOAD_NUM + dma_spec->args[1];
-
- return dma_get_slave_channel(&hpdma->hvchans[id].vchan.chan);
-}
-
-static struct hpdma_dev *mtk_top_hpdma_init(struct platform_device *pdev,
- const struct hpdma_init_data *data)
-{
- struct top_hpdma_dev *top_hpdma = NULL;
-
- if (!data)
- return ERR_PTR(-EINVAL);
-
- top_hpdma = devm_kzalloc(&pdev->dev, sizeof(*top_hpdma), GFP_KERNEL);
- if (!top_hpdma)
- return ERR_PTR(-ENOMEM);
-
- top_hpdma->mdev.core = CORE_MGMT;
- top_hpdma->mdev.cmd_id = MBOX_CM2AP_CMD_HPDMA;
- top_hpdma->mdev.mbox_handler = data->mbox_handler;
- top_hpdma->mdev.priv = &top_hpdma->hpdma;
-
- return &top_hpdma->hpdma;
-}
-
-static void mtk_top_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
-{
- struct hpdma_vchan *hvchan;
- u32 i;
-
- for (i = 0; i < __TOP_HPDMA_REQ; i++) {
- hvchan = &hpdma->hvchans[i];
- __mtk_hpdma_vchan_deinit(&hvchan->vchan);
- }
-}
-
-static int mtk_top_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
-{
- struct hpdma_vchan *hvchan;
- u32 i;
-
- hpdma->hvchans = devm_kcalloc(ddev->dev, __TOP_HPDMA_REQ,
- sizeof(struct hpdma_vchan),
- GFP_KERNEL);
- if (!hpdma->hvchans)
- return -ENOMEM;
-
- for (i = 0; i < __TOP_HPDMA_REQ; i++) {
- hvchan = &hpdma->hvchans[i];
-
- init_waitqueue_head(&hvchan->stop_wait);
- INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);
-
- hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
- /*
-		 * TODO: maybe init the vchan ourselves with a customized tasklet?
-		 * if we set up a customized tasklet to transmit the remaining
-		 * chunks in a channel, we must be careful with hpdma->lock since
-		 * it would then be acquired in softirq context
- */
- vchan_init(&hvchan->vchan, ddev);
- }
-
- return 0;
-}
-
-static void mtk_top_hpdma_unregister_mbox(struct platform_device *pdev,
- struct hpdma_dev *hpdma)
-{
- struct top_hpdma_dev *top_hpdma;
-
- top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);
-
- unregister_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
-}
-
-static int mtk_top_hpdma_register_mbox(struct platform_device *pdev,
- struct hpdma_dev *hpdma)
-{
- struct top_hpdma_dev *top_hpdma;
- int ret = 0;
-
- top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);
-
- ret = register_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
- if (ret) {
- dev_err(&pdev->dev, "register mailbox device failed: %d\n", ret);
- return ret;
- }
-
- return ret;
-}
-
-static void mtk_top_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
- struct hpdma_vchan *hvchan,
- struct hpdma_vdesc *hvdesc)
-{
- u32 slot = hpdma->ch_base_slot;
- enum hwspinlock_group grp = hpdma->hwspinlock_grp;
-
- hvchan->pchan_id = 0;
-
- mtk_hpdma_prepare_transfer(hpdma);
-
- /* occupy hpdma physical channel */
- while (!mtk_tops_hwspin_try_lock(grp, slot)) {
-
- if (unlikely(hvchan->terminating)) {
- spin_unlock(&hpdma->lock);
- return;
- }
-
- hvchan->pchan_id = (hvchan->pchan_id + 1) % HPDMA_CHAN_NUM;
- if (++slot - hpdma->ch_base_slot == HPDMA_CHAN_NUM)
- slot = hpdma->ch_base_slot;
- }
-
- mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);
-
- if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
- return;
-
- /* start transfer failed */
- mtk_tops_hwspin_unlock(grp, slot);
-
- mtk_hpdma_unprepare_transfer(hpdma);
-
- wake_up_interruptible(&hvchan->stop_wait);
-}
-
-static struct hpdma_dev *mtk_clust_hpdma_init(struct platform_device *pdev,
- const struct hpdma_init_data *data)
-{
- struct clust_hpdma_dev *clust_hpdma = NULL;
- u32 i;
-
- if (!data)
- return ERR_PTR(-EINVAL);
-
- clust_hpdma = devm_kzalloc(&pdev->dev, sizeof(*clust_hpdma), GFP_KERNEL);
- if (!clust_hpdma)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- clust_hpdma->mdev[i].core = CORE_OFFLOAD_0 + i;
- clust_hpdma->mdev[i].cmd_id = MBOX_CX2AP_CMD_HPDMA;
- clust_hpdma->mdev[i].mbox_handler = data->mbox_handler;
- clust_hpdma->mdev[i].priv = &clust_hpdma->hpdma;
- }
-
- return &clust_hpdma->hpdma;
-}
-
-static void mtk_clust_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
-{
- struct hpdma_vchan *hvchan;
- u32 i, j;
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
- hvchan = &hpdma->hvchans[i];
- __mtk_hpdma_vchan_deinit(&hvchan->vchan);
- }
- }
-}
-
-static int mtk_clust_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
-{
- struct hpdma_vchan *hvchan;
- u32 i, j;
-
- hpdma->hvchans = devm_kcalloc(ddev->dev, __CLUST_HPDMA_REQ * CORE_OFFLOAD_NUM,
- sizeof(struct hpdma_vchan),
- GFP_KERNEL);
- if (!hpdma->hvchans)
- return -ENOMEM;
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
- hvchan = &hpdma->hvchans[i * __CLUST_HPDMA_REQ + j];
-
- hvchan->pchan_id = i;
- init_waitqueue_head(&hvchan->stop_wait);
- INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);
-
- hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
- /*
-			 * TODO: maybe init the vchan ourselves with a customized tasklet?
-			 * if we set up a customized tasklet to transmit the remaining
-			 * chunks in a channel, we must be careful with hpdma->lock since
-			 * it would then be acquired in softirq context
- */
- vchan_init(&hvchan->vchan, ddev);
- }
- }
-
- return 0;
-}
-
-static void mtk_clust_hpdma_unregister_mbox(struct platform_device *pdev,
- struct hpdma_dev *hpdma)
-{
- struct clust_hpdma_dev *clust_hpdma;
- u32 i;
-
- clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++)
- unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
-}
-
-static int mtk_clust_hpdma_register_mbox(struct platform_device *pdev,
- struct hpdma_dev *hpdma)
-{
- struct clust_hpdma_dev *clust_hpdma;
- int ret = 0;
- int i;
-
- clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- ret = register_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
- if (ret) {
- dev_err(&pdev->dev, "register mbox%d failed: %d\n", i, ret);
- goto unregister_mbox;
- }
- }
-
- return ret;
-
-unregister_mbox:
- for (--i; i >= 0; i--)
- unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
-
- return ret;
-}
-
-static void mtk_clust_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
- struct hpdma_vchan *hvchan,
- struct hpdma_vdesc *hvdesc)
-{
- u32 slot = hpdma->ch_base_slot + hvchan->pchan_id;
- enum hwspinlock_group grp = hpdma->hwspinlock_grp;
-
- mtk_hpdma_prepare_transfer(hpdma);
-
- /* occupy hpdma physical channel */
- mtk_tops_hwspin_lock(grp, slot);
-
- mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);
-
- if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
- return;
-
- /* start transfer failed */
- mtk_tops_hwspin_unlock(grp, slot);
-
- mtk_hpdma_unprepare_transfer(hpdma);
-
- wake_up_interruptible(&hvchan->stop_wait);
-}
-
-static enum mbox_msg_cnt mtk_hpdma_ap_recv_mbox_msg(struct mailbox_dev *mdev,
- struct mailbox_msg *msg)
-{
- struct hpdma_dev *hpdma = mdev->priv;
- struct hpdma_vchan *hvchan;
- struct hpdma_vdesc *hvdesc;
- enum hwspinlock_group grp;
- unsigned long flag;
- u32 slot;
-
- if (!hpdma)
- return MBOX_NO_RET_MSG;
-
- hvchan = hpdma->issued_chan;
- if (!hvchan) {
- dev_err(hpdma->ddev.dev, "unexpected hpdma mailbox recv\n");
- return MBOX_NO_RET_MSG;
- }
-
- grp = hpdma->hwspinlock_grp;
-
- hvdesc = hvchan->issued_desc;
-
- /* clear issued channel before releasing hwspinlock */
- hpdma->issued_chan = NULL;
-
- hvchan->busy = false;
- hvchan->issued_desc = NULL;
-
- /* release hwspinlock */
- slot = hvchan->pchan_id + hpdma->ch_base_slot;
-
- mtk_tops_hwspin_unlock(grp, hpdma->trigger_start_slot);
-
- mtk_tops_hwspin_unlock(grp, slot);
-
-	/* release the lock so other APMCU processes can contend for the hw spinlock */
- spin_unlock(&hpdma->lock);
-
- if (unlikely(hvchan->terminating)) {
- wake_up_interruptible(&hvchan->stop_wait);
- return MBOX_NO_RET_MSG;
- }
-
- /*
- * complete vdesc and schedule tx work again
-	 * if there are more vdescs left in the channel
- */
- spin_lock_irqsave(&hvchan->vchan.lock, flag);
-
- vchan_cookie_complete(&hvdesc->vdesc);
-
- if (vchan_next_desc(&hvchan->vchan))
- schedule_work(&hvchan->tx_work);
-
- spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
-
- return MBOX_NO_RET_MSG;
-}
-
-struct hpdma_init_data top_hpdma_init_data = {
- .ops = {
- .vchan_init = mtk_top_hpdma_vchan_init,
- .vchan_deinit = mtk_top_hpdma_vchan_deinit,
- .mbox_init = mtk_top_hpdma_register_mbox,
- .mbox_deinit = mtk_top_hpdma_unregister_mbox,
- .tx_pending_desc = mtk_top_hpdma_tx_pending_desc,
- .of_dma_xlate = of_dma_xlate_by_chan_id,
- },
- .init = mtk_top_hpdma_init,
- .mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
- .hwspinlock_grp = HWSPINLOCK_GROUP_TOP,
- .trigger_start_slot = HWSPINLOCK_TOP_SLOT_HPDMA_LOCK,
- .ch_base_slot = HWSPINLOCK_TOP_SLOT_HPDMA_PCH0,
-};
-
-static struct hpdma_init_data clust_hpdma_init_data = {
- .ops = {
- .vchan_init = mtk_clust_hpdma_vchan_init,
- .vchan_deinit = mtk_clust_hpdma_vchan_deinit,
- .mbox_init = mtk_clust_hpdma_register_mbox,
- .mbox_deinit = mtk_clust_hpdma_unregister_mbox,
- .tx_pending_desc = mtk_clust_hpdma_tx_pending_desc,
- .of_dma_xlate = mtk_clust_hpdma_of_xlate,
- },
- .init = mtk_clust_hpdma_init,
- .mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
- .hwspinlock_grp = HWSPINLOCK_GROUP_CLUST,
- .trigger_start_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_LOCK,
- .ch_base_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_PCH0,
-};
-
-static const struct of_device_id mtk_hpdma_match[] = {
- { .compatible = "mediatek,hpdma-top", .data = &top_hpdma_init_data, },
- { .compatible = "mediatek,hpdma-sub", .data = &clust_hpdma_init_data, },
- { },
-};
-
-static struct platform_driver mtk_hpdma_driver = {
- .probe = mtk_hpdma_probe,
- .remove = mtk_hpdma_remove,
- .driver = {
- .name = "mediatek,hpdma",
- .owner = THIS_MODULE,
- .of_match_table = mtk_hpdma_match,
- },
-};
-
-int __init mtk_tops_hpdma_init(void)
-{
- int ret = 0;
-
- ret = platform_driver_register(&mtk_hpdma_driver);
- if (ret)
- return ret;
-
- return ret;
-}
-
-void __exit mtk_tops_hpdma_exit(void)
-{
- platform_driver_unregister(&mtk_hpdma_driver);
-}
diff --git a/feed/kernel/tops/src/hwspinlock.c b/feed/kernel/tops/src/hwspinlock.c
deleted file mode 100644
index 774c56f..0000000
--- a/feed/kernel/tops/src/hwspinlock.c
+++ /dev/null
@@ -1,92 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-
-#include "tops/hwspinlock.h"
-#include "tops/tops.h"
-
-#define SEMA_ID (BIT(CORE_AP))
-
-static void __iomem *base;
-
-static inline u32 hwspinlock_read(u32 reg)
-{
- return readl(base + reg);
-}
-
-static inline void hwspinlock_write(u32 reg, u32 val)
-{
- writel(val, base + reg);
-}
-
-static inline u32 __mtk_tops_hwspinlock_get_reg(enum hwspinlock_group grp, u32 slot)
-{
- if (unlikely(slot >= HWSPINLOCK_SLOT_MAX || grp >= __HWSPINLOCK_GROUP_MAX))
- return 0;
-
- if (grp == HWSPINLOCK_GROUP_TOP)
- return HWSPINLOCK_TOP_BASE + slot * 4;
- else
- return HWSPINLOCK_CLUST_BASE + slot * 4;
-}
-
-/*
- * try to take the TOPS HW spinlock
- * return 1 on success
- * return 0 on failure
- */
-int mtk_tops_hwspin_try_lock(enum hwspinlock_group grp, u32 slot)
-{
- u32 reg = __mtk_tops_hwspinlock_get_reg(grp, slot);
-
- WARN_ON(!reg);
-
- hwspinlock_write(reg, SEMA_ID);
-
- return hwspinlock_read(reg) == SEMA_ID ? 1 : 0;
-}
-
-void mtk_tops_hwspin_lock(enum hwspinlock_group grp, u32 slot)
-{
- u32 reg = __mtk_tops_hwspinlock_get_reg(grp, slot);
-
- WARN_ON(!reg);
-
- do {
- hwspinlock_write(reg, SEMA_ID);
- } while (hwspinlock_read(reg) != SEMA_ID);
-}
-
-void mtk_tops_hwspin_unlock(enum hwspinlock_group grp, u32 slot)
-{
- u32 reg = __mtk_tops_hwspinlock_get_reg(grp, slot);
-
- WARN_ON(!reg);
-
- if (hwspinlock_read(reg) == SEMA_ID)
- hwspinlock_write(reg, SEMA_ID);
-}
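/*
 * Usage sketch (condensed from hpdma.c): a slot can be busy-waited on with
 * mtk_tops_hwspin_lock(), or probed with mtk_tops_hwspin_try_lock() when
 * the caller wants to fall back to another slot instead of spinning.
 */
#if 0
	if (mtk_tops_hwspin_try_lock(HWSPINLOCK_GROUP_TOP,
				     HWSPINLOCK_TOP_SLOT_HPDMA_PCH0)) {
		/* exclusive access to HPDMA physical channel 0 */
		mtk_tops_hwspin_unlock(HWSPINLOCK_GROUP_TOP,
				       HWSPINLOCK_TOP_SLOT_HPDMA_PCH0);
	}
#endif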
-
-int mtk_tops_hwspinlock_init(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
- if (!res)
- return -ENXIO;
-
- base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
-
- return 0;
-}
diff --git a/feed/kernel/tops/src/inc/tops/ctrl.h b/feed/kernel/tops/src/inc/tops/ctrl.h
deleted file mode 100644
index fb74e40..0000000
--- a/feed/kernel/tops/src/inc/tops/ctrl.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_CTRL_H_
-#define _TOPS_CTRL_H_
-
-#include <linux/platform_device.h>
-
-int mtk_tops_ctrl_init(struct platform_device *pdev);
-void mtk_tops_ctrl_deinit(struct platform_device *pdev);
-#endif /* _TOPS_CTRL_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/debugfs.h b/feed/kernel/tops/src/inc/tops/debugfs.h
deleted file mode 100644
index e4a6da1..0000000
--- a/feed/kernel/tops/src/inc/tops/debugfs.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuo@mediatek.com>
- */
-
-#ifndef _TOPS_DEBUGFS_H_
-#define _TOPS_DEBUGFS_H_
-
-#include <linux/debugfs.h>
-#include <linux/platform_device.h>
-
-extern struct dentry *tops_debugfs_root;
-
-int mtk_tops_debugfs_init(struct platform_device *pdev);
-void mtk_tops_debugfs_deinit(struct platform_device *pdev);
-#endif /* _TOPS_DEBUGFS_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/firmware.h b/feed/kernel/tops/src/inc/tops/firmware.h
deleted file mode 100644
index 663ea95..0000000
--- a/feed/kernel/tops/src/inc/tops/firmware.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_FW_H_
-#define _TOPS_FW_H_
-
-#include <linux/platform_device.h>
-#include <linux/time.h>
-
-enum tops_role_type {
- TOPS_ROLE_TYPE_MGMT,
- TOPS_ROLE_TYPE_CLUSTER,
-
- __TOPS_ROLE_TYPE_MAX,
-};
-
-u64 mtk_tops_fw_get_git_commit_id(enum tops_role_type rtype);
-void mtk_tops_fw_get_built_date(enum tops_role_type rtype, struct tm *tm);
-u32 mtk_tops_fw_attr_get_num(enum tops_role_type rtype);
-const char *mtk_tops_fw_attr_get_property(enum tops_role_type rtype, u32 idx);
-const char *mtk_tops_fw_attr_get_value(enum tops_role_type rtype,
- const char *property);
-
-int mtk_tops_fw_bring_up_default_cores(void);
-int mtk_tops_fw_bring_up_core(const char *fw_path);
-void mtk_tops_fw_clean_up(void);
-int mtk_tops_fw_init(struct platform_device *pdev);
-#endif /* _TOPS_FW_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/hpdma.h b/feed/kernel/tops/src/inc/tops/hpdma.h
deleted file mode 100644
index 4f3d08c..0000000
--- a/feed/kernel/tops/src/inc/tops/hpdma.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_HPDMA_H_
-#define _TOPS_HPDMA_H_
-
-#include <linux/bitops.h>
-#include <linux/bitfield.h>
-
-/* AXI DMA */
-#define TOPS_HPDMA_X_SRC(x) (0x100 * (x) + 0x0000)
-#define TOPS_HPDMA_X_DST(x) (0x100 * (x) + 0x0004)
-#define TOPS_HPDMA_X_NUM(x) (0x100 * (x) + 0x0008)
-#define TOPS_HPDMA_X_CTRL(x) (0x100 * (x) + 0x000C)
-#define TOPS_HPDMA_X_CLRIRQ(x) (0x100 * (x) + 0x0010)
-#define TOPS_HPDMA_X_START(x) (0x100 * (x) + 0x0014)
-#define TOPS_HPDMA_X_RRESP(x) (0x100 * (x) + 0x0018)
-#define TOPS_HPDMA_X_BRESP(x) (0x100 * (x) + 0x001C)
-#define TOPS_HPDMA_X_HW(x) (0x100 * (x) + 0x0020)
-#define TOPS_HPDMA_X_ERR(x) (0x100 * (x) + 0x0024)
-
-
-/* AXI DMA NUM */
-#define HPDMA_TOTALNUM_SHIFT (0)
-#define HPDMA_TOTALNUM_MASK GENMASK(15, 0)
-
-/* AXI DMA CTRL */
-#define HPDMA_AXLEN_SHIFT (0)
-#define HPDMA_AXLEN_MASK GENMASK(3, 0)
-#define HPDMA_AXSIZE_SHIFT (8)
-#define HPDMA_AXSIZE_MASK GENMASK(10, 8)
-#define HPDMA_IRQEN BIT(16)
-#define HPDMA_AWMODE_EN BIT(24)
-#define HPDMA_OUTSTD_SHIFT (25)
-#define HPDMA_OUTSTD_MASK GENMASK(29, 25)
-
-/* AXI DMA START */
-#define HPDMA_STATUS_SHIFT (0)
-#define HPDMA_STATUS_MASK GENMASK(0, 0)
-#define HPDMA_SKIP_RACE_SHIFT (7)
-#define HPDMA_SKIP_RACE_MASK GENMASK(7, 7)
-#define HPDMA_START BIT(15)
-
-/* AXI DMA RRESP */
-#define HPDMA_LOG_SHIFT (0)
-#define HPDMA_LOG_MASK GENMASK(15, 0)
-#define HPDMA_RESP_SHIFT (16)
-#define HPDMA_RESP_MASK GENMASK(17, 16)
-
-/* AXI DMA HW */
-#define HPDMA_FIFO_DEPTH_SHIFT (0)
-#define HPDMA_FIFO_DEPTH_MASK GENMASK(7, 0)
-#define HPDMA_MAX_AXSIZE_SHIFT (8)
-#define HPDMA_MAX_AXSIZE_MASK GENMASK(15, 8)
-
-enum hpdma_err {
- AWMODE_ERR = 0x1 << 0,
- AXSIZE_ERR = 0x1 << 1,
- ARADDR_ERR = 0x1 << 2,
- AWADDR_ERR = 0x1 << 3,
- RACE_ERR = 0x1 << 4,
-};
-
-enum top_hpdma_req {
- TOP_HPDMA_TNL_SYNC_REQ,
-
- __TOP_HPDMA_REQ,
-};
-
-enum clust_hpdma_req {
- CLUST_HPDMA_DUMMY_REQ,
-
- __CLUST_HPDMA_REQ,
-};
-
-int mtk_tops_hpdma_init(void);
-void mtk_tops_hpdma_exit(void);
-#endif /* _TOPS_HPDMA_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/hwspinlock.h b/feed/kernel/tops/src/inc/tops/hwspinlock.h
deleted file mode 100644
index 407c01a..0000000
--- a/feed/kernel/tops/src/inc/tops/hwspinlock.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_HWSPIN_LOCK_H_
-#define _TOPS_HWSPIN_LOCK_H_
-
-#include <linux/platform_device.h>
-#include <linux/types.h>
-
-#define HWSPINLOCK_SLOT_MAX 16
-
-#define HWSPINLOCK_TOP_BASE 0x10100
-#define HWSPINLOCK_CLUST_BASE 0x880000
-
-enum hwspinlock_group {
- HWSPINLOCK_GROUP_TOP,
- HWSPINLOCK_GROUP_CLUST,
-
- __HWSPINLOCK_GROUP_MAX,
-};
-
-enum hwspinlock_top_slot {
- HWSPINLOCK_TOP_SLOT_HPDMA_LOCK,
- HWSPINLOCK_TOP_SLOT_HPDMA_PCH0,
- HWSPINLOCK_TOP_SLOT_HPDMA_PCH1,
- HWSPINLOCK_TOP_SLOT_HPDMA_PCH2,
- HWSPINLOCK_TOP_SLOT_HPDMA_PCH3,
- HWSPINLOCK_TOP_SLOT_5,
- HWSPINLOCK_TOP_SLOT_6,
- HWSPINLOCK_TOP_SLOT_7,
- HWSPINLOCK_TOP_SLOT_8,
- HWSPINLOCK_TOP_SLOT_9,
- HWSPINLOCK_TOP_SLOT_10,
- HWSPINLOCK_TOP_SLOT_11,
- HWSPINLOCK_TOP_SLOT_12,
- HWSPINLOCK_TOP_SLOT_13,
- HWSPINLOCK_TOP_SLOT_14,
- HWSPINLOCK_TOP_SLOT_15,
-
- __HWSPINLOCK_TOP_MAX = HWSPINLOCK_SLOT_MAX,
-};
-
-enum hwspinlock_clust_slot {
- HWSPINLOCK_CLUST_SLOT_PRINTF,
- HWSPINLOCK_CLUST_SLOT_HPDMA_LOCK,
- HWSPINLOCK_CLUST_SLOT_HPDMA_PCH0,
- HWSPINLOCK_CLUST_SLOT_HPDMA_PCH1,
- HWSPINLOCK_CLUST_SLOT_HPDMA_PCH2,
- HWSPINLOCK_CLUST_SLOT_HPDMA_PCH3,
- HWSPINLOCK_CLUST_SLOT_6,
- HWSPINLOCK_CLUST_SLOT_7,
- HWSPINLOCK_CLUST_SLOT_8,
- HWSPINLOCK_CLUST_SLOT_9,
- HWSPINLOCK_CLUST_SLOT_10,
- HWSPINLOCK_CLUST_SLOT_PPTP_SEQ,
- HWSPINLOCK_CLUST_SLOT_12,
- HWSPINLOCK_CLUST_SLOT_13,
- HWSPINLOCK_CLUST_SLOT_14,
- HWSPINLOCK_CLUST_SLOT_15,
-
- __HWSPINLOCK_CLUST_MAX = HWSPINLOCK_SLOT_MAX,
-};
-
-int mtk_tops_hwspin_try_lock(enum hwspinlock_group grp, u32 slot);
-void mtk_tops_hwspin_lock(enum hwspinlock_group grp, u32 slot);
-void mtk_tops_hwspin_unlock(enum hwspinlock_group grp, u32 slot);
-int mtk_tops_hwspinlock_init(struct platform_device *pdev);
-#endif /* _TOPS_HWSPIN_LOCK_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/internal.h b/feed/kernel/tops/src/inc/tops/internal.h
deleted file mode 100644
index 81e1ca1..0000000
--- a/feed/kernel/tops/src/inc/tops/internal.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_INTERNAL_H_
-#define _TOPS_INTERNAL_H_
-
-#include <linux/bitfield.h>
-#include <linux/device.h>
-#include <linux/io.h>
-
-extern struct device *tops_dev;
-
-#define TOPS_DBG(fmt, ...) dev_dbg(tops_dev, fmt, ##__VA_ARGS__)
-#define TOPS_INFO(fmt, ...) dev_info(tops_dev, fmt, ##__VA_ARGS__)
-#define TOPS_NOTICE(fmt, ...) dev_notice(tops_dev, fmt, ##__VA_ARGS__)
-#define TOPS_WARN(fmt, ...) dev_warn(tops_dev, fmt, ##__VA_ARGS__)
-#define TOPS_ERR(fmt, ...) dev_err(tops_dev, fmt, ##__VA_ARGS__)
-
-/* tops 32 bits read/write */
-#define setbits(addr, set) writel(readl(addr) | (set), (addr))
-#define clrbits(addr, clr) writel(readl(addr) & ~(clr), (addr))
-#define clrsetbits(addr, clr, set) writel((readl(addr) & ~(clr)) | (set), (addr))
-#endif /* _TOPS_INTERNAL_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/mbox.h b/feed/kernel/tops/src/inc/tops/mbox.h
deleted file mode 100644
index c116162..0000000
--- a/feed/kernel/tops/src/inc/tops/mbox.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_MBOX_H_
-#define _TOPS_MBOX_H_
-
-#include <linux/list.h>
-
-#include "tops/mbox_id.h"
-#include "tops/tops.h"
-
-/* mbox device macros */
-#define MBOX_DEV(core_id, cmd) \
- .core = core_id, \
- .cmd_id = cmd,
-
-#define MBOX_SEND_DEV(core_id, cmd) \
- { \
- MBOX_DEV(core_id, cmd) \
- }
-
-#define MBOX_SEND_MGMT_DEV(cmd) \
- MBOX_SEND_DEV(CORE_MGMT, MBOX_AP2CM_CMD_ ## cmd)
-
-#define MBOX_SEND_OFFLOAD_DEV(core_id, cmd) \
- MBOX_SEND_DEV(CORE_OFFLOAD_ ## core_id, MBOX_AP2CX_CMD_ ## cmd)
-
-#define MBOX_RECV_DEV(core_id, cmd, handler) \
- { \
- MBOX_DEV(core_id, cmd) \
- .mbox_handler = handler, \
- }
-
-#define MBOX_RECV_MGMT_DEV(cmd, handler) \
- MBOX_RECV_DEV(CORE_MGMT, MBOX_CM2AP_CMD_ ## cmd, handler)
-
-#define MBOX_RECV_OFFLOAD_DEV(core_id, cmd, handler) \
- MBOX_RECV_DEV(CORE_OFFLOAD_ ## core_id, MBOX_CX2AP_CMD_ ## cmd, handler)
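/*
 * Usage sketch for the helper macros above (the handler and variable names
 * are hypothetical; hpdma.c, for instance, fills the fields by hand instead
 * of using the macros):
 */
#if 0
static enum mbox_msg_cnt demo_mbox_handler(struct mailbox_dev *mdev,
					   struct mailbox_msg *msg)
{
	return MBOX_NO_RET_MSG;
}

static struct mailbox_dev demo_recv_mdev =
	MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, demo_mbox_handler);

/* then: register_mbox_dev(MBOX_RECV, &demo_recv_mdev); */
#endif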
-
-/* Base Address */
-#define MBOX_TOP_BASE (0x010000)
-#define MBOX_CLUST0_BASE (0x510000)
-
-/* TOP Mailbox */
-#define TOPS_TOP_CM_SLOT (MBOX_TOP_BASE + 0x000)
-#define TOPS_TOP_AP_SLOT (MBOX_TOP_BASE + 0x004)
-
-#define TOPS_TOP_AP_TO_CM_CMD_SET (MBOX_TOP_BASE + 0x200)
-#define TOPS_TOP_AP_TO_CM_CMD_CLR (MBOX_TOP_BASE + 0x204)
-#define TOPS_TOP_CM_TO_AP_CMD_SET (MBOX_TOP_BASE + 0x21C)
-#define TOPS_TOP_CM_TO_AP_CMD_CLR (MBOX_TOP_BASE + 0x220)
-
-#define TOPS_TOP_AP_TO_CM_MSG_N(n) (MBOX_TOP_BASE + 0x208 + 0x4 * (n))
-#define TOPS_TOP_CM_TO_AP_MSG_N(n) (MBOX_TOP_BASE + 0x224 + 0x4 * (n))
-
-/* CLUST Mailbox */
-#define TOPS_CLUST0_CX_SLOT(x) (MBOX_CLUST0_BASE + (0x4 * (x)))
-#define TOPS_CLUST0_CM_SLOT (MBOX_CLUST0_BASE + 0x10)
-#define TOPS_CLUST0_AP_SLOT (MBOX_CLUST0_BASE + 0x14)
-
-#define TOPS_CLUST0_CX_TO_CY_CMD_SET(x, y) \
- (MBOX_CLUST0_BASE + 0x100 + ((x) * 0x200) + ((y) * 0x40))
-#define TOPS_CLUST0_CX_TO_CY_CMD_CLR(x, y) \
- (MBOX_CLUST0_BASE + 0x104 + ((x) * 0x200) + ((y) * 0x40))
-#define TOPS_CLUST0_CX_TO_CM_CMD_SET(x) \
- (MBOX_CLUST0_BASE + 0x200 + ((x) * 0x200))
-#define TOPS_CLUST0_CX_TO_CM_CMD_CLR(x) \
- (MBOX_CLUST0_BASE + 0x204 + ((x) * 0x200))
-#define TOPS_CLUST0_CX_TO_AP_CMD_SET(x) \
- (MBOX_CLUST0_BASE + 0x240 + ((x) * 0x200))
-#define TOPS_CLUST0_CX_TO_AP_CMD_CLR(x) \
- (MBOX_CLUST0_BASE + 0x244 + ((x) * 0x200))
-#define TOPS_CLUST0_CM_TO_CX_CMD_SET(x) \
- (MBOX_CLUST0_BASE + 0x900 + ((x) * 0x40))
-#define TOPS_CLUST0_CM_TO_CX_CMD_CLR(x) \
- (MBOX_CLUST0_BASE + 0x904 + ((x) * 0x40))
-#define TOPS_CLUST0_AP_TO_CX_CMD_SET(x) \
- (MBOX_CLUST0_BASE + 0xB00 + ((x) * 0x40))
-#define TOPS_CLUST0_AP_TO_CX_CMD_CLR(x) \
- (MBOX_CLUST0_BASE + 0xB04 + ((x) * 0x40))
-
-#define TOPS_CLUST0_CX_TO_CY_MSG_N(x, y, n) \
- (MBOX_CLUST0_BASE + 0x108 + ((n) * 0x4) + ((x) * 0x200) + ((y) * 0x40))
-#define TOPS_CLUST0_CX_TO_CM_MSG_N(x, n) \
- (MBOX_CLUST0_BASE + 0x208 + ((n) * 0x4) + ((x) * 0x200))
-#define TOPS_CLUST0_CX_TO_AP_MSG_N(x, n) \
- (MBOX_CLUST0_BASE + 0x248 + ((n) * 0x4) + ((x) * 0x200))
-#define TOPS_CLUST0_CM_TO_CX_MSG_N(x, n) \
- (MBOX_CLUST0_BASE + 0x908 + ((n) * 0x4) + ((x) * 0x40))
-#define TOPS_CLUST0_AP_TO_CX_MSG_N(x, n) \
- (MBOX_CLUST0_BASE + 0xB08 + ((n) * 0x4) + ((x) * 0x40))
-
-#define MBOX_TOP_MBOX_FROM_C0 (0x1)
-#define MBOX_TOP_MBOX_FROM_C1 (0x2)
-#define MBOX_TOP_MBOX_FROM_C2 (0x4)
-#define MBOX_TOP_MBOX_FROM_C3 (0x8)
-#define MBOX_TOP_MBOX_FROM_AP (0x10)
-#define MBOX_TOP_MBOX_FROM_CM (0x20) /* TODO: need DE update */
-
-#define MBOX_CLUST0_MBOX_FROM_C0 (0x1)
-#define MBOX_CLUST0_MBOX_FROM_C1 (0x2)
-#define MBOX_CLUST0_MBOX_FROM_C2 (0x4)
-#define MBOX_CLUST0_MBOX_FROM_C3 (0x8)
-#define MBOX_CLUST0_MBOX_FROM_CM (0x10)
-#define MBOX_CLUST0_MBOX_FROM_AP (0x20)
-
-struct mailbox_msg;
-struct mailbox_dev;
-enum mbox_msg_cnt;
-
-typedef void (*mbox_ret_func_t)(void *priv, struct mailbox_msg *msg);
-typedef enum mbox_msg_cnt (*mbox_handler_func_t)(struct mailbox_dev *mdev,
- struct mailbox_msg *msg);
-
-enum mbox_act {
- MBOX_SEND,
- MBOX_RECV,
- MBOX_ACT_MAX,
-};
-
-enum mbox_msg_cnt {
- MBOX_NO_RET_MSG,
- MBOX_RET_MSG1,
- MBOX_RET_MSG2,
- MBOX_RET_MSG3,
- MBOX_RET_MSG4,
-};
-
-struct mailbox_msg {
- u32 msg1;
- u32 msg2;
- u32 msg3;
- u32 msg4;
-};
-
-struct mailbox_dev {
- struct list_head list;
- enum core_id core;
- mbox_handler_func_t mbox_handler;
- void *priv;
- u8 cmd_id;
-};
-
-int mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev, struct mailbox_msg *msg);
-int mbox_send_msg_no_wait(struct mailbox_dev *mdev, struct mailbox_msg *msg);
-int mbox_send_msg(struct mailbox_dev *mdev, struct mailbox_msg *msg, void *priv,
- mbox_ret_func_t ret_handler);
-int register_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev);
-int unregister_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev);
-void mtk_tops_mbox_clear_all_cmd(void);
-int mtk_tops_mbox_init(void);
-void mtk_tops_mbox_exit(void);
-#endif /* _TOPS_MBOX_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/mbox_id.h b/feed/kernel/tops/src/inc/tops/mbox_id.h
deleted file mode 100644
index 23a14ca..0000000
--- a/feed/kernel/tops/src/inc/tops/mbox_id.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_MBOX_ID_H_
-#define _TOPS_MBOX_ID_H_
-
-enum mbox_cm2ap_cmd_id {
- MBOX_CM2AP_CMD_CORE_CTRL = 0,
- MBOX_CM2AP_CMD_HPDMA = 10,
- MBOX_CM2AP_CMD_TNL_OFFLOAD = 11,
- MBOX_CM2AP_CMD_TEST = 31,
- __MBOX_CM2AP_CMD_MAX = 32,
-};
-
-enum mbox_ap2cm_cmd_id {
- MBOX_AP2CM_CMD_CORE_CTRL = 0,
- MBOX_AP2CM_CMD_NET = 1,
- MBOX_AP2CM_CMD_WDT = 2,
- MBOX_AP2CM_CMD_TRM = 3,
- MBOX_AP2CM_CMD_TNL_OFFLOAD = 11,
- MBOX_AP2CM_CMD_TEST = 31,
- __MBOX_AP2CM_CMD_MAX = 32,
-};
-
-enum mbox_cx2ap_cmd_id {
- MBOX_CX2AP_CMD_CORE_CTRL = 0,
- MBOX_CX2AP_CMD_HPDMA = 10,
- __MBOX_CX2AP_CMD_MAX = 32,
-};
-
-enum mbox_ap2cx_cmd_id {
- MBOX_AP2CX_CMD_CORE_CTRL = 0,
- MBOX_AP2CX_CMD_NET = 1,
- MBOX_AP2CX_CMD_WDT = 2,
- MBOX_AP2CX_CMD_TRM = 3,
- MBOX_AP2CX_CMD_MISC = 4,
- __MBOX_AP2CX_CMD_MAX = 32,
-};
-
-enum mbox_cm2cx_cmd_id {
- MBOX_CM2CX_CMD_CORE_CTRL = 0,
- __MBOX_CM2CX_CMD_MAX = 32,
-};
-
-enum mbox_cx2cm_cmd_id {
- MBOX_CX2CM_CMD_CORE_CTRL = 0,
- __MBOX_CX2CM_CMD_MAX = 32,
-};
-
-#endif /* _TOPS_MBOX_ID_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/mcu.h b/feed/kernel/tops/src/inc/tops/mcu.h
deleted file mode 100644
index df9b059..0000000
--- a/feed/kernel/tops/src/inc/tops/mcu.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_MCU_H_
-#define _TOPS_MCU_H_
-
-#include <linux/clk.h>
-#include <linux/bits.h>
-#include <linux/list.h>
-#include <linux/wait.h>
-#include <linux/timer.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/platform_device.h>
-
-#include "tops/tops.h"
-
-struct mcu_state;
-
-#define TOP_CORE_BASE (0x001000)
-#define TOP_SEC_BASE (0x00A000)
-#define TOP_L2SRAM (0x100000)
-#define TOP_CORE_M_DTCM (0x300000)
-#define TOP_CORE_M_ITCM (0x310000)
-#define CLUST_CORE_BASE(x) (0x501000 + 0x1000 * (x))
-#define CLUST_SEC_BASE (0x50A000)
-#define CLUST_L2SRAM (0x700000)
-#define CLUST_CORE_X_DTCM(x) (0x800000 + 0x20000 * (x))
-#define CLUST_CORE_X_ITCM(x) (0x810000 + 0x20000 * (x))
-
-/* CORE */
-#define TOP_CORE_NPU_SW_RST (TOP_CORE_BASE + 0x00)
-#define TOP_CORE_NPU_CTRL (TOP_CORE_BASE + 0x04)
-#define TOP_CORE_OCD_CTRL (TOP_CORE_BASE + 0x18)
-
-#define TOP_CORE_DBG_CTRL (TOP_SEC_BASE + 0x64)
-#define TOP_CORE_M_STAT_VECTOR_SEL (TOP_SEC_BASE + 0x68)
-#define TOP_CORE_M_RESET_VECTOR (TOP_SEC_BASE + 0x6C)
-
-#define CLUST_CORE_NPU_SW_RST(x) (CLUST_CORE_BASE(x) + 0x00)
-#define CLUST_CORE_NPU_CTRL(x) (CLUST_CORE_BASE(x) + 0x04)
-#define CLUST_CORE_OCD_CTRL(x) (CLUST_CORE_BASE(x) + 0x18)
-
-#define CLUST_CORE_DBG_CTRL (CLUST_SEC_BASE + 0x64)
-#define CLUST_CORE_X_STAT_VECTOR_SEL(x) (CLUST_SEC_BASE + 0x68 + (0xC * (x)))
-#define CLUST_CORE_X_RESET_VECTOR(x) (CLUST_SEC_BASE + 0x6C + (0xC * (x)))
-
-#define MCU_ACT_ABNORMAL (BIT(MCU_ACT_ABNORMAL_BIT))
-#define MCU_ACT_RESET (BIT(MCU_ACT_RESET_BIT))
-#define MCU_ACT_NETSTOP (BIT(MCU_ACT_NETSTOP_BIT))
-#define MCU_ACT_SHUTDOWN (BIT(MCU_ACT_SHUTDOWN_BIT))
-#define MCU_ACT_INIT (BIT(MCU_ACT_INIT_BIT))
-#define MCU_ACT_STALL (BIT(MCU_ACT_STALL_BIT))
-#define MCU_ACT_FREERUN (BIT(MCU_ACT_FREERUN_BIT))
-
-#define MCU_CTRL_ARG_NUM 2
-
-enum mcu_act {
- MCU_ACT_ABNORMAL_BIT,
- MCU_ACT_RESET_BIT,
- MCU_ACT_NETSTOP_BIT,
- MCU_ACT_SHUTDOWN_BIT,
- MCU_ACT_INIT_BIT,
- MCU_ACT_STALL_BIT,
- MCU_ACT_FREERUN_BIT,
-
- __MCU_ACT_MAX,
-};
-
-enum mcu_state_type {
- MCU_STATE_TYPE_SHUTDOWN,
- MCU_STATE_TYPE_INIT,
- MCU_STATE_TYPE_FREERUN,
- MCU_STATE_TYPE_STALL,
- MCU_STATE_TYPE_NETSTOP,
- MCU_STATE_TYPE_RESET,
- MCU_STATE_TYPE_ABNORMAL,
-
- __MCU_STATE_TYPE_MAX,
-};
-
-enum mcu_cmd_type {
- MCU_CMD_TYPE_NULL,
- MCU_CMD_TYPE_INIT_DONE,
- MCU_CMD_TYPE_STALL,
- MCU_CMD_TYPE_STALL_DONE,
- MCU_CMD_TYPE_FREERUN,
- MCU_CMD_TYPE_FREERUN_DONE,
- MCU_CMD_TYPE_ASSERT_RESET,
- MCU_CMD_TYPE_ASSERT_RESET_DONE,
- MCU_CMD_TYPE_RELEASE_RESET,
- MCU_CMD_TYPE_RELEASE_RESET_DONE,
-
- __MCU_CMD_TYPE_MAX,
-};
-
-enum mcu_event_type {
- MCU_EVENT_TYPE_NULL,
- MCU_EVENT_TYPE_SYNC_TNL,
- MCU_EVENT_TYPE_WDT_TIMEOUT,
- MCU_EVENT_TYPE_FE_RESET,
-
- __MCU_EVENT_TYPE_MAX,
-};
-
-struct mcu_ctrl_cmd {
- enum mcu_event_type e;
- u32 arg[MCU_CTRL_ARG_NUM];
- /*
- * if bit n (BIT(enum core_id)) == 1, send control message to that core.
- * default send to all cores if core_mask == 0
- */
- u32 core_mask;
-};
-
-struct mcu_state {
- enum mcu_state_type state;
- struct mcu_state *(*state_trans)(u32 mcu_act, struct mcu_state *state);
- int (*enter)(struct mcu_state *state);
- int (*leave)(struct mcu_state *state);
-};
-
-bool mtk_tops_mcu_alive(void);
-bool mtk_tops_mcu_bring_up_done(void);
-bool mtk_tops_mcu_netsys_fe_rst(void);
-int mtk_tops_mcu_stall(struct mcu_ctrl_cmd *mcmd,
- void (*callback)(void *param), void *param);
-int mtk_tops_mcu_reset(struct mcu_ctrl_cmd *mcmd,
- void (*callback)(void *param), void *param);
-
-int mtk_tops_mcu_bring_up(struct platform_device *pdev);
-void mtk_tops_mcu_tear_down(struct platform_device *pdev);
-int mtk_tops_mcu_init(struct platform_device *pdev);
-void mtk_tops_mcu_deinit(struct platform_device *pdev);
-#endif /* _TOPS_MCU_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/misc.h b/feed/kernel/tops/src/inc/tops/misc.h
deleted file mode 100644
index ec1ba73..0000000
--- a/feed/kernel/tops/src/inc/tops/misc.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuo@mediatek.com>
- */
-
-#ifndef _TOPS_MISC_H_
-#define _TOPS_MISC_H_
-
-#include <linux/platform_device.h>
-
-enum misc_cmd_type {
- MISC_CMD_TYPE_NULL,
- MISC_CMD_TYPE_SET_PPE_NUM,
-
- __MISC_CMD_TYPE_MAX,
-};
-
-int mtk_tops_misc_set_ppe_num(void);
-int mtk_tops_misc_init(struct platform_device *pdev);
-void mtk_tops_misc_deinit(struct platform_device *pdev);
-#endif /* _TOPS_MISC_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/net-event.h b/feed/kernel/tops/src/inc/tops/net-event.h
deleted file mode 100644
index 785a124..0000000
--- a/feed/kernel/tops/src/inc/tops/net-event.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_NET_EVENT_H_
-#define _TOPS_NET_EVENT_H_
-
-#include <linux/platform_device.h>
-
-#include <mtk_eth_soc.h>
-#include <mtk_eth_reset.h>
-
-struct tops_net_ser_data {
- struct net_device *ndev;
-};
-
-int mtk_tops_netevent_register(struct platform_device *pdev);
-void mtk_tops_netevent_unregister(struct platform_device *pdev);
-#endif /* _TOPS_NET_EVENT_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/netsys.h b/feed/kernel/tops/src/inc/tops/netsys.h
deleted file mode 100644
index fb68424..0000000
--- a/feed/kernel/tops/src/inc/tops/netsys.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_NETSYS_H_
-#define _TOPS_NETSYS_H_
-
-#include <linux/bitops.h>
-#include <linux/bitfield.h>
-#include <linux/platform_device.h>
-
-#include "tops/tunnel.h"
-
-/* FE BASE */
-#define FE_BASE (0x0000)
-
-/* PPE BASE */
-#define PPE0_BASE (0x2000)
-#define PPE1_BASE (0x2400)
-#define PPE2_BASE (0x2C00)
-
-/* FE_INT */
-#define FE_INT_GRP (0x0020)
-#define FE_INT_STA2 (0x0028)
-#define FE_INT_EN2 (0x002C)
-
-/* PSE IQ/OQ */
-#define PSE_IQ_STA6 (0x0194)
-#define PSE_OQ_STA6 (0x01B4)
-
-/* PPE */
-#define PPE_TBL_CFG (0x021C)
-
-/* FE_INT_GRP */
-#define FE_MISC_INT_ASG_SHIFT (0)
-#define FE_MISC_INT_ASG_MASK GENMASK(3, 0)
-
-/* FE_INT_STA2/FE_INT_EN2 */
-#define PSE_FC_ON_1_SHIFT (0)
-#define PSE_FC_ON_1_MASK GENMASK(6, 0)
-#define TDMA_TX_PAUSE (BIT(2))
-
-/* PSE IQ/OQ PORT */
-#define TDMA_PORT_SHIFT (0)
-#define TDMA_PORT_MASK GENMASK(15, 0)
-
-u32 mtk_tops_netsys_ppe_get_num(void);
-u32 mtk_tops_netsys_ppe_get_max_entry_num(u32 ppe_id);
-int mtk_tops_netsys_init(struct platform_device *pdev);
-void mtk_tops_netsys_deinit(struct platform_device *pdev);
-#endif /* _TOPS_NETSYS_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/seq_gen.h b/feed/kernel/tops/src/inc/tops/seq_gen.h
deleted file mode 100644
index 992cfb0..0000000
--- a/feed/kernel/tops/src/inc/tops/seq_gen.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- */
-
-#ifndef _TOPS_SEQ_GEN_H_
-#define _TOPS_SEQ_GEN_H_
-
-#include <linux/platform_device.h>
-
-#define TOPS_SEQ_GEN_BASE 0x880100 /* PKT_ID_GEN reg base */
-#define TOPS_SEQ_GEN_IDX_MAX 16 /* num of PKT_ID_GEN reg */
-
-void mtk_tops_seq_gen_set_16(int seq_gen_idx, u16 val);
-int mtk_tops_seq_gen_next_16(int seq_gen_idx, u16 *val);
-void mtk_tops_seq_gen_set_32(int seq_gen_idx, u32 val);
-int mtk_tops_seq_gen_next_32(int seq_gen_idx, u32 *val);
-int mtk_tops_seq_gen_alloc(int *seq_gen_idx);
-void mtk_tops_seq_gen_free(int seq_gen_idx);
-int mtk_tops_seq_gen_init(struct platform_device *pdev);
-#endif /* _TOPS_SEQ_GEN_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/ser.h b/feed/kernel/tops/src/inc/tops/ser.h
deleted file mode 100644
index 72f1f7d..0000000
--- a/feed/kernel/tops/src/inc/tops/ser.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- */
-
-#ifndef _TOPS_SER_H_
-#define _TOPS_SER_H_
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/platform_device.h>
-
-#include "tops/net-event.h"
-#include "tops/mcu.h"
-#include "tops/wdt.h"
-
-enum tops_ser_type {
- TOPS_SER_NETSYS_FE_RST,
- TOPS_SER_WDT_TO,
-
- __TOPS_SER_TYPE_MAX,
-};
-
-struct tops_ser_params {
- enum tops_ser_type type;
-
- union {
- struct tops_net_ser_data net;
- struct tops_wdt_ser_data wdt;
- } data;
-
- void (*ser_callback)(struct tops_ser_params *ser_params);
- void (*ser_mcmd_setup)(struct tops_ser_params *ser_params,
- struct mcu_ctrl_cmd *mcmd);
-};
-
-int mtk_tops_ser(struct tops_ser_params *ser_params);
-int mtk_tops_ser_init(struct platform_device *pdev);
-int mtk_tops_ser_deinit(struct platform_device *pdev);
-#endif /* _TOPS_SER_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/tdma.h b/feed/kernel/tops/src/inc/tops/tdma.h
deleted file mode 100644
index 2cbd644..0000000
--- a/feed/kernel/tops/src/inc/tops/tdma.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_TDMA_H_
-#define _TOPS_TDMA_H_
-
-#include <linux/bitops.h>
-#include <linux/bitfield.h>
-
-/* TDMA */
-#define TDMA_BASE (0x6000)
-
-#define TDMA_TX_CTX_IDX_0 (0x008)
-#define TDMA_RX_MAX_CNT_X(idx) (0x104 + ((idx) * 0x10))
-#define TDMA_RX_CRX_IDX_X(idx) (0x108 + ((idx) * 0x10))
-#define TDMA_RX_DRX_IDX_X(idx) (0x10C + ((idx) * 0x10))
-#define TDMA_GLO_CFG0 (0x204)
-#define TDMA_RST_IDX (0x208)
-#define TDMA_TX_XDMA_FIFO_CFG0 (0x238)
-#define TDMA_RX_XDMA_FIFO_CFG0 (0x23C)
-#define TDMA_PREF_TX_CFG (0x2D0)
-#define TDMA_PREF_TX_FIFO_CFG0 (0x2D4)
-#define TDMA_PREF_RX_CFG (0x2DC)
-#define TDMA_PREF_RX_FIFO_CFG0 (0x2E0)
-#define TDMA_PREF_SIDX_CFG (0x2E4)
-#define TDMA_WRBK_TX_CFG (0x300)
-#define TDMA_WRBK_TX_FIFO_CFG0 (0x304)
-#define TDMA_WRBK_RX_CFG (0x344)
-#define TDMA_WRBK_RX_FIFO_CFGX(x) (0x348 + 0x4 * (x))
-#define TDMA_WRBK_SIDX_CFG (0x388)
-#define TDMA_PREF_RX_FIFO_CFG1 (0x3EC)
-
-/* TDMA_GLO_CFG0 */
-#define TX_DMA_EN (BIT(0))
-#define TX_DMA_BUSY (BIT(1))
-#define RX_DMA_EN (BIT(2))
-#define RX_DMA_BUSY (BIT(3))
-#define DMA_BT_SIZE_MASK (0x7)
-#define DMA_BT_SIZE_SHIFT (11)
-#define OTSD_THRES_MASK (0xF)
-#define OTSD_THRES_SHIFT (14)
-#define CDM_FCNT_THRES_MASK (0xF)
-#define CDM_FCNT_THRES_SHIFT (18)
-#define LB_MODE (BIT(24))
-#define PKT_WCOMP (BIT(27))
-#define DEC_WCOMP (BIT(28))
-
-/* TDMA_RST_IDX */
-#define RST_DTX_IDX_0 (BIT(0))
-#define RST_DRX_IDX_X(idx) (BIT(16 + (idx)))
-
-/* TDMA_TX_XDMA_FIFO_CFG0 TDMA_RX_XDMA_FIFO_CFG0 */
-#define PAR_FIFO_CLEAR (BIT(0))
-#define CMD_FIFO_CLEAR (BIT(4))
-#define DMAD_FIFO_CLEAR (BIT(8))
-#define ARR_FIFO_CLEAR (BIT(12))
-#define LEN_FIFO_CLEAR (BIT(15))
-#define WID_FIFO_CLEAR (BIT(18))
-#define BID_FIFO_CLEAR (BIT(21))
-
-/* TDMA_SDL_CFG */
-#define SDL_EN (BIT(16))
-#define SDL_MASK (0xFFFF)
-#define SDL_SHIFT (0)
-
-/* TDMA_PREF_TX_CFG TDMA_PREF_RX_CFG */
-#define PREF_BUSY BIT(1)
-#define PREF_EN BIT(0)
-
-/* TDMA_PREF_TX_FIFO_CFG0 TDMA_PREF_RX_FIFO_CFG0 TDMA_PREF_RX_FIFO_CFG1 */
-#define PREF_TX_RING0_CLEAR (BIT(0))
-#define PREF_RX_RINGX_CLEAR(x) (BIT((((x) % 2) * 16)))
-#define PREF_RX_RING1_CLEAR (BIT(0))
-#define PREF_RX_RING2_CLEAR (BIT(16))
-#define PREF_RX_RING3_CLEAR (BIT(0))
-#define PREF_RX_RING4_CLEAR (BIT(16))
-
-/* TDMA_PREF_SIDX_CFG TDMA_WRBK_SIDX_CFG */
-#define TX_RING0_SIDX_CLR (BIT(0))
-#define RX_RINGX_SIDX_CLR(x) (BIT(4 + (x)))
-
-/* TDMA_WRBK_TX_FIFO_CFG0 TDMA_WRBK_RX_FIFO_CFGX */
-#define WRBK_RING_CLEAR (BIT(0))
-
-/* TDMA_WRBK_TX_CFG TDMA_WRBK_RX_CFG */
-#define WRBK_BUSY (BIT(0))
-#define BURST_SIZE_SHIFT (6)
-#define BURST_SIZE_MASK (0x1F)
-#define WRBK_THRES_SHIFT (14)
-#define WRBK_THRES_MASK (0x3F)
-#define FLUSH_TIMER_EN (BIT(21))
-#define MAX_PENDING_TIME_SHIFT (22)
-#define MAX_PENDING_TIME_MASK (0xFF)
-#define WRBK_EN (BIT(30))
-
-#define TDMA_RING_NUM (4)
-#define TDMA_RING_NUM_MOD (TDMA_RING_NUM - 1)
-
-enum tops_net_cmd {
- TOPS_NET_CMD_NULL,
- TOPS_NET_CMD_STOP,
- TOPS_NET_CMD_START,
-
- __TOPS_NET_CMD_MAX,
-};
-
-void mtk_tops_tdma_record_last_state(void);
-void mtk_tops_tdma_reset(void);
-int mtk_tops_tdma_enable(void);
-void mtk_tops_tdma_disable(void);
-int mtk_tops_tdma_init(struct platform_device *pdev);
-void mtk_tops_tdma_deinit(struct platform_device *pdev);
-#endif /* _TOPS_TDMA_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/tops.h b/feed/kernel/tops/src/inc/tops/tops.h
deleted file mode 100644
index 224ed7f..0000000
--- a/feed/kernel/tops/src/inc/tops/tops.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_H_
-#define _TOPS_H_
-
-#define CORE_TOPS_MASK (GENMASK(CORE_TOPS_NUM - 1, 0))
-
-enum core_id {
- CORE_OFFLOAD_0,
- CORE_OFFLOAD_1,
- CORE_OFFLOAD_2,
- CORE_OFFLOAD_3,
- CORE_OFFLOAD_NUM,
- CORE_MGMT = CORE_OFFLOAD_NUM,
- CORE_TOPS_NUM,
- CORE_AP = CORE_TOPS_NUM,
- CORE_MAX,
-};
-#endif /* _TOPS_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/tops_params.h b/feed/kernel/tops/src/inc/tops/tops_params.h
deleted file mode 100644
index 057f20b..0000000
--- a/feed/kernel/tops/src/inc/tops/tops_params.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_PARAMS_H_
-#define _TOPS_PARAMS_H_
-
-#include <linux/if_ether.h>
-#include <linux/seq_file.h>
-#include <linux/types.h>
-
-#include "tops/protocol/network/ip_params.h"
-#include "tops/protocol/transport/udp_params.h"
-#include "tops/protocol/tunnel/l2tp/l2tp_params.h"
-#include "tops/protocol/tunnel/pptp/pptp_params.h"
-
-/* tunnel params flags */
-#define TNL_DECAP_ENABLE (BIT(TNL_PARAMS_DECAP_ENABLE_BIT))
-#define TNL_ENCAP_ENABLE (BIT(TNL_PARAMS_ENCAP_ENABLE_BIT))
-
-#define DEBUG_PROTO_LEN (21)
-#define DEBUG_PROTO_ETH "eth"
-#define DEBUG_PROTO_IP "ipv4"
-#define DEBUG_PROTO_UDP "udp"
-#define DEBUG_PROTO_GRETAP "gretap"
-#define DEBUG_PROTO_L2TP_V2 "l2tpv2"
-
-enum tops_mac_type {
- TOPS_MAC_NONE,
- TOPS_MAC_ETH,
-
- __TOPS_MAC_TYPE_MAX,
-};
-
-enum tops_network_type {
- TOPS_NETWORK_NONE,
- TOPS_NETWORK_IPV4,
-
- __TOPS_NETWORK_TYPE_MAX,
-};
-
-enum tops_transport_type {
- TOPS_TRANSPORT_NONE,
- TOPS_TRANSPORT_UDP,
-
- __TOPS_TRANSPORT_TYPE_MAX,
-};
-
-enum tops_tunnel_type {
- TOPS_TUNNEL_NONE = 0,
- TOPS_TUNNEL_GRETAP,
- TOPS_TUNNEL_PPTP,
- TOPS_TUNNEL_L2TP_V2,
- TOPS_TUNNEL_L2TP_V3 = 5,
- TOPS_TUNNEL_VXLAN,
- TOPS_TUNNEL_NATT,
- TOPS_TUNNEL_CAPWAP_CTRL,
- TOPS_TUNNEL_CAPWAP_DATA,
- TOPS_TUNNEL_CAPWAP_DTLS = 10,
- TOPS_TUNNEL_IPSEC_ESP,
- TOPS_TUNNEL_IPSEC_AH,
-
- __TOPS_TUNNEL_TYPE_MAX = CONFIG_TOPS_TNL_TYPE_NUM,
-};
-
-enum tops_tnl_params_flag {
- TNL_PARAMS_DECAP_ENABLE_BIT,
- TNL_PARAMS_ENCAP_ENABLE_BIT,
-};
-
-struct tops_mac_params {
- union {
- struct ethhdr eth;
- };
- enum tops_mac_type type;
-};
-
-struct tops_network_params {
- union {
- struct tops_ip_params ip;
- };
- enum tops_network_type type;
-};
-
-struct tops_transport_params {
- union {
- struct tops_udp_params udp;
- };
- enum tops_transport_type type;
-};
-
-struct tops_tunnel_params {
- union {
- struct tops_l2tp_params l2tp;
- struct tops_pptp_params pptp;
- };
- enum tops_tunnel_type type;
-};
-
-struct tops_params {
- struct tops_mac_params mac;
- struct tops_network_params network;
- struct tops_transport_params transport;
- struct tops_tunnel_params tunnel;
-};
-
-/* record outer tunnel header data for HW offloading */
-struct tops_tnl_params {
- struct tops_params params;
- u8 tops_entry_proto;
- u8 cls_entry;
- u8 cdrt;
- u8 flag; /* bit: enum tops_tnl_params_flag */
-} __packed __aligned(16);
-
-int
-mtk_tops_encap_param_setup(struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params));
-int
-mtk_tops_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_decap_param_setup)(struct sk_buff *skb,
- struct tops_params *params));
-
-int mtk_tops_transport_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params);
-int mtk_tops_network_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params);
-int mtk_tops_mac_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params);
-
-int mtk_tops_debug_param_proto_peek(const char *buf, int ofs, char *out);
-int mtk_tops_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params);
-void mtk_tops_mac_param_dump(struct seq_file *s, struct tops_params *params);
-void mtk_tops_network_param_dump(struct seq_file *s, struct tops_params *params);
-void mtk_tops_transport_param_dump(struct seq_file *s, struct tops_params *params);
-
-bool mtk_tops_params_match(struct tops_params *p1, struct tops_params *p2);
-#endif /* _TOPS_PARAMS_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/trm-debugfs.h b/feed/kernel/tops/src/inc/tops/trm-debugfs.h
deleted file mode 100644
index 5425485..0000000
--- a/feed/kernel/tops/src/inc/tops/trm-debugfs.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuo@mediatek.com>
- */
-
-#ifndef _TRM_DEBUGFS_H_
-#define _TRM_DEBUGFS_H_
-
-#include <linux/debugfs.h>
-
-extern struct dentry *trm_debugfs_root;
-
-int mtk_trm_debugfs_init(void);
-void mtk_trm_debugfs_deinit(void);
-#endif /* _TRM_DEBUGFS_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/trm-fs.h b/feed/kernel/tops/src/inc/tops/trm-fs.h
deleted file mode 100644
index 972924f..0000000
--- a/feed/kernel/tops/src/inc/tops/trm-fs.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_TRM_FS_H_
-#define _TOPS_TRM_FS_H_
-
-#define RLY_DUMP_SUBBUF_SZ 2048
-#define RLY_DUMP_SUBBUF_NUM 256
-
-bool mtk_trm_fs_is_init(void);
-void *mtk_trm_fs_relay_reserve(u32 size);
-void mtk_trm_fs_relay_flush(void);
-int mtk_trm_fs_init(void);
-void mtk_trm_fs_deinit(void);
-#endif /* _TOPS_TRM_FS_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/trm-mcu.h b/feed/kernel/tops/src/inc/tops/trm-mcu.h
deleted file mode 100644
index 87cf699..0000000
--- a/feed/kernel/tops/src/inc/tops/trm-mcu.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_TRM_MCU_H_
-#define _TOPS_TRM_MCU_H_
-
-#include "tops/tops.h"
-
-#define XCHAL_NUM_AREG (32)
-#define CORE_DUMP_FRAM_MAGIC (0x00BE00BE)
-
-#define CORE_DUMP_FRAME_LEN (sizeof(struct core_dump_fram))
-
-/* need to sync with core_dump.S */
-struct core_dump_fram {
- uint32_t magic;
- uint32_t num_areg;
- uint32_t pc;
- uint32_t ps;
- uint32_t windowstart;
- uint32_t windowbase;
- uint32_t epc1;
- uint32_t exccause;
- uint32_t excvaddr;
- uint32_t excsave1;
- uint32_t areg[XCHAL_NUM_AREG];
-};
-
-extern struct core_dump_fram cd_frams[CORE_TOPS_NUM];
-
-int mtk_trm_mcu_core_dump(void);
-int mtk_tops_trm_mcu_init(void);
-void mtk_tops_trm_mcu_exit(void);
-#endif /* _TOPS_TRM_MCU_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/trm.h b/feed/kernel/tops/src/inc/tops/trm.h
deleted file mode 100644
index 2acf54a..0000000
--- a/feed/kernel/tops/src/inc/tops/trm.h
+++ /dev/null
@@ -1,116 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_TRM_H_
-#define _TOPS_TRM_H_
-
-#include <linux/platform_device.h>
-
-extern struct device *trm_dev;
-
-#define TRM_DBG(fmt, ...) dev_dbg(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
-#define TRM_INFO(fmt, ...) dev_info(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
-#define TRM_NOTICE(fmt, ...) dev_notice(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
-#define TRM_WARN(fmt, ...) dev_warn(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
-#define TRM_ERR(fmt, ...) dev_err(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
-
-#define TRM_CONFIG_NAME_MAX_LEN 32
-
-/* TRM Configuration */
-#define TRM_CFG(_name, _addr, _len, _ofs, _size, _flag) \
- .name = _name, \
- .addr = _addr, \
- .len = _len, \
- .offset = _ofs, \
- .size = _size, \
- .flag = _flag,
-
-#define TRM_CFG_EN(name, addr, len, ofs, size, flag) \
- TRM_CFG(name, addr, len, ofs, size, TRM_CONFIG_F_ENABLE | (flag))
-
-#define TRM_CFG_CORE_DUMP_EN(name, addr, len, ofs, size, flag, core_id) \
- TRM_CFG_EN(name, addr, len, ofs, size, TRM_CONFIG_F_CORE_DUMP | flag) \
- .core = core_id
-
-/* TRM configuration flags */
-#define TRM_CONFIG_F(trm_cfg_bit) \
- (BIT(TRM_CONFIG_F_ ## trm_cfg_bit ## _BIT))
-#define TRM_CONFIG_F_CX_CORE_DUMP_MASK (GENMASK(CORE_TOPS_NUM, 0))
-#define TRM_CONFIG_F_CX_CORE_DUMP_SHIFT (0)
-
-/* TRM reason flag */
-#define TRM_RSN(trm_rsn_bit) (BIT(TRM_RSN_ ## trm_rsn_bit ## _BIT))
-
-/* TRM Reason */
-#define TRM_RSN_NULL (0x0000)
-#define TRM_RSN_WDT_TIMEOUT_CORE0 (TRM_RSN(C0_WDT))
-#define TRM_RSN_WDT_TIMEOUT_CORE1 (TRM_RSN(C1_WDT))
-#define TRM_RSN_WDT_TIMEOUT_CORE2 (TRM_RSN(C2_WDT))
-#define TRM_RSN_WDT_TIMEOUT_CORE3 (TRM_RSN(C3_WDT))
-#define TRM_RSN_WDT_TIMEOUT_COREM (TRM_RSN(CM_WDT))
-#define TRM_RSN_FE_RESET (TRM_RSN(FE_RESET))
-#define TRM_RSN_MCU_STATE_ACT_FAIL (TRM_RSN(MCU_STATE_ACT_FAIL))
-
-enum trm_cmd_type {
- TRM_CMD_TYPE_NULL,
- TRM_CMD_TYPE_CPU_UTILIZATION,
-
- __TRM_CMD_TYPE_MAX,
-};
-
-enum trm_config_flag {
- TRM_CONFIG_F_ENABLE_BIT,
- TRM_CONFIG_F_CORE_DUMP_BIT,
-};
-
-enum trm_rsn {
- TRM_RSN_C0_WDT_BIT,
- TRM_RSN_C1_WDT_BIT,
- TRM_RSN_C2_WDT_BIT,
- TRM_RSN_C3_WDT_BIT,
- TRM_RSN_CM_WDT_BIT,
- TRM_RSN_FE_RESET_BIT,
- TRM_RSN_MCU_STATE_ACT_FAIL_BIT,
-};
-
-enum trm_hardware {
- TRM_TOPS,
- TRM_NETSYS,
- TRM_TDMA,
-
- __TRM_HARDWARE_MAX,
-};
-
-struct trm_config {
- char name[TRM_CONFIG_NAME_MAX_LEN];
- enum core_id core; /* valid if TRM_CONFIG_F_CORE_DUMP is set */
- u32 addr; /* memory address of the dump info */
- u32 len; /* total length of the dump info */
- u32 offset; /* dump offset */
- u32 size; /* dump size */
- u8 flag;
-#define TRM_CONFIG_F_CORE_DUMP (TRM_CONFIG_F(CORE_DUMP))
-#define TRM_CONFIG_F_ENABLE (TRM_CONFIG_F(ENABLE))
-};
-
-struct trm_hw_config {
- struct trm_config *trm_cfgs;
- u32 cfg_len;
- int (*trm_hw_dump)(void *dst, u32 ofs, u32 len);
-};
-
-int mtk_trm_cpu_utilization(enum core_id core, u32 *cpu_utilization);
-int mtk_trm_dump(u32 dump_rsn);
-int mtk_trm_cfg_setup(char *name, u32 offset, u32 size, u8 enable);
-int mtk_tops_trm_init(void);
-void mtk_tops_trm_exit(void);
-int mtk_trm_hw_config_register(enum trm_hardware trm_hw,
- struct trm_hw_config *trm_hw_cfg);
-void mtk_trm_hw_config_unregister(enum trm_hardware trm_hw,
- struct trm_hw_config *trm_hw_cfg);
-#endif /* _TOPS_TRM_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/tunnel.h b/feed/kernel/tops/src/inc/tops/tunnel.h
deleted file mode 100644
index cbe4dc6..0000000
--- a/feed/kernel/tops/src/inc/tops/tunnel.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_TUNNEL_H_
-#define _TOPS_TUNNEL_H_
-
-#include <linux/bitmap.h>
-#include <linux/hashtable.h>
-#include <linux/if_ether.h>
-#include <linux/ip.h>
-#include <linux/kthread.h>
-#include <linux/netdevice.h>
-#include <linux/refcount.h>
-#include <linux/seq_file.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <pce/cls.h>
-
-#include "tops/tops_params.h"
-
-/* tunnel info status */
-#define TNL_STA_UNINIT (BIT(TNL_STATUS_UNINIT))
-#define TNL_STA_INIT (BIT(TNL_STATUS_INIT))
-#define TNL_STA_QUEUED (BIT(TNL_STATUS_QUEUED))
-#define TNL_STA_UPDATING (BIT(TNL_STATUS_UPDATING))
-#define TNL_STA_UPDATED (BIT(TNL_STATUS_UPDATED))
-#define TNL_STA_DIP_UPDATE (BIT(TNL_STATUS_DIP_UPDATE))
-#define TNL_STA_DELETING (BIT(TNL_STATUS_DELETING))
-
-/* tunnel info flags */
-#define TNL_INFO_DEBUG (BIT(TNL_INFO_DEBUG_BIT))
-
-struct tops_tnl_info;
-struct tops_tnl_params;
-
-/*
- * tops_crsn
- * TOPS_CRSN_TNL_ID_START
- * TOPS_CRSN_TNL_ID_END
- * APMCU checks whether tops_crsn is in this range to know if this packet
- * was processed by TOPS previously.
- */
-enum tops_crsn {
- TOPS_CRSN_IGNORE = 0x00,
- TOPS_CRSN_TNL_ID_START = 0x10,
- TOPS_CRSN_TNL_ID_END = 0x2F,
-};
-
-enum tops_tunnel_mbox_cmd {
- TOPS_TNL_MBOX_CMD_RESV,
- TOPS_TNL_START_ADDR_SYNC,
-
- __TOPS_TNL_MBOX_CMD_MAX,
-};
-
-enum tunnel_ctrl_event {
- TUNNEL_CTRL_EVENT_NULL,
- TUNNEL_CTRL_EVENT_NEW,
- TUNNEL_CTRL_EVENT_DEL,
- TUNNEL_CTRL_EVENT_DIP_UPDATE,
-
- __TUNNEL_CTRL_EVENT_MAX,
-};
-
-enum tnl_status {
- TNL_STATUS_UNINIT,
- TNL_STATUS_INIT,
- TNL_STATUS_QUEUED,
- TNL_STATUS_UPDATING,
- TNL_STATUS_UPDATED,
- TNL_STATUS_DIP_UPDATE,
- TNL_STATUS_DELETING,
-
- __TNL_STATUS_MAX,
-};
-
-enum tops_tnl_info_flag {
- TNL_INFO_DEBUG_BIT,
-};
-
-struct tops_cls_entry {
- struct cls_entry *cls;
- struct list_head node;
- refcount_t refcnt;
- bool updated;
-};
-
-struct tops_tnl_info {
- struct tops_tnl_params tnl_params;
- struct tops_tnl_params cache;
- struct tops_tnl_type *tnl_type;
- struct tops_cls_entry *tcls;
- struct list_head sync_node;
- struct hlist_node hlist;
- struct net_device *dev;
- spinlock_t lock;
- u32 tnl_idx;
- u32 status;
- u32 flag; /* bit: enum tops_tnl_info_flag */
-} __aligned(16);
-
-/*
- * tnl_l2_param_update:
- * update tunnel l2 info only
- * return 1 on l2 params have difference
- * return 0 on l2 params are the same
- * return negative value on error
- */
-struct tops_tnl_type {
- const char *type_name;
- enum tops_tunnel_type tnl_proto_type;
-
- int (*cls_entry_setup)(struct tops_tnl_info *tnl_info,
- struct cls_desc *cdesc);
- struct list_head tcls_head;
- bool use_multi_cls;
-
- /* parameter setup */
- int (*tnl_decap_param_setup)(struct sk_buff *skb,
- struct tops_params *params);
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params);
- int (*tnl_debug_param_setup)(const char *buf, int *ofs,
- struct tops_params *params);
- int (*tnl_l2_param_update)(struct sk_buff *skb,
- struct tops_params *params);
- /* parameter debug dump */
- void (*tnl_param_dump)(struct seq_file *s, struct tops_params *params);
- /* check skb content can be offloaded */
- bool (*tnl_decap_offloadable)(struct sk_buff *skb);
- /* match between 2 parameters */
- bool (*tnl_param_match)(struct tops_params *p, struct tops_params *target);
- /* recover essential parameters before updating */
- void (*tnl_param_restore)(struct tops_params *old, struct tops_params *new);
- bool has_inner_eth;
-};
-
-void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info);
-void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info);
-struct tops_tnl_info *mtk_tops_tnl_info_get_by_idx(u32 tnl_idx);
-struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_type *tnl_type,
- struct tops_tnl_params *tnl_params);
-struct tops_tnl_info *mtk_tops_tnl_info_alloc(struct tops_tnl_type *tnl_type);
-void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info);
-
-int mtk_tops_tnl_offload_init(struct platform_device *pdev);
-void mtk_tops_tnl_offload_deinit(struct platform_device *pdev);
-int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev);
-void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev);
-void mtk_tops_tnl_offload_flush(void);
-void mtk_tops_tnl_offload_recover(void);
-void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev);
-
-struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name);
-int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type);
-void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type);
-#endif /* _TOPS_TUNNEL_H_ */
diff --git a/feed/kernel/tops/src/inc/tops/wdt.h b/feed/kernel/tops/src/inc/tops/wdt.h
deleted file mode 100644
index 22918a0..0000000
--- a/feed/kernel/tops/src/inc/tops/wdt.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- */
-
-#ifndef _TOPS_WDT_H_
-#define _TOPS_WDT_H_
-
-#include <linux/platform_device.h>
-
-#include "tops/tops.h"
-
-enum wdt_cmd {
- WDT_CMD_TRIGGER_TIMEOUT,
-
- __WDT_CMD_MAX,
-};
-
-struct tops_wdt_ser_data {
- u32 timeout_cores;
-};
-
-int mtk_tops_wdt_trigger_timeout(enum core_id core);
-int mtk_tops_wdt_init(struct platform_device *pdev);
-int mtk_tops_wdt_deinit(struct platform_device *pdev);
-#endif /* _TOPS_WDT_H_ */
diff --git a/feed/kernel/tops/src/init.c b/feed/kernel/tops/src/init.c
deleted file mode 100644
index 4495dfb..0000000
--- a/feed/kernel/tops/src/init.c
+++ /dev/null
@@ -1,315 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/debugfs.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include "tops/ctrl.h"
-#include "tops/debugfs.h"
-#include "tops/firmware.h"
-#include "tops/hpdma.h"
-#include "tops/hwspinlock.h"
-#include "tops/internal.h"
-#include "tops/mbox.h"
-#include "tops/mcu.h"
-#include "tops/misc.h"
-#include "tops/netsys.h"
-#include "tops/net-event.h"
-#include "tops/ser.h"
-#include "tops/tdma.h"
-#include "tops/trm-mcu.h"
-#include "tops/trm.h"
-#include "tops/tunnel.h"
-#include "tops/wdt.h"
-#include "tops/seq_gen.h"
-
-#define EFUSE_TOPS_POWER_OFF (0xD08)
-
-struct device *tops_dev;
-struct dentry *tops_debugfs_root;
-
-static int mtk_tops_post_init(struct platform_device *pdev)
-{
- int ret = 0;
-
- /* kick core */
- ret = mtk_tops_mcu_bring_up(pdev);
- if (ret) {
- TOPS_ERR("mcu post init failed: %d\n", ret);
- return ret;
- }
-
- /* offload tunnel protocol initialization */
- ret = mtk_tops_tnl_offload_proto_setup(pdev);
- if (ret) {
- TOPS_ERR("tnl offload protocol init failed: %d\n", ret);
- goto err_mcu_tear_down;
- }
-
- ret = mtk_tops_netevent_register(pdev);
- if (ret) {
- TOPS_ERR("netevent register fail: %d\n", ret);
- goto err_offload_proto_tear_down;
- }
-
- /* create sysfs file */
- ret = mtk_tops_ctrl_init(pdev);
- if (ret) {
- TOPS_ERR("ctrl init failed: %d\n", ret);
- goto err_netevent_unregister;
- }
-
- ret = mtk_tops_ser_init(pdev);
- if (ret) {
- TOPS_ERR("ser init failed: %d\n", ret);
- goto err_ctrl_deinit;
- }
-
- ret = mtk_tops_wdt_init(pdev);
- if (ret) {
- TOPS_ERR("wdt init failed: %d\n", ret);
- goto err_ser_deinit;
- }
-
- ret = mtk_tops_debugfs_init(pdev);
- if (ret) {
- TOPS_ERR("debugfs init failed: %d\n", ret);
- goto err_wdt_deinit;
- }
-
- return ret;
-
-err_wdt_deinit:
- mtk_tops_wdt_deinit(pdev);
-
-err_ser_deinit:
- mtk_tops_ser_deinit(pdev);
-
-err_ctrl_deinit:
- mtk_tops_ctrl_deinit(pdev);
-
-err_netevent_unregister:
- mtk_tops_netevent_unregister(pdev);
-
-err_offload_proto_tear_down:
- mtk_tops_tnl_offload_proto_teardown(pdev);
-
-err_mcu_tear_down:
- mtk_tops_mcu_tear_down(pdev);
-
- return ret;
-}
-
-static int mtk_tops_probe(struct platform_device *pdev)
-{
- int ret = 0;
-
- tops_dev = &pdev->dev;
-
- ret = mtk_tops_hwspinlock_init(pdev);
- if (ret) {
- TOPS_ERR("hwspinlock init failed: %d\n", ret);
- return ret;
- }
-
- ret = mtk_tops_fw_init(pdev);
- if (ret) {
- TOPS_ERR("firmware init failed: %d\n", ret);
- return ret;
- }
-
- ret = mtk_tops_mcu_init(pdev);
- if (ret) {
- TOPS_ERR("mcu init failed: %d\n", ret);
- return ret;
- }
-
- ret = mtk_tops_netsys_init(pdev);
- if (ret) {
- TOPS_ERR("netsys init failed: %d\n", ret);
- goto err_mcu_deinit;
- }
-
- ret = mtk_tops_tdma_init(pdev);
- if (ret) {
- TOPS_ERR("tdma init failed: %d\n", ret);
- goto err_netsys_deinit;
- }
-
- ret = mtk_tops_seq_gen_init(pdev);
- if (ret) {
- TOPS_ERR("sequence generator init failed: %d\n", ret);
- goto err_tdma_deinit;
- }
-
- ret = mtk_tops_tnl_offload_init(pdev);
- if (ret) {
- TOPS_ERR("tunnel table init failed: %d\n", ret);
- goto err_tdma_deinit;
- }
-
- ret = mtk_tops_misc_init(pdev);
- if (ret)
- goto err_tnl_offload_deinit;
-
- ret = mtk_tops_post_init(pdev);
- if (ret)
- goto err_misc_deinit;
-
- TOPS_ERR("init done\n");
- return ret;
-
-err_misc_deinit:
- mtk_tops_misc_deinit(pdev);
-
-err_tnl_offload_deinit:
- mtk_tops_tnl_offload_deinit(pdev);
-
-err_tdma_deinit:
- mtk_tops_tdma_deinit(pdev);
-
-err_netsys_deinit:
- mtk_tops_netsys_deinit(pdev);
-
-err_mcu_deinit:
- mtk_tops_mcu_deinit(pdev);
-
- return ret;
-}
-
-static int mtk_tops_remove(struct platform_device *pdev)
-{
- mtk_tops_debugfs_init(pdev);
-
- mtk_tops_wdt_deinit(pdev);
-
- mtk_tops_ser_deinit(pdev);
-
- mtk_tops_ctrl_deinit(pdev);
-
- mtk_tops_netevent_unregister(pdev);
-
- mtk_tops_tnl_offload_proto_teardown(pdev);
-
- mtk_tops_mcu_tear_down(pdev);
-
- mtk_tops_misc_deinit(pdev);
-
- mtk_tops_tnl_offload_deinit(pdev);
-
- mtk_tops_tdma_deinit(pdev);
-
- mtk_tops_netsys_deinit(pdev);
-
- mtk_tops_mcu_deinit(pdev);
-
- return 0;
-}
-
-static const struct of_device_id tops_match[] = {
- { .compatible = "mediatek,tops", },
- { },
-};
-MODULE_DEVICE_TABLE(of, tops_match);
-
-static struct platform_driver mtk_tops_driver = {
- .probe = mtk_tops_probe,
- .remove = mtk_tops_remove,
- .driver = {
- .name = "mediatek,tops",
- .owner = THIS_MODULE,
- .of_match_table = tops_match,
- },
-};
-
-static int __init mtk_tops_hw_disabled(void)
-{
- struct platform_device *efuse_pdev;
- struct device_node *efuse_node;
- struct resource res;
- void __iomem *efuse_base;
- int ret = 0;
-
- efuse_node = of_find_compatible_node(NULL, NULL, "mediatek,efuse");
- if (!efuse_node)
- return -ENODEV;
-
- efuse_pdev = of_find_device_by_node(efuse_node);
- if (!efuse_pdev) {
- ret = -ENODEV;
- goto out;
- }
-
- if (of_address_to_resource(efuse_node, 0, &res)) {
- ret = -ENXIO;
- goto out;
- }
-
- /* since we are not probed yet, we cannot use devm_* API */
- efuse_base = ioremap(res.start, resource_size(&res));
- if (!efuse_base) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (readl(efuse_base + EFUSE_TOPS_POWER_OFF))
- ret = -ENODEV;
-
- iounmap(efuse_base);
-
-out:
- of_node_put(efuse_node);
-
- return ret;
-}
-
-static int __init mtk_tops_init(void)
-{
- if (mtk_tops_hw_disabled())
- return -ENODEV;
-
- tops_debugfs_root = debugfs_create_dir("tops", NULL);
- if (IS_ERR(tops_debugfs_root)) {
- TOPS_ERR("create tops debugfs root directory failed\n");
- return PTR_ERR(tops_debugfs_root);
- }
-
- mtk_tops_mbox_init();
-
- mtk_tops_hpdma_init();
-
- mtk_tops_trm_init();
-
- return platform_driver_register(&mtk_tops_driver);
-}
-
-static void __exit mtk_tops_exit(void)
-{
- platform_driver_unregister(&mtk_tops_driver);
-
- mtk_tops_trm_exit();
-
- mtk_tops_hpdma_exit();
-
- mtk_tops_mbox_exit();
-
- debugfs_remove_recursive(tops_debugfs_root);
-}
-
-module_init(mtk_tops_init);
-module_exit(mtk_tops_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MediaTek TOPS Driver");
-MODULE_AUTHOR("Ren-Ting Wang <ren-ting.wang@mediatek.com>");
diff --git a/feed/kernel/tops/src/mbox.c b/feed/kernel/tops/src/mbox.c
deleted file mode 100644
index dc902c4..0000000
--- a/feed/kernel/tops/src/mbox.c
+++ /dev/null
@@ -1,574 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/err.h>
-#include <linux/ktime.h>
-#include <linux/device.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-#include "tops/mcu.h"
-#include "tops/mbox.h"
-#include "tops/internal.h"
-
-#define MBOX_SEND_TIMEOUT (2000)
-
-struct mailbox_reg {
- u32 cmd_set_reg;
- u32 cmd_clr_reg;
- u32 msg_reg;
-};
-
-struct mailbox_core {
- struct list_head mdev_list;
- u32 registered_cmd;
- spinlock_t lock;
-};
-
-struct mailbox_hw {
- struct mailbox_core core[MBOX_ACT_MAX][CORE_MAX];
- struct device *dev;
- void __iomem *base;
-};
-
-static struct mailbox_hw mbox;
-
-static inline void mbox_write(u32 reg, u32 val)
-{
- writel(val, mbox.base + reg);
-}
-
-static inline void mbox_set(u32 reg, u32 mask)
-{
- setbits(mbox.base + reg, mask);
-}
-
-static inline void mbox_clr(u32 reg, u32 mask)
-{
- clrbits(mbox.base + reg, mask);
-}
-
-static inline void mbox_rmw(u32 reg, u32 mask, u32 val)
-{
- clrsetbits(mbox.base + reg, mask, val);
-}
-
-static inline u32 mbox_read(u32 reg)
-{
- return readl(mbox.base + reg);
-}
-
-static inline void mbox_fill_msg(enum mbox_msg_cnt cnt, struct mailbox_msg *msg,
- struct mailbox_reg *mbox_reg)
-{
- if (cnt == MBOX_RET_MSG4)
- goto send_msg4;
- else if (cnt == MBOX_RET_MSG3)
- goto send_msg3;
- else if (cnt == MBOX_RET_MSG2)
- goto send_msg2;
- else if (cnt == MBOX_RET_MSG1)
- goto send_msg1;
- else
- return;
-
-send_msg4:
- mbox_write(mbox_reg->msg_reg + 0x10, msg->msg4);
-send_msg3:
- mbox_write(mbox_reg->msg_reg + 0xC, msg->msg3);
-send_msg2:
- mbox_write(mbox_reg->msg_reg + 0x8, msg->msg3);
-send_msg1:
- mbox_write(mbox_reg->msg_reg + 0x4, msg->msg1);
-}
-
-static inline void mbox_clear_msg(enum mbox_msg_cnt cnt,
- struct mailbox_reg *mbox_reg)
-{
- if (cnt == MBOX_NO_RET_MSG)
- goto clear_msg4;
- else if (cnt == MBOX_RET_MSG1)
- goto clear_msg3;
- else if (cnt == MBOX_RET_MSG2)
- goto clear_msg2;
- else if (cnt == MBOX_RET_MSG3)
- goto clear_msg1;
- else
- return;
-
-clear_msg4:
- mbox_write(mbox_reg->msg_reg + 0x4, 0);
-clear_msg3:
- mbox_write(mbox_reg->msg_reg + 0x8, 0);
-clear_msg2:
- mbox_write(mbox_reg->msg_reg + 0xC, 0);
-clear_msg1:
- mbox_write(mbox_reg->msg_reg + 0x10, 0);
-}
-
-static void exec_mbox_handler(enum core_id core, struct mailbox_reg *mbox_reg)
-{
- struct mailbox_core *mcore = &mbox.core[MBOX_RECV][core];
- struct mailbox_dev *mdev = NULL;
- struct mailbox_msg msg = {0};
- enum mbox_msg_cnt ret = 0;
- u32 cmd_id = 0;
-
- cmd_id = mbox_read(mbox_reg->msg_reg);
-
- list_for_each_entry(mdev, &mcore->mdev_list, list) {
- if (mdev->cmd_id == cmd_id) {
- if (!mdev->mbox_handler)
- goto out;
-
- /* setup msg for handler */
- msg.msg1 = mbox_read(mbox_reg->msg_reg + 0x4);
- msg.msg2 = mbox_read(mbox_reg->msg_reg + 0x8);
- msg.msg3 = mbox_read(mbox_reg->msg_reg + 0xC);
- msg.msg4 = mbox_read(mbox_reg->msg_reg + 0x10);
-
- ret = mdev->mbox_handler(mdev, &msg);
-
- mbox_fill_msg(ret, &msg, mbox_reg);
-
- break;
- }
- }
-out:
- mbox_write(mbox_reg->msg_reg, 0);
- mbox_clear_msg(ret, mbox_reg);
-
- /* clear cmd */
- mbox_write(mbox_reg->cmd_clr_reg, 0xFFFFFFFF);
-}
-
-static irqreturn_t mtk_tops_mbox_handler(int irq, void *dev_id)
-{
- struct mailbox_reg mreg = {0};
- u32 cluster_reg = 0;
- u32 top_reg = 0;
-
- top_reg = mbox_read(TOPS_TOP_AP_SLOT);
- cluster_reg = mbox_read(TOPS_CLUST0_AP_SLOT);
-
- if (top_reg & MBOX_TOP_MBOX_FROM_CM) {
- mreg.cmd_set_reg = TOPS_TOP_CM_TO_AP_CMD_SET;
- mreg.cmd_clr_reg = TOPS_TOP_CM_TO_AP_CMD_CLR;
- mreg.msg_reg = TOPS_TOP_CM_TO_AP_MSG_N(0);
- exec_mbox_handler(CORE_MGMT, &mreg);
- }
- if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C0) {
- mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(0);
- mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(0);
- mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(0, 0);
- exec_mbox_handler(CORE_OFFLOAD_0, &mreg);
- }
- if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C1) {
- mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(1);
- mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(1);
- mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(1, 0);
- exec_mbox_handler(CORE_OFFLOAD_1, &mreg);
- }
- if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C2) {
- mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(2);
- mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(2);
- mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(2, 0);
- exec_mbox_handler(CORE_OFFLOAD_2, &mreg);
- }
- if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C3) {
- mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(3);
- mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(3);
- mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(3, 0);
- exec_mbox_handler(CORE_OFFLOAD_3, &mreg);
- }
-
- return IRQ_HANDLED;
-}
-
-static int mbox_get_send_reg(struct mailbox_dev *mdev,
- struct mailbox_reg *mbox_reg)
-{
- if (!mdev) {
- dev_notice(mbox.dev, "no mdev specified!\n");
- return -EINVAL;
- }
-
- if (mdev->core == CORE_MGMT) {
- mbox_reg->cmd_set_reg = TOPS_TOP_AP_TO_CM_CMD_SET;
- mbox_reg->cmd_clr_reg = TOPS_TOP_AP_TO_CM_CMD_CLR;
- mbox_reg->msg_reg = TOPS_TOP_AP_TO_CM_MSG_N(0);
- } else if (mdev->core == CORE_OFFLOAD_0) {
- mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(0);
- mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(0);
- mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(0, 0);
- } else if (mdev->core == CORE_OFFLOAD_1) {
- mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(1);
- mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(1);
- mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(1, 0);
- } else if (mdev->core == CORE_OFFLOAD_2) {
- mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(2);
- mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(2);
- mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(2, 0);
- } else if (mdev->core == CORE_OFFLOAD_3) {
- mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(3);
- mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(3);
- mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(3, 0);
- } else {
- dev_notice(mbox.dev, "invalid mdev->core: %u\n", mdev->core);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void mbox_post_send(u32 msg_reg, struct mailbox_msg *msg,
- void *priv,
- mbox_ret_func_t ret_handler)
-{
- if (!ret_handler)
- goto out;
-
- msg->msg1 = mbox_read(msg_reg + 0x4);
- msg->msg2 = mbox_read(msg_reg + 0x8);
- msg->msg3 = mbox_read(msg_reg + 0xC);
- msg->msg4 = mbox_read(msg_reg + 0x10);
-
- ret_handler(priv, msg);
-
-out:
- mbox_write(msg_reg, 0);
- mbox_write(msg_reg + 0x4, 0);
- mbox_write(msg_reg + 0x8, 0);
- mbox_write(msg_reg + 0xC, 0);
- mbox_write(msg_reg + 0x10, 0);
-}
-
-static inline bool mbox_send_msg_chk_timeout(ktime_t start)
-{
- return ktime_to_us(ktime_sub(ktime_get(), start)) > MBOX_SEND_TIMEOUT;
-}
-
-static inline int __mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev,
- struct mailbox_msg *msg,
- struct mailbox_reg *mbox_reg)
-{
- ktime_t start;
-
- if (!mdev || !msg || !mbox_reg) {
- dev_notice(mbox.dev, "missing some necessary parameters!\n");
- return -EPERM;
- }
-
- start = ktime_get();
-
- /* wait for all cmd cleared */
- while (mbox_read(mbox_reg->cmd_set_reg)) {
- if (mbox_send_msg_chk_timeout(start)) {
- dev_notice(mbox.dev, "mbox occupied too long\n");
- dev_notice(mbox.dev, "cmd set reg (0x%x): 0x%x\n",
- mbox_reg->cmd_set_reg,
- mbox_read(mbox_reg->cmd_set_reg));
- dev_notice(mbox.dev, "msg1 reg (0x%x): 0x%x\n",
- mbox_reg->msg_reg,
- mbox_read(mbox_reg->msg_reg));
- dev_notice(mbox.dev, "msg2 reg (0x%x): 0x%x\n",
- mbox_reg->msg_reg,
- mbox_read(mbox_reg->msg_reg + 0x4));
- dev_notice(mbox.dev, "msg3 reg (0x%x): 0x%x\n",
- mbox_reg->msg_reg,
- mbox_read(mbox_reg->msg_reg + 0x8));
- dev_notice(mbox.dev, "msg4 reg (0x%x): 0x%x\n",
- mbox_reg->msg_reg,
- mbox_read(mbox_reg->msg_reg + 0xC));
- dev_notice(mbox.dev, "msg5 reg (0x%x): 0x%x\n",
- mbox_reg->msg_reg,
- mbox_read(mbox_reg->msg_reg + 0x10));
- WARN_ON(1);
- }
- }
-
- /* write msg */
- mbox_write(mbox_reg->msg_reg, mdev->cmd_id);
- mbox_write(mbox_reg->msg_reg + 0x4, msg->msg1);
- mbox_write(mbox_reg->msg_reg + 0x8, msg->msg2);
- mbox_write(mbox_reg->msg_reg + 0xC, msg->msg3);
- mbox_write(mbox_reg->msg_reg + 0x10, msg->msg4);
-
- /* write cmd */
- mbox_write(mbox_reg->cmd_set_reg, BIT(mdev->cmd_id));
-
- return 0;
-}
-
-int mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev, struct mailbox_msg *msg)
-{
- struct mailbox_reg mbox_reg = {0};
- int ret = 0;
-
- ret = mbox_get_send_reg(mdev, &mbox_reg);
- if (ret)
- return ret;
-
- spin_lock(&mbox.core[MBOX_SEND][mdev->core].lock);
-
- /* send cmd + msg */
- ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);
-
- spin_unlock(&mbox.core[MBOX_SEND][mdev->core].lock);
-
- return ret;
-}
-EXPORT_SYMBOL(mbox_send_msg_no_wait_irq);
-
-int mbox_send_msg_no_wait(struct mailbox_dev *mdev, struct mailbox_msg *msg)
-{
- struct mailbox_reg mbox_reg = {0};
- unsigned long flag = 0;
- int ret = 0;
-
- ret = mbox_get_send_reg(mdev, &mbox_reg);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
-
- /* send cmd + msg */
- ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);
-
- spin_unlock_irqrestore(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
-
- return ret;
-}
-EXPORT_SYMBOL(mbox_send_msg_no_wait);
-
-int mbox_send_msg(struct mailbox_dev *mdev, struct mailbox_msg *msg, void *priv,
- mbox_ret_func_t ret_handler)
-{
- struct mailbox_reg mbox_reg = {0};
- unsigned long flag = 0;
- ktime_t start;
- int ret = 0;
-
- ret = mbox_get_send_reg(mdev, &mbox_reg);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
-
- /* send cmd + msg */
- ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);
-
- start = ktime_get();
-
- /* wait for cmd clear */
- while (mbox_read(mbox_reg.cmd_set_reg) & BIT(mdev->cmd_id))
- mbox_send_msg_chk_timeout(start);
-
- /* execute return handler and clear message */
- mbox_post_send(mbox_reg.msg_reg, msg, priv, ret_handler);
-
- spin_unlock_irqrestore(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
-
- return ret;
-}
-EXPORT_SYMBOL(mbox_send_msg);
-
-static inline int mbox_ctrl_sanity_check(enum core_id core, enum mbox_act act)
-{
- /* sanity check */
- if (core >= CORE_MAX || act >= MBOX_ACT_MAX)
- return -EINVAL;
-
- /* mbox handler should not be register to core itself */
- if (core == CORE_AP)
- return -EINVAL;
-
- return 0;
-}
-
-static void __register_mbox_dev(struct mailbox_core *mcore,
- struct mailbox_dev *mdev)
-{
- struct mailbox_dev *cur = NULL;
-
- INIT_LIST_HEAD(&mdev->list);
-
- /* insert the mailbox_dev in order */
- list_for_each_entry(cur, &mcore->mdev_list, list)
- if (cur->cmd_id > mdev->cmd_id)
- break;
-
- list_add(&mdev->list, &cur->list);
-
- mcore->registered_cmd |= (0x1 << mdev->cmd_id);
-}
-
-static void __unregister_mbox_dev(struct mailbox_core *mcore,
- struct mailbox_dev *mdev)
-{
- struct mailbox_dev *cur = NULL;
- struct mailbox_dev *next = NULL;
-
- /* ensure the node being deleted is existed in the list */
- list_for_each_entry_safe(cur, next, &mcore->mdev_list, list) {
- if (cur->cmd_id == mdev->cmd_id && cur == mdev) {
- list_del(&mdev->list);
- break;
- }
- }
-
- mcore->registered_cmd &= (~(0x1 << mdev->cmd_id));
-}
-
-int register_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev)
-{
- struct mailbox_core *mcore;
- int ret = 0;
-
- /* sanity check */
- ret = mbox_ctrl_sanity_check(mdev->core, act);
- if (ret)
- return ret;
-
- mcore = &mbox.core[act][mdev->core];
-
- /* check cmd is occupied or not */
- if (mcore->registered_cmd & (0x1 << mdev->cmd_id))
- return -EBUSY;
-
- __register_mbox_dev(mcore, mdev);
-
- return 0;
-}
-EXPORT_SYMBOL(register_mbox_dev);
-
-int unregister_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev)
-{
- struct mailbox_core *mcore;
- int ret = 0;
-
- /* sanity check */
- ret = mbox_ctrl_sanity_check(mdev->core, act);
- if (ret)
- return ret;
-
- mcore = &mbox.core[act][mdev->core];
-
- /* check cmd need to unregister or not */
- if (!(mcore->registered_cmd & (0x1 << mdev->cmd_id)))
- return 0;
-
- __unregister_mbox_dev(mcore, mdev);
-
- return 0;
-}
-EXPORT_SYMBOL(unregister_mbox_dev);
-
-void mtk_tops_mbox_clear_all_cmd(void)
-{
- u32 i, j;
-
- mbox_write(TOPS_TOP_AP_TO_CM_CMD_CLR, 0xFFFFFFFF);
- mbox_write(TOPS_TOP_CM_TO_AP_CMD_CLR, 0xFFFFFFFF);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- mbox_write(TOPS_CLUST0_CX_TO_CM_CMD_CLR(i), 0xFFFFFFFF);
- mbox_write(TOPS_CLUST0_CM_TO_CX_CMD_CLR(i), 0xFFFFFFFF);
- mbox_write(TOPS_CLUST0_CX_TO_AP_CMD_CLR(i), 0xFFFFFFFF);
- mbox_write(TOPS_CLUST0_AP_TO_CX_CMD_CLR(i), 0xFFFFFFFF);
-
- for (j = 0; j < CORE_OFFLOAD_NUM; j++) {
- if (i == j)
- continue;
-
- mbox_write(TOPS_CLUST0_CX_TO_CY_CMD_CLR(i, j), 0xFFFFFFFF);
- }
- }
-}
-
-static int mtk_tops_mbox_probe(struct platform_device *pdev)
-{
- struct device_node *tops = NULL;
- struct resource res;
- int irq = platform_get_irq_byname(pdev, "mbox");
- int ret = 0;
- u32 idx = 0;
-
- mbox.dev = &pdev->dev;
-
- tops = of_parse_phandle(pdev->dev.of_node, "tops", 0);
- if (!tops) {
- dev_err(mbox.dev, "can not find tops node\n");
- return -ENODEV;
- }
-
- if (of_address_to_resource(tops, 0, &res))
- return -ENXIO;
-
- mbox.base = devm_ioremap(mbox.dev, res.start, resource_size(&res));
- if (!mbox.base)
- return -ENOMEM;
-
- if (irq < 0) {
- dev_err(mbox.dev, "get mbox irq failed\n");
- return irq;
- }
-
- ret = devm_request_irq(&pdev->dev, irq,
- mtk_tops_mbox_handler,
- IRQF_ONESHOT,
- pdev->name, NULL);
- if (ret) {
- dev_err(mbox.dev, "request mbox irq failed\n");
- return ret;
- }
-
- for (idx = 0; idx < CORE_MAX; idx++) {
- INIT_LIST_HEAD(&mbox.core[MBOX_SEND][idx].mdev_list);
- INIT_LIST_HEAD(&mbox.core[MBOX_RECV][idx].mdev_list);
- spin_lock_init(&mbox.core[MBOX_SEND][idx].lock);
- spin_lock_init(&mbox.core[MBOX_RECV][idx].lock);
- }
-
- mtk_tops_mbox_clear_all_cmd();
-
- return ret;
-}
-
-static int mtk_tops_mbox_remove(struct platform_device *pdev)
-{
- return 0;
-}
-
-static struct of_device_id mtk_mbox_match[] = {
- { .compatible = "mediatek,tops-mbox", },
- { },
-};
-
-static struct platform_driver mtk_tops_mbox_driver = {
- .probe = mtk_tops_mbox_probe,
- .remove = mtk_tops_mbox_remove,
- .driver = {
- .name = "mediatek,tops-mbox",
- .owner = THIS_MODULE,
- .of_match_table = mtk_mbox_match,
- },
-};
-
-int __init mtk_tops_mbox_init(void)
-{
- return platform_driver_register(&mtk_tops_mbox_driver);
-}
-
-void __exit mtk_tops_mbox_exit(void)
-{
- platform_driver_unregister(&mtk_tops_mbox_driver);
-}
diff --git a/feed/kernel/tops/src/mcu.c b/feed/kernel/tops/src/mcu.c
deleted file mode 100644
index 2150c97..0000000
--- a/feed/kernel/tops/src/mcu.c
+++ /dev/null
@@ -1,1495 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/firmware.h>
-#include <linux/io.h>
-#include <linux/kthread.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-#include <linux/pm_runtime.h>
-
-#include <pce/pce.h>
-
-#include "tops/ctrl.h"
-#include "tops/firmware.h"
-#include "tops/hpdma.h"
-#include "tops/internal.h"
-#include "tops/mbox.h"
-#include "tops/mcu.h"
-#include "tops/misc.h"
-#include "tops/netsys.h"
-#include "tops/tdma.h"
-#include "tops/trm.h"
-
-#define TDMA_TIMEOUT_MAX_CNT (3)
-#define TDMA_TIMEOUT_DELAY (100) /* 100ms */
-
-#define MCU_STATE_TRANS_TIMEOUT (5000) /* 5000ms */
-#define MCU_CTRL_DONE_BIT (31)
-#define MCU_CTRL_DONE (CORE_TOPS_MASK | \
- BIT(MCU_CTRL_DONE_BIT))
-
-/* TRM dump length */
-#define TOP_CORE_BASE_LEN (0x80)
-#define TOP_L2SRAM_LEN (0x40000)
-#define TOP_CORE_M_XTCM_LEN (0x8000)
-
-#define CLUST_CORE_BASE_LEN (0x80)
-#define CLUST_L2SRAM_LEN (0x40000)
-#define CLUST_CORE_X_XTCM_LEN (0x8000)
-
-/* MCU State */
-#define MCU_STATE_FUNC_DECLARE(name) \
-static int mtk_tops_mcu_state_ ## name ## _enter(struct mcu_state *state); \
-static int mtk_tops_mcu_state_ ## name ## _leave(struct mcu_state *state); \
-static struct mcu_state *mtk_tops_mcu_state_ ## name ## _trans( \
- u32 mcu_act, \
- struct mcu_state *state)
-
-#define MCU_STATE_DATA(name, id) \
- [id] = { \
- .state = id, \
- .state_trans = mtk_tops_mcu_state_ ## name ## _trans, \
- .enter = mtk_tops_mcu_state_ ## name ## _enter, \
- .leave = mtk_tops_mcu_state_ ## name ## _leave, \
- }
-
-static inline void mcu_ctrl_issue_pending_act(u32 mcu_act);
-static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
- struct mailbox_msg *msg);
-static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
- struct mailbox_msg *msg);
-static int mcu_trm_hw_dump(void *dst, u32 ofs, u32 len);
-
-MCU_STATE_FUNC_DECLARE(shutdown);
-MCU_STATE_FUNC_DECLARE(init);
-MCU_STATE_FUNC_DECLARE(freerun);
-MCU_STATE_FUNC_DECLARE(stall);
-MCU_STATE_FUNC_DECLARE(netstop);
-MCU_STATE_FUNC_DECLARE(reset);
-MCU_STATE_FUNC_DECLARE(abnormal);
-
-struct npu {
- void __iomem *base;
-
- struct clk *bus_clk;
- struct clk *sram_clk;
- struct clk *xdma_clk;
- struct clk *offload_clk;
- struct clk *mgmt_clk;
-
- struct device **pd_devices;
- struct device_link **pd_links;
- int pd_num;
-
- struct task_struct *mcu_ctrl_thread;
- struct timer_list mcu_ctrl_timer;
- struct mcu_state *next_state;
- struct mcu_state *cur_state;
- /* ensure that only 1 user can trigger state transition at a time */
- struct mutex mcu_ctrl_lock;
- spinlock_t pending_act_lock;
- wait_queue_head_t mcu_ctrl_wait_act;
- wait_queue_head_t mcu_state_wait_done;
- bool mcu_bring_up_done;
- bool state_trans_fail;
- u32 pending_act;
-
- spinlock_t ctrl_done_lock;
- wait_queue_head_t mcu_ctrl_wait_done;
- enum mcu_cmd_type ctrl_done_cmd;
- /* MSB = 1 means that mcu control done. Otherwise it is still ongoing */
- u32 ctrl_done;
-
- struct work_struct recover_work;
- bool in_reset;
- bool in_recover;
- bool netsys_fe_ser;
- bool shuting_down;
-
- struct mailbox_msg ctrl_msg;
- struct mailbox_dev recv_mgmt_mbox_dev;
- struct mailbox_dev send_mgmt_mbox_dev;
-
- struct mailbox_dev recv_offload_mbox_dev[CORE_OFFLOAD_NUM];
- struct mailbox_dev send_offload_mbox_dev[CORE_OFFLOAD_NUM];
-};
-
-static struct mcu_state mcu_states[__MCU_STATE_TYPE_MAX] = {
- MCU_STATE_DATA(shutdown, MCU_STATE_TYPE_SHUTDOWN),
- MCU_STATE_DATA(init, MCU_STATE_TYPE_INIT),
- MCU_STATE_DATA(freerun, MCU_STATE_TYPE_FREERUN),
- MCU_STATE_DATA(stall, MCU_STATE_TYPE_STALL),
- MCU_STATE_DATA(netstop, MCU_STATE_TYPE_NETSTOP),
- MCU_STATE_DATA(reset, MCU_STATE_TYPE_RESET),
- MCU_STATE_DATA(abnormal, MCU_STATE_TYPE_ABNORMAL),
-};
-
-static struct npu npu = {
- .send_mgmt_mbox_dev = MBOX_SEND_MGMT_DEV(CORE_CTRL),
- .send_offload_mbox_dev = {
- [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, CORE_CTRL),
- [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, CORE_CTRL),
- [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, CORE_CTRL),
- [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, CORE_CTRL),
- },
- .recv_mgmt_mbox_dev =
- MBOX_RECV_MGMT_DEV(CORE_CTRL, mtk_tops_ap_recv_mgmt_mbox_msg),
- .recv_offload_mbox_dev = {
- [CORE_OFFLOAD_0] =
- MBOX_RECV_OFFLOAD_DEV(0,
- CORE_CTRL,
- mtk_tops_ap_recv_offload_mbox_msg
- ),
- [CORE_OFFLOAD_1] =
- MBOX_RECV_OFFLOAD_DEV(1,
- CORE_CTRL,
- mtk_tops_ap_recv_offload_mbox_msg
- ),
- [CORE_OFFLOAD_2] =
- MBOX_RECV_OFFLOAD_DEV(2,
- CORE_CTRL,
- mtk_tops_ap_recv_offload_mbox_msg
- ),
- [CORE_OFFLOAD_3] =
- MBOX_RECV_OFFLOAD_DEV(3,
- CORE_CTRL,
- mtk_tops_ap_recv_offload_mbox_msg
- ),
- },
-};
-
-static struct trm_config mcu_trm_cfgs[] = {
- {
- TRM_CFG_EN("top-core-base",
- TOP_CORE_BASE, TOP_CORE_BASE_LEN,
- 0x0, TOP_CORE_BASE_LEN,
- 0)
- },
- {
- TRM_CFG_EN("clust-core0-base",
- CLUST_CORE_BASE(0), CLUST_CORE_BASE_LEN,
- 0x0, CLUST_CORE_BASE_LEN,
- 0)
- },
- {
- TRM_CFG_EN("clust-core1-base",
- CLUST_CORE_BASE(1), CLUST_CORE_BASE_LEN,
- 0x0, CLUST_CORE_BASE_LEN,
- 0)
- },
- {
- TRM_CFG_EN("clust-core2-base",
- CLUST_CORE_BASE(2), CLUST_CORE_BASE_LEN,
- 0x0, CLUST_CORE_BASE_LEN,
- 0)
- },
- {
- TRM_CFG_EN("clust-core3-base",
- CLUST_CORE_BASE(3), CLUST_CORE_BASE_LEN,
- 0x0, CLUST_CORE_BASE_LEN,
- 0)
- },
- {
- TRM_CFG_CORE_DUMP_EN("top-core-m-dtcm",
- TOP_CORE_M_DTCM, TOP_CORE_M_XTCM_LEN,
- 0x0, TOP_CORE_M_XTCM_LEN,
- 0, CORE_MGMT)
- },
- {
- TRM_CFG_CORE_DUMP_EN("clust-core-0-dtcm",
- CLUST_CORE_X_DTCM(0), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0, CORE_OFFLOAD_0)
- },
- {
- TRM_CFG_CORE_DUMP_EN("clust-core-1-dtcm",
- CLUST_CORE_X_DTCM(1), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0, CORE_OFFLOAD_1)
- },
- {
- TRM_CFG_CORE_DUMP_EN("clust-core-2-dtcm",
- CLUST_CORE_X_DTCM(2), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0, CORE_OFFLOAD_2)
- },
- {
- TRM_CFG_CORE_DUMP_EN("clust-core-3-dtcm",
- CLUST_CORE_X_DTCM(3), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0, CORE_OFFLOAD_3)
- },
- {
- TRM_CFG("top-core-m-itcm",
- TOP_CORE_M_ITCM, TOP_CORE_M_XTCM_LEN,
- 0x0, TOP_CORE_M_XTCM_LEN,
- 0)
- },
- {
- TRM_CFG("clust-core-0-itcm",
- CLUST_CORE_X_ITCM(0), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0)
- },
- {
- TRM_CFG("clust-core-1-itcm",
- CLUST_CORE_X_ITCM(1), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0)
- },
- {
- TRM_CFG("clust-core-2-itcm",
- CLUST_CORE_X_ITCM(2), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0)
- },
- {
- TRM_CFG("clust-core-3-itcm",
- CLUST_CORE_X_ITCM(3), CLUST_CORE_X_XTCM_LEN,
- 0x0, CLUST_CORE_X_XTCM_LEN,
- 0)
- },
- {
- TRM_CFG("top-l2sram",
- TOP_L2SRAM, TOP_L2SRAM_LEN,
- 0x0, TOP_L2SRAM_LEN,
- 0)
- },
- {
- TRM_CFG_EN("clust-l2sram",
- CLUST_L2SRAM, CLUST_L2SRAM_LEN,
- 0x38000, 0x8000,
- 0)
- },
-};
-
-static struct trm_hw_config mcu_trm_hw_cfg = {
- .trm_cfgs = mcu_trm_cfgs,
- .cfg_len = ARRAY_SIZE(mcu_trm_cfgs),
- .trm_hw_dump = mcu_trm_hw_dump,
-};
-
-static inline void npu_write(u32 reg, u32 val)
-{
- writel(val, npu.base + reg);
-}
-
-static inline void npu_set(u32 reg, u32 mask)
-{
- setbits(npu.base + reg, mask);
-}
-
-static inline void npu_clr(u32 reg, u32 mask)
-{
- clrbits(npu.base + reg, mask);
-}
-
-static inline void npu_rmw(u32 reg, u32 mask, u32 val)
-{
- clrsetbits(npu.base + reg, mask, val);
-}
-
-static inline u32 npu_read(u32 reg)
-{
- return readl(npu.base + reg);
-}
-
-static int mcu_trm_hw_dump(void *dst, u32 start_addr, u32 len)
-{
- u32 ofs;
-
- if (unlikely(!dst))
- return -ENODEV;
-
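-	/* copy the NPU register region into the dump buffer one 32-bit word at a time */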
- for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
- writel(npu_read(start_addr + ofs), dst + ofs);
-
- return 0;
-}
-
-static int mcu_power_on(void)
-{
- int ret = 0;
-
- ret = clk_prepare_enable(npu.bus_clk);
- if (ret) {
- TOPS_ERR("bus clk enable failed: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(npu.sram_clk);
- if (ret) {
- TOPS_ERR("sram clk enable failed: %d\n", ret);
- goto err_disable_bus_clk;
- }
-
- ret = clk_prepare_enable(npu.xdma_clk);
- if (ret) {
- TOPS_ERR("xdma clk enable failed: %d\n", ret);
- goto err_disable_sram_clk;
- }
-
- ret = clk_prepare_enable(npu.offload_clk);
- if (ret) {
- TOPS_ERR("offload clk enable failed: %d\n", ret);
- goto err_disable_xdma_clk;
- }
-
- ret = clk_prepare_enable(npu.mgmt_clk);
- if (ret) {
- TOPS_ERR("mgmt clk enable failed: %d\n", ret);
- goto err_disable_offload_clk;
- }
-
- ret = pm_runtime_get_sync(tops_dev);
- if (ret < 0) {
- TOPS_ERR("power on failed: %d\n", ret);
- goto err_disable_mgmt_clk;
- }
-
- return ret;
-
-err_disable_mgmt_clk:
- clk_disable_unprepare(npu.mgmt_clk);
-
-err_disable_offload_clk:
- clk_disable_unprepare(npu.offload_clk);
-
-err_disable_xdma_clk:
- clk_disable_unprepare(npu.xdma_clk);
-
-err_disable_sram_clk:
- clk_disable_unprepare(npu.sram_clk);
-
-err_disable_bus_clk:
- clk_disable_unprepare(npu.bus_clk);
-
- return ret;
-}
-
-static void mcu_power_off(void)
-{
- pm_runtime_put_sync(tops_dev);
-
- clk_disable_unprepare(npu.mgmt_clk);
-
- clk_disable_unprepare(npu.offload_clk);
-
- clk_disable_unprepare(npu.xdma_clk);
-
- clk_disable_unprepare(npu.sram_clk);
-
- clk_disable_unprepare(npu.bus_clk);
-}
-
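-/* send the prepared ctrl_msg to every TOPS core that has not yet reported done */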
-static inline int mcu_state_send_cmd(struct mcu_state *state)
-{
- unsigned long flag;
- enum core_id core;
- u32 ctrl_cpu;
-	int ret = 0;
-
- spin_lock_irqsave(&npu.ctrl_done_lock, flag);
- ctrl_cpu = (~npu.ctrl_done) & CORE_TOPS_MASK;
- spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
-
- if (ctrl_cpu & BIT(CORE_MGMT)) {
- ret = mbox_send_msg_no_wait(&npu.send_mgmt_mbox_dev,
- &npu.ctrl_msg);
- if (ret)
- goto out;
- }
-
- for (core = CORE_OFFLOAD_0; core < CORE_OFFLOAD_NUM; core++) {
- if (ctrl_cpu & BIT(core)) {
- ret = mbox_send_msg_no_wait(&npu.send_offload_mbox_dev[core],
- &npu.ctrl_msg);
- if (ret)
- goto out;
- }
- }
-
-out:
- return ret;
-}
-
-static inline void mcu_state_trans_start(void)
-{
- mod_timer(&npu.mcu_ctrl_timer,
- jiffies + msecs_to_jiffies(MCU_STATE_TRANS_TIMEOUT));
-}
-
-static inline void mcu_state_trans_end(void)
-{
- del_timer_sync(&npu.mcu_ctrl_timer);
-}
-
-static inline void mcu_state_trans_err(void)
-{
- wake_up_interruptible(&npu.mcu_ctrl_wait_done);
-}
-
-static inline int mcu_state_wait_complete(void (*state_complete_cb)(void))
-{
- unsigned long flag;
- int ret = 0;
-
- wait_event_interruptible(npu.mcu_state_wait_done,
- (npu.ctrl_done == CORE_TOPS_MASK) ||
- (npu.state_trans_fail));
-
- if (npu.state_trans_fail)
- return -EINVAL;
-
- npu.ctrl_msg.msg1 = npu.ctrl_done_cmd;
-
- spin_lock_irqsave(&npu.ctrl_done_lock, flag);
- npu.ctrl_done |= BIT(MCU_CTRL_DONE_BIT);
- spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
-
- if (state_complete_cb)
- state_complete_cb();
-
- wake_up_interruptible(&npu.mcu_ctrl_wait_done);
-
- return ret;
-}
-
-static inline void mcu_state_prepare_wait(enum mcu_cmd_type done_cmd)
-{
- unsigned long flag;
-
-	/* if the user does not specify which CPUs to control, control all of them */
- spin_lock_irqsave(&npu.ctrl_done_lock, flag);
- if ((npu.ctrl_done & CORE_TOPS_MASK) == CORE_TOPS_MASK)
- npu.ctrl_done = 0;
- spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
-
- npu.ctrl_done_cmd = done_cmd;
-}
-
-static struct mcu_state *mtk_tops_mcu_state_shutdown_trans(u32 mcu_act,
- struct mcu_state *state)
-{
- if (mcu_act == MCU_ACT_INIT)
- return &mcu_states[MCU_STATE_TYPE_INIT];
-
- return ERR_PTR(-ENODEV);
-}
-
-static int mtk_tops_mcu_state_shutdown_enter(struct mcu_state *state)
-{
- mcu_power_off();
-
- mtk_tops_tdma_record_last_state();
-
- mtk_tops_fw_clean_up();
-
- npu.mcu_bring_up_done = false;
-
- if (npu.shuting_down) {
- npu.shuting_down = false;
- wake_up_interruptible(&npu.mcu_ctrl_wait_done);
-
- return 0;
- }
-
- if (npu.in_recover || npu.in_reset)
- mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
-
- return 0;
-}
-
-static int mtk_tops_mcu_state_shutdown_leave(struct mcu_state *state)
-{
- return 0;
-}
-
-static struct mcu_state *mtk_tops_mcu_state_init_trans(u32 mcu_act,
- struct mcu_state *state)
-{
- if (mcu_act == MCU_ACT_FREERUN)
- return &mcu_states[MCU_STATE_TYPE_FREERUN];
- else if (mcu_act == MCU_ACT_NETSTOP)
- return &mcu_states[MCU_STATE_TYPE_NETSTOP];
-
- return ERR_PTR(-ENODEV);
-}
-
-static void mtk_tops_mcu_state_init_enter_complete_cb(void)
-{
- npu.mcu_bring_up_done = true;
- npu.in_reset = false;
- npu.in_recover = false;
- npu.netsys_fe_ser = false;
-
- mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
-}
-
-static int mtk_tops_mcu_state_init_enter(struct mcu_state *state)
-{
- int ret = 0;
-
- ret = mcu_power_on();
- if (ret)
- return ret;
-
- mtk_tops_mbox_clear_all_cmd();
-
- /* reset TDMA first */
- mtk_tops_tdma_reset();
-
- npu.ctrl_done = 0;
- mcu_state_prepare_wait(MCU_CMD_TYPE_INIT_DONE);
-
- ret = mtk_tops_fw_bring_up_default_cores();
- if (ret) {
- TOPS_ERR("bring up TOPS cores failed: %d\n", ret);
- goto out;
- }
-
- ret = mcu_state_wait_complete(mtk_tops_mcu_state_init_enter_complete_cb);
- if (unlikely(ret))
- TOPS_ERR("init leave failed\n");
-
-out:
- return ret;
-}
-
-static int mtk_tops_mcu_state_init_leave(struct mcu_state *state)
-{
- int ret;
-
- mtk_tops_misc_set_ppe_num();
-
- mtk_tops_tdma_enable();
-
- mtk_tops_tnl_offload_recover();
-
- /* enable cls, dipfilter */
- ret = mtk_pce_enable();
- if (ret) {
- TOPS_ERR("netsys enable failed: %d\n", ret);
- return ret;
- }
-
- return ret;
-}
-
-static struct mcu_state *mtk_tops_mcu_state_freerun_trans(u32 mcu_act,
- struct mcu_state *state)
-{
- if (mcu_act == MCU_ACT_RESET)
- return &mcu_states[MCU_STATE_TYPE_RESET];
- else if (mcu_act == MCU_ACT_STALL)
- return &mcu_states[MCU_STATE_TYPE_STALL];
- else if (mcu_act == MCU_ACT_NETSTOP)
- return &mcu_states[MCU_STATE_TYPE_NETSTOP];
-
- return ERR_PTR(-ENODEV);
-}
-
-static int mtk_tops_mcu_state_freerun_enter(struct mcu_state *state)
-{
- /* TODO : switch to HW path */
-
- return 0;
-}
-
-static int mtk_tops_mcu_state_freerun_leave(struct mcu_state *state)
-{
- /* TODO : switch to SW path */
-
- return 0;
-}
-
-static struct mcu_state *mtk_tops_mcu_state_stall_trans(u32 mcu_act,
- struct mcu_state *state)
-{
- if (mcu_act == MCU_ACT_RESET)
- return &mcu_states[MCU_STATE_TYPE_RESET];
- else if (mcu_act == MCU_ACT_FREERUN)
- return &mcu_states[MCU_STATE_TYPE_FREERUN];
- else if (mcu_act == MCU_ACT_NETSTOP)
- return &mcu_states[MCU_STATE_TYPE_NETSTOP];
-
- return ERR_PTR(-ENODEV);
-}
-
-static int mtk_tops_mcu_state_stall_enter(struct mcu_state *state)
-{
- int ret = 0;
-
- mcu_state_prepare_wait(MCU_CMD_TYPE_STALL_DONE);
-
- ret = mcu_state_send_cmd(state);
- if (ret)
- return ret;
-
- ret = mcu_state_wait_complete(NULL);
- if (ret)
- TOPS_ERR("stall enter failed\n");
-
- return ret;
-}
-
-static int mtk_tops_mcu_state_stall_leave(struct mcu_state *state)
-{
- int ret = 0;
-
-	/*
-	 * if the next state is going to stop the network, do not let the mcu
-	 * run the freerun cmd since that would abort the stall
-	 */
- if (npu.next_state->state == MCU_STATE_TYPE_NETSTOP)
- return 0;
-
- mcu_state_prepare_wait(MCU_CMD_TYPE_FREERUN_DONE);
-
- ret = mcu_state_send_cmd(state);
- if (ret)
- return ret;
-
- ret = mcu_state_wait_complete(NULL);
- if (ret)
- TOPS_ERR("stall leave failed\n");
-
- return ret;
-}
-
-static struct mcu_state *mtk_tops_mcu_state_netstop_trans(u32 mcu_act,
- struct mcu_state *state)
-{
- if (mcu_act == MCU_ACT_ABNORMAL)
- return &mcu_states[MCU_STATE_TYPE_ABNORMAL];
- else if (mcu_act == MCU_ACT_RESET)
- return &mcu_states[MCU_STATE_TYPE_RESET];
- else if (mcu_act == MCU_ACT_SHUTDOWN)
- return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
-
- return ERR_PTR(-ENODEV);
-}
-
-static int mtk_tops_mcu_state_netstop_enter(struct mcu_state *state)
-{
- mtk_tops_tnl_offload_flush();
-
- mtk_pce_disable();
-
- mtk_tops_tdma_disable();
-
- if (npu.in_recover)
- mcu_ctrl_issue_pending_act(MCU_ACT_ABNORMAL);
- else if (npu.in_reset)
- mcu_ctrl_issue_pending_act(MCU_ACT_RESET);
- else
- mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);
-
- return 0;
-}
-
-static int mtk_tops_mcu_state_netstop_leave(struct mcu_state *state)
-{
- return 0;
-}
-
-static struct mcu_state *mtk_tops_mcu_state_reset_trans(u32 mcu_act,
- struct mcu_state *state)
-{
- if (mcu_act == MCU_ACT_FREERUN)
- return &mcu_states[MCU_STATE_TYPE_FREERUN];
- else if (mcu_act == MCU_ACT_SHUTDOWN)
- return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
- else if (mcu_act == MCU_ACT_NETSTOP)
- /*
- * since netstop is already done before reset,
- * there is no need to do it again. We just go to abnormal directly
- */
- return &mcu_states[MCU_STATE_TYPE_ABNORMAL];
-
- return ERR_PTR(-ENODEV);
-}
-
-static int mtk_tops_mcu_state_reset_enter(struct mcu_state *state)
-{
- int ret = 0;
-
- mcu_state_prepare_wait(MCU_CMD_TYPE_ASSERT_RESET_DONE);
-
- if (!npu.netsys_fe_ser) {
- ret = mcu_state_send_cmd(state);
- if (ret)
- return ret;
- } else {
-		/* skip sending assert reset to the mcu on NETSYS SER */
- npu.ctrl_done = CORE_TOPS_MASK;
- }
-
- ret = mcu_state_wait_complete(NULL);
- if (ret)
- TOPS_ERR("assert reset failed\n");
-
- return ret;
-}
-
-static int mtk_tops_mcu_state_reset_leave(struct mcu_state *state)
-{
- int ret = 0;
-
-	/*
-	 * if the next state is going to shut down,
-	 * there is no need to let the mcu run the release reset cmd
-	 */
- if (npu.next_state->state == MCU_STATE_TYPE_ABNORMAL
- || npu.next_state->state == MCU_STATE_TYPE_SHUTDOWN)
- return 0;
-
- mcu_state_prepare_wait(MCU_CMD_TYPE_RELEASE_RESET_DONE);
-
- ret = mcu_state_send_cmd(state);
- if (ret)
- return ret;
-
- ret = mcu_state_wait_complete(NULL);
- if (ret)
- TOPS_ERR("release reset failed\n");
-
- return ret;
-}
-
-static struct mcu_state *mtk_tops_mcu_state_abnormal_trans(u32 mcu_act,
- struct mcu_state *state)
-{
- if (mcu_act == MCU_ACT_SHUTDOWN)
- return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
-
- return ERR_PTR(-ENODEV);
-}
-
-static int mtk_tops_mcu_state_abnormal_enter(struct mcu_state *state)
-{
- mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);
-
- return 0;
-}
-
-static int mtk_tops_mcu_state_abnormal_leave(struct mcu_state *state)
-{
- if (npu.mcu_bring_up_done)
- mtk_trm_dump(TRM_RSN_MCU_STATE_ACT_FAIL);
-
- return 0;
-}
-
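-/*
- * run the current state's leave() handler, switch to the state returned by
- * state_trans(), then run the new state's enter() handler; both phases are
- * guarded by the state transition timeout timer
- */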
-static int mtk_tops_mcu_state_transition(u32 mcu_act)
-{
- int ret = 0;
-
- npu.next_state = npu.cur_state->state_trans(mcu_act, npu.cur_state);
- if (IS_ERR(npu.next_state))
- return PTR_ERR(npu.next_state);
-
-	/* skip the mcu_state leave handler if the current MCU_ACT failed */
- if (unlikely(mcu_act == MCU_ACT_ABNORMAL))
- goto skip_state_leave;
-
- mcu_state_trans_start();
- if (npu.cur_state->leave) {
- ret = npu.cur_state->leave(npu.cur_state);
- if (ret) {
- TOPS_ERR("state%d transition leave failed: %d\n",
- npu.cur_state->state, ret);
- goto state_trans_end;
- }
- }
- mcu_state_trans_end();
-
-skip_state_leave:
- npu.cur_state = npu.next_state;
-
- mcu_state_trans_start();
- if (npu.cur_state->enter) {
- ret = npu.cur_state->enter(npu.cur_state);
- if (ret) {
- TOPS_ERR("state%d transition enter failed: %d\n",
- npu.cur_state->state, ret);
- goto state_trans_end;
- }
- }
-
-state_trans_end:
- mcu_state_trans_end();
-
- return ret;
-}
-
-static void mtk_tops_mcu_state_trans_timeout(struct timer_list *timer)
-{
- TOPS_ERR("state%d transition timeout!\n", npu.cur_state->state);
- TOPS_ERR("ctrl_done=0x%x ctrl_msg.msg1: 0x%x\n",
- npu.ctrl_done, npu.ctrl_msg.msg1);
-
- npu.state_trans_fail = true;
-
- wake_up_interruptible(&npu.mcu_state_wait_done);
-}
-
-static inline int mcu_ctrl_cmd_prepare(enum mcu_cmd_type cmd,
- struct mcu_ctrl_cmd *mcmd)
-{
- if (!mcmd || cmd == MCU_CMD_TYPE_NULL || cmd >= __MCU_CMD_TYPE_MAX)
- return -EINVAL;
-
- lockdep_assert_held(&npu.mcu_ctrl_lock);
-
- npu.ctrl_msg.msg1 = cmd;
- npu.ctrl_msg.msg2 = mcmd->e;
- npu.ctrl_msg.msg3 = mcmd->arg[0];
- npu.ctrl_msg.msg4 = mcmd->arg[1];
-
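-	/* mark unselected cores as already done so only cores in core_mask need to ack */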
- if (mcmd->core_mask) {
- unsigned long flag;
-
- spin_lock_irqsave(&npu.ctrl_done_lock, flag);
- npu.ctrl_done = ~(CORE_TOPS_MASK & mcmd->core_mask);
- npu.ctrl_done &= CORE_TOPS_MASK;
- spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
- }
-
- return 0;
-}
-
-static inline void mcu_ctrl_callback(void (*callback)(void *param), void *param)
-{
- if (callback)
- callback(param);
-}
-
-static inline void mcu_ctrl_issue_pending_act(u32 mcu_act)
-{
- unsigned long flag;
-
- spin_lock_irqsave(&npu.pending_act_lock, flag);
-
- npu.pending_act |= mcu_act;
-
- spin_unlock_irqrestore(&npu.pending_act_lock, flag);
-
- wake_up_interruptible(&npu.mcu_ctrl_wait_act);
-}
-
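-/* pending_act is a bitmask of mcu_act values; pop the lowest-numbered one first */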
-static inline enum mcu_act mcu_ctrl_pop_pending_act(void)
-{
- unsigned long flag;
- enum mcu_act act;
-
- spin_lock_irqsave(&npu.pending_act_lock, flag);
-
- act = ffs(npu.pending_act) - 1;
- npu.pending_act &= ~BIT(act);
-
- spin_unlock_irqrestore(&npu.pending_act_lock, flag);
-
- return act;
-}
-
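-/*
- * a control command is complete once every TOPS core has acked (ctrl_done
- * reaches MCU_CTRL_DONE) and the last finished command matches done_cmd
- */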
-static inline bool mcu_ctrl_is_complete(enum mcu_cmd_type done_cmd)
-{
- unsigned long flag;
- bool ctrl_done;
-
- spin_lock_irqsave(&npu.ctrl_done_lock, flag);
- ctrl_done = npu.ctrl_done == MCU_CTRL_DONE && npu.ctrl_msg.msg1 == done_cmd;
- spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
-
- return ctrl_done;
-}
-
-static inline void mcu_ctrl_done(enum core_id core)
-{
- unsigned long flag;
-
- if (core > CORE_MGMT)
- return;
-
- spin_lock_irqsave(&npu.ctrl_done_lock, flag);
- npu.ctrl_done |= BIT(core);
- spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
-}
-
-static int mcu_ctrl_task(void *data)
-{
- enum mcu_act act;
- int ret;
-
- while (1) {
- wait_event_interruptible(npu.mcu_ctrl_wait_act,
- npu.pending_act || kthread_should_stop());
-
- if (kthread_should_stop()) {
- TOPS_INFO("tops mcu ctrl task stop\n");
- break;
- }
-
- act = mcu_ctrl_pop_pending_act();
- if (unlikely(act >= __MCU_ACT_MAX)) {
- TOPS_ERR("invalid MCU act: %u\n", act);
- continue;
- }
-
-		/*
-		 * ensure that the act is submitted by either mtk_tops_mcu_stall,
-		 * mtk_tops_mcu_reset or mtk_tops_mcu_cold_boot.
-		 * if mcu_act is ABNORMAL, it must be caused by a state transition
-		 * triggered by the above APIs.
-		 * as a result, mcu_ctrl_lock must be held before mcu_ctrl_task starts
-		 */
- lockdep_assert_held(&npu.mcu_ctrl_lock);
-
- if (unlikely(!npu.cur_state->state_trans)) {
- TOPS_ERR("cur state has no state_trans()\n");
- WARN_ON(1);
- }
-
- ret = mtk_tops_mcu_state_transition(BIT(act));
- if (ret) {
- npu.state_trans_fail = true;
-
- mcu_state_trans_err();
- }
- }
- return 0;
-}
-
-bool mtk_tops_mcu_alive(void)
-{
- return npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail;
-}
-
-bool mtk_tops_mcu_bring_up_done(void)
-{
- return npu.mcu_bring_up_done;
-}
-
-bool mtk_tops_mcu_netsys_fe_rst(void)
-{
- return npu.netsys_fe_ser;
-}
-
-static int mtk_tops_mcu_wait_done(enum mcu_cmd_type done_cmd)
-{
- int ret = 0;
-
- wait_event_interruptible(npu.mcu_ctrl_wait_done,
- mcu_ctrl_is_complete(done_cmd)
- || npu.state_trans_fail);
-
- if (npu.state_trans_fail)
- return -EINVAL;
-
- return ret;
-}
-
-int mtk_tops_mcu_stall(struct mcu_ctrl_cmd *mcmd,
- void (*callback)(void *param), void *param)
-{
- int ret = 0;
-
- if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
- return -EBUSY;
-
- if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
- return -EINVAL;
-
- mutex_lock(&npu.mcu_ctrl_lock);
-
- /* go to stall state */
- ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_STALL, mcmd);
- if (ret)
- goto unlock;
-
- mcu_ctrl_issue_pending_act(MCU_ACT_STALL);
-
- ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_STALL_DONE);
- if (ret) {
- TOPS_ERR("tops stall failed: %d\n", ret);
- goto recover_mcu;
- }
-
- mcu_ctrl_callback(callback, param);
-
- /* go to freerun state */
- ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_FREERUN, mcmd);
- if (ret)
- goto recover_mcu;
-
- mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
-
- ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_FREERUN_DONE);
- if (ret) {
- TOPS_ERR("tops freerun failed: %d\n", ret);
- goto recover_mcu;
- }
-
- /* stall freerun successfully done */
- goto unlock;
-
-recover_mcu:
- schedule_work(&npu.recover_work);
-
-unlock:
- mutex_unlock(&npu.mcu_ctrl_lock);
-
- return ret;
-}
-
-int mtk_tops_mcu_reset(struct mcu_ctrl_cmd *mcmd,
- void (*callback)(void *param), void *param)
-{
- int ret = 0;
-
- if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
- return -EBUSY;
-
- if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
- return -EINVAL;
-
- mutex_lock(&npu.mcu_ctrl_lock);
-
- npu.in_reset = true;
- if (mcmd->e == MCU_EVENT_TYPE_FE_RESET)
- npu.netsys_fe_ser = true;
-
- ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_ASSERT_RESET, mcmd);
- if (ret)
- goto unlock;
-
- mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
-
- ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_ASSERT_RESET_DONE);
- if (ret) {
- TOPS_ERR("tops assert reset failed: %d\n", ret);
- goto recover_mcu;
- }
-
- mcu_ctrl_callback(callback, param);
-
- switch (mcmd->e) {
- case MCU_EVENT_TYPE_WDT_TIMEOUT:
- case MCU_EVENT_TYPE_FE_RESET:
- mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);
-
- ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);
- if (ret)
- goto recover_mcu;
-
- break;
- default:
- ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_RELEASE_RESET, mcmd);
- if (ret)
- goto recover_mcu;
-
- mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
-
- ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_RELEASE_RESET_DONE);
- if (ret)
- goto recover_mcu;
-
- break;
- }
-
- goto unlock;
-
-recover_mcu:
- schedule_work(&npu.recover_work);
-
-unlock:
- mutex_unlock(&npu.mcu_ctrl_lock);
-
- return ret;
-}
-
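-/*
- * bring the MCU back after a failure: issue INIT or NETSTOP depending on the
- * current state, then retry until cold boot succeeds or the driver shuts down
- */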
-static void mtk_tops_mcu_recover_work(struct work_struct *work)
-{
- int ret;
-
- mutex_lock(&npu.mcu_ctrl_lock);
-
- if (!npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail)
- mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
- else if (npu.in_reset || npu.state_trans_fail)
- mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
-
- npu.state_trans_fail = false;
- npu.in_recover = true;
-
- while ((ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE))) {
- if (npu.shuting_down)
- goto unlock;
-
- npu.mcu_bring_up_done = false;
- npu.state_trans_fail = false;
- TOPS_ERR("bring up failed: %d\n", ret);
-
- msleep(1000);
-
- mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
- }
-
-unlock:
- mutex_unlock(&npu.mcu_ctrl_lock);
-}
-
-static int mtk_tops_mcu_register_mbox(void)
-{
- int ret;
- int i;
-
- ret = register_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
- if (ret) {
- TOPS_ERR("register mcu_ctrl mgmt mbox send failed: %d\n", ret);
- return ret;
- }
-
- ret = register_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
- if (ret) {
- TOPS_ERR("register mcu_ctrl mgmt mbox recv failed: %d\n", ret);
- goto err_unregister_mgmt_mbox_send;
- }
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- ret = register_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
- if (ret) {
- TOPS_ERR("register mcu_ctrl offload %d mbox send failed: %d\n",
- i, ret);
- goto err_unregister_offload_mbox;
- }
-
- ret = register_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
- if (ret) {
- TOPS_ERR("register mcu_ctrl offload %d mbox recv failed: %d\n",
- i, ret);
- unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
- goto err_unregister_offload_mbox;
- }
- }
-
- return ret;
-
-err_unregister_offload_mbox:
- for (i -= 1; i >= 0; i--) {
- unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
- unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
- }
-
- unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
-
-err_unregister_mgmt_mbox_send:
- unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
-
- return ret;
-}
-
-static void mtk_tops_mcu_unregister_mbox(void)
-{
- int i;
-
- unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
- unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
- unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
- }
-}
-
-static void mtk_tops_mcu_shutdown(void)
-{
- npu.shuting_down = true;
-
- mutex_lock(&npu.mcu_ctrl_lock);
-
- mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
-
- wait_event_interruptible(npu.mcu_ctrl_wait_done,
- !npu.mcu_bring_up_done && !npu.shuting_down);
-
- mutex_unlock(&npu.mcu_ctrl_lock);
-}
-
-/* TODO: should be implemented so it does not block other modules' init tasks */
-static int mtk_tops_mcu_cold_boot(void)
-{
- int ret = 0;
-
- npu.cur_state = &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
-
- mutex_lock(&npu.mcu_ctrl_lock);
-
- mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
- ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);
-
- mutex_unlock(&npu.mcu_ctrl_lock);
- if (!ret)
- return ret;
-
- TOPS_ERR("cold boot failed: %d\n", ret);
-
- schedule_work(&npu.recover_work);
-
- return 0;
-}
-
-int mtk_tops_mcu_bring_up(struct platform_device *pdev)
-{
- int ret = 0;
-
- pm_runtime_enable(&pdev->dev);
-
- ret = mtk_tops_mcu_register_mbox();
- if (ret) {
- TOPS_ERR("register mcu ctrl mbox failed: %d\n", ret);
- goto runtime_disable;
- }
-
- npu.mcu_ctrl_thread = kthread_run(mcu_ctrl_task, NULL, "tops mcu ctrl task");
- if (IS_ERR(npu.mcu_ctrl_thread)) {
- ret = PTR_ERR(npu.mcu_ctrl_thread);
- TOPS_ERR("mcu ctrl thread create failed: %d\n", ret);
- goto err_unregister_mbox;
- }
-
- ret = mtk_tops_mcu_cold_boot();
- if (ret) {
- TOPS_ERR("cold boot failed: %d\n", ret);
- goto err_stop_mcu_ctrl_thread;
- }
-
- return ret;
-
-err_stop_mcu_ctrl_thread:
- kthread_stop(npu.mcu_ctrl_thread);
-
-err_unregister_mbox:
- mtk_tops_mcu_unregister_mbox();
-
-runtime_disable:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
-void mtk_tops_mcu_tear_down(struct platform_device *pdev)
-{
- mtk_tops_mcu_shutdown();
-
- kthread_stop(npu.mcu_ctrl_thread);
-
- /* TODO: stop mcu? */
-
- mtk_tops_mcu_unregister_mbox();
-
- pm_runtime_disable(&pdev->dev);
-}
-
-static int mtk_tops_mcu_dts_init(struct platform_device *pdev)
-{
- struct device_node *node = pdev->dev.of_node;
- struct resource *res = NULL;
- int ret = 0;
-
- if (!node)
- return -EINVAL;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
- if (!res) {
- TOPS_ERR("can not find tops base\n");
- return -ENXIO;
- }
-
- npu.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!npu.base) {
- TOPS_ERR("map tops base failed\n");
- return -ENOMEM;
- }
-
- npu.bus_clk = devm_clk_get(tops_dev, "bus");
- if (IS_ERR(npu.bus_clk)) {
- TOPS_ERR("get bus clk failed: %ld\n", PTR_ERR(npu.bus_clk));
- return PTR_ERR(npu.bus_clk);
- }
-
- npu.sram_clk = devm_clk_get(tops_dev, "sram");
- if (IS_ERR(npu.sram_clk)) {
- TOPS_ERR("get sram clk failed: %ld\n", PTR_ERR(npu.sram_clk));
- return PTR_ERR(npu.sram_clk);
- }
-
- npu.xdma_clk = devm_clk_get(tops_dev, "xdma");
- if (IS_ERR(npu.xdma_clk)) {
- TOPS_ERR("get xdma clk failed: %ld\n", PTR_ERR(npu.xdma_clk));
- return PTR_ERR(npu.xdma_clk);
- }
-
- npu.offload_clk = devm_clk_get(tops_dev, "offload");
- if (IS_ERR(npu.offload_clk)) {
- TOPS_ERR("get offload clk failed: %ld\n", PTR_ERR(npu.offload_clk));
- return PTR_ERR(npu.offload_clk);
- }
-
- npu.mgmt_clk = devm_clk_get(tops_dev, "mgmt");
- if (IS_ERR(npu.mgmt_clk)) {
- TOPS_ERR("get mgmt clk failed: %ld\n", PTR_ERR(npu.mgmt_clk));
- return PTR_ERR(npu.mgmt_clk);
- }
-
- return ret;
-}
-
-static void mtk_tops_mcu_pm_domain_detach(void)
-{
- int i = npu.pd_num;
-
- while (--i >= 0) {
- device_link_del(npu.pd_links[i]);
- dev_pm_domain_detach(npu.pd_devices[i], true);
- }
-}
-
-static int mtk_tops_mcu_pm_domain_attach(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- int ret = 0;
- int i;
-
- npu.pd_num = of_count_phandle_with_args(dev->of_node,
- "power-domains",
- "#power-domain-cells");
-
-	/* only 1 power domain exists, no need to link devices */
- if (npu.pd_num <= 1)
- return 0;
-
- npu.pd_devices = devm_kmalloc_array(dev, npu.pd_num,
- sizeof(struct device),
- GFP_KERNEL);
- if (!npu.pd_devices)
- return -ENOMEM;
-
- npu.pd_links = devm_kmalloc_array(dev, npu.pd_num,
- sizeof(*npu.pd_links),
- GFP_KERNEL);
- if (!npu.pd_links)
- return -ENOMEM;
-
- for (i = 0; i < npu.pd_num; i++) {
- npu.pd_devices[i] = dev_pm_domain_attach_by_id(dev, i);
- if (IS_ERR(npu.pd_devices[i])) {
- ret = PTR_ERR(npu.pd_devices[i]);
- goto pm_attach_fail;
- }
-
- npu.pd_links[i] = device_link_add(dev, npu.pd_devices[i],
- DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME);
- if (!npu.pd_links[i]) {
- ret = -EINVAL;
- dev_pm_domain_detach(npu.pd_devices[i], false);
- goto pm_attach_fail;
- }
- }
-
- return 0;
-
-pm_attach_fail:
- TOPS_ERR("attach power domain failed: %d\n", ret);
-
- while (--i >= 0) {
- device_link_del(npu.pd_links[i]);
- dev_pm_domain_detach(npu.pd_devices[i], false);
- }
-
- return ret;
-}
-
-int mtk_tops_mcu_init(struct platform_device *pdev)
-{
- int ret = 0;
-
- dma_set_mask(tops_dev, DMA_BIT_MASK(32));
-
- ret = mtk_tops_mcu_dts_init(pdev);
- if (ret)
- return ret;
-
- ret = mtk_tops_mcu_pm_domain_attach(pdev);
- if (ret)
- return ret;
-
- INIT_WORK(&npu.recover_work, mtk_tops_mcu_recover_work);
- init_waitqueue_head(&npu.mcu_ctrl_wait_act);
- init_waitqueue_head(&npu.mcu_ctrl_wait_done);
- init_waitqueue_head(&npu.mcu_state_wait_done);
- spin_lock_init(&npu.pending_act_lock);
- spin_lock_init(&npu.ctrl_done_lock);
- mutex_init(&npu.mcu_ctrl_lock);
- timer_setup(&npu.mcu_ctrl_timer, mtk_tops_mcu_state_trans_timeout, 0);
-
- ret = mtk_trm_hw_config_register(TRM_TOPS, &mcu_trm_hw_cfg);
- if (ret) {
- TOPS_ERR("TRM register failed: %d\n", ret);
- return ret;
- }
-
- return ret;
-}
-
-void mtk_tops_mcu_deinit(struct platform_device *pdev)
-{
- mtk_trm_hw_config_unregister(TRM_TOPS, &mcu_trm_hw_cfg);
-
- mtk_tops_mcu_pm_domain_detach();
-}
-
-static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
- struct mailbox_msg *msg)
-{
- if (msg->msg1 == npu.ctrl_done_cmd)
- /* mcu side state transition success */
- mcu_ctrl_done(mdev->core);
- else
- /* mcu side state transition failed */
- npu.state_trans_fail = true;
-
- wake_up_interruptible(&npu.mcu_state_wait_done);
-
- return MBOX_NO_RET_MSG;
-}
-
-static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
- struct mailbox_msg *msg)
-{
- if (msg->msg1 == npu.ctrl_done_cmd)
- /* mcu side state transition success */
- mcu_ctrl_done(mdev->core);
- else
- /* mcu side state transition failed */
- npu.state_trans_fail = true;
-
- wake_up_interruptible(&npu.mcu_state_wait_done);
-
- return MBOX_NO_RET_MSG;
-}
diff --git a/feed/kernel/tops/src/misc.c b/feed/kernel/tops/src/misc.c
deleted file mode 100644
index c61f647..0000000
--- a/feed/kernel/tops/src/misc.c
+++ /dev/null
@@ -1,66 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuo@mediatek.com>
- */
-
-#include "tops/internal.h"
-#include "tops/misc.h"
-#include "tops/mbox.h"
-#include "tops/netsys.h"
-
-static struct mailbox_dev offload_send_mbox_dev[CORE_OFFLOAD_NUM] = {
- [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, MISC),
- [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, MISC),
- [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, MISC),
- [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, MISC),
-};
-
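-/* tell every offload core how many PPEs the SoC provides */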
-int mtk_tops_misc_set_ppe_num(void)
-{
- struct mailbox_msg msg = {
- .msg1 = MISC_CMD_TYPE_SET_PPE_NUM,
- .msg2 = mtk_tops_netsys_ppe_get_num(),
- };
- enum core_id core;
- int ret;
-
- for (core = CORE_OFFLOAD_0; core < CORE_OFFLOAD_NUM; core++) {
- ret = mbox_send_msg_no_wait(&offload_send_mbox_dev[core], &msg);
- /* TODO: error handle? */
- if (ret)
- TOPS_ERR("core offload%u set PPE num failed: %d\n",
- core, ret);
- }
-
- return ret;
-}
-
-int mtk_tops_misc_init(struct platform_device *pdev)
-{
- enum core_id core;
- int ret;
-
- for (core = CORE_OFFLOAD_0; core < CORE_OFFLOAD_NUM; core++) {
- ret = register_mbox_dev(MBOX_SEND, &offload_send_mbox_dev[core]);
- if (ret)
- goto err_out;
- }
-
- return ret;
-
-err_out:
- for (; core > 0; core--)
- unregister_mbox_dev(MBOX_SEND, &offload_send_mbox_dev[core - 1]);
-
- return ret;
-}
-
-void mtk_tops_misc_deinit(struct platform_device *pdev)
-{
- enum core_id core;
-
- for (core = CORE_OFFLOAD_0; core < CORE_OFFLOAD_NUM; core++)
- unregister_mbox_dev(MBOX_SEND, &offload_send_mbox_dev[core]);
-}
diff --git a/feed/kernel/tops/src/net-event.c b/feed/kernel/tops/src/net-event.c
deleted file mode 100644
index 6143a03..0000000
--- a/feed/kernel/tops/src/net-event.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/device.h>
-#include <linux/hashtable.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/notifier.h>
-#include <net/arp.h>
-#include <net/flow.h>
-#include <net/ip.h>
-#include <net/ip_tunnels.h>
-#include <net/netevent.h>
-#include <net/net_namespace.h>
-#include <net/neighbour.h>
-#include <net/route.h>
-
-#include "tops/internal.h"
-#include "tops/netsys.h"
-#include "tops/net-event.h"
-#include "tops/mcu.h"
-#include "tops/ser.h"
-#include "tops/trm.h"
-#include "tops/tunnel.h"
-
-static struct completion wait_fe_reset_done;
-
-static void mtk_tops_netdev_ser_callback(struct tops_ser_params *ser_param)
-{
- struct net_device *netdev = ser_param->data.net.ndev;
-
- WARN_ON(ser_param->type != TOPS_SER_NETSYS_FE_RST);
-
- mtk_trm_dump(TRM_RSN_FE_RESET);
-
- /* send tops dump done notification to mtk eth */
- rtnl_lock();
- call_netdevice_notifiers(MTK_TOPS_DUMP_DONE, netdev);
- rtnl_unlock();
-
- /* wait for FE reset done notification */
-	/* TODO : handle the case where the FE reset done notification never arrives */
- wait_for_completion(&wait_fe_reset_done);
-}
-
-static inline void mtk_tops_netdev_ser(struct net_device *dev)
-{
- struct tops_ser_params ser_params = {
- .type = TOPS_SER_NETSYS_FE_RST,
- .data.net.ndev = dev,
- .ser_callback = mtk_tops_netdev_ser_callback,
- };
-
- mtk_tops_ser(&ser_params);
-}
-
-/* TODO: update tunnel status when the user deletes or changes tunnel parameters */
-/*
- * eth sends out the MTK_FE_START_RESET event if it detects a wdma abnormality,
- * or the MTK_FE_STOP_TRAFFIC event if it detects a qdma, adma or tdma
- * abnormality, and then performs an FE reset, so we use the same mcu event to
- * represent both.
- *
- * after the FE reset is done, eth sends out the MTK_FE_START_TRAFFIC event if
- * the FE reset was induced by a wdma abnormality, or the MTK_FE_RESET_DONE
- * event for a qdma, adma or tdma induced FE reset.
- */
-static int mtk_tops_netdev_callback(struct notifier_block *nb,
- unsigned long event,
- void *data)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(data);
- int ret = 0;
-
- switch (event) {
- case NETDEV_UP:
- break;
- case NETDEV_DOWN:
- mtk_tops_tnl_offload_netdev_down(dev);
- break;
- case MTK_FE_START_RESET:
- case MTK_FE_STOP_TRAFFIC:
- mtk_tops_netdev_ser(dev);
- break;
- case MTK_FE_RESET_DONE:
- case MTK_FE_START_TRAFFIC:
- complete(&wait_fe_reset_done);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static struct notifier_block mtk_tops_netdev_notifier = {
- .notifier_call = mtk_tops_netdev_callback,
-};
-
-static int mtk_tops_netevent_callback(struct notifier_block *nb,
- unsigned long event,
- void *data)
-{
- int ret = 0;
-
- switch (event) {
- case NETEVENT_NEIGH_UPDATE:
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-static struct notifier_block mtk_tops_netevent_notifier = {
- .notifier_call = mtk_tops_netevent_callback,
-};
-
-int mtk_tops_netevent_register(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = register_netdevice_notifier(&mtk_tops_netdev_notifier);
- if (ret) {
- TOPS_ERR("TOPS register netdev notifier failed: %d\n", ret);
- return ret;
- }
-
- ret = register_netevent_notifier(&mtk_tops_netevent_notifier);
- if (ret) {
- unregister_netdevice_notifier(&mtk_tops_netdev_notifier);
- TOPS_ERR("TOPS register net event notifier failed: %d\n", ret);
- return ret;
- }
-
- init_completion(&wait_fe_reset_done);
-
- return ret;
-}
-
-void mtk_tops_netevent_unregister(struct platform_device *pdev)
-{
- unregister_netevent_notifier(&mtk_tops_netevent_notifier);
-
- unregister_netdevice_notifier(&mtk_tops_netdev_notifier);
-}
diff --git a/feed/kernel/tops/src/netsys.c b/feed/kernel/tops/src/netsys.c
deleted file mode 100644
index e1770c9..0000000
--- a/feed/kernel/tops/src/netsys.c
+++ /dev/null
@@ -1,215 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-#include <mtk_hnat/hnat.h>
-
-#include <pce/netsys.h>
-
-#include "tops/hpdma.h"
-#include "tops/internal.h"
-#include "tops/mcu.h"
-#include "tops/netsys.h"
-#include "tops/tdma.h"
-#include "tops/trm.h"
-
-/* Netsys dump length */
-#define FE_BASE_LEN (0x2900)
-
-#define PPE_DEFAULT_ENTRY_SIZE (0x400)
-
-static int netsys_trm_hw_dump(void *dst, u32 ofs, u32 len);
-
-struct netsys_hw {
- void __iomem *base;
- u32 ppe_num;
-};
-
-static struct netsys_hw netsys;
-
-static struct trm_config netsys_trm_configs[] = {
- {
- TRM_CFG_EN("netsys-fe",
- FE_BASE, FE_BASE_LEN,
- 0x0, FE_BASE_LEN,
- 0)
- },
-};
-
-static struct trm_hw_config netsys_trm_hw_cfg = {
- .trm_cfgs = netsys_trm_configs,
- .cfg_len = ARRAY_SIZE(netsys_trm_configs),
- .trm_hw_dump = netsys_trm_hw_dump,
-};
-
-static inline void netsys_write(u32 reg, u32 val)
-{
- writel(val, netsys.base + reg);
-}
-
-static inline void netsys_set(u32 reg, u32 mask)
-{
- setbits(netsys.base + reg, mask);
-}
-
-static inline void netsys_clr(u32 reg, u32 mask)
-{
- clrbits(netsys.base + reg, mask);
-}
-
-static inline void netsys_rmw(u32 reg, u32 mask, u32 val)
-{
- clrsetbits(netsys.base + reg, mask, val);
-}
-
-static inline u32 netsys_read(u32 reg)
-{
- return readl(netsys.base + reg);
-}
-
-static int netsys_trm_hw_dump(void *dst, u32 start_addr, u32 len)
-{
- u32 ofs;
-
- if (unlikely(!dst))
- return -ENODEV;
-
- for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
- writel(netsys_read(start_addr + ofs), dst + ofs);
-
- return 0;
-}
-
-static inline void ppe_rmw(enum pse_port ppe, u32 reg, u32 mask, u32 val)
-{
- if (ppe == PSE_PORT_PPE0)
- netsys_rmw(PPE0_BASE + reg, mask, val);
- else if (ppe == PSE_PORT_PPE1)
- netsys_rmw(PPE1_BASE + reg, mask, val);
- else if (ppe == PSE_PORT_PPE2)
- netsys_rmw(PPE2_BASE + reg, mask, val);
-}
-
-static inline u32 ppe_read(enum pse_port ppe, u32 reg)
-{
- if (ppe == PSE_PORT_PPE0)
- return netsys_read(PPE0_BASE + reg);
- else if (ppe == PSE_PORT_PPE1)
- return netsys_read(PPE1_BASE + reg);
- else if (ppe == PSE_PORT_PPE2)
- return netsys_read(PPE2_BASE + reg);
-
- return 0;
-}
-
-u32 mtk_tops_netsys_ppe_get_num(void)
-{
- return netsys.ppe_num;
-}
-
-u32 mtk_tops_netsys_ppe_get_max_entry_num(u32 ppe_id)
-{
- u32 tbl_entry_num;
- enum pse_port ppe;
-
- if (ppe_id == 0)
- ppe = PSE_PORT_PPE0;
- else if (ppe_id == 1)
- ppe = PSE_PORT_PPE1;
- else if (ppe_id == 2)
- ppe = PSE_PORT_PPE2;
- else
- return PPE_DEFAULT_ENTRY_SIZE << 5; /* max entry count */
-
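-	/* the PPE_TBL_CFG value is a shift on the default entry size, capped at 5 */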
- tbl_entry_num = ppe_read(ppe, PPE_TBL_CFG);
- if (tbl_entry_num > 5)
- return PPE_DEFAULT_ENTRY_SIZE << 5;
-
- return PPE_DEFAULT_ENTRY_SIZE << tbl_entry_num;
-}
-
-static int mtk_tops_netsys_base_init(struct platform_device *pdev)
-{
- struct device_node *fe_mem = NULL;
- struct resource res;
- int ret = 0;
-
- fe_mem = of_parse_phandle(pdev->dev.of_node, "fe_mem", 0);
- if (!fe_mem) {
- TOPS_ERR("can not find fe_mem node\n");
- return -ENODEV;
- }
-
- if (of_address_to_resource(fe_mem, 0, &res)) {
- ret = -ENXIO;
- goto out;
- }
-
- netsys.base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
- if (!netsys.base) {
- ret = -ENOMEM;
- goto out;
- }
-
-out:
- of_node_put(fe_mem);
-
- return ret;
-}
-
-static int mtk_tops_netsys_ppe_num_init(struct platform_device *pdev)
-{
- struct device_node *hnat = NULL;
- u32 val = 0;
- int ret = 0;
-
- hnat = of_parse_phandle(pdev->dev.of_node, "hnat", 0);
- if (!hnat) {
- TOPS_ERR("can not find hnat node\n");
- return -ENODEV;
- }
-
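-	/* default to a single PPE if the hnat node does not specify mtketh-ppe-num */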
- ret = of_property_read_u32(hnat, "mtketh-ppe-num", &val);
- if (ret)
- netsys.ppe_num = 1;
- else
- netsys.ppe_num = val;
-
- of_node_put(hnat);
-
- return 0;
-}
-
-int mtk_tops_netsys_init(struct platform_device *pdev)
-{
- int ret;
-
- ret = mtk_tops_netsys_base_init(pdev);
- if (ret)
- return ret;
-
- ret = mtk_tops_netsys_ppe_num_init(pdev);
- if (ret)
- return ret;
-
- ret = mtk_trm_hw_config_register(TRM_NETSYS, &netsys_trm_hw_cfg);
- if (ret)
- return ret;
-
- return ret;
-}
-
-void mtk_tops_netsys_deinit(struct platform_device *pdev)
-{
- mtk_trm_hw_config_unregister(TRM_NETSYS, &netsys_trm_hw_cfg);
-}
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/mac/eth.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/mac/eth.h
deleted file mode 100644
index f306230..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/mac/eth.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_ETH_H_
-#define _TOPS_ETH_H_
-
-#include <linux/if_ether.h>
-
-#include "tops/tops_params.h"
-
-int
-mtk_tops_eth_encap_param_setup(
- struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params));
-int mtk_tops_eth_decap_param_setup(struct sk_buff *skb, struct tops_params *params);
-int mtk_tops_eth_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params);
-void mtk_tops_eth_param_dump(struct seq_file *s, struct tops_params *params);
-#endif /* _TOPS_ETH_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/mac/ppp.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/mac/ppp.h
deleted file mode 100644
index b5f1cb4..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/mac/ppp.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- */
-
-#ifndef _TOPS_PPP_H_
-#define _TOPS_PPP_H_
-
-#include <linux/ppp_defs.h>
-#include <linux/skbuff.h>
-#include <linux/types.h>
-
-/* Limited support: ppp header, no options */
-struct ppp_hdr {
- u8 addr;
- u8 ctrl;
- u16 proto;
-};
-
-bool mtk_tops_ppp_valid(struct sk_buff *skb);
-#endif /* _TOPS_PPP_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/network/ip.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/network/ip.h
deleted file mode 100644
index 3182b5d..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/network/ip.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_IP_H_
-#define _TOPS_IP_H_
-
-#include <linux/ip.h>
-#include <uapi/linux/in.h>
-
-#include "tops/protocol/network/ip_params.h"
-#include "tops/tops_params.h"
-
-int mtk_tops_ip_encap_param_setup(
- struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params));
-int mtk_tops_ip_decap_param_setup(struct sk_buff *skb, struct tops_params *params);
-int mtk_tops_ip_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params);
-void mtk_tops_ip_param_dump(struct seq_file *s, struct tops_params *params);
-#endif /* _TOPS_IP_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/network/ip_params.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/network/ip_params.h
deleted file mode 100644
index 9583862..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/network/ip_params.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_IP_PARAMS_H_
-#define _TOPS_IP_PARAMS_H_
-
-#include <linux/types.h>
-
-struct tops_ip_params {
- __be32 sip;
- __be32 dip;
- u8 proto;
- u8 tos;
- u8 ttl;
-};
-#endif /* _TOPS_IP_PARAMS_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/transport/udp.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/transport/udp.h
deleted file mode 100644
index 9ee8977..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/transport/udp.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_UDP_H_
-#define _TOPS_UDP_H_
-
-#include "tops/protocol/transport/udp_params.h"
-#include "tops/tops_params.h"
-
-int mtk_tops_udp_encap_param_setup(
- struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params));
-int mtk_tops_udp_decap_param_setup(struct sk_buff *skb, struct tops_params *params);
-int mtk_tops_udp_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params);
-void mtk_tops_udp_param_dump(struct seq_file *s, struct tops_params *params);
-#endif /* _TOPS_UDP_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/transport/udp_params.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/transport/udp_params.h
deleted file mode 100644
index b66476c..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/transport/udp_params.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_UDP_PARAMS_H_
-#define _TOPS_UDP_PARAMS_H_
-
-#include <linux/types.h>
-#include <linux/udp.h>
-
-struct tops_udp_params {
- u16 sport;
- u16 dport;
-};
-#endif /* _TOPS_UDP_PARAMS_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/gre/gretap.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/gre/gretap.h
deleted file mode 100644
index 46a68db..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/gre/gretap.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_GRETAP_H_
-#define _TOPS_GRETAP_H_
-
-#include "tops/tops_params.h"
-
-#if defined(CONFIG_MTK_TOPS_GRETAP)
-int mtk_tops_gretap_decap_param_setup(struct sk_buff *skb, struct tops_params *params);
-int mtk_tops_gretap_init(void);
-void mtk_tops_gretap_deinit(void);
-#else /* !defined(CONFIG_MTK_TOPS_GRETAP) */
-static inline int mtk_tops_gretap_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- return -ENODEV;
-}
-
-static inline int mtk_tops_gretap_init(void)
-{
- return 0;
-}
-
-static inline void mtk_tops_gretap_deinit(void)
-{
-}
-#endif /* defined(CONFIG_MTK_TOPS_GRETAP) */
-#endif /* _TOPS_GRETAP_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/l2tp/l2tp_params.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/l2tp/l2tp_params.h
deleted file mode 100644
index 62db153..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/l2tp/l2tp_params.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <rank-zj.lin@mediatek.com>
- */
-
-#ifndef _TOPS_L2TP_H_
-#define _TOPS_L2TP_H_
-
-/* L2TP header constants */
-#define L2TP_HDRFLAG_T 0x8000
-#define L2TP_HDRFLAG_L 0x4000
-
-#define L2TP_HDR_VER_MASK 0x000F
-#define L2TP_HDR_VER_2 0x0002
-#define L2TP_HDR_VER_3 0x0003
-
-#define UDP_L2TP_PORT 1701
-
-struct tops_l2tp_params {
- u16 dl_tid; /* l2tp tunnel id for DL */
- u16 dl_sid; /* l2tp session id for DL */
- u16 ul_tid; /* l2tp tunnel id for UL */
- u16 ul_sid; /* l2tp session id for UL */
-};
-
-/* Limited support: L2TPv2 only, no length field, no options */
-struct udp_l2tp_data_hdr {
- u16 flag_ver;
- u16 tid;
- u16 sid;
-};
-#endif /* _TOPS_L2TP_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/l2tp/l2tpv2.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/l2tp/l2tpv2.h
deleted file mode 100644
index a757f11..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/l2tp/l2tpv2.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#ifndef _TOPS_L2TP_V2_H_
-#define _TOPS_L2TP_V2_H_
-
-#include "tops/protocol/tunnel/l2tp/l2tp_params.h"
-
-#if defined(CONFIG_MTK_TOPS_L2TP_V2)
-int mtk_tops_l2tpv2_init(void);
-void mtk_tops_l2tpv2_deinit(void);
-#else /* !defined(CONFIG_MTK_TOPS_L2TP_V2) */
-static inline int mtk_tops_l2tpv2_init(void)
-{
- return 0;
-}
-
-static inline void mtk_tops_l2tpv2_deinit(void)
-{
-}
-#endif /* defined(CONFIG_MTK_TOPS_L2TP_V2) */
-#endif /* _TOPS_L2TP_V2_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/pptp/pptp.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/pptp/pptp.h
deleted file mode 100644
index 950c984..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/pptp/pptp.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- */
-
-#ifndef _TOPS_PPTP_H_
-#define _TOPS_PPTP_H_
-
-int mtk_tops_pptp_seq_get_seq_gen_idx(u16 call_id, int *seq_gen_idx);
-int mtk_tops_pptp_seq_alloc(u16 call_id, u32 start, int *seq_gen_idx);
-void mtk_tops_pptp_seq_free(u16 call_id);
-int mtk_tops_pptp_seq_init(void);
-void mtk_tops_pptp_seq_deinit(void);
-
-#if defined(CONFIG_MTK_TOPS_PPTP)
-int mtk_tops_pptp_init(void);
-void mtk_tops_pptp_deinit(void);
-#else /* !defined(CONFIG_MTK_TOPS_PPTP) */
-static inline int mtk_tops_pptp_init(void)
-{
- return 0;
-}
-
-static inline void mtk_tops_pptp_deinit(void)
-{
-}
-#endif /* defined(CONFIG_MTK_TOPS_PPTP) */
-#endif /* _TOPS_PPTP_H_ */
diff --git a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/pptp/pptp_params.h b/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/pptp/pptp_params.h
deleted file mode 100644
index 0ac4b9b..0000000
--- a/feed/kernel/tops/src/protocol/inc/tops/protocol/tunnel/pptp/pptp_params.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- */
-
-#ifndef _TOPS_PPTP_PARAMS_H_
-#define _TOPS_PPTP_PARAMS_H_
-
-#define PPTP_GRE_HDR_ACK_LEN 4
-
-struct tops_pptp_params {
- u16 dl_call_id; /* call id for download */
- u16 ul_call_id; /* call id for upload */
- u8 seq_gen_idx; /* seq generator idx */
-};
-#endif /* _TOPS_PPTP_PARAMS_H_ */
diff --git a/feed/kernel/tops/src/protocol/mac/eth.c b/feed/kernel/tops/src/protocol/mac/eth.c
deleted file mode 100644
index 37d4d59..0000000
--- a/feed/kernel/tops/src/protocol/mac/eth.c
+++ /dev/null
@@ -1,130 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include "tops/internal.h"
-#include "tops/protocol/mac/eth.h"
-#include "tops/protocol/network/ip.h"
-
-#include <mtk_hnat/nf_hnat_mtk.h>
-
-int
-mtk_tops_eth_encap_param_setup(
- struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params))
-{
- struct ethhdr *eth = eth_hdr(skb);
-
- params->mac.type = TOPS_MAC_ETH;
-
- memcpy(¶ms->mac.eth.h_source, eth->h_source, ETH_ALEN);
- memcpy(¶ms->mac.eth.h_dest, eth->h_dest, ETH_ALEN);
- params->mac.eth.h_proto = htons(ETH_P_IP);
-
-	/*
-	 * either the packet already has a constructed ethernet header with IP,
-	 * or it is going to do xfrm encryption
-	 */
- if ((ntohs(eth->h_proto) == ETH_P_IP)
- || (!skb_hnat_cdrt(skb) && skb_dst(skb) && dst_xfrm(skb_dst(skb)))) {
- return mtk_tops_ip_encap_param_setup(skb,
- params,
- tnl_encap_param_setup);
- }
-
- TOPS_NOTICE("eth proto not support, proto: 0x%x\n",
- ntohs(eth->h_proto));
-
- return -EINVAL;
-}
-
-int mtk_tops_eth_decap_param_setup(struct sk_buff *skb, struct tops_params *params)
-{
- struct ethhdr *eth;
- struct ethhdr ethh;
- int ret = 0;
-
- skb_push(skb, sizeof(struct ethhdr));
- eth = skb_header_pointer(skb, 0, sizeof(struct ethhdr), ðh);
- if (unlikely(!eth)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (unlikely(ntohs(eth->h_proto) != ETH_P_IP)) {
- TOPS_NOTICE("eth proto not support, proto: 0x%x\n",
- ntohs(eth->h_proto));
- ret = -EINVAL;
- goto out;
- }
-
- params->mac.type = TOPS_MAC_ETH;
-
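-	/* record the MAC addresses swapped relative to the received frame */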
- memcpy(¶ms->mac.eth.h_source, eth->h_dest, ETH_ALEN);
- memcpy(¶ms->mac.eth.h_dest, eth->h_source, ETH_ALEN);
- params->mac.eth.h_proto = htons(ETH_P_IP);
-
-out:
- skb_pull(skb, sizeof(struct ethhdr));
-
- return ret;
-}
-
-static int tops_eth_debug_param_fetch_mac(const char *buf, int *ofs, u8 *mac)
-{
- int nchar = 0;
- int ret;
-
- ret = sscanf(buf + *ofs, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %n",
- &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5], &nchar);
- if (ret != 6)
- return -EPERM;
-
- *ofs += nchar;
-
- return 0;
-}
-
-int mtk_tops_eth_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params)
-{
- char proto[DEBUG_PROTO_LEN] = {0};
- int ret;
-
- params->mac.type = TOPS_MAC_ETH;
-
- ret = tops_eth_debug_param_fetch_mac(buf, ofs, params->mac.eth.h_source);
- if (ret)
- return ret;
-
- ret = tops_eth_debug_param_fetch_mac(buf, ofs, params->mac.eth.h_dest);
- if (ret)
- return ret;
-
- ret = mtk_tops_debug_param_proto_peek(buf, *ofs, proto);
- if (ret < 0)
- return ret;
-
- *ofs += ret;
-
- if (!strcmp(proto, DEBUG_PROTO_IP)) {
- params->mac.eth.h_proto = htons(ETH_P_IP);
- ret = mtk_tops_ip_debug_param_setup(buf, ofs, params);
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-void mtk_tops_eth_param_dump(struct seq_file *s, struct tops_params *params)
-{
- seq_puts(s, "\tMAC Type: Ethernet ");
- seq_printf(s, "saddr: %pM daddr: %pM\n",
- params->mac.eth.h_source, params->mac.eth.h_dest);
-}
diff --git a/feed/kernel/tops/src/protocol/mac/ppp.c b/feed/kernel/tops/src/protocol/mac/ppp.c
deleted file mode 100644
index 8b8400b..0000000
--- a/feed/kernel/tops/src/protocol/mac/ppp.c
+++ /dev/null
@@ -1,23 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include "tops/protocol/mac/ppp.h"
-
-bool mtk_tops_ppp_valid(struct sk_buff *skb)
-{
- struct ppp_hdr *ppp;
- struct ppp_hdr ppph;
-
- ppp = skb_header_pointer(skb, 0, sizeof(struct ppp_hdr), &ppph);
-
- if (unlikely(!ppp))
- return false;
-
- return (ppp->addr == PPP_ALLSTATIONS &&
- ppp->ctrl == PPP_UI && ntohs(ppp->proto) == PPP_IP);
-}
diff --git a/feed/kernel/tops/src/protocol/network/ip.c b/feed/kernel/tops/src/protocol/network/ip.c
deleted file mode 100644
index abe7f10..0000000
--- a/feed/kernel/tops/src/protocol/network/ip.c
+++ /dev/null
@@ -1,163 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include "tops/internal.h"
-#include "tops/protocol/network/ip.h"
-#include "tops/protocol/transport/udp.h"
-
-int mtk_tops_ip_encap_param_setup(
- struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params))
-{
- struct tops_ip_params *ipp = ¶ms->network.ip;
- struct iphdr *ip;
- struct iphdr iph;
- int ret;
-
- ip = skb_header_pointer(skb, 0, sizeof(struct iphdr), &iph);
- if (unlikely(!ip))
- return -EINVAL;
-
- if (unlikely(ip->version != IPVERSION)) {
- TOPS_NOTICE("ip ver: 0x%x invalid\n", ip->version);
- return -EINVAL;
- }
-
- params->network.type = TOPS_NETWORK_IPV4;
-
- ipp->proto = ip->protocol;
- ipp->sip = ip->saddr;
- ipp->dip = ip->daddr;
- ipp->tos = ip->tos;
- ipp->ttl = ip->ttl;
-
- skb_pull(skb, sizeof(struct iphdr));
-
- switch (ip->protocol) {
- case IPPROTO_UDP:
- ret = mtk_tops_udp_encap_param_setup(skb,
- params,
- tnl_encap_param_setup);
- break;
- case IPPROTO_GRE:
- ret = tnl_encap_param_setup(skb, params);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- skb_push(skb, sizeof(struct iphdr));
-
- return ret;
-}
-
-int mtk_tops_ip_decap_param_setup(struct sk_buff *skb, struct tops_params *params)
-{
- struct tops_ip_params *ipp;
- struct iphdr *ip;
- struct iphdr iph;
- int ret;
-
- skb_push(skb, sizeof(struct iphdr));
- ip = skb_header_pointer(skb, 0, sizeof(struct iphdr), &iph);
- if (unlikely(!ip)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (unlikely(ip->version != IPVERSION)) {
- ret = -EINVAL;
- goto out;
- }
-
- params->network.type = TOPS_NETWORK_IPV4;
-
- ipp = &params->network.ip;
-
- ipp->proto = ip->protocol;
- ipp->sip = ip->daddr;
- ipp->dip = ip->saddr;
- ipp->tos = ip->tos;
- /*
- * if encapsulation parameter is already configured, TTL will remain as
- * encapsulation's data
- */
- ipp->ttl = 128;
-
- ret = mtk_tops_mac_decap_param_setup(skb, params);
-
-out:
- skb_pull(skb, sizeof(struct iphdr));
-
- return ret;
-}
-
-static int tops_ip_debug_param_fetch_ip(const char *buf, int *ofs, u32 *ip)
-{
- int nchar = 0;
- int ret = 0;
- u8 tmp[4];
-
- ret = sscanf(buf + *ofs, "%hhu.%hhu.%hhu.%hhu %n",
- &tmp[3], &tmp[2], &tmp[1], &tmp[0], &nchar);
- if (ret != 4)
- return -EPERM;
-
- *ip = tmp[3] | tmp[2] << 8 | tmp[1] << 16 | tmp[0] << 24;
-
- *ofs += nchar;
-
- return 0;
-}
-
-int mtk_tops_ip_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params)
-{
- char proto[DEBUG_PROTO_LEN] = {0};
- int ret;
-
- params->network.type = TOPS_NETWORK_IPV4;
-
- ret = tops_ip_debug_param_fetch_ip(buf, ofs, &params->network.ip.sip);
- if (ret)
- return ret;
-
- ret = tops_ip_debug_param_fetch_ip(buf, ofs, &params->network.ip.dip);
- if (ret)
- return ret;
-
- ret = mtk_tops_debug_param_proto_peek(buf, *ofs, proto);
- if (ret < 0)
- return ret;
-
- if (!strcmp(proto, DEBUG_PROTO_UDP)) {
- params->network.ip.proto = IPPROTO_UDP;
- *ofs += ret;
- ret = mtk_tops_udp_debug_param_setup(buf, ofs, params);
- } else if (!strcmp(proto, DEBUG_PROTO_GRETAP)) {
- params->network.ip.proto = IPPROTO_GRE;
- ret = 0;
- } else {
- ret = -EINVAL;
- }
-
- return ret;
-}
-
-void mtk_tops_ip_param_dump(struct seq_file *s, struct tops_params *params)
-{
- struct tops_ip_params *ipp = &params->network.ip;
- u32 sip = params->network.ip.sip;
- u32 dip = params->network.ip.dip;
-
- seq_puts(s, "\tNetwork Type: IPv4 ");
- seq_printf(s, "sip: %pI4 dip: %pI4 protocol: 0x%02x tos: 0x%02x ttl: %03u\n",
- &sip, &dip, ipp->proto, ipp->tos, ipp->ttl);
-}
diff --git a/feed/kernel/tops/src/protocol/transport/udp.c b/feed/kernel/tops/src/protocol/transport/udp.c
deleted file mode 100644
index ab653e0..0000000
--- a/feed/kernel/tops/src/protocol/transport/udp.c
+++ /dev/null
@@ -1,110 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include "tops/protocol/network/ip.h"
-#include "tops/protocol/transport/udp.h"
-
-int mtk_tops_udp_encap_param_setup(
- struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params))
-{
- struct tops_udp_params *udpp = &params->transport.udp;
- struct udphdr *udp;
- struct udphdr udph;
- int ret;
-
- udp = skb_header_pointer(skb, 0, sizeof(struct udphdr), &udph);
- if (unlikely(!udp))
- return -EINVAL;
-
- params->transport.type = TOPS_TRANSPORT_UDP;
-
- udpp->sport = udp->source;
- udpp->dport = udp->dest;
-
- skb_pull(skb, sizeof(struct udphdr));
-
- /* udp must be the end of a tunnel */
- ret = tnl_encap_param_setup(skb, params);
-
- skb_push(skb, sizeof(struct udphdr));
-
- return ret;
-}
-
-int mtk_tops_udp_decap_param_setup(struct sk_buff *skb, struct tops_params *params)
-{
- struct tops_udp_params *udpp = &params->transport.udp;
- struct udphdr *udp;
- struct udphdr udph;
- int ret;
-
- skb_push(skb, sizeof(struct udphdr));
- udp = skb_header_pointer(skb, 0, sizeof(struct udphdr), &udph);
- if (unlikely(!udp)) {
- ret = -EINVAL;
- goto out;
- }
-
- params->transport.type = TOPS_TRANSPORT_UDP;
-
- udpp->sport = udp->dest;
- udpp->dport = udp->source;
-
- ret = mtk_tops_network_decap_param_setup(skb, params);
-
-out:
- skb_pull(skb, sizeof(struct udphdr));
-
- return ret;
-}
-
-static int tops_udp_debug_param_fetch_port(const char *buf, int *ofs, u16 *port)
-{
- int nchar = 0;
- int ret;
- u16 p = 0;
-
- ret = sscanf(buf + *ofs, "%hu %n", &p, &nchar);
- if (ret != 1)
- return -EPERM;
-
- *port = htons(p);
-
- *ofs += nchar;
-
- return 0;
-}
-
-int mtk_tops_udp_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params)
-{
- int ret;
-
- params->transport.type = TOPS_TRANSPORT_UDP;
-
- ret = tops_udp_debug_param_fetch_port(buf, ofs, &params->transport.udp.sport);
- if (ret)
- return ret;
-
- ret = tops_udp_debug_param_fetch_port(buf, ofs, &params->transport.udp.dport);
- if (ret)
- return ret;
-
- return ret;
-}
-
-void mtk_tops_udp_param_dump(struct seq_file *s, struct tops_params *params)
-{
- struct tops_udp_params *udpp = &params->transport.udp;
-
- seq_puts(s, "\tTransport Type: UDP ");
- seq_printf(s, "sport: %05u dport: %05u\n",
- ntohs(udpp->sport), ntohs(udpp->dport));
-}
diff --git a/feed/kernel/tops/src/protocol/tunnel/gre/gretap.c b/feed/kernel/tops/src/protocol/tunnel/gre/gretap.c
deleted file mode 100644
index 576d15c..0000000
--- a/feed/kernel/tops/src/protocol/tunnel/gre/gretap.c
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <net/gre.h>
-
-#include <pce/cls.h>
-#include <pce/netsys.h>
-#include <pce/pce.h>
-
-#include "tops/internal.h"
-#include "tops/netsys.h"
-#include "tops/protocol/tunnel/gre/gretap.h"
-#include "tops/tunnel.h"
-
-static int gretap_cls_entry_setup(struct tops_tnl_info *tnl_info,
- struct cls_desc *cdesc)
-{
- /*
- * If the system only has 1 PPE,
- * packets from any GDM will default forward to PPE0 first
- * If the system has 3 PPE,
- * packets from GDM1 will forward to PPE0
- * packets from GDM2 will forward to PPE1
- * packets from GDM3 will forward to PPE2
- */
- if (mtk_tops_netsys_ppe_get_num() == 1)
- CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
- else
- CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
-
- CLS_DESC_DATA(cdesc, tport_idx, 0x4);
- CLS_DESC_MASK_DATA(cdesc, tag, CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
- CLS_DESC_MASK_DATA(cdesc, dip_match, CLS_DESC_DIP_MATCH, CLS_DESC_DIP_MATCH);
- CLS_DESC_MASK_DATA(cdesc, l4_type, CLS_DESC_L4_TYPE_MASK, IPPROTO_GRE);
- CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
- CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
- CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
- CLS_DESC_MASK_DATA(cdesc, l4_valid,
- CLS_DESC_L4_VALID_MASK,
- CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
- CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
- CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data, 0x0000FFFF, 0x00006558);
-
- return 0;
-}
-
-static int gretap_tnl_encap_param_setup(struct sk_buff *skb, struct tops_params *params)
-{
- params->tunnel.type = TOPS_TUNNEL_GRETAP;
-
- return 0;
-}
-
-static int gretap_tnl_decap_param_setup(struct sk_buff *skb, struct tops_params *params)
-{
- struct gre_base_hdr *pgre;
- struct gre_base_hdr greh;
- int ret;
-
- if (!skb->dev->rtnl_link_ops
- || strcmp(skb->dev->rtnl_link_ops->kind, "gretap"))
- return -EAGAIN;
-
- skb_push(skb, sizeof(struct gre_base_hdr));
- pgre = skb_header_pointer(skb, 0, sizeof(struct gre_base_hdr), &greh);
- if (unlikely(!pgre)) {
- ret = -EINVAL;
- goto out;
- }
-
- if (unlikely(ntohs(pgre->protocol) != ETH_P_TEB)) {
- TOPS_NOTICE("gre: %p protocol unmatched, proto: 0x%x\n",
- pgre, ntohs(pgre->protocol));
- ret = -EINVAL;
- goto out;
- }
-
- params->tunnel.type = TOPS_TUNNEL_GRETAP;
-
- ret = mtk_tops_network_decap_param_setup(skb, params);
-
-out:
- skb_pull(skb, sizeof(struct gre_base_hdr));
-
- return ret;
-}
-
-static int gretap_tnl_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params)
-{
- params->tunnel.type = TOPS_TUNNEL_GRETAP;
-
- return 0;
-}
-
-static bool gretap_tnl_decap_offloadable(struct sk_buff *skb)
-{
- struct iphdr *ip = ip_hdr(skb);
- struct gre_base_hdr *pgre;
- struct gre_base_hdr greh;
-
- if (ip->protocol != IPPROTO_GRE)
- return false;
-
- pgre = skb_header_pointer(skb, ip_hdr(skb)->ihl * 4,
- sizeof(struct gre_base_hdr), &greh);
- if (unlikely(!pgre))
- return false;
-
- if (ntohs(pgre->protocol) != ETH_P_TEB)
- return false;
-
- return true;
-}
-
-static void gretap_tnl_param_dump(struct seq_file *s, struct tops_params *params)
-{
- seq_puts(s, "\tTunnel Type: GRETAP\n");
-}
-
-static bool gretap_tnl_param_match(struct tops_params *p, struct tops_params *target)
-{
- return !memcmp(&p->tunnel, &target->tunnel, sizeof(struct tops_tunnel_params));
-}
-
-static struct tops_tnl_type gretap_type = {
- .type_name = "gretap",
- .cls_entry_setup = gretap_cls_entry_setup,
- .tnl_decap_param_setup = gretap_tnl_decap_param_setup,
- .tnl_encap_param_setup = gretap_tnl_encap_param_setup,
- .tnl_debug_param_setup = gretap_tnl_debug_param_setup,
- .tnl_decap_offloadable = gretap_tnl_decap_offloadable,
- .tnl_param_match = gretap_tnl_param_match,
- .tnl_param_dump = gretap_tnl_param_dump,
- .tnl_proto_type = TOPS_TUNNEL_GRETAP,
- .has_inner_eth = true,
-};
-
-int mtk_tops_gretap_init(void)
-{
- return mtk_tops_tnl_type_register(&gretap_type);
-}
-
-void mtk_tops_gretap_deinit(void)
-{
- mtk_tops_tnl_type_unregister(&gretap_type);
-}
diff --git a/feed/kernel/tops/src/protocol/tunnel/l2tp/l2tpv2.c b/feed/kernel/tops/src/protocol/tunnel/l2tp/l2tpv2.c
deleted file mode 100644
index a066202..0000000
--- a/feed/kernel/tops/src/protocol/tunnel/l2tp/l2tpv2.c
+++ /dev/null
@@ -1,305 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/if_pppox.h>
-#include <linux/netdevice.h>
-#include <linux/ppp_channel.h>
-
-#include <l2tp_core.h>
-
-#include <pce/cls.h>
-#include <pce/netsys.h>
-#include <pce/pce.h>
-
-#include "tops/internal.h"
-#include "tops/netsys.h"
-#include "tops/protocol/mac/ppp.h"
-#include "tops/protocol/transport/udp.h"
-#include "tops/protocol/tunnel/l2tp/l2tpv2.h"
-#include "tops/tunnel.h"
-
-static int l2tpv2_cls_entry_setup(struct tops_tnl_info *tnl_info,
- struct cls_desc *cdesc)
-{
- /*
- * If the system only has 1 PPE,
- * packets from any GDM will default forward to PPE0 first
- * If the system has 3 PPE,
- * packets from GDM1 will forward to PPE0
- * packets from GDM2 will forward to PPE1
- * packets from GDM3 will forward to PPE2
- */
- if (mtk_tops_netsys_ppe_get_num() == 1)
- CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
- else
- CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
-
- CLS_DESC_DATA(cdesc, tport_idx, 0x4);
- CLS_DESC_MASK_DATA(cdesc, tag, CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_USR);
- CLS_DESC_MASK_DATA(cdesc, dip_match, CLS_DESC_DIP_MATCH, CLS_DESC_DIP_MATCH);
- CLS_DESC_MASK_DATA(cdesc, l4_type, CLS_DESC_L4_TYPE_MASK, IPPROTO_UDP);
- CLS_DESC_MASK_DATA(cdesc, l4_valid,
- CLS_DESC_L4_VALID_MASK,
- CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
- CLS_DESC_VALID_LOWER_HALF_WORD_BIT |
- CLS_DESC_VALID_DPORT_BIT);
- CLS_DESC_MASK_DATA(cdesc, l4_dport, CLS_DESC_L4_DPORT_MASK, 1701);
- CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data, 0x80030000, 0x00020000);
-
- return 0;
-}
-
-/* Helpers to obtain tunnel params from ppp netdev */
-static int l2tpv2_param_obtain_from_netdev(struct net_device *dev,
- struct tops_params *params)
-{
- struct tops_l2tp_params *l2tpp;
- struct l2tp_session *session;
- struct l2tp_tunnel *tunnel;
- struct sock *sk;
- int ret = 0;
-
- if (!dev || !params)
- return -EINVAL;
-
- sk = ppp_netdev_get_sock(dev);
- if (IS_ERR(sk) || !sk)
- return -EINVAL;
-
- sock_hold(sk);
- session = (struct l2tp_session *)(sk->sk_user_data);
- if (!session) {
- ret = -EINVAL;
- goto out;
- }
-
- if (session->magic != L2TP_SESSION_MAGIC) {
- ret = -EINVAL;
- goto out;
- }
-
- tunnel = session->tunnel;
-
- l2tpp = &params->tunnel.l2tp;
- l2tpp->dl_tid = htons(tunnel->tunnel_id);
- l2tpp->dl_sid = htons(session->session_id);
- l2tpp->ul_tid = htons(tunnel->peer_tunnel_id);
- l2tpp->ul_sid = htons(session->peer_session_id);
-out:
- sock_put(sk);
-
- return ret;
-}
-
-static inline bool l2tpv2_offload_valid(struct sk_buff *skb)
-{
- struct udp_l2tp_data_hdr *l2tp;
- struct udp_l2tp_data_hdr l2tph;
- u16 hdrflags;
-
- l2tp = skb_header_pointer(skb, 0, sizeof(struct udp_l2tp_data_hdr), &l2tph);
- if (!l2tp)
- return false;
-
- hdrflags = ntohs(l2tp->flag_ver);
-
- return ((hdrflags & L2TP_HDR_VER_MASK) == L2TP_HDR_VER_2 &&
- !(hdrflags & L2TP_HDRFLAG_T));
-}
-
-static int l2tpv2_tnl_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- int ret = 0;
-
- /* ppp */
- skb_push(skb, sizeof(struct ppp_hdr));
- if (unlikely(!mtk_tops_ppp_valid(skb))) {
- ret = -EINVAL;
- goto restore_ppp;
- }
-
- /* l2tp */
- skb_push(skb, sizeof(struct udp_l2tp_data_hdr));
- if (unlikely(!l2tpv2_offload_valid(skb))) {
- ret = -EINVAL;
- goto restore_l2tp;
- }
-
- params->tunnel.type = TOPS_TUNNEL_L2TP_V2;
-
- ret = l2tpv2_param_obtain_from_netdev(skb->dev, params);
- if (ret)
- goto restore_l2tp;
-
- ret = mtk_tops_transport_decap_param_setup(skb, params);
-
-restore_l2tp:
- skb_pull(skb, sizeof(struct udp_l2tp_data_hdr));
-
-restore_ppp:
- skb_pull(skb, sizeof(struct ppp_hdr));
-
- return ret;
-}
-
-static int l2tpv2_tnl_encap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- struct tops_l2tp_params *l2tpp;
- struct udp_l2tp_data_hdr *l2tp;
- struct udp_l2tp_data_hdr l2tph;
-
- if (unlikely(!l2tpv2_offload_valid(skb)))
- return -EINVAL;
-
- l2tp = skb_header_pointer(skb, 0, sizeof(struct udp_l2tp_data_hdr), &l2tph);
- if (unlikely(!l2tp))
- return -EINVAL;
-
- params->tunnel.type = TOPS_TUNNEL_L2TP_V2;
-
- l2tpp = &params->tunnel.l2tp;
- l2tpp->ul_tid = l2tp->tid;
- l2tpp->ul_sid = l2tp->sid;
-
- return 0;
-}
-
-static int l2tpv2_tnl_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params)
-{
- struct tops_l2tp_params *l2tpp;
- u16 ul_tid = 0;
- u16 ul_sid = 0;
- u16 dl_tid = 0;
- u16 dl_sid = 0;
- int nchar = 0;
- int ret;
-
- params->tunnel.type = TOPS_TUNNEL_L2TP_V2;
- l2tpp = &params->tunnel.l2tp;
-
- ret = sscanf(buf + *ofs, "%hu %hu %hu %hu %n",
- &ul_tid, &ul_sid, &dl_tid, &dl_sid, &nchar);
- if (ret != 4)
- return -EINVAL;
-
- l2tpp->ul_tid = htons(ul_tid);
- l2tpp->ul_sid = htons(ul_sid);
- l2tpp->dl_tid = htons(dl_tid);
- l2tpp->dl_sid = htons(dl_sid);
-
- *ofs += nchar;
-
- return 0;
-}
-
-static int l2tpv2_tnl_l2_param_update(struct sk_buff *skb,
- struct tops_params *params)
-{
- struct ethhdr *eth = eth_hdr(skb);
- struct tops_mac_params *mac = &params->mac;
-
- memcpy(&mac->eth.h_source, eth->h_source, sizeof(u8) * ETH_ALEN);
- memcpy(&mac->eth.h_dest, eth->h_dest, sizeof(u8) * ETH_ALEN);
-
- return 1;
-}
-
-static bool l2tpv2_tnl_decap_offloadable(struct sk_buff *skb)
-{
- struct iphdr *ip;
- bool ret = true;
- u32 ip_len;
-
- ip = ip_hdr(skb);
- if (ip->protocol != IPPROTO_UDP)
- return false;
-
- ip_len = ip_hdr(skb)->ihl * 4;
-
- skb_pull(skb, ip_len + sizeof(struct udphdr));
- if (!l2tpv2_offload_valid(skb)) {
- ret = false;
- goto restore_ip_udp;
- }
-
- skb_pull(skb, sizeof(struct udp_l2tp_data_hdr));
- if (!mtk_tops_ppp_valid(skb)) {
- ret = false;
- goto restore_l2tp;
- }
-
-restore_l2tp:
- skb_push(skb, sizeof(struct udp_l2tp_data_hdr));
-restore_ip_udp:
- skb_push(skb, ip_len + sizeof(struct udphdr));
-
- return ret;
-}
-
-static void l2tpv2_tnl_param_restore(struct tops_params *old, struct tops_params *new)
-{
- /* dl_tid and dl_sid are assigned at decap */
- if (old->tunnel.l2tp.dl_tid)
- new->tunnel.l2tp.dl_tid = old->tunnel.l2tp.dl_tid;
- if (old->tunnel.l2tp.dl_sid)
- new->tunnel.l2tp.dl_sid = old->tunnel.l2tp.dl_sid;
-
- if (old->tunnel.l2tp.ul_tid)
- new->tunnel.l2tp.ul_tid = old->tunnel.l2tp.ul_tid;
- if (old->tunnel.l2tp.ul_sid)
- new->tunnel.l2tp.ul_sid = old->tunnel.l2tp.ul_sid;
-}
-
-static bool l2tpv2_tnl_param_match(struct tops_params *p, struct tops_params *target)
-{
- /*
- * Only UL params are guaranteed to be valid for comparison, DL params
- * may be left empty if no DL traffic had passed yet.
- */
- return (p->tunnel.l2tp.ul_tid == target->tunnel.l2tp.ul_tid)
- && (p->tunnel.l2tp.ul_sid == target->tunnel.l2tp.ul_sid);
-}
-
-static void l2tpv2_tnl_param_dump(struct seq_file *s, struct tops_params *params)
-{
- struct tops_l2tp_params *l2tpp = &params->tunnel.l2tp;
-
- seq_puts(s, "\tTunnel Type: L2TPv2 ");
- seq_printf(s, "DL tunnel ID: %05u DL session ID: %05u ",
- ntohs(l2tpp->dl_tid), ntohs(l2tpp->dl_sid));
- seq_printf(s, "UL tunnel ID: %05u UL session ID: %05u\n",
- ntohs(l2tpp->ul_tid), ntohs(l2tpp->ul_sid));
-}
-
-static struct tops_tnl_type l2tpv2_type = {
- .type_name = "l2tpv2",
- .cls_entry_setup = l2tpv2_cls_entry_setup,
- .tnl_decap_param_setup = l2tpv2_tnl_decap_param_setup,
- .tnl_encap_param_setup = l2tpv2_tnl_encap_param_setup,
- .tnl_debug_param_setup = l2tpv2_tnl_debug_param_setup,
- .tnl_decap_offloadable = l2tpv2_tnl_decap_offloadable,
- .tnl_l2_param_update = l2tpv2_tnl_l2_param_update,
- .tnl_param_restore = l2tpv2_tnl_param_restore,
- .tnl_param_match = l2tpv2_tnl_param_match,
- .tnl_param_dump = l2tpv2_tnl_param_dump,
- .tnl_proto_type = TOPS_TUNNEL_L2TP_V2,
- .has_inner_eth = false,
-};
-
-int mtk_tops_l2tpv2_init(void)
-{
- return mtk_tops_tnl_type_register(&l2tpv2_type);
-}
-
-void mtk_tops_l2tpv2_deinit(void)
-{
- mtk_tops_tnl_type_unregister(&l2tpv2_type);
-}
diff --git a/feed/kernel/tops/src/protocol/tunnel/pptp/pptp.c b/feed/kernel/tops/src/protocol/tunnel/pptp/pptp.c
deleted file mode 100644
index 098852f..0000000
--- a/feed/kernel/tops/src/protocol/tunnel/pptp/pptp.c
+++ /dev/null
@@ -1,384 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- */
-
-#include <linux/if_pppox.h>
-#include <linux/ppp_channel.h>
-
-#include <net/gre.h>
-#include <net/pptp.h>
-#include <net/sock.h>
-
-#include <pce/cls.h>
-#include <pce/netsys.h>
-#include <pce/pce.h>
-
-#include "tops/netsys.h"
-#include "tops/protocol/mac/ppp.h"
-#include "tops/protocol/tunnel/pptp/pptp.h"
-#include "tops/seq_gen.h"
-#include "tops/tunnel.h"
-
-static int pptp_cls_entry_setup(struct tops_tnl_info *tnl_info,
- struct cls_desc *cdesc)
-{
- /*
- * If the system only has 1 PPE,
- * packets from any GDM will default forward to PPE0 first
- * If the system has 3 PPE,
- * packets from GDM1 will forward to PPE0
- * packets from GDM2 will forward to PPE1
- * packets from GDM3 will forward to PPE2
- */
- if (mtk_tops_netsys_ppe_get_num() == 1)
- CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE0);
- else
- CLS_DESC_DATA(cdesc, fport, PSE_PORT_PPE1);
-
- CLS_DESC_DATA(cdesc, tport_idx, 0x4);
- CLS_DESC_MASK_DATA(cdesc, tag, CLS_DESC_TAG_MASK, CLS_DESC_TAG_MATCH_L4_HDR);
- CLS_DESC_MASK_DATA(cdesc, dip_match, CLS_DESC_DIP_MATCH, CLS_DESC_DIP_MATCH);
- CLS_DESC_MASK_DATA(cdesc, l4_type, CLS_DESC_L4_TYPE_MASK, IPPROTO_GRE);
- CLS_DESC_MASK_DATA(cdesc, l4_udp_hdr_nez,
- CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK,
- CLS_DESC_UDPLITE_L4_HDR_NEZ_MASK);
- CLS_DESC_MASK_DATA(cdesc, l4_valid,
- CLS_DESC_L4_VALID_MASK,
- CLS_DESC_VALID_UPPER_HALF_WORD_BIT |
- CLS_DESC_VALID_LOWER_HALF_WORD_BIT);
- CLS_DESC_MASK_DATA(cdesc, l4_hdr_usr_data, 0x0000FFFF, 0x0000880B);
-
- return 0;
-}
-
-/*
- * If a sequence generator is already allocated for this tunnel (call_id),
- * return with seq_gen_idx set. Otherwise, allocate a new sequence generator
- * and set the starting sequence number.
- */
-static int pptp_get_seq_gen_idx(uint16_t call_id, uint32_t seq_start,
- int *seq_gen_idx)
-{
- int ret;
-
- ret = mtk_tops_pptp_seq_get_seq_gen_idx(call_id, seq_gen_idx);
- if (ret)
- ret = mtk_tops_pptp_seq_alloc(call_id, seq_start, seq_gen_idx);
-
- return ret;
-}
-
-static inline bool pptp_gre_offload_valid(struct sk_buff *skb)
-{
- struct pptp_gre_header *pptp_gre;
- struct pptp_gre_header pptp_greh;
-
- pptp_gre = skb_header_pointer(skb, 0, sizeof(struct pptp_gre_header), &pptp_greh);
- if (unlikely(!pptp_gre))
- return false;
-
- if (pptp_gre->gre_hd.protocol != GRE_PROTO_PPP
- || pptp_gre->payload_len < sizeof(struct ppp_hdr)
- || GRE_IS_CSUM(pptp_gre->gre_hd.flags) /* flag CSUM should be clear */
- || GRE_IS_ROUTING(pptp_gre->gre_hd.flags) /* flag ROUTING should be clear */
- || !GRE_IS_KEY(pptp_gre->gre_hd.flags) /* flag KEY should be set */
- || pptp_gre->gre_hd.flags & GRE_FLAGS) /* flag Recursion Ctrl should be clear */
- return false;
-
- return true;
-}
-
-static inline int pptp_gre_len_evaluate(struct sk_buff *skb)
-{
- static const int possible_greh_len[] = {
- sizeof(struct pptp_gre_header) - PPTP_GRE_HDR_ACK_LEN,
- sizeof(struct pptp_gre_header),
- };
- struct pptp_gre_header *pptp_gre;
- int pptp_gre_len;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(possible_greh_len); i++) {
- pptp_gre_len = possible_greh_len[i];
-
- skb_push(skb, pptp_gre_len);
- pptp_gre = (struct pptp_gre_header *)skb->data;
- skb_pull(skb, pptp_gre_len);
-
- if (pptp_gre->gre_hd.protocol == GRE_PROTO_PPP)
- return pptp_gre_len;
- }
-
- return -EINVAL;
-}
-
-static int pptp_tnl_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- struct pptp_gre_header *pptp_gre;
- struct pptp_gre_header pptp_greh;
- struct tops_pptp_params *pptpp;
- struct sock *sk;
- int pptp_gre_len;
- int ret = 0;
-
- /* ppp */
- skb_push(skb, sizeof(struct ppp_hdr));
- if (unlikely(!mtk_tops_ppp_valid(skb))) {
- ret = -EINVAL;
- goto restore_ppp;
- }
-
- /* pptp_gre */
- pptp_gre_len = pptp_gre_len_evaluate(skb);
- if (pptp_gre_len < 0) {
- ret = -EINVAL;
- goto restore_ppp;
- }
-
- skb_push(skb, pptp_gre_len);
- pptp_gre = skb_header_pointer(skb, 0, pptp_gre_len, &pptp_greh);
- if (unlikely(!pptp_gre)) {
- ret = -EINVAL;
- goto restore_pptp_gre;
- }
-
- if (unlikely(!pptp_gre_offload_valid(skb))) {
- ret = -EINVAL;
- goto restore_pptp_gre;
- }
-
- /*
- * In decap setup, dl_call_id is fetched from the skb and ul_call_id is
- * fetched from socket struct of ppp device.
- */
- sk = ppp_netdev_get_sock(skb->dev);
- if (IS_ERR(sk)) {
- ret = PTR_ERR(sk);
- goto restore_pptp_gre;
- }
-
- params->tunnel.type = TOPS_TUNNEL_PPTP;
- pptpp = &params->tunnel.pptp;
- pptpp->dl_call_id = pptp_gre->call_id;
- pptpp->ul_call_id = htons(pppox_sk(sk)->proto.pptp.dst_addr.call_id);
-
- ret = mtk_tops_network_decap_param_setup(skb, params);
-
-restore_pptp_gre:
- skb_pull(skb, pptp_gre_len);
-
-restore_ppp:
- skb_pull(skb, sizeof(struct ppp_hdr));
-
- return ret;
-}
-
-static int pptp_tnl_encap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- struct pptp_gre_header *pptp_gre;
- struct pptp_gre_header pptp_greh;
- struct tops_pptp_params *pptpp;
- uint32_t pptp_gre_len;
- int seq_gen_idx;
- int ret = 0;
-
- if (unlikely(!pptp_gre_offload_valid(skb)))
- return -EINVAL;
-
- pptp_gre = skb_header_pointer(skb, 0, sizeof(struct pptp_gre_header), &pptp_greh);
- if (unlikely(!pptp_gre))
- return -EINVAL;
-
- pptp_gre_len = sizeof(*pptp_gre);
- if (!(GRE_IS_ACK(pptp_gre->gre_hd.flags)))
- pptp_gre_len -= sizeof(pptp_gre->ack);
-
- skb_pull(skb, pptp_gre_len);
-
- /* check ppp */
- if (unlikely(!mtk_tops_ppp_valid(skb))) {
- ret = -EINVAL;
- goto restore_pptp_gre;
- }
-
- ret = pptp_get_seq_gen_idx(ntohs(pptp_gre->call_id),
- ntohl(pptp_gre->seq), &seq_gen_idx);
- if (ret)
- goto restore_pptp_gre;
-
- params->tunnel.type = TOPS_TUNNEL_PPTP;
- pptpp = &params->tunnel.pptp;
- pptpp->seq_gen_idx = (u8)seq_gen_idx;
- pptpp->ul_call_id = pptp_gre->call_id;
-
-restore_pptp_gre:
- skb_push(skb, pptp_gre_len);
-
- return ret;
-}
-
-static int pptp_debug_param_fetch_call_id(const char *buf, int *ofs, u16 *call_id)
-{
- int nchar = 0;
- int ret;
- u16 c = 0;
-
- ret = sscanf(buf + *ofs, "%hu %n", &c, &nchar);
- if (ret != 1)
- return -EPERM;
-
- *call_id = htons(c);
-
- *ofs += nchar;
-
- return 0;
-}
-
-static int pptp_tnl_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params)
-{
- struct tops_pptp_params *pptpp;
- int seq_gen_idx;
- int ret;
-
- pptpp = &params->tunnel.pptp;
-
- ret = pptp_debug_param_fetch_call_id(buf, ofs, &pptpp->ul_call_id);
- if (ret)
- return ret;
-
- ret = pptp_debug_param_fetch_call_id(buf, ofs, &pptpp->dl_call_id);
- if (ret)
- return ret;
-
- ret = pptp_get_seq_gen_idx(ntohs(pptpp->ul_call_id), 0, &seq_gen_idx);
- if (ret)
- return ret;
-
- pptpp->seq_gen_idx = (u8)seq_gen_idx;
-
- return 0;
-}
-
-static bool pptp_tnl_decap_offloadable(struct sk_buff *skb)
-{
- struct pptp_gre_header *pptp_gre;
- struct pptp_gre_header pptp_greh;
- struct iphdr *ip;
- int pptp_gre_len;
- int ip_len;
- bool ret = true;
-
- /* check ip */
- ip = ip_hdr(skb);
- if (ip->protocol != IPPROTO_GRE)
- return false;
-
- ip_len = ip_hdr(skb)->ihl * 4;
-
- skb_pull(skb, ip_len);
-
- /* check gre */
- if (!pptp_gre_offload_valid(skb)) {
- ret = false;
- goto restore_ip;
- }
-
- pptp_gre = skb_header_pointer(skb, 0, sizeof(struct pptp_gre_header), &pptp_greh);
- if (unlikely(!pptp_gre)) {
- ret = false;
- goto restore_ip;
- }
-
- pptp_gre_len = sizeof(*pptp_gre);
- if (!(GRE_IS_ACK(pptp_gre->gre_hd.flags)))
- pptp_gre_len -= sizeof(pptp_gre->ack);
-
- skb_pull(skb, pptp_gre_len);
-
- /* check ppp */
- if (unlikely(!mtk_tops_ppp_valid(skb))) {
- ret = false;
- goto restore_pptp_gre;
- }
-
-restore_pptp_gre:
- skb_push(skb, pptp_gre_len);
-
-restore_ip:
- skb_push(skb, ip_len);
-
- return ret;
-}
-
-static void pptp_tnl_param_restore(struct tops_params *old, struct tops_params *new)
-{
- /* dl_call_id is assigned at decap */
- if (old->tunnel.pptp.dl_call_id)
- new->tunnel.pptp.dl_call_id = old->tunnel.pptp.dl_call_id;
-
- if (old->tunnel.pptp.ul_call_id)
- new->tunnel.pptp.ul_call_id = old->tunnel.pptp.ul_call_id;
-
- /* seq_gen_idx is assigned at encap */
- if (old->tunnel.pptp.seq_gen_idx)
- new->tunnel.pptp.seq_gen_idx = old->tunnel.pptp.seq_gen_idx;
-}
-
-static bool pptp_tnl_param_match(struct tops_params *p, struct tops_params *target)
-{
- /*
- * Only ul_call_id is guaranteed to be valid for comparison, dl_call_id
- * may be left empty if no DL traffic had passed yet.
- */
- return p->tunnel.pptp.ul_call_id == target->tunnel.pptp.ul_call_id;
-}
-
-static void pptp_tnl_param_dump(struct seq_file *s, struct tops_params *params)
-{
- struct tops_pptp_params *pptpp = &params->tunnel.pptp;
-
- seq_puts(s, "\tTunnel Type: PPTP ");
- seq_printf(s, "DL Call ID: %05u UL Call ID: %05u SEQ_GEN_IDX: %05u\n",
- ntohs(pptpp->dl_call_id), ntohs(pptpp->ul_call_id),
- pptpp->seq_gen_idx);
-}
-
-static struct tops_tnl_type pptp_type = {
- .type_name = "pptp",
- .cls_entry_setup = pptp_cls_entry_setup,
- .tnl_decap_param_setup = pptp_tnl_decap_param_setup,
- .tnl_encap_param_setup = pptp_tnl_encap_param_setup,
- .tnl_debug_param_setup = pptp_tnl_debug_param_setup,
- .tnl_decap_offloadable = pptp_tnl_decap_offloadable,
- .tnl_param_restore = pptp_tnl_param_restore,
- .tnl_param_match = pptp_tnl_param_match,
- .tnl_param_dump = pptp_tnl_param_dump,
- .tnl_proto_type = TOPS_TUNNEL_PPTP,
- .has_inner_eth = false,
-};
-
-int mtk_tops_pptp_init(void)
-{
- int ret = 0;
-
- ret = mtk_tops_tnl_type_register(&pptp_type);
- if (ret)
- return ret;
-
- mtk_tops_pptp_seq_init();
-
- return ret;
-}
-
-void mtk_tops_pptp_deinit(void)
-{
- mtk_tops_pptp_seq_deinit();
-
- mtk_tops_tnl_type_unregister(&pptp_type);
-}
diff --git a/feed/kernel/tops/src/protocol/tunnel/pptp/pptp_seq.c b/feed/kernel/tops/src/protocol/tunnel/pptp/pptp_seq.c
deleted file mode 100644
index ff61cc8..0000000
--- a/feed/kernel/tops/src/protocol/tunnel/pptp/pptp_seq.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- */
-
-#include <linux/hashtable.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-
-#include <net/pptp.h>
-
-#include "tops/hwspinlock.h"
-#include "tops/seq_gen.h"
-
-#define PPTP_SEQ_HT_BITS 4
-
-struct pptp_seq {
- struct hlist_node hlist;
- int seq_gen_idx;
- uint16_t call_id;
-};
-
-static DEFINE_HASHTABLE(pptp_seq_ht, PPTP_SEQ_HT_BITS);
-static DEFINE_SPINLOCK(pptp_seq_ht_lock);
-
-static struct pptp_seq *mtk_tops_pptp_seq_find_no_lock(uint16_t call_id)
-{
- struct pptp_seq *pptp_seq;
-
- hash_for_each_possible(pptp_seq_ht, pptp_seq, hlist, call_id) {
- if (pptp_seq->call_id == call_id)
- return pptp_seq;
- }
-
- return ERR_PTR(-ENODEV);
-}
-
-static int mtk_tops_pptp_seq_alloc_no_lock(uint16_t call_id, uint32_t seq_start,
- int *seq_gen_idx)
-{
- struct pptp_seq *pptp_seq;
- int ret;
-
- if (!IS_ERR(mtk_tops_pptp_seq_find_no_lock(call_id)))
- return -EBUSY;
-
- ret = mtk_tops_seq_gen_alloc(seq_gen_idx);
- if (ret)
- return ret;
-
- pptp_seq = kzalloc(sizeof(struct pptp_seq), GFP_KERNEL);
- if (!pptp_seq) {
- mtk_tops_seq_gen_free(*seq_gen_idx);
- return -ENOMEM;
- }
-
- pptp_seq->seq_gen_idx = *seq_gen_idx;
- pptp_seq->call_id = call_id;
- hash_add(pptp_seq_ht, &pptp_seq->hlist, pptp_seq->call_id);
-
- mtk_tops_seq_gen_set_32(*seq_gen_idx, seq_start);
-
- return 0;
-}
-
-int mtk_tops_pptp_seq_alloc(uint16_t call_id, uint32_t seq_start,
- int *seq_gen_idx)
-{
- unsigned long flag;
- int ret;
-
- spin_lock_irqsave(&pptp_seq_ht_lock, flag);
-
- ret = mtk_tops_pptp_seq_alloc_no_lock(call_id, seq_start, seq_gen_idx);
-
- spin_unlock_irqrestore(&pptp_seq_ht_lock, flag);
-
- return ret;
-}
-
-static void mtk_tops_pptp_seq_free_no_lock(uint16_t call_id)
-{
- struct pptp_seq *pptp_seq;
-
- pptp_seq = mtk_tops_pptp_seq_find_no_lock(call_id);
- if (IS_ERR(pptp_seq))
- return;
-
- mtk_tops_seq_gen_free(pptp_seq->seq_gen_idx);
- hash_del(&pptp_seq->hlist);
- kfree(pptp_seq);
-}
-
-void mtk_tops_pptp_seq_free(uint16_t call_id)
-{
- unsigned long flag;
-
- spin_lock_irqsave(&pptp_seq_ht_lock, flag);
-
- mtk_tops_pptp_seq_free_no_lock(call_id);
-
- spin_unlock_irqrestore(&pptp_seq_ht_lock, flag);
-}
-
-static int mtk_tops_pptp_seq_next_no_lock(uint16_t call_id, uint32_t *val)
-{
- struct pptp_seq *pptp_seq;
-
- pptp_seq = mtk_tops_pptp_seq_find_no_lock(call_id);
- if (IS_ERR(pptp_seq))
- return -EINVAL;
-
- return mtk_tops_seq_gen_next_32(pptp_seq->seq_gen_idx, val);
-}
-
-static int mtk_tops_pptp_seq_next(uint16_t call_id, uint32_t *val)
-{
- unsigned long flag;
- int ret;
-
- spin_lock_irqsave(&pptp_seq_ht_lock, flag);
-
- mtk_tops_hwspin_lock(HWSPINLOCK_GROUP_CLUST,
- HWSPINLOCK_CLUST_SLOT_PPTP_SEQ);
-
- ret = mtk_tops_pptp_seq_next_no_lock(call_id, val);
-
- mtk_tops_hwspin_unlock(HWSPINLOCK_GROUP_CLUST,
- HWSPINLOCK_CLUST_SLOT_PPTP_SEQ);
-
- spin_unlock_irqrestore(&pptp_seq_ht_lock, flag);
-
- return ret;
-}
-
-static int mtk_tops_pptp_seq_get_seq_gen_idx_no_lock(uint16_t call_id,
- int *seq_gen_idx)
-{
- struct pptp_seq *pptp_seq;
-
- pptp_seq = mtk_tops_pptp_seq_find_no_lock(call_id);
- if (IS_ERR(pptp_seq))
- return -EINVAL;
-
- *seq_gen_idx = pptp_seq->seq_gen_idx;
-
- return 0;
-}
-
-int mtk_tops_pptp_seq_get_seq_gen_idx(uint16_t call_id, int *seq_gen_idx)
-{
- unsigned long flag;
- int ret;
-
- spin_lock_irqsave(&pptp_seq_ht_lock, flag);
-
- ret = mtk_tops_pptp_seq_get_seq_gen_idx_no_lock(call_id, seq_gen_idx);
-
- spin_unlock_irqrestore(&pptp_seq_ht_lock, flag);
-
- return ret;
-}
-
-void mtk_tops_pptp_seq_init(void)
-{
- mtk_pptp_seq_next = mtk_tops_pptp_seq_next;
-}
-
-void mtk_tops_pptp_seq_deinit(void)
-{
- mtk_pptp_seq_next = NULL;
-}
diff --git a/feed/kernel/tops/src/seq_gen.c b/feed/kernel/tops/src/seq_gen.c
deleted file mode 100644
index b9ac9e5..0000000
--- a/feed/kernel/tops/src/seq_gen.c
+++ /dev/null
@@ -1,219 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
- */
-
-#include <linux/io.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include "tops/internal.h"
-#include "tops/seq_gen.h"
-
-#define SEQ_GEN_L(idx) (TOPS_SEQ_GEN_BASE + (idx) * 0x20)
-#define SEQ_GEN_H(idx) (TOPS_SEQ_GEN_BASE + (idx) * 0x20 + 0x10)
-
-static void __iomem *base;
-
-static DECLARE_BITMAP(seq_gen_used, TOPS_SEQ_GEN_IDX_MAX);
-static DEFINE_SPINLOCK(seq_gen_used_lock);
-
-static inline u32 seq_gen_read(u32 reg)
-{
- return readl(base + reg);
-}
-
-static inline void seq_gen_write(u32 reg, u32 val)
-{
- writel(val, base + reg);
-}
-
-static inline int seq_gen_read_16(u32 seq_gen_idx, u16 *val)
-{
- if (seq_gen_idx >= TOPS_SEQ_GEN_IDX_MAX)
- return -EINVAL;
-
- *val = (u16)seq_gen_read(SEQ_GEN_L(seq_gen_idx));
-
- return 0;
-}
-
-static inline void seq_gen_write_16(u32 seq_gen_idx, u16 val)
-{
- if (seq_gen_idx >= TOPS_SEQ_GEN_IDX_MAX)
- return;
-
- seq_gen_write(SEQ_GEN_L(seq_gen_idx), (u32)val);
-}
-
-static inline int seq_gen_read_32(u32 seq_gen_idx, u32 *val)
-{
- u32 val_h, val_l;
-
- if (seq_gen_idx >= TOPS_SEQ_GEN_IDX_MAX)
- return -EINVAL;
-
- val_l = seq_gen_read(SEQ_GEN_L(seq_gen_idx));
- val_h = seq_gen_read(SEQ_GEN_H(seq_gen_idx));
-
- if (val_l != 0xFFFF)
- seq_gen_write(SEQ_GEN_H(seq_gen_idx), val_h);
-
- *val = (val_h << 16) | val_l;
-
- return 0;
-}
-
-static inline void seq_gen_write_32(u32 seq_gen_idx, u32 val)
-{
- if (seq_gen_idx >= TOPS_SEQ_GEN_IDX_MAX)
- return;
-
- seq_gen_write(SEQ_GEN_L(seq_gen_idx), (val & 0xFFFF));
- seq_gen_write(SEQ_GEN_H(seq_gen_idx), (val >> 16));
-}
-
-static void mtk_tops_seq_gen_set_16_no_lock(int seq_gen_idx, u16 val)
-{
- if (unlikely(!test_bit(seq_gen_idx, seq_gen_used)))
- return;
-
- seq_gen_write_16(seq_gen_idx, val);
-}
-
-static void mtk_tops_seq_gen_set_32_no_lock(int seq_gen_idx, u32 val)
-{
- if (unlikely(!test_bit(seq_gen_idx, seq_gen_used)))
- return;
-
- seq_gen_write_32(seq_gen_idx, val);
-}
-
-static int mtk_tops_seq_gen_next_16_no_lock(int seq_gen_idx, u16 *val)
-{
- if (unlikely(!val || !test_bit(seq_gen_idx, seq_gen_used)))
- return -EINVAL;
-
- return seq_gen_read_16(seq_gen_idx, val);
-}
-
-static int mtk_tops_seq_gen_next_32_no_lock(int seq_gen_idx, u32 *val)
-{
- if (unlikely(!val || !test_bit(seq_gen_idx, seq_gen_used)))
- return -EINVAL;
-
- return seq_gen_read_32(seq_gen_idx, val);
-}
-
-void mtk_tops_seq_gen_set_16(int seq_gen_idx, u16 val)
-{
- unsigned long flag;
-
- spin_lock_irqsave(&seq_gen_used_lock, flag);
-
- mtk_tops_seq_gen_set_16_no_lock(seq_gen_idx, val);
-
- spin_unlock_irqrestore(&seq_gen_used_lock, flag);
-}
-
-int mtk_tops_seq_gen_next_16(int seq_gen_idx, u16 *val)
-{
- unsigned long flag;
- int ret;
-
- spin_lock_irqsave(&seq_gen_used_lock, flag);
-
- ret = mtk_tops_seq_gen_next_16_no_lock(seq_gen_idx, val);
-
- spin_unlock_irqrestore(&seq_gen_used_lock, flag);
-
- return ret;
-}
-
-void mtk_tops_seq_gen_set_32(int seq_gen_idx, u32 val)
-{
- unsigned long flag;
-
- spin_lock_irqsave(&seq_gen_used_lock, flag);
-
- mtk_tops_seq_gen_set_32_no_lock(seq_gen_idx, val);
-
- spin_unlock_irqrestore(&seq_gen_used_lock, flag);
-}
-
-int mtk_tops_seq_gen_next_32(int seq_gen_idx, u32 *val)
-{
- unsigned long flag;
- int ret;
-
- spin_lock_irqsave(&seq_gen_used_lock, flag);
-
- ret = mtk_tops_seq_gen_next_32_no_lock(seq_gen_idx, val);
-
- spin_unlock_irqrestore(&seq_gen_used_lock, flag);
-
- return ret;
-}
-
-static int mtk_tops_seq_gen_alloc_no_lock(int *seq_gen_idx)
-{
- if (!seq_gen_idx)
- return -EINVAL;
-
- *seq_gen_idx = find_first_zero_bit(seq_gen_used, TOPS_SEQ_GEN_IDX_MAX);
- if (*seq_gen_idx == TOPS_SEQ_GEN_IDX_MAX) {
- TOPS_NOTICE("Sequence generator exhausted\n");
- return -ENOMEM;
- }
-
- set_bit(*seq_gen_idx, seq_gen_used);
-
- return 0;
-}
-
-int mtk_tops_seq_gen_alloc(int *seq_gen_idx)
-{
- unsigned long flag;
- int ret;
-
- spin_lock_irqsave(&seq_gen_used_lock, flag);
-
- ret = mtk_tops_seq_gen_alloc_no_lock(seq_gen_idx);
-
- spin_unlock_irqrestore(&seq_gen_used_lock, flag);
-
- return ret;
-}
-
-static void mtk_tops_seq_gen_free_no_lock(int seq_gen_idx)
-{
- clear_bit(seq_gen_idx, seq_gen_used);
-}
-
-void mtk_tops_seq_gen_free(int seq_gen_idx)
-{
- unsigned long flag = 0;
-
- spin_lock_irqsave(&seq_gen_used_lock, flag);
-
- mtk_tops_seq_gen_free_no_lock(seq_gen_idx);
-
- spin_unlock_irqrestore(&seq_gen_used_lock, flag);
-}
-
-int mtk_tops_seq_gen_init(struct platform_device *pdev)
-{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
- if (!res)
- return -ENXIO;
-
- base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!base)
- return -ENOMEM;
-
- return 0;
-}
diff --git a/feed/kernel/tops/src/ser.c b/feed/kernel/tops/src/ser.c
deleted file mode 100644
index 0a0fa6c..0000000
--- a/feed/kernel/tops/src/ser.c
+++ /dev/null
@@ -1,158 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- */
-
-#include <linux/err.h>
-#include <linux/workqueue.h>
-#include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
-
-#include "tops/internal.h"
-#include "tops/net-event.h"
-#include "tops/ser.h"
-#include "tops/trm.h"
-
-struct tops_ser {
- struct work_struct work;
- struct tops_ser_params ser_params;
- spinlock_t params_lock;
-};
-
-struct tops_ser tops_ser;
-
-static inline void __mtk_tops_ser_cmd_clear(void)
-{
- memset(&tops_ser.ser_params, 0, sizeof(struct tops_ser_params));
- tops_ser.ser_params.type = __TOPS_SER_TYPE_MAX;
-}
-
-static inline void mtk_tops_ser_cmd_clear(void)
-{
- unsigned long flag;
-
- spin_lock_irqsave(&tops_ser.params_lock, flag);
-
- __mtk_tops_ser_cmd_clear();
-
- spin_unlock_irqrestore(&tops_ser.params_lock, flag);
-}
-
-static void mtk_tops_ser_setup_mcmd(struct tops_ser_params *ser_params,
- struct mcu_ctrl_cmd *mcmd)
-{
- memset(mcmd, 0, sizeof(struct mcu_ctrl_cmd));
-
- switch (ser_params->type) {
- case TOPS_SER_NETSYS_FE_RST:
- mcmd->e = MCU_EVENT_TYPE_FE_RESET;
- break;
- case TOPS_SER_WDT_TO:
- mcmd->e = MCU_EVENT_TYPE_WDT_TIMEOUT;
- break;
- default:
- TOPS_ERR("unsupport TOPS SER type: %u\n", ser_params->type);
- return;
- }
-
- if (ser_params->ser_mcmd_setup)
- ser_params->ser_mcmd_setup(ser_params, mcmd);
-}
-
-static void mtk_tops_ser_reset_callback(void *params)
-{
- struct tops_ser_params *ser_params = params;
-
- if (ser_params->ser_callback)
- ser_params->ser_callback(ser_params);
-}
-
-static void mtk_tops_ser_work(struct work_struct *work)
-{
- struct tops_ser_params ser_params;
- struct mcu_ctrl_cmd mcmd;
- unsigned long flag = 0;
-
- spin_lock_irqsave(&tops_ser.params_lock, flag);
-
- while (tops_ser.ser_params.type != __TOPS_SER_TYPE_MAX) {
- memcpy(&ser_params,
- &tops_ser.ser_params,
- sizeof(struct tops_ser_params));
-
- spin_unlock_irqrestore(&tops_ser.params_lock, flag);
-
- mtk_tops_ser_setup_mcmd(&ser_params, &mcmd);
-
- if (mtk_tops_mcu_reset(&mcmd,
- mtk_tops_ser_reset_callback,
- &ser_params)) {
- TOPS_INFO("SER type: %u failed to recover\n",
- ser_params.type);
- /*
- * TODO: check is OK to directly return
- * since mcu state machine should handle
- * state transition failed?
- */
- mtk_tops_ser_cmd_clear();
- return;
- }
-
- TOPS_INFO("SER type: %u successfully recovered\n", ser_params.type);
-
- spin_lock_irqsave(&tops_ser.params_lock, flag);
- /*
- * If there isn't queued any other SER cmd that has higher priority
- * than current SER command, clear SER command and exit.
- * Otherwise let the work perform reset again for high priority SER.
- */
- if (tops_ser.ser_params.type > ser_params.type
- || !memcmp(&tops_ser.ser_params, &ser_params,
- sizeof(struct tops_ser_params)))
- __mtk_tops_ser_cmd_clear();
- }
-
- spin_unlock_irqrestore(&tops_ser.params_lock, flag);
-}
-
-int mtk_tops_ser(struct tops_ser_params *ser_params)
-{
- unsigned long flag;
-
- if (!ser_params)
- return -EINVAL;
-
- spin_lock_irqsave(&tops_ser.params_lock, flag);
-
- /* higher SER type should not override lower SER type */
- if (tops_ser.ser_params.type != __TOPS_SER_TYPE_MAX
- && tops_ser.ser_params.type < ser_params->type)
- goto unlock;
-
- memcpy(&tops_ser.ser_params, ser_params, sizeof(*ser_params));
-
- schedule_work(&tops_ser.work);
-
-unlock:
- spin_unlock_irqrestore(&tops_ser.params_lock, flag);
-
- return 0;
-}
-
-int mtk_tops_ser_init(struct platform_device *pdev)
-{
- INIT_WORK(&tops_ser.work, mtk_tops_ser_work);
-
- spin_lock_init(&tops_ser.params_lock);
-
- tops_ser.ser_params.type = __TOPS_SER_TYPE_MAX;
-
- return 0;
-}
-
-int mtk_tops_ser_deinit(struct platform_device *pdev)
-{
- return 0;
-}
diff --git a/feed/kernel/tops/src/tdma.c b/feed/kernel/tops/src/tdma.c
deleted file mode 100644
index 7eff39f..0000000
--- a/feed/kernel/tops/src/tdma.c
+++ /dev/null
@@ -1,476 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-#include "tops/internal.h"
-#include "tops/mbox.h"
-#include "tops/mcu.h"
-#include "tops/tdma.h"
-#include "tops/tops.h"
-#include "tops/trm.h"
-
-/* TDMA dump length */
-#define TDMA_BASE_LEN (0x400)
-
-static int tdma_trm_hw_dump(void *dst, u32 start_addr, u32 len);
-
-struct tdma_hw {
- void __iomem *base;
- u32 start_ring;
-
- struct mailbox_dev mgmt_mdev;
- struct mailbox_dev offload_mdev[CORE_OFFLOAD_NUM];
-};
-
-struct tdma_hw tdma = {
- .mgmt_mdev = MBOX_SEND_MGMT_DEV(NET),
- .offload_mdev = {
- [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, NET),
- [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, NET),
- [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, NET),
- [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, NET),
- },
-};
-
-static inline void tdma_write(u32 reg, u32 val)
-{
- writel(val, tdma.base + reg);
-}
-
-static inline void tdma_set(u32 reg, u32 mask)
-{
- setbits(tdma.base + reg, mask);
-}
-
-static inline void tdma_clr(u32 reg, u32 mask)
-{
- clrbits(tdma.base + reg, mask);
-}
-
-static inline void tdma_rmw(u32 reg, u32 mask, u32 val)
-{
- clrsetbits(tdma.base + reg, mask, val);
-}
-
-static inline u32 tdma_read(u32 reg)
-{
- return readl(tdma.base + reg);
-}
-
-static struct trm_config tdma_trm_configs[] = {
- {
- TRM_CFG_EN("netsys-tdma",
- TDMA_BASE, TDMA_BASE_LEN,
- 0x0, TDMA_BASE_LEN,
- 0)
- },
-};
-
-static struct trm_hw_config tdma_trm_hw_cfg = {
- .trm_cfgs = tdma_trm_configs,
- .cfg_len = ARRAY_SIZE(tdma_trm_configs),
- .trm_hw_dump = tdma_trm_hw_dump,
-};
-
-static int tdma_trm_hw_dump(void *dst, u32 start_addr, u32 len)
-{
- u32 ofs;
-
- if (unlikely(!dst))
- return -ENODEV;
-
- for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
- writel(tdma_read(start_addr + ofs), dst + ofs);
-
- return 0;
-}
-
-static inline void tdma_prefetch_enable(bool en)
-{
- if (en) {
- tdma_set(TDMA_PREF_TX_CFG, PREF_EN);
- tdma_set(TDMA_PREF_RX_CFG, PREF_EN);
- } else {
- /* wait for prefetch idle */
- while ((tdma_read(TDMA_PREF_TX_CFG) & PREF_BUSY)
- || (tdma_read(TDMA_PREF_RX_CFG) & PREF_BUSY))
- ;
-
- tdma_write(TDMA_PREF_TX_CFG,
- tdma_read(TDMA_PREF_TX_CFG) & (~PREF_EN));
- tdma_write(TDMA_PREF_RX_CFG,
- tdma_read(TDMA_PREF_RX_CFG) & (~PREF_EN));
- }
-}
-
-static inline void tdma_writeback_enable(bool en)
-{
- if (en) {
- tdma_set(TDMA_WRBK_TX_CFG, WRBK_EN);
- tdma_set(TDMA_WRBK_RX_CFG, WRBK_EN);
- } else {
- /* wait for write back idle */
- while ((tdma_read(TDMA_WRBK_TX_CFG) & WRBK_BUSY)
- || (tdma_read(TDMA_WRBK_RX_CFG) & WRBK_BUSY))
- ;
-
- tdma_write(TDMA_WRBK_TX_CFG,
- tdma_read(TDMA_WRBK_TX_CFG) & (~WRBK_EN));
- tdma_write(TDMA_WRBK_RX_CFG,
- tdma_read(TDMA_WRBK_RX_CFG) & (~WRBK_EN));
- }
-}
-
-static inline void tdma_assert_prefetch_reset(bool en)
-{
- if (en) {
- tdma_set(TDMA_PREF_TX_FIFO_CFG0, PREF_TX_RING0_CLEAR);
- tdma_set(TDMA_PREF_RX_FIFO_CFG0,
- PREF_RX_RINGX_CLEAR(0) | PREF_RX_RINGX_CLEAR(1));
- tdma_set(TDMA_PREF_RX_FIFO_CFG1,
- PREF_RX_RINGX_CLEAR(2) | PREF_RX_RINGX_CLEAR(3));
- } else {
- tdma_clr(TDMA_PREF_TX_FIFO_CFG0, PREF_TX_RING0_CLEAR);
- tdma_clr(TDMA_PREF_RX_FIFO_CFG0,
- PREF_RX_RINGX_CLEAR(0) | PREF_RX_RINGX_CLEAR(1));
- tdma_clr(TDMA_PREF_RX_FIFO_CFG1,
- PREF_RX_RINGX_CLEAR(2) | PREF_RX_RINGX_CLEAR(3));
- }
-}
-
-static inline void tdma_assert_fifo_reset(bool en)
-{
- if (en) {
- tdma_set(TDMA_TX_XDMA_FIFO_CFG0,
- (PAR_FIFO_CLEAR
- | CMD_FIFO_CLEAR
- | DMAD_FIFO_CLEAR
- | ARR_FIFO_CLEAR));
- tdma_set(TDMA_RX_XDMA_FIFO_CFG0,
- (PAR_FIFO_CLEAR
- | CMD_FIFO_CLEAR
- | DMAD_FIFO_CLEAR
- | ARR_FIFO_CLEAR
- | LEN_FIFO_CLEAR
- | WID_FIFO_CLEAR
- | BID_FIFO_CLEAR));
- } else {
- tdma_clr(TDMA_TX_XDMA_FIFO_CFG0,
- (PAR_FIFO_CLEAR
- | CMD_FIFO_CLEAR
- | DMAD_FIFO_CLEAR
- | ARR_FIFO_CLEAR));
- tdma_clr(TDMA_RX_XDMA_FIFO_CFG0,
- (PAR_FIFO_CLEAR
- | CMD_FIFO_CLEAR
- | DMAD_FIFO_CLEAR
- | ARR_FIFO_CLEAR
- | LEN_FIFO_CLEAR
- | WID_FIFO_CLEAR
- | BID_FIFO_CLEAR));
- }
-}
-
-static inline void tdma_assert_writeback_reset(bool en)
-{
- if (en) {
- tdma_set(TDMA_WRBK_TX_FIFO_CFG0, WRBK_RING_CLEAR);
- tdma_set(TDMA_WRBK_RX_FIFO_CFGX(0), WRBK_RING_CLEAR);
- tdma_set(TDMA_WRBK_RX_FIFO_CFGX(1), WRBK_RING_CLEAR);
- tdma_set(TDMA_WRBK_RX_FIFO_CFGX(2), WRBK_RING_CLEAR);
- tdma_set(TDMA_WRBK_RX_FIFO_CFGX(3), WRBK_RING_CLEAR);
- } else {
- tdma_clr(TDMA_WRBK_TX_FIFO_CFG0, WRBK_RING_CLEAR);
- tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(0), WRBK_RING_CLEAR);
- tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(1), WRBK_RING_CLEAR);
- tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(2), WRBK_RING_CLEAR);
- tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(3), WRBK_RING_CLEAR);
- }
-}
-
-static inline void tdma_assert_prefetch_ring_reset(bool en)
-{
- if (en) {
- tdma_set(TDMA_PREF_SIDX_CFG,
- (TX_RING0_SIDX_CLR
- | RX_RINGX_SIDX_CLR(0)
- | RX_RINGX_SIDX_CLR(1)
- | RX_RINGX_SIDX_CLR(2)
- | RX_RINGX_SIDX_CLR(3)));
- } else {
- tdma_clr(TDMA_PREF_SIDX_CFG,
- (TX_RING0_SIDX_CLR
- | RX_RINGX_SIDX_CLR(0)
- | RX_RINGX_SIDX_CLR(1)
- | RX_RINGX_SIDX_CLR(2)
- | RX_RINGX_SIDX_CLR(3)));
- }
-}
-
-static inline void tdma_assert_writeback_ring_reset(bool en)
-{
- if (en) {
- tdma_set(TDMA_WRBK_SIDX_CFG,
- (TX_RING0_SIDX_CLR
- | RX_RINGX_SIDX_CLR(0)
- | RX_RINGX_SIDX_CLR(1)
- | RX_RINGX_SIDX_CLR(2)
- | RX_RINGX_SIDX_CLR(3)));
- } else {
- tdma_clr(TDMA_WRBK_SIDX_CFG,
- (TX_RING0_SIDX_CLR
- | RX_RINGX_SIDX_CLR(0)
- | RX_RINGX_SIDX_CLR(1)
- | RX_RINGX_SIDX_CLR(2)
- | RX_RINGX_SIDX_CLR(3)));
- }
-}
-
-static void mtk_tops_tdma_retrieve_last_state(void)
-{
- tdma.start_ring = tdma_read(TDMA_TX_CTX_IDX_0);
-}
-
-void mtk_tops_tdma_record_last_state(void)
-{
- tdma_write(TDMA_TX_CTX_IDX_0, tdma.start_ring);
-}
-
-static void tdma_get_next_rx_ring(void)
-{
- u32 pkt_num_per_core = tdma_read(TDMA_RX_MAX_CNT_X(0));
- u32 ring[TDMA_RING_NUM] = {0};
- u32 start = 0;
- u32 tmp_idx;
- u32 i;
-
- for (i = 0; i < TDMA_RING_NUM; i++) {
- tmp_idx = (tdma.start_ring + i) % TDMA_RING_NUM;
- ring[i] = tdma_read(TDMA_RX_DRX_IDX_X(tmp_idx));
- }
-
- for (i = 1; i < TDMA_RING_NUM; i++) {
- if (ring[i] >= (pkt_num_per_core - 1) && !ring[i - 1])
- ring[i - 1] += pkt_num_per_core;
-
- if (!ring[i] && ring[i - 1] >= (pkt_num_per_core - 1))
- ring[i] = pkt_num_per_core;
-
- if (ring[i] < ring[i - 1])
- start = i;
- }
-
- tdma.start_ring = (tdma.start_ring + start) & TDMA_RING_NUM_MOD;
-}
-
-void mtk_tops_tdma_reset(void)
-{
- if (!mtk_tops_mcu_netsys_fe_rst())
- /* get next start Rx ring if TDMA reset without NETSYS FE reset */
- tdma_get_next_rx_ring();
- else
- /*
- * NETSYS FE reset will restart CDM ring index
- * so we don't need to calculate next ring index
- */
- tdma.start_ring = 0;
-
- /* then start reset TDMA */
- tdma_assert_prefetch_reset(true);
- tdma_assert_prefetch_reset(false);
-
- tdma_assert_fifo_reset(true);
- tdma_assert_fifo_reset(false);
-
- tdma_assert_writeback_reset(true);
- tdma_assert_writeback_reset(false);
-
- /* reset tdma ring */
- tdma_set(TDMA_RST_IDX,
- (RST_DTX_IDX_0
- | RST_DRX_IDX_X(0)
- | RST_DRX_IDX_X(1)
- | RST_DRX_IDX_X(2)
- | RST_DRX_IDX_X(3)));
-
- tdma_assert_prefetch_ring_reset(true);
- tdma_assert_prefetch_ring_reset(false);
-
- tdma_assert_writeback_ring_reset(true);
- tdma_assert_writeback_ring_reset(false);
-
- /* TODO: should we reset Tx/Rx CPU ring index? */
-}
-
-int mtk_tops_tdma_enable(void)
-{
- struct mailbox_msg msg = {
- .msg1 = TOPS_NET_CMD_START,
- .msg2 = tdma.start_ring,
- };
- int ret;
- u32 i;
-
- tdma_prefetch_enable(true);
-
- tdma_set(TDMA_GLO_CFG0, RX_DMA_EN | TX_DMA_EN);
-
- tdma_writeback_enable(true);
-
- /* notify TOPS start network processing */
- ret = mbox_send_msg_no_wait(&tdma.mgmt_mdev, &msg);
- if (unlikely(ret))
- return ret;
-
- for (i = CORE_OFFLOAD_0; i < CORE_OFFLOAD_NUM; i++) {
- ret = mbox_send_msg_no_wait(&tdma.offload_mdev[i], &msg);
- if (unlikely(ret))
- return ret;
- }
-
- return ret;
-}
-
-void mtk_tops_tdma_disable(void)
-{
- struct mailbox_msg msg = {
- .msg1 = TOPS_NET_CMD_STOP,
- };
- u32 i;
-
- if (mtk_tops_mcu_bring_up_done()) {
- /* notify TOPS stop network processing */
- if (unlikely(mbox_send_msg_no_wait(&tdma.mgmt_mdev, &msg)))
- return;
-
- for (i = CORE_OFFLOAD_0; i < CORE_OFFLOAD_NUM; i++) {
- if (unlikely(mbox_send_msg_no_wait(&tdma.offload_mdev[i],
- &msg)))
- return;
- }
- }
-
- tdma_prefetch_enable(false);
-
- /* There is no need to wait for Tx/Rx idle before we stop Tx/Rx */
- if (!mtk_tops_mcu_netsys_fe_rst())
- while (tdma_read(TDMA_GLO_CFG0) & RX_DMA_BUSY)
- ;
- tdma_write(TDMA_GLO_CFG0, tdma_read(TDMA_GLO_CFG0) & (~RX_DMA_EN));
-
- if (!mtk_tops_mcu_netsys_fe_rst())
- while (tdma_read(TDMA_GLO_CFG0) & TX_DMA_BUSY)
- ;
- tdma_write(TDMA_GLO_CFG0, tdma_read(TDMA_GLO_CFG0) & (~TX_DMA_EN));
-
- tdma_writeback_enable(false);
-}
-
-static int mtk_tops_tdma_register_mbox(void)
-{
- int ret;
- int i;
-
- ret = register_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);
- if (ret) {
- TOPS_ERR("register tdma mgmt mbox send failed: %d\n", ret);
- return ret;
- }
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- ret = register_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
- if (ret) {
- TOPS_ERR("register tdma offload %d mbox send failed: %d\n",
- i, ret);
- goto err_unregister_offload_mbox;
- }
- }
-
- return ret;
-
-err_unregister_offload_mbox:
- for (i -= 1; i >= 0; i--)
- unregister_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
-
- unregister_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);
-
- return ret;
-}
-
-static void mtk_tops_tdma_unregister_mbox(void)
-{
- int i;
-
- unregister_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++)
- unregister_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
-}
-
-static int mtk_tops_tdma_dts_init(struct platform_device *pdev)
-{
- struct device_node *fe_mem = NULL;
- struct resource res;
- int ret = 0;
-
- fe_mem = of_parse_phandle(pdev->dev.of_node, "fe_mem", 0);
- if (!fe_mem) {
- TOPS_ERR("can not find fe_mem node\n");
- return -ENODEV;
- }
-
- if (of_address_to_resource(fe_mem, 0, &res))
- return -ENXIO;
-
- /* map FE address */
- tdma.base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
- if (!tdma.base)
- return -ENOMEM;
-
- /* shift FE address to TDMA base */
- tdma.base += TDMA_BASE;
-
- of_node_put(fe_mem);
-
- return ret;
-}
-
-int mtk_tops_tdma_init(struct platform_device *pdev)
-{
- int ret = 0;
-
- ret = mtk_tops_tdma_register_mbox();
- if (ret)
- return ret;
-
- ret = mtk_tops_tdma_dts_init(pdev);
- if (ret)
- return ret;
-
- ret = mtk_trm_hw_config_register(TRM_TDMA, &tdma_trm_hw_cfg);
- if (ret)
- return ret;
-
- mtk_tops_tdma_retrieve_last_state();
-
- return ret;
-}
-
-void mtk_tops_tdma_deinit(struct platform_device *pdev)
-{
- mtk_trm_hw_config_unregister(TRM_TDMA, &tdma_trm_hw_cfg);
-
- mtk_tops_tdma_unregister_mbox();
-}
diff --git a/feed/kernel/tops/src/tnl_offload.c b/feed/kernel/tops/src/tnl_offload.c
deleted file mode 100644
index bc3057d..0000000
--- a/feed/kernel/tops/src/tnl_offload.c
+++ /dev/null
@@ -1,1788 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/completion.h>
-#include <linux/device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/hashtable.h>
-#include <linux/if_ether.h>
-#include <linux/ip.h>
-#include <linux/kthread.h>
-#include <linux/list.h>
-#include <linux/lockdep.h>
-#include <linux/string.h>
-
-#include <mtk_eth_soc.h>
-#include <mtk_hnat/hnat.h>
-#include <mtk_hnat/nf_hnat_mtk.h>
-
-#include <pce/cdrt.h>
-#include <pce/cls.h>
-#include <pce/dipfilter.h>
-#include <pce/netsys.h>
-#include <pce/pce.h>
-
-#include "tops/internal.h"
-#include "tops/mbox.h"
-#include "tops/mcu.h"
-#include "tops/netsys.h"
-#include "tops/protocol/tunnel/gre/gretap.h"
-#include "tops/protocol/tunnel/l2tp/l2tpv2.h"
-#include "tops/protocol/tunnel/pptp/pptp.h"
-#include "tops/tunnel.h"
-
-#define TOPS_PPE_ENTRY_BUCKETS (64)
-#define TOPS_PPE_ENTRY_BUCKETS_BIT (6)
-
-struct tops_tnl {
- /* tunnel types */
- struct tops_tnl_type *offload_tnl_types[__TOPS_TUNNEL_TYPE_MAX];
- u32 offload_tnl_type_num;
- u32 tnl_base_addr;
-
- /* tunnel table */
- DECLARE_HASHTABLE(ht, CONFIG_TOPS_TNL_MAP_BIT);
- DECLARE_BITMAP(tnl_used, CONFIG_TOPS_TNL_NUM);
- wait_queue_head_t tnl_sync_wait;
- spinlock_t tnl_sync_lock;
- spinlock_t tbl_lock;
- bool has_tnl_to_sync;
- struct task_struct *tnl_sync_thread;
- struct list_head *tnl_sync_pending;
- struct list_head *tnl_sync_submit;
- struct tops_tnl_info *tnl_infos;
-
- /* dma request */
- struct completion dma_done;
- struct dma_chan *dmachan;
-
- struct device *dev;
-};
-
-static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
- struct mailbox_msg *msg);
-
-static struct tops_tnl tops_tnl;
-
-static LIST_HEAD(tnl_sync_q1);
-static LIST_HEAD(tnl_sync_q2);
-
-struct mailbox_dev tnl_offload_mbox_recv =
- MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, tnl_offload_mbox_cmd_recv);
-
-/* tunnel mailbox communication */
-static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
- struct mailbox_msg *msg)
-{
- switch (msg->msg1) {
- case TOPS_TNL_START_ADDR_SYNC:
- tops_tnl.tnl_base_addr = msg->msg2;
-
- return MBOX_NO_RET_MSG;
- default:
- break;
- }
-
- return MBOX_NO_RET_MSG;
-}
-
-static inline void tnl_flush_ppe_entry(struct foe_entry *entry, u32 tnl_idx)
-{
- u32 bind_tnl_idx;
-
- if (unlikely(!entry))
- return;
-
- switch (entry->bfib1.pkt_type) {
- case IPV4_HNAPT:
- if (entry->ipv4_hnapt.tport_id != NR_TDMA_TPORT
- && entry->ipv4_hnapt.tport_id != NR_TDMA_QDMA_TPORT)
- return;
-
- bind_tnl_idx = entry->ipv4_hnapt.tops_entry - __TOPS_TUNNEL_TYPE_MAX;
-
- break;
- default:
- return;
- }
-
- /* unexpected tunnel index */
- if (bind_tnl_idx >= __TOPS_TUNNEL_TYPE_MAX)
- return;
-
- if (tnl_idx == __TOPS_TUNNEL_TYPE_MAX || tnl_idx == bind_tnl_idx)
- memset(entry, 0, sizeof(*entry));
-}
-
-static inline void skb_set_tops_tnl_idx(struct sk_buff *skb, u32 tnl_idx)
-{
- skb_hnat_tops(skb) = tnl_idx + __TOPS_TUNNEL_TYPE_MAX;
-}
-
-static inline bool skb_tops_valid(struct sk_buff *skb)
-{
- return (skb && skb_hnat_tops(skb) < __TOPS_TUNNEL_TYPE_MAX);
-}
-
-static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
-{
- enum tops_tunnel_type tnl_proto_type = skb_hnat_tops(skb);
- struct tops_tnl_type *tnl_type;
-
- if (unlikely(!tnl_proto_type || tnl_proto_type >= __TOPS_TUNNEL_TYPE_MAX))
- return ERR_PTR(-EINVAL);
-
- tnl_type = tops_tnl.offload_tnl_types[tnl_proto_type];
-
- return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
-}
-
-static inline struct tops_tnl_info *skb_to_tnl_info(struct sk_buff *skb)
-{
- u32 tnl_idx = skb_hnat_tops(skb) - __TOPS_TUNNEL_TYPE_MAX;
-
- if (tnl_idx >= CONFIG_TOPS_TNL_NUM)
- return ERR_PTR(-EINVAL);
-
- if (!test_bit(tnl_idx, tops_tnl.tnl_used))
- return ERR_PTR(-EACCES);
-
- return &tops_tnl.tnl_infos[tnl_idx];
-}
-
-static inline void skb_mark_unbind(struct sk_buff *skb)
-{
- skb_hnat_tops(skb) = 0;
- skb_hnat_is_decap(skb) = 0;
- skb_hnat_alg(skb) = 1;
-}
-
-static inline u32 tnl_params_hash(struct tops_tnl_params *tnl_params)
-{
- if (!tnl_params)
- return 0;
-
- /* TODO: check collision possibility? */
- return (tnl_params->params.network.ip.sip ^ tnl_params->params.network.ip.dip);
-}
-
-static inline bool tnl_info_decap_is_enable(struct tops_tnl_info *tnl_info)
-{
- return tnl_info->cache.flag & TNL_DECAP_ENABLE;
-}
-
-static inline void tnl_info_decap_enable(struct tops_tnl_info *tnl_info)
-{
- tnl_info->cache.flag |= TNL_DECAP_ENABLE;
-}
-
-static inline void tnl_info_decap_disable(struct tops_tnl_info *tnl_info)
-{
- tnl_info->cache.flag &= ~(TNL_DECAP_ENABLE);
-}
-
-static inline bool tnl_info_encap_is_enable(struct tops_tnl_info *tnl_info)
-{
- return tnl_info->cache.flag & TNL_ENCAP_ENABLE;
-}
-
-static inline void tnl_info_encap_enable(struct tops_tnl_info *tnl_info)
-{
- tnl_info->cache.flag |= TNL_ENCAP_ENABLE;
-}
-
-static inline void tnl_info_encap_disable(struct tops_tnl_info *tnl_info)
-{
- tnl_info->cache.flag &= ~(TNL_ENCAP_ENABLE);
-}
-
-static inline void tnl_info_sta_updated_no_tnl_lock(struct tops_tnl_info *tnl_info)
-{
- tnl_info->status &= (~TNL_STA_UPDATING);
- tnl_info->status &= (~TNL_STA_INIT);
- tnl_info->status |= TNL_STA_UPDATED;
-}
-
-static inline void tnl_info_sta_updated(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- if (unlikely(!tnl_info))
- return;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- tnl_info_sta_updated_no_tnl_lock(tnl_info);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-}
-
-static inline bool tnl_info_sta_is_updated(struct tops_tnl_info *tnl_info)
-{
- return tnl_info->status & TNL_STA_UPDATED;
-}
-
-static inline void tnl_info_sta_updating_no_tnl_lock(struct tops_tnl_info *tnl_info)
-{
- tnl_info->status |= TNL_STA_UPDATING;
- tnl_info->status &= (~TNL_STA_QUEUED);
- tnl_info->status &= (~TNL_STA_UPDATED);
-}
-
-static inline void tnl_info_sta_updating(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- if (unlikely(!tnl_info))
- return;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- tnl_info_sta_updating_no_tnl_lock(tnl_info);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-}
-
-static inline bool tnl_info_sta_is_updating(struct tops_tnl_info *tnl_info)
-{
- return tnl_info->status & TNL_STA_UPDATING;
-}
-
-static inline void tnl_info_sta_queued_no_tnl_lock(struct tops_tnl_info *tnl_info)
-{
- tnl_info->status |= TNL_STA_QUEUED;
- tnl_info->status &= (~TNL_STA_UPDATED);
-}
-
-static inline void tnl_info_sta_queued(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- if (unlikely(!tnl_info))
- return;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- tnl_info_sta_queued_no_tnl_lock(tnl_info);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-}
-
-static inline bool tnl_info_sta_is_queued(struct tops_tnl_info *tnl_info)
-{
- return tnl_info->status & TNL_STA_QUEUED;
-}
-
-static inline void tnl_info_sta_init_no_tnl_lock(struct tops_tnl_info *tnl_info)
-{
- tnl_info->status = TNL_STA_INIT;
-}
-
-static inline void tnl_info_sta_init(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- if (unlikely(!tnl_info))
- return;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- tnl_info_sta_init_no_tnl_lock(tnl_info);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-}
-
-static inline bool tnl_info_sta_is_init(struct tops_tnl_info *tnl_info)
-{
- return tnl_info->status & TNL_STA_INIT;
-}
-
-static inline void tnl_info_sta_uninit_no_tnl_lock(struct tops_tnl_info *tnl_info)
-{
- tnl_info->status = TNL_STA_UNINIT;
-}
-
-static inline void tnl_info_sta_uninit(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- if (unlikely(!tnl_info))
- return;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- tnl_info_sta_uninit_no_tnl_lock(tnl_info);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-}
-
-static inline bool tnl_info_sta_is_uninit(struct tops_tnl_info *tnl_info)
-{
- return tnl_info->status & TNL_STA_UNINIT;
-}
-
-static inline void tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
-
- list_add_tail(&tnl_info->sync_node, tops_tnl.tnl_sync_submit);
-
- tops_tnl.has_tnl_to_sync = true;
-
- spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
-
- if (mtk_tops_mcu_alive())
- wake_up_interruptible(&tops_tnl.tnl_sync_wait);
-}
-
-static void mtk_tops_tnl_info_cls_update_idx(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag;
-
- tnl_info->tnl_params.cls_entry = tnl_info->tcls->cls->idx;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
- tnl_info->cache.cls_entry = tnl_info->tcls->cls->idx;
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-}
-
-static void mtk_tops_tnl_info_cls_entry_unprepare(struct tops_tnl_info *tnl_info,
- struct tops_tnl_params *tnl_params)
-{
- struct tops_cls_entry *tcls = tnl_info->tcls;
-
- tnl_info->tcls = NULL;
-
- if (refcount_dec_and_test(&tcls->refcnt)) {
- list_del(&tcls->node);
-
- if (!tnl_params->cdrt)
- memset(&tcls->cls->cdesc, 0, sizeof(tcls->cls->cdesc));
- else
-			/*
-			 * recover tport_idx so that matched packets
-			 * go through EIP197 only
-			 */
- CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, 2);
-
- mtk_pce_cls_entry_write(tcls->cls);
-
- mtk_pce_cls_entry_free(tcls->cls);
-
- devm_kfree(tops_dev, tcls);
- }
-}
-
-static struct tops_cls_entry *
-mtk_tops_tnl_info_cls_entry_prepare(struct tops_tnl_info *tnl_info,
- struct tops_tnl_params *tnl_params)
-{
- struct tops_cls_entry *tcls;
- int ret;
-
- tcls = devm_kzalloc(tops_dev, sizeof(struct tops_cls_entry), GFP_KERNEL);
- if (!tcls)
- return ERR_PTR(-ENOMEM);
-
- if (!tnl_params->cdrt) {
- tcls->cls = mtk_pce_cls_entry_alloc();
- if (IS_ERR(tcls->cls)) {
- ret = PTR_ERR(tcls->cls);
- goto free_tcls;
- }
- } else {
- struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);
-
- if (IS_ERR(cdrt)) {
- ret = PTR_ERR(cdrt);
- goto free_tcls;
- }
- if (unlikely(!cdrt->cls)) {
- ret = -ENODEV;
- goto free_tcls;
- }
-
- tcls->cls = cdrt->cls;
- }
-
- INIT_LIST_HEAD(&tcls->node);
-	list_add_tail(&tcls->node, &tnl_info->tnl_type->tcls_head);
-
- tnl_info->tcls = tcls;
- refcount_set(&tcls->refcnt, 1);
-
- return tcls;
-
-free_tcls:
- devm_kfree(tops_dev, tcls);
-
- return ERR_PTR(ret);
-}
-
-static int mtk_tops_tnl_info_cls_entry_write(struct tops_tnl_info *tnl_info)
-{
- int ret;
-
- if (!tnl_info->tcls)
- return -EINVAL;
-
- ret = mtk_pce_cls_entry_write(tnl_info->tcls->cls);
- if (ret)
- return ret;
-
- tnl_info->tcls->updated = true;
-
- mtk_tops_tnl_info_cls_update_idx(tnl_info);
-
- return 0;
-}
-
-static int mtk_tops_tnl_info_cls_tear_down(struct tops_tnl_info *tnl_info,
- struct tops_tnl_params *tnl_params)
-{
- mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
-
- return 0;
-}
-
-/*
- * check whether the cls entry is updated for tunnel protocols that only use
- * one CLS HW entry
- *
- * since only the tunnel sync task operates on the tcls linked list,
- * it is safe to access it without holding a lock
- *
- * return true if updated
- * return false if an update is needed
- */
-static bool mtk_tops_tnl_info_cls_single_is_updated(struct tops_tnl_info *tnl_info,
- struct tops_tnl_type *tnl_type)
-{
-	/*
-	 * check whether tnl_type has already allocated a tops_cls_entry
-	 * if not, return false so that a new one gets allocated
-	 */
- if (list_empty(&tnl_type->tcls_head))
- return false;
-
-	/*
-	 * if tnl_info is not yet associated with tnl_type's cls entry,
-	 * take a reference to the tops_cls_entry
-	 */
- if (!tnl_info->tcls) {
- tnl_info->tcls = list_first_entry(&tnl_type->tcls_head,
- struct tops_cls_entry,
- node);
-
- refcount_inc(&tnl_info->tcls->refcnt);
- mtk_tops_tnl_info_cls_update_idx(tnl_info);
- }
-
- return tnl_info->tcls->updated;
-}
-
-static int mtk_tops_tnl_info_cls_single_setup(struct tops_tnl_info *tnl_info,
- struct tops_tnl_params *tnl_params,
- struct tops_tnl_type *tnl_type)
-{
- struct tops_cls_entry *tcls;
- int ret;
-
- if (mtk_tops_tnl_info_cls_single_is_updated(tnl_info, tnl_type))
- return 0;
-
- if (tnl_info->tcls)
- goto cls_entry_write;
-
- tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
- if (IS_ERR(tcls))
- return PTR_ERR(tcls);
-
- if (!tnl_params->cdrt) {
- ret = tnl_type->cls_entry_setup(tnl_info, &tcls->cls->cdesc);
- if (ret) {
- TOPS_ERR("tops cls entry setup failed: %d\n", ret);
- goto cls_entry_unprepare;
- }
- } else {
-		/*
-		 * since the CLS entry is already filled with the outer protocol rule,
-		 * we only update the CLS tport here so that matched packets go through
-		 * QDMA, and specify the destination port to TOPS
-		 */
- CLS_DESC_DATA(&tcls->cls->cdesc, tport_idx, NR_EIP197_QDMA_TPORT);
- CLS_DESC_DATA(&tcls->cls->cdesc, fport, PSE_PORT_TDMA);
- CLS_DESC_DATA(&tcls->cls->cdesc, qid, 12);
- }
-
-cls_entry_write:
- ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);
-
-cls_entry_unprepare:
- if (ret)
- mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
-
- return ret;
-}
-
-static struct tops_cls_entry *
-mtk_tops_tnl_info_cls_entry_find(struct tops_tnl_type *tnl_type,
- struct cls_desc *cdesc)
-{
- struct tops_cls_entry *tcls;
-
- list_for_each_entry(tcls, &tnl_type->tcls_head, node)
- if (!memcmp(&tcls->cls->cdesc, cdesc, sizeof(struct cls_desc)))
- return tcls;
-
- return NULL;
-}
-
-static bool mtk_tops_tnl_info_cls_multi_is_updated(struct tops_tnl_info *tnl_info,
- struct tops_tnl_type *tnl_type,
- struct cls_desc *cdesc)
-{
- struct tops_cls_entry *tcls;
-
- if (list_empty(&tnl_type->tcls_head))
- return false;
-
- if (tnl_info->tcls) {
- if (!memcmp(cdesc, &tnl_info->tcls->cls->cdesc, sizeof(*cdesc)))
- return tnl_info->tcls->updated;
-
- memcpy(&tnl_info->tcls->cls->cdesc, cdesc, sizeof(*cdesc));
- tnl_info->tcls->updated = false;
- return false;
- }
-
- tcls = mtk_tops_tnl_info_cls_entry_find(tnl_type, cdesc);
- if (!tcls)
- return false;
-
- tnl_info->tcls = tcls;
- refcount_inc(&tnl_info->tcls->refcnt);
- mtk_tops_tnl_info_cls_update_idx(tnl_info);
-
- return tcls->updated;
-}
-
-static int mtk_tops_tnl_info_cls_multi_setup(struct tops_tnl_info *tnl_info,
- struct tops_tnl_params *tnl_params,
- struct tops_tnl_type *tnl_type)
-{
- struct tops_cls_entry *tcls;
- struct cls_desc cdesc;
-
- int ret;
-
- if (!tnl_params->cdrt) {
- memset(&cdesc, 0, sizeof(struct cls_desc));
-
- /* prepare cls_desc from tnl_type */
- ret = tnl_type->cls_entry_setup(tnl_info, &cdesc);
- if (ret) {
- TOPS_ERR("tops cls entry setup failed: %d\n", ret);
- return ret;
- }
- } else {
- struct cdrt_entry *cdrt = mtk_pce_cdrt_entry_find(tnl_params->cdrt);
-
- if (IS_ERR(cdrt)) {
-		TOPS_ERR("no CDRT found for cdrt idx: %u\n",
-			 tnl_params->cdrt);
- return PTR_ERR(cdrt);
- }
-
- memcpy(&cdesc, &cdrt->cls->cdesc, sizeof(struct cls_desc));
-
- CLS_DESC_DATA(&cdesc, tport_idx, 0x7);
- }
-
-	/*
-	 * check whether cdesc is already updated; if tnl_info is not associated
-	 * with a tcls but an existing tcls has the same cls desc content as cdesc,
-	 * tnl_info sets up an association with that tcls
-	 *
-	 * we only go past this if condition when
-	 * a tcls is not yet updated or
-	 * tnl_info is not yet associated with a tcls
-	 */
- if (mtk_tops_tnl_info_cls_multi_is_updated(tnl_info, tnl_type, &cdesc))
- return 0;
-
- /* tcls is not yet updated, update this tcls */
- if (tnl_info->tcls)
- return mtk_tops_tnl_info_cls_entry_write(tnl_info);
-
- /* create a new tcls entry and associate with tnl_info */
- tcls = mtk_tops_tnl_info_cls_entry_prepare(tnl_info, tnl_params);
- if (IS_ERR(tcls))
- return PTR_ERR(tcls);
-
- memcpy(&tcls->cls->cdesc, &cdesc, sizeof(struct cls_desc));
-
- ret = mtk_tops_tnl_info_cls_entry_write(tnl_info);
- if (ret)
- mtk_tops_tnl_info_cls_entry_unprepare(tnl_info, tnl_params);
-
- return ret;
-}
-
-static int mtk_tops_tnl_info_cls_setup(struct tops_tnl_info *tnl_info,
- struct tops_tnl_params *tnl_params)
-{
- struct tops_tnl_type *tnl_type;
-
- if (tnl_info->tcls && tnl_info->tcls->updated)
- return 0;
-
- tnl_type = tnl_info->tnl_type;
- if (!tnl_type)
- return -EINVAL;
-
- if (!tnl_type->use_multi_cls)
- return mtk_tops_tnl_info_cls_single_setup(tnl_info,
- tnl_params,
- tnl_type);
-
- return mtk_tops_tnl_info_cls_multi_setup(tnl_info, tnl_params, tnl_type);
-}
-
-static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
-{
- struct dip_desc dipd;
-
- memset(&dipd, 0, sizeof(struct dip_desc));
-
- dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.params.network.ip.sip);
- dipd.tag = DIPFILTER_IPV4;
-
- return mtk_pce_dipfilter_entry_del(&dipd);
-}
-
-static int mtk_tops_tnl_info_dipfilter_setup(struct tops_tnl_info *tnl_info)
-{
- struct dip_desc dipd;
-
- /* setup dipfilter */
- memset(&dipd, 0, sizeof(struct dip_desc));
-
- dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.params.network.ip.sip);
- dipd.tag = DIPFILTER_IPV4;
-
- return mtk_pce_dipfilter_entry_add(&dipd);
-}
-
-void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
-{
- lockdep_assert_held(&tnl_info->lock);
-
- if (tnl_info_sta_is_queued(tnl_info))
- return;
-
- tnl_info_submit_no_tnl_lock(tnl_info);
-
- tnl_info_sta_queued_no_tnl_lock(tnl_info);
-}
-
-void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- if (unlikely(!tnl_info))
- return;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-}
-
-static void mtk_tops_tnl_info_hash_no_lock(struct tops_tnl_info *tnl_info)
-{
- lockdep_assert_held(&tops_tnl.tbl_lock);
- lockdep_assert_held(&tnl_info->lock);
-
- if (hash_hashed(&tnl_info->hlist))
- hash_del(&tnl_info->hlist);
-
- hash_add(tops_tnl.ht, &tnl_info->hlist, tnl_params_hash(&tnl_info->cache));
-}
-
-void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- if (unlikely(!tnl_info))
- return;
-
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- spin_lock(&tnl_info->lock);
-
- mtk_tops_tnl_info_hash_no_lock(tnl_info);
-
- spin_unlock(&tnl_info->lock);
-
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-}
-
-struct tops_tnl_info *mtk_tops_tnl_info_get_by_idx(u32 tnl_idx)
-{
- if (tnl_idx >= CONFIG_TOPS_TNL_NUM)
- return ERR_PTR(-EINVAL);
-
- if (!test_bit(tnl_idx, tops_tnl.tnl_used))
- return ERR_PTR(-EACCES);
-
- return &tops_tnl.tnl_infos[tnl_idx];
-}
-
-static bool mtk_tops_tnl_info_match(struct tops_tnl_type *tnl_type,
- struct tops_tnl_info *tnl_info,
- struct tops_params *target)
-{
- struct tops_params *p = &tnl_info->cache.params;
- unsigned long flag = 0;
- bool match;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- match = (p->tunnel.type == target->tunnel.type
- && mtk_tops_params_match(p, target)
- && tnl_type->tnl_param_match(p, target));
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-
- return match;
-}
-
-struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_type *tnl_type,
- struct tops_tnl_params *tnl_params)
-{
- struct tops_tnl_info *tnl_info;
-
- lockdep_assert_held(&tops_tnl.tbl_lock);
-
- if (unlikely(!tnl_params->tops_entry_proto
- || tnl_params->tops_entry_proto >= __TOPS_TUNNEL_TYPE_MAX))
- return ERR_PTR(-EINVAL);
-
- hash_for_each_possible(tops_tnl.ht,
- tnl_info,
- hlist,
- tnl_params_hash(tnl_params))
- if (mtk_tops_tnl_info_match(tnl_type, tnl_info, &tnl_params->params))
- return tnl_info;
-
- return ERR_PTR(-ENODEV);
-}
-
-static inline void mtk_tops_tnl_info_preserve(struct tops_tnl_type *tnl_type,
- struct tops_tnl_params *old,
- struct tops_tnl_params *new)
-{
- new->flag |= old->flag;
- new->cls_entry = old->cls_entry;
- if (old->cdrt)
- new->cdrt = old->cdrt;
-
- /* we can only get ttl from encapsulation */
- if (new->params.network.ip.ttl == 128 && old->params.network.ip.ttl != 0)
- new->params.network.ip.ttl = old->params.network.ip.ttl;
-
- if (tnl_type->tnl_param_restore)
- tnl_type->tnl_param_restore(&old->params, &new->params);
-}
-
-/* tnl_info->lock should be held before calling this function */
-static int mtk_tops_tnl_info_setup(struct sk_buff *skb,
- struct tops_tnl_type *tnl_type,
- struct tops_tnl_info *tnl_info,
- struct tops_tnl_params *tnl_params)
-{
- bool has_diff = false;
-
- if (unlikely(!skb || !tnl_info || !tnl_params))
- return -EPERM;
-
- lockdep_assert_held(&tnl_info->lock);
-
- mtk_tops_tnl_info_preserve(tnl_type, &tnl_info->cache, tnl_params);
-
- has_diff = memcmp(&tnl_info->cache, tnl_params, sizeof(*tnl_params));
- if (has_diff) {
- memcpy(&tnl_info->cache, tnl_params, sizeof(*tnl_params));
- mtk_tops_tnl_info_hash_no_lock(tnl_info);
- }
-
- if (skb_hnat_is_decap(skb)) {
-		/* the net_device is used to forward Rx packets to the decapsulated interface */
- tnl_info->dev = skb->dev;
- if (!tnl_info_decap_is_enable(tnl_info)) {
- has_diff = true;
- tnl_info_decap_enable(tnl_info);
- }
- } else if (skb_hnat_is_encap(skb)) {
- /* set skb_hnat_tops(skb) to tunnel index for ppe binding */
- skb_set_tops_tnl_idx(skb, tnl_info->tnl_idx);
- if (!tnl_info_encap_is_enable(tnl_info)) {
- has_diff = true;
- tnl_info_encap_enable(tnl_info);
- }
- }
-
- if (has_diff)
- mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
-
- return 0;
-}
-
-/* tops_tnl.tbl_lock should be acquired before calling this function */
-static struct tops_tnl_info *
-mtk_tops_tnl_info_alloc_no_lock(struct tops_tnl_type *tnl_type)
-{
- struct tops_tnl_info *tnl_info;
- unsigned long flag = 0;
- u32 tnl_idx;
-
- lockdep_assert_held(&tops_tnl.tbl_lock);
-
- tnl_idx = find_first_zero_bit(tops_tnl.tnl_used, CONFIG_TOPS_TNL_NUM);
- if (tnl_idx == CONFIG_TOPS_TNL_NUM) {
- TOPS_NOTICE("offload tunnel table full!\n");
- return ERR_PTR(-ENOMEM);
- }
-
-	/* occupy the free tunnel slot */
- tnl_info = &tops_tnl.tnl_infos[tnl_idx];
- memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
- memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
-
- /* TODO: maybe spin_lock_bh() is enough? */
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- if (tnl_info_sta_is_init(tnl_info)) {
- TOPS_ERR("error: fetched an initialized tunnel info\n");
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-
- return ERR_PTR(-EBADF);
- }
- tnl_info_sta_init_no_tnl_lock(tnl_info);
-
- tnl_info->tnl_type = tnl_type;
-
- INIT_HLIST_NODE(&tnl_info->hlist);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-
- set_bit(tnl_idx, tops_tnl.tnl_used);
-
- return tnl_info;
-}
-
-struct tops_tnl_info *mtk_tops_tnl_info_alloc(struct tops_tnl_type *tnl_type)
-{
- struct tops_tnl_info *tnl_info;
- unsigned long flag = 0;
-
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);
-
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-
- return tnl_info;
-}
-
-static void mtk_tops_tnl_info_free_no_lock(struct tops_tnl_info *tnl_info)
-{
- if (unlikely(!tnl_info))
- return;
-
- lockdep_assert_held(&tops_tnl.tbl_lock);
- lockdep_assert_held(&tnl_info->lock);
-
- hash_del(&tnl_info->hlist);
-
- tnl_info_sta_uninit_no_tnl_lock(tnl_info);
-
- clear_bit(tnl_info->tnl_idx, tops_tnl.tnl_used);
-}
-
-static void mtk_tops_tnl_info_free(struct tops_tnl_info *tnl_info)
-{
- unsigned long flag = 0;
-
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- spin_lock(&tnl_info->lock);
-
- mtk_tops_tnl_info_free_no_lock(tnl_info);
-
- spin_unlock(&tnl_info->lock);
-
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-}
-
-static void __mtk_tops_tnl_offload_disable(struct tops_tnl_info *tnl_info)
-{
- tnl_info->status |= TNL_STA_DELETING;
- mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
-}
-
-static int mtk_tops_tnl_offload(struct sk_buff *skb,
- struct tops_tnl_type *tnl_type,
- struct tops_tnl_params *tnl_params)
-{
- struct tops_tnl_info *tnl_info;
- unsigned long flag;
- int ret = 0;
-
- if (unlikely(!tnl_params))
- return -EPERM;
-
- /* prepare tnl_info */
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- tnl_info = mtk_tops_tnl_info_find(tnl_type, tnl_params);
- if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) != -ENODEV) {
- /* error */
- ret = PTR_ERR(tnl_info);
- goto err_out;
- } else if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) == -ENODEV) {
- /* not allocate yet */
- tnl_info = mtk_tops_tnl_info_alloc_no_lock(tnl_type);
- }
-
- if (IS_ERR(tnl_info)) {
- ret = PTR_ERR(tnl_info);
- TOPS_DBG("tnl offload alloc tnl_info failed: %d\n", ret);
- goto err_out;
- }
-
- spin_lock(&tnl_info->lock);
- ret = mtk_tops_tnl_info_setup(skb, tnl_type, tnl_info, tnl_params);
- spin_unlock(&tnl_info->lock);
-
-err_out:
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-
- return ret;
-}
-
-static int mtk_tops_tnl_l2_update(struct sk_buff *skb)
-{
- struct tops_tnl_info *tnl_info = skb_to_tnl_info(skb);
- struct tops_tnl_type *tnl_type;
- unsigned long flag;
- int ret;
-
- if (IS_ERR(tnl_info))
- return PTR_ERR(tnl_info);
-
- tnl_type = tnl_info->tnl_type;
- if (!tnl_type->tnl_l2_param_update)
- return -ENODEV;
-
- spin_lock_irqsave(&tnl_info->lock, flag);
-
- ret = tnl_type->tnl_l2_param_update(skb, &tnl_info->cache.params);
- /* tnl params need to be updated */
- if (ret == 1) {
- mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
- ret = 0;
- }
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-
- return ret;
-}
-
-static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
-{
- struct tops_tnl_type *tnl_type;
- struct ethhdr *eth;
- u32 cnt;
- u32 i;
-
- if (unlikely(!mtk_tops_mcu_alive())) {
- skb_mark_unbind(skb);
-		return false;
- }
-
- /* skb should not carry tops here */
- if (skb_hnat_tops(skb))
- return false;
-
- eth = eth_hdr(skb);
-
-	/* TODO: currently decap only supports Ethernet/IPv4 */
- if (ntohs(eth->h_proto) != ETH_P_IP)
- return false;
-
-	/* TODO: this lookup could be optimized */
- for (i = TOPS_TUNNEL_GRETAP, cnt = 0;
- i < __TOPS_TUNNEL_TYPE_MAX && cnt < tops_tnl.offload_tnl_type_num;
- i++) {
- tnl_type = tops_tnl.offload_tnl_types[i];
- if (unlikely(!tnl_type))
- continue;
-
- cnt++;
- if (tnl_type->tnl_decap_offloadable
- && tnl_type->tnl_decap_offloadable(skb)) {
- skb_hnat_tops(skb) = tnl_type->tnl_proto_type;
- return true;
- }
- }
-
- return false;
-}
-
-static int mtk_tops_tnl_decap_offload(struct sk_buff *skb)
-{
- struct tops_tnl_params tnl_params;
- struct tops_tnl_type *tnl_type;
- int ret;
-
- if (unlikely(!mtk_tops_mcu_alive())) {
- skb_mark_unbind(skb);
- return -EAGAIN;
- }
-
- if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_decap(skb))) {
- skb_mark_unbind(skb);
- return -EINVAL;
- }
-
- tnl_type = skb_to_tnl_type(skb);
- if (IS_ERR(tnl_type)) {
- skb_mark_unbind(skb);
- return PTR_ERR(tnl_type);
- }
-
- if (unlikely(!tnl_type->tnl_decap_param_setup || !tnl_type->tnl_param_match)) {
- skb_mark_unbind(skb);
- return -ENODEV;
- }
-
- memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
-
- /* push removed ethernet header back first */
- if (tnl_type->has_inner_eth)
- skb_push(skb, sizeof(struct ethhdr));
-
- ret = mtk_tops_decap_param_setup(skb,
- &tnl_params.params,
- tnl_type->tnl_decap_param_setup);
-
- /* pull ethernet header to restore skb->data to ip start */
- if (tnl_type->has_inner_eth)
- skb_pull(skb, sizeof(struct ethhdr));
-
- if (unlikely(ret)) {
- skb_mark_unbind(skb);
- return ret;
- }
-
- tnl_params.tops_entry_proto = tnl_type->tnl_proto_type;
- tnl_params.cdrt = skb_hnat_cdrt(skb);
-
- ret = mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
-
-	/*
-	 * whether offloading the decapsulation tunnel succeeds or fails,
-	 * skb_hnat_tops(skb) must be cleared to avoid mtk_tnl_decap_offload()
-	 * getting called again
-	 */
- skb_hnat_tops(skb) = 0;
- skb_hnat_is_decap(skb) = 0;
-
- return ret;
-}
-
-static int __mtk_tops_tnl_encap_offload(struct sk_buff *skb)
-{
- struct tops_tnl_params tnl_params;
- struct tops_tnl_type *tnl_type;
- int ret;
-
- tnl_type = skb_to_tnl_type(skb);
- if (IS_ERR(tnl_type))
- return PTR_ERR(tnl_type);
-
- if (unlikely(!tnl_type->tnl_encap_param_setup || !tnl_type->tnl_param_match))
- return -ENODEV;
-
- memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
-
- ret = mtk_tops_encap_param_setup(skb,
- &tnl_params.params,
- tnl_type->tnl_encap_param_setup);
- if (unlikely(ret))
- return ret;
-
- tnl_params.tops_entry_proto = tnl_type->tnl_proto_type;
- tnl_params.cdrt = skb_hnat_cdrt(skb);
-
- return mtk_tops_tnl_offload(skb, tnl_type, &tnl_params);
-}
-
-static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
-{
- if (unlikely(!mtk_tops_mcu_alive())) {
- skb_mark_unbind(skb);
- return -EAGAIN;
- }
-
- if (!skb_hnat_is_encap(skb))
- return -EPERM;
-
- if (unlikely(skb_hnat_cdrt(skb)))
- return mtk_tops_tnl_l2_update(skb);
-
- return __mtk_tops_tnl_encap_offload(skb);
-}
-
-static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
-{
- if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
- return ERR_PTR(-EINVAL);
-
- tnl_idx = tnl_idx - TOPS_CRSN_TNL_ID_START;
-
- return tops_tnl.tnl_infos[tnl_idx].dev;
-}
-
-static void mtk_tops_tnl_sync_dma_done(void *param)
-{
- /* TODO: check tx status with dmaengine_tx_status()? */
- complete(&tops_tnl.dma_done);
-}
-
-static void mtk_tops_tnl_sync_dma_start(void *param)
-{
- dma_async_issue_pending(tops_tnl.dmachan);
-
- wait_for_completion(&tops_tnl.dma_done);
-}
-
-static void mtk_tops_tnl_sync_dma_unprepare(struct tops_tnl_info *tnl_info,
- dma_addr_t *addr)
-{
- dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
- DMA_TO_DEVICE);
-
- dma_release_channel(tops_tnl.dmachan);
-}
-
-static int mtk_tops_tnl_sync_dma_prepare(struct tops_tnl_info *tnl_info,
- dma_addr_t *addr)
-{
- u32 tnl_addr = tops_tnl.tnl_base_addr;
- struct dma_async_tx_descriptor *desc;
- dma_cookie_t cookie;
- int ret;
-
- if (!tnl_info)
- return -EPERM;
-
- tnl_addr += tnl_info->tnl_idx * sizeof(struct tops_tnl_params);
-
- tops_tnl.dmachan = dma_request_slave_channel(tops_dev, "tnl-sync");
- if (!tops_tnl.dmachan) {
- TOPS_ERR("request dma channel failed\n");
- return -ENODEV;
- }
-
- *addr = dma_map_single(tops_dev,
- &tnl_info->tnl_params,
- sizeof(struct tops_tnl_params),
- DMA_TO_DEVICE);
- if (dma_mapping_error(tops_dev, *addr)) {
- ret = -ENOMEM;
- goto dma_release;
- }
-
- desc = dmaengine_prep_dma_memcpy(tops_tnl.dmachan,
- (dma_addr_t)tnl_addr, *addr,
- sizeof(struct tops_tnl_params),
- 0);
- if (!desc) {
- ret = -EBUSY;
- goto dma_unmap;
- }
-
- desc->callback = mtk_tops_tnl_sync_dma_done;
-
- cookie = dmaengine_submit(desc);
- ret = dma_submit_error(cookie);
- if (ret)
- goto dma_terminate;
-
- reinit_completion(&tops_tnl.dma_done);
-
- return ret;
-
-dma_terminate:
- dmaengine_terminate_all(tops_tnl.dmachan);
-
-dma_unmap:
- dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
- DMA_TO_DEVICE);
-
-dma_release:
- dma_release_channel(tops_tnl.dmachan);
-
- return ret;
-}
-
-static int __mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
-{
- struct mcu_ctrl_cmd mcmd;
- dma_addr_t addr;
- int ret;
-
- mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
- mcmd.arg[0] = TUNNEL_CTRL_EVENT_DEL;
- mcmd.arg[1] = tnl_info->tnl_idx;
- mcmd.core_mask = CORE_TOPS_MASK;
-
- ret = mtk_tops_mcu_stall(&mcmd, NULL, NULL);
- if (ret) {
- TOPS_ERR("tnl sync deletion notify mcu failed: %d\n", ret);
- return ret;
- }
-
- /* there shouldn't be any other reference to tnl_info right now */
- memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
- memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
-
- ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
- if (ret) {
- TOPS_ERR("tnl sync deletion prepare dma request failed: %d\n", ret);
- return ret;
- }
-
- mtk_tops_tnl_sync_dma_start(NULL);
-
- mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
-
- return ret;
-}
-
-static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
-{
- struct tops_tnl_params tnl_params;
- int ret;
-
- ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
- if (ret) {
-		TOPS_ERR("tnl sync dipfilter tear down failed: %d\n",
- ret);
- return ret;
- }
-
- memcpy(&tnl_params, &tnl_info->tnl_params, sizeof(struct tops_tnl_params));
- ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
- if (ret) {
- TOPS_ERR("tnl sync deletion failed: %d\n", ret);
- return ret;
- }
-
- ret = mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_params);
- if (ret) {
-		TOPS_ERR("tnl sync cls tear down failed: %d\n",
- ret);
- return ret;
- }
-
- mtk_tops_tnl_info_free(tnl_info);
-
- return ret;
-}
-
-static int __mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
- bool is_new_tnl)
-{
- struct mcu_ctrl_cmd mcmd;
- dma_addr_t addr;
- int ret;
-
- mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
- mcmd.arg[1] = tnl_info->tnl_idx;
- mcmd.core_mask = CORE_TOPS_MASK;
-
- if (is_new_tnl)
- mcmd.arg[0] = TUNNEL_CTRL_EVENT_NEW;
- else
- mcmd.arg[0] = TUNNEL_CTRL_EVENT_DIP_UPDATE;
-
- ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
- if (ret) {
- TOPS_ERR("tnl sync update prepare dma request failed: %d\n", ret);
- return ret;
- }
-
- ret = mtk_tops_mcu_stall(&mcmd, mtk_tops_tnl_sync_dma_start, NULL);
- if (ret)
- TOPS_ERR("tnl sync update notify mcu failed: %d\n", ret);
-
- mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
-
- return ret;
-}
-
-static int mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
- bool setup_pce, bool is_new_tnl)
-{
- int ret;
-
- if (setup_pce) {
- ret = mtk_tops_tnl_info_cls_setup(tnl_info, &tnl_info->tnl_params);
- if (ret) {
- TOPS_ERR("tnl cls setup failed: %d\n", ret);
- return ret;
- }
- }
-
- ret = __mtk_tops_tnl_sync_param_update(tnl_info, is_new_tnl);
- if (ret) {
- TOPS_ERR("tnl sync failed: %d\n", ret);
- goto cls_tear_down;
- }
-
- tnl_info_sta_updated(tnl_info);
-
- if (setup_pce) {
- ret = mtk_tops_tnl_info_dipfilter_setup(tnl_info);
- if (ret) {
- TOPS_ERR("tnl dipfilter setup failed: %d\n", ret);
- /* TODO: should undo parameter sync */
- return ret;
- }
- }
-
- return ret;
-
-cls_tear_down:
- mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);
-
- return ret;
-}
-
-static inline int mtk_tops_tnl_sync_param_new(struct tops_tnl_info *tnl_info,
- bool setup_pce)
-{
- return mtk_tops_tnl_sync_param_update(tnl_info, setup_pce, true);
-}
-
-static void mtk_tops_tnl_sync_get_pending_queue(void)
-{
- struct list_head *tmp = tops_tnl.tnl_sync_submit;
- unsigned long flag = 0;
-
- spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
-
- tops_tnl.tnl_sync_submit = tops_tnl.tnl_sync_pending;
- tops_tnl.tnl_sync_pending = tmp;
-
- tops_tnl.has_tnl_to_sync = false;
-
- spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
-}
-
-static void mtk_tops_tnl_sync_queue_proc(void)
-{
- struct tops_tnl_info *tnl_info;
- struct tops_tnl_info *tmp;
- unsigned long flag = 0;
- bool is_decap = false;
- u32 tnl_status = 0;
- int ret;
-
- list_for_each_entry_safe(tnl_info,
- tmp,
- tops_tnl.tnl_sync_pending,
- sync_node) {
- spin_lock_irqsave(&tnl_info->lock, flag);
-
-		/* a tnl update is in flight, queue the tnl for the next round */
- if (tnl_info_sta_is_updating(tnl_info)) {
- list_del_init(&tnl_info->sync_node);
-
- tnl_info_submit_no_tnl_lock(tnl_info);
-
- goto next;
- }
-
-		/*
-		 * if tnl_info is not queued, something went wrong;
-		 * just remove that tnl_info from the queue
-		 * maybe trigger BUG_ON()?
-		 */
- if (!tnl_info_sta_is_queued(tnl_info)) {
- list_del_init(&tnl_info->sync_node);
- goto next;
- }
-
- is_decap = (!(tnl_info->tnl_params.flag & TNL_DECAP_ENABLE)
- && tnl_info_decap_is_enable(tnl_info));
-
- tnl_status = tnl_info->status;
- memcpy(&tnl_info->tnl_params, &tnl_info->cache,
- sizeof(struct tops_tnl_params));
-
- list_del_init(&tnl_info->sync_node);
-
-		/*
-		 * mark tnl_info as updating and release its spin lock,
-		 * since the DMA transfer below may sleep
-		 */
- tnl_info_sta_updating_no_tnl_lock(tnl_info);
-
- spin_unlock_irqrestore(&tnl_info->lock, flag);
-
- if (tnl_status & TNL_STA_INIT)
- ret = mtk_tops_tnl_sync_param_new(tnl_info, is_decap);
- else if (tnl_status & TNL_STA_DELETING)
- ret = mtk_tops_tnl_sync_param_delete(tnl_info);
- else
- ret = mtk_tops_tnl_sync_param_update(tnl_info,
- is_decap,
- false);
-
- if (ret)
- TOPS_ERR("sync tunnel parameter failed: %d\n", ret);
-
- continue;
-
-next:
- spin_unlock_irqrestore(&tnl_info->lock, flag);
- }
-}
-
-static int tnl_sync_task(void *data)
-{
- while (1) {
- wait_event_interruptible(tops_tnl.tnl_sync_wait,
- (tops_tnl.has_tnl_to_sync && mtk_tops_mcu_alive())
- || kthread_should_stop());
-
- if (kthread_should_stop())
- break;
-
- mtk_tops_tnl_sync_get_pending_queue();
-
- mtk_tops_tnl_sync_queue_proc();
- }
-
- return 0;
-}
-
-static void mtk_tops_tnl_info_flush_ppe(struct tops_tnl_info *tnl_info)
-{
- struct foe_entry *entry;
- u32 max_entry;
- u32 ppe_id;
- u32 eidx;
-
- /* tnl info's lock should be held */
- lockdep_assert_held(&tnl_info->lock);
-
- /* clear all TOPS related PPE entries */
- for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
- max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
- for (eidx = 0; eidx < max_entry; eidx++) {
- entry = hnat_get_foe_entry(ppe_id, eidx);
- if (IS_ERR(entry))
- break;
-
- if (!entry_hnat_is_bound(entry))
- continue;
-
- tnl_flush_ppe_entry(entry, tnl_info->tnl_idx);
- }
- }
- hnat_cache_ebl(1);
-	/* make sure all data is written to the DRAM PPE table */
- wmb();
-}
-
-void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev)
-{
- struct tops_tnl_info *tnl_info;
- unsigned long flag;
- u32 bkt;
-
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
- spin_lock(&tnl_info->lock);
-
- if (tnl_info->dev == ndev) {
- mtk_tops_tnl_info_flush_ppe(tnl_info);
-
- __mtk_tops_tnl_offload_disable(tnl_info);
-
- spin_unlock(&tnl_info->lock);
-
- break;
- }
-
- spin_unlock(&tnl_info->lock);
- }
-
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-}
-
-void mtk_tops_tnl_offload_flush(void)
-{
- struct tops_tnl_info *tnl_info;
- struct foe_entry *entry;
- unsigned long flag;
- u32 max_entry;
- u32 ppe_id;
- u32 eidx;
- u32 bkt;
-
- /* clear all TOPS related PPE entries */
- for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
- max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
- for (eidx = 0; eidx < max_entry; eidx++) {
- entry = hnat_get_foe_entry(ppe_id, eidx);
- if (IS_ERR(entry))
- break;
-
- if (!entry_hnat_is_bound(entry))
- continue;
-
- tnl_flush_ppe_entry(entry, __TOPS_TUNNEL_TYPE_MAX);
- }
- }
- hnat_cache_ebl(1);
-	/* make sure all data is written to the DRAM PPE table */
- wmb();
-
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
-		/* clear every tunnel's synced parameters, but preserve the cache */
-		memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
-		/*
-		 * reset tnl_info status to the TNL_STA_INIT state
-		 * so that it can be added to TOPS again
-		 */
- spin_lock(&tnl_info->lock);
-
- tnl_info_sta_init_no_tnl_lock(tnl_info);
- list_del_init(&tnl_info->sync_node);
-
- spin_unlock(&tnl_info->lock);
- }
-
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-}
-
-void mtk_tops_tnl_offload_recover(void)
-{
- struct tops_tnl_info *tnl_info;
- unsigned long flag;
- u32 bkt;
-
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist)
- mtk_tops_tnl_info_submit(tnl_info);
-
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-}
-
-int mtk_tops_tnl_offload_init(struct platform_device *pdev)
-{
- struct tops_tnl_info *tnl_info;
- int ret = 0;
- int i = 0;
-
- hash_init(tops_tnl.ht);
-
- tops_tnl.tnl_infos = devm_kzalloc(&pdev->dev,
- sizeof(struct tops_tnl_info) * CONFIG_TOPS_TNL_NUM,
- GFP_KERNEL);
- if (!tops_tnl.tnl_infos)
- return -ENOMEM;
-
- for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
- tnl_info = &tops_tnl.tnl_infos[i];
- tnl_info->tnl_idx = i;
- tnl_info->status = TNL_STA_UNINIT;
- INIT_HLIST_NODE(&tnl_info->hlist);
- INIT_LIST_HEAD(&tnl_info->sync_node);
- spin_lock_init(&tnl_info->lock);
- }
-
- ret = register_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
- if (ret) {
- TOPS_ERR("tnl offload recv dev register failed: %d\n",
- ret);
- return ret;
- }
-
- init_completion(&tops_tnl.dma_done);
- init_waitqueue_head(&tops_tnl.tnl_sync_wait);
-
- tops_tnl.tnl_sync_thread = kthread_run(tnl_sync_task, NULL,
- "tnl sync param task");
- if (IS_ERR(tops_tnl.tnl_sync_thread)) {
- TOPS_ERR("tnl sync thread create failed\n");
- ret = -ENOMEM;
- goto unregister_mbox;
- }
-
- mtk_tnl_encap_offload = mtk_tops_tnl_encap_offload;
- mtk_tnl_decap_offload = mtk_tops_tnl_decap_offload;
- mtk_tnl_decap_offloadable = mtk_tops_tnl_decap_offloadable;
- mtk_get_tnl_dev = mtk_tops_get_tnl_dev;
-
- tops_tnl.tnl_sync_submit = &tnl_sync_q1;
- tops_tnl.tnl_sync_pending = &tnl_sync_q2;
- spin_lock_init(&tops_tnl.tnl_sync_lock);
- spin_lock_init(&tops_tnl.tbl_lock);
-
- return 0;
-
-unregister_mbox:
- unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
-
- return ret;
-}
-
-void mtk_tops_tnl_offload_pce_clean_up(void)
-{
- struct tops_tnl_info *tnl_info;
- unsigned long flag;
- u32 bkt;
-
- spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
-
- hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
- mtk_tops_tnl_info_flush_ppe(tnl_info);
-
- mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
-
- mtk_tops_tnl_info_cls_tear_down(tnl_info, &tnl_info->tnl_params);
- }
-
- spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
-}
-
-void mtk_tops_tnl_offload_deinit(struct platform_device *pdev)
-{
- mtk_tnl_encap_offload = NULL;
- mtk_tnl_decap_offload = NULL;
- mtk_tnl_decap_offloadable = NULL;
- mtk_get_tnl_dev = NULL;
-
- kthread_stop(tops_tnl.tnl_sync_thread);
-
- mtk_tops_tnl_offload_pce_clean_up();
-
- unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
-}
-
-int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev)
-{
- mtk_tops_gretap_init();
-
- mtk_tops_l2tpv2_init();
-
- mtk_tops_pptp_init();
-
- return 0;
-}
-
-void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev)
-{
- mtk_tops_pptp_deinit();
-
- mtk_tops_l2tpv2_deinit();
-
- mtk_tops_gretap_deinit();
-}
-
-struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name)
-{
-	enum tops_tunnel_type tnl_proto_type = TOPS_TUNNEL_NONE + 1;
-	struct tops_tnl_type *tnl_type;
-
-	if (unlikely(!name))
-		return ERR_PTR(-EPERM);
-
-	for (; tnl_proto_type < __TOPS_TUNNEL_TYPE_MAX; tnl_proto_type++) {
-		tnl_type = tops_tnl.offload_tnl_types[tnl_proto_type];
-		if (tnl_type && !strcmp(name, tnl_type->type_name))
-			return tnl_type;
-	}
-
-	return ERR_PTR(-ENODEV);
-}
-
-int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type)
-{
-	enum tops_tunnel_type tnl_proto_type;
-
-	if (unlikely(!tnl_type))
-		return -EINVAL;
-
-	tnl_proto_type = tnl_type->tnl_proto_type;
-	if (unlikely(tnl_proto_type == TOPS_TUNNEL_NONE
-		     || tnl_proto_type >= __TOPS_TUNNEL_TYPE_MAX)) {
-		TOPS_ERR("invalid tnl_proto_type: %u\n", tnl_proto_type);
-		return -EINVAL;
-	}
-
- if (tops_tnl.offload_tnl_types[tnl_proto_type]) {
- TOPS_ERR("offload tnl type is already registered: %u\n",
- tnl_proto_type);
- return -EBUSY;
- }
-
- INIT_LIST_HEAD(&tnl_type->tcls_head);
- tops_tnl.offload_tnl_types[tnl_proto_type] = tnl_type;
- tops_tnl.offload_tnl_type_num++;
-
- return 0;
-}
-
-void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type)
-{
-	enum tops_tunnel_type tnl_proto_type;
-
-	if (unlikely(!tnl_type))
-		return;
-
-	tnl_proto_type = tnl_type->tnl_proto_type;
-	if (unlikely(tnl_proto_type == TOPS_TUNNEL_NONE
-		     || tnl_proto_type >= __TOPS_TUNNEL_TYPE_MAX)) {
-		TOPS_ERR("invalid tnl_proto_type: %u\n", tnl_proto_type);
-		return;
-	}
-
- if (tops_tnl.offload_tnl_types[tnl_proto_type] != tnl_type) {
- TOPS_ERR("offload tnl type is registered by others\n");
- return;
- }
-
- tops_tnl.offload_tnl_types[tnl_proto_type] = NULL;
- tops_tnl.offload_tnl_type_num--;
-}
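The tunnel sync path removed above double-buffers work between tnl_sync_q1 and tnl_sync_q2: callers append to the submit list under tnl_sync_lock, and mtk_tops_tnl_sync_get_pending_queue() swaps the submit and pending pointers before the kthread drains the pending side. The following is a minimal, self-contained userspace sketch of that swap pattern only; every name is hypothetical and a pthread mutex stands in for the driver's spinlock.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal singly linked queues standing in for the driver's list_heads. */
struct sync_node {
	int id;
	struct sync_node *next;
};

static struct sync_node *queue_a;
static struct sync_node *queue_b;
static struct sync_node **submit = &queue_a;  /* producers push here */
static struct sync_node **pending = &queue_b; /* the sync thread drains here */
static pthread_mutex_t sync_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side: hand one entry to the sync thread (order is irrelevant here). */
static void queue_for_sync(struct sync_node *n)
{
	pthread_mutex_lock(&sync_lock);
	n->next = *submit;
	*submit = n;
	pthread_mutex_unlock(&sync_lock);
}

/* Consumer side: swap submit/pending so new submissions never block the drain. */
static void get_pending_queue(void)
{
	pthread_mutex_lock(&sync_lock);
	struct sync_node **tmp = submit;
	submit = pending;
	pending = tmp;
	pthread_mutex_unlock(&sync_lock);
}

int main(void)
{
	struct sync_node n1 = { .id = 1 }, n2 = { .id = 2 };

	queue_for_sync(&n1);
	queue_for_sync(&n2);
	get_pending_queue();

	for (struct sync_node *n = *pending; n; n = n->next)
		printf("syncing node %d\n", n->id);
	*pending = NULL; /* drained */

	return 0;
}

The swap keeps the producers' lock-hold time constant regardless of how many entries the sync thread is about to process, which is why the driver could submit from atomic context while the drain itself may sleep.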
diff --git a/feed/kernel/tops/src/tops_params.c b/feed/kernel/tops/src/tops_params.c
deleted file mode 100644
index c23a44a..0000000
--- a/feed/kernel/tops/src/tops_params.c
+++ /dev/null
@@ -1,131 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include "tops/tops_params.h"
-
-#include "tops/protocol/mac/eth.h"
-#include "tops/protocol/network/ip.h"
-#include "tops/protocol/transport/udp.h"
-
-int
-mtk_tops_encap_param_setup(struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_encap_param_setup)(struct sk_buff *skb,
- struct tops_params *params))
-{
- return mtk_tops_eth_encap_param_setup(skb, params, tnl_encap_param_setup);
-}
-
-int
-mtk_tops_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params,
- int (*tnl_decap_param_setup)(struct sk_buff *skb,
- struct tops_params *params))
-{
- return tnl_decap_param_setup(skb, params);
-}
-
-int mtk_tops_transport_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- return mtk_tops_udp_decap_param_setup(skb, params);
-}
-
-int mtk_tops_network_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- /* TODO: IPv6 */
- return mtk_tops_ip_decap_param_setup(skb, params);
-}
-
-int mtk_tops_mac_decap_param_setup(struct sk_buff *skb,
- struct tops_params *params)
-{
- return mtk_tops_eth_decap_param_setup(skb, params);
-}
-
-int mtk_tops_debug_param_proto_peek(const char *buf, int ofs, char *proto)
-{
- int nchar = 0;
- int ret;
-
- if (!proto)
- return -EINVAL;
-
- ret = sscanf(buf + ofs, "%20s %n", proto, &nchar);
- if (ret != 1)
- return -EPERM;
-
- return nchar;
-}
-
-int mtk_tops_debug_param_setup(const char *buf, int *ofs,
- struct tops_params *params)
-{
- char proto[DEBUG_PROTO_LEN];
- int ret;
-
- memset(proto, 0, sizeof(proto));
-
- ret = mtk_tops_debug_param_proto_peek(buf, *ofs, proto);
- if (ret < 0)
- return ret;
-
- *ofs += ret;
-
- if (!strcmp(proto, DEBUG_PROTO_ETH))
- return mtk_tops_eth_debug_param_setup(buf, ofs, params);
-
-	/* MAC protocols other than Ethernet are not supported */
- return -EINVAL;
-}
-
-void mtk_tops_mac_param_dump(struct seq_file *s, struct tops_params *params)
-{
- if (params->mac.type == TOPS_MAC_ETH)
- mtk_tops_eth_param_dump(s, params);
-}
-
-void mtk_tops_network_param_dump(struct seq_file *s, struct tops_params *params)
-{
- if (params->network.type == TOPS_NETWORK_IPV4)
- mtk_tops_ip_param_dump(s, params);
-}
-
-void mtk_tops_transport_param_dump(struct seq_file *s, struct tops_params *params)
-{
- if (params->transport.type == TOPS_TRANSPORT_UDP)
- mtk_tops_udp_param_dump(s, params);
-}
-
-static bool tops_transport_params_match(struct tops_transport_params *t1,
- struct tops_transport_params *t2)
-{
- return !memcmp(t1, t2, sizeof(*t1));
-}
-
-static bool tops_network_params_match(struct tops_network_params *n1,
- struct tops_network_params *n2)
-{
- if (n1->type != n2->type)
- return false;
-
- if (n1->type == TOPS_NETWORK_IPV4)
- return (n1->ip.sip == n2->ip.sip
- && n1->ip.dip == n2->ip.dip
- && n1->ip.proto == n2->ip.proto
- && n1->ip.tos == n2->ip.tos);
-
- /* TODO: support IPv6 */
- return false;
-}
-
-bool mtk_tops_params_match(struct tops_params *p1, struct tops_params *p2)
-{
- return (tops_network_params_match(&p1->network, &p2->network)
- && tops_transport_params_match(&p1->transport, &p2->transport));
-}
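The debug parsing helpers removed above walk a command string token by token: mtk_tops_debug_param_proto_peek() uses sscanf("%20s %n") to read one protocol name and report how many characters the caller should skip. Below is a small standalone sketch of that peek-and-advance loop, with hypothetical names and the same 20-character token limit.

#include <stdio.h>

#define PROTO_LEN 21 /* room for a 20-char token plus NUL */

/*
 * Peek the next whitespace-delimited token starting at buf + ofs and
 * report how many characters were consumed (token plus trailing blanks),
 * mirroring the %n-based parsing used by the removed tops_params.c.
 */
static int proto_peek(const char *buf, int ofs, char *proto)
{
	int nchar = 0;

	if (sscanf(buf + ofs, "%20s %n", proto, &nchar) != 1)
		return -1;

	return nchar;
}

int main(void)
{
	const char *cmd = "eth ipv4 udp";
	char proto[PROTO_LEN] = { 0 };
	int ofs = 0, adv;

	while ((adv = proto_peek(cmd, ofs, proto)) > 0) {
		printf("token: %s\n", proto);
		ofs += adv;
	}

	return 0;
}

A negative return signals that no further tokens remain, so the caller's offset only advances on success.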
diff --git a/feed/kernel/tops/src/trm-debugfs.c b/feed/kernel/tops/src/trm-debugfs.c
deleted file mode 100644
index 2566b6e..0000000
--- a/feed/kernel/tops/src/trm-debugfs.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- */
-
-#include <linux/debugfs.h>
-#include <linux/uaccess.h>
-
-#include "tops/debugfs.h"
-#include "tops/internal.h"
-#include "tops/tops.h"
-#include "tops/trm-debugfs.h"
-#include "tops/trm.h"
-
-struct dentry *trm_debugfs_root;
-
-static int cpu_utilization_debug_read(struct seq_file *s, void *private)
-{
- u32 cpu_utilization;
- enum core_id core;
- int ret;
-
- seq_puts(s, "CPU Utilization:\n");
- for (core = CORE_OFFLOAD_0; core <= CORE_MGMT; core++) {
- ret = mtk_trm_cpu_utilization(core, &cpu_utilization);
- if (ret) {
- if (core <= CORE_OFFLOAD_3)
- TOPS_ERR("fetch Core%d cpu utilization failed(%d)\n", core, ret);
- else
- TOPS_ERR("fetch CoreM cpu utilization failed(%d)\n", ret);
-
- return ret;
- }
-
- if (core <= CORE_OFFLOAD_3)
- seq_printf(s, "Core%d\t\t%u%%\n", core, cpu_utilization);
- else
- seq_printf(s, "CoreM\t\t%u%%\n", cpu_utilization);
- }
-
- return 0;
-}
-
-static int cpu_utilization_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, cpu_utilization_debug_read, file->private_data);
-}
-
-static ssize_t cpu_utilization_debug_write(struct file *file,
- const char __user *buffer,
- size_t count, loff_t *data)
-{
- return count;
-}
-
-static const struct file_operations cpu_utilization_debug_ops = {
- .open = cpu_utilization_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .write = cpu_utilization_debug_write,
- .release = single_release,
-};
-
-int mtk_trm_debugfs_init(void)
-{
- if (!tops_debugfs_root)
- return -ENOENT;
-
- if (!trm_debugfs_root) {
- trm_debugfs_root = debugfs_create_dir("trm", tops_debugfs_root);
- if (IS_ERR(trm_debugfs_root)) {
- TOPS_ERR("create trm debugfs root directory failed\n");
- return PTR_ERR(trm_debugfs_root);
- }
- }
-
- debugfs_create_file("cpu-utilization", 0644, trm_debugfs_root, NULL,
- &cpu_utilization_debug_ops);
-
- return 0;
-}
-
-void mtk_trm_debugfs_deinit(void)
-{
- debugfs_remove_recursive(trm_debugfs_root);
-}
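The debugfs code removed above exposes a read-only, seq_file-backed cpu-utilization file. The sketch below is a hypothetical stand-alone module showing the same pattern with DEFINE_SHOW_ATTRIBUTE(); the directory name, file name, and hard-coded sample value are placeholders, and the original's no-op write handler is dropped.

// SPDX-License-Identifier: GPL-2.0-or-later
/* Hypothetical module illustrating the seq_file/debugfs pattern used by
 * the removed trm-debugfs.c; nothing here comes from the original driver. */
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *demo_root;

static int demo_stats_show(struct seq_file *s, void *private)
{
	/* the real driver queried each core via mtk_trm_cpu_utilization() */
	seq_puts(s, "CPU Utilization:\n");
	seq_printf(s, "Core0\t\t%u%%\n", 42U);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_stats);

static int __init demo_init(void)
{
	demo_root = debugfs_create_dir("trm-demo", NULL);
	debugfs_create_file("cpu-utilization", 0444, demo_root, NULL,
			    &demo_stats_fops);

	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_root);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");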
diff --git a/feed/kernel/tops/src/trm-fs.c b/feed/kernel/tops/src/trm-fs.c
deleted file mode 100644
index 130a569..0000000
--- a/feed/kernel/tops/src/trm-fs.c
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/relay.h>
-
-#include "tops/trm-debugfs.h"
-#include "tops/trm-fs.h"
-#include "tops/trm-mcu.h"
-#include "tops/trm.h"
-
-#define RLY_RETRY_NUM 3
-
-static struct rchan *relay;
-static bool trm_fs_is_init;
-
-bool mtk_trm_fs_is_init(void)
-{
- return trm_fs_is_init;
-}
-
-void *mtk_trm_fs_relay_reserve(u32 size)
-{
- u32 rty = 0;
- void *dst;
-
- while (rty < RLY_RETRY_NUM) {
- dst = relay_reserve(relay, size);
- if (likely(dst))
- return dst;
-
- rty++;
- msleep(100);
- }
-
- return ERR_PTR(-ENOMEM);
-}
-
-void mtk_trm_fs_relay_flush(void)
-{
- relay_flush(relay);
-}
-
-static struct dentry *trm_fs_create_buf_file_cb(const char *filename,
- struct dentry *parent,
- umode_t mode,
- struct rchan_buf *buf,
- int *is_global)
-{
- struct dentry *debugfs_file;
-
- debugfs_file = debugfs_create_file("dump-data", mode,
- parent, buf,
- &relay_file_operations);
-
- *is_global = 1;
-
- return debugfs_file;
-}
-
-static int trm_fs_remove_buf_file_cb(struct dentry *debugfs_file)
-{
- debugfs_remove(debugfs_file);
-
- return 0;
-}
-
-int mtk_trm_fs_init(void)
-{
- static struct rchan_callbacks relay_cb = {
- .create_buf_file = trm_fs_create_buf_file_cb,
- .remove_buf_file = trm_fs_remove_buf_file_cb,
- };
- int ret = 0;
-
- if (!trm_debugfs_root)
- return -ENOENT;
-
- if (!relay) {
- relay = relay_open("dump-data", trm_debugfs_root,
- RLY_DUMP_SUBBUF_SZ,
- RLY_DUMP_SUBBUF_NUM,
- &relay_cb, NULL);
- if (!relay)
- return -EINVAL;
- }
-
- relay_reset(relay);
-
- trm_fs_is_init = true;
-
- return ret;
-}
-
-void mtk_trm_fs_deinit(void)
-{
- trm_fs_is_init = false;
-
- relay_close(relay);
-}
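mtk_trm_fs_relay_reserve() above retries relay_reserve() up to RLY_RETRY_NUM times with a 100 ms sleep between attempts, then reports exhaustion as ERR_PTR(-ENOMEM). Below is a self-contained userspace sketch of that bounded-retry-then-encoded-error shape; the fake allocator and the ERR_PTR/IS_ERR macros are local stand-ins for the kernel helpers, not kernel APIs.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define RETRY_NUM 3

/* Userspace stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers. */
#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)

/* Fake allocator standing in for relay_reserve(); fails a couple of times first. */
static void *fake_reserve(size_t size)
{
	static int calls;

	return (++calls < 3) ? NULL : malloc(size);
}

/*
 * Bounded-retry reserve: try a few times with a delay in between,
 * then give up with an encoded error, as the removed trm-fs.c did.
 */
static void *reserve_with_retry(size_t size)
{
	unsigned int rty;
	void *dst;

	for (rty = 0; rty < RETRY_NUM; rty++) {
		dst = fake_reserve(size);
		if (dst)
			return dst;

		usleep(100 * 1000); /* the driver used msleep(100) here */
	}

	return ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *buf = reserve_with_retry(64);

	if (IS_ERR(buf)) {
		fprintf(stderr, "reserve failed\n");
		return 1;
	}

	printf("reserved at %p\n", buf);
	free(buf);
	return 0;
}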
diff --git a/feed/kernel/tops/src/trm-mcu.c b/feed/kernel/tops/src/trm-mcu.c
deleted file mode 100644
index e7a8268..0000000
--- a/feed/kernel/tops/src/trm-mcu.c
+++ /dev/null
@@ -1,391 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/io.h>
-#include <linux/of.h>
-
-#include "tops/internal.h"
-#include "tops/mcu.h"
-#include "tops/trm-debugfs.h"
-#include "tops/trm-fs.h"
-#include "tops/trm-mcu.h"
-#include "tops/trm.h"
-
-#define TOPS_OCD_RETRY_TIMES (3)
-
-#define TOPS_OCD_DCRSET (0x200C)
-#define ENABLE_OCD (1 << 0)
-#define DEBUG_INT (1 << 1)
-
-#define TOPS_OCD_DSR (0x2010)
-#define EXEC_DONE (1 << 0)
-#define EXEC_EXCE (1 << 1)
-#define EXEC_BUSY (1 << 2)
-#define STOPPED (1 << 4)
-#define DEBUG_PEND_HOST (1 << 17)
-
-#define TOPS_OCD_DDR (0x2014)
-
-#define TOPS_OCD_DIR0EXEC (0x201C)
-
-struct tops_ocd_dev {
- void __iomem *base;
- u32 base_offset;
- struct clk *debugsys_clk;
-};
-
-static struct tops_ocd_dev ocd;
-
-struct core_dump_fram cd_frams[CORE_TOPS_NUM];
-
-static inline void ocd_write(struct tops_ocd_dev *ocd, u32 reg, u32 val)
-{
- writel(val, ocd->base + ocd->base_offset + reg);
-}
-
-static inline u32 ocd_read(struct tops_ocd_dev *ocd, u32 reg)
-{
- return readl(ocd->base + ocd->base_offset + reg);
-}
-
-static inline void ocd_set(struct tops_ocd_dev *ocd, u32 reg, u32 mask)
-{
- setbits(ocd->base + ocd->base_offset + reg, mask);
-}
-
-static inline void ocd_clr(struct tops_ocd_dev *ocd, u32 reg, u32 mask)
-{
- clrbits(ocd->base + ocd->base_offset + reg, mask);
-}
-
-static int core_exec_instr(u32 instr)
-{
- u32 rty = 0;
- int ret;
-
- ocd_set(&ocd, TOPS_OCD_DSR, EXEC_DONE);
- ocd_set(&ocd, TOPS_OCD_DSR, EXEC_EXCE);
-
- ocd_write(&ocd, TOPS_OCD_DIR0EXEC, instr);
-
- while ((ocd_read(&ocd, TOPS_OCD_DSR) & EXEC_BUSY)) {
- if (rty++ < TOPS_OCD_RETRY_TIMES) {
- usleep_range(1000, 1500);
- } else {
- TRM_ERR("run instruction(0x%x) timeout\n", instr);
- ret = -1;
- goto out;
- }
- }
-
- ret = ocd_read(&ocd, TOPS_OCD_DSR) & EXEC_EXCE ? -1 : 0;
- if (ret)
- TRM_ERR("run instruction(0x%x) fail\n", instr);
-
-out:
- return ret;
-}
-
-static int core_dump(struct core_dump_fram *cd_fram)
-{
- cd_fram->magic = CORE_DUMP_FRAM_MAGIC;
- cd_fram->num_areg = XCHAL_NUM_AREG;
-
- /*
- * save
- * PC, PS, WINDOWSTART, WINDOWBASE,
- * EPC1, EXCCAUSE, EXCVADDR, EXCSAVE1
- */
- core_exec_instr(0x13f500);
-
- core_exec_instr(0x03b500);
- core_exec_instr(0x136800);
- cd_fram->pc = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x03e600);
- core_exec_instr(0x136800);
- cd_fram->ps = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x034900);
- core_exec_instr(0x136800);
- cd_fram->windowstart = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x034800);
- core_exec_instr(0x136800);
- cd_fram->windowbase = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x03b100);
- core_exec_instr(0x136800);
- cd_fram->epc1 = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x03e800);
- core_exec_instr(0x136800);
- cd_fram->exccause = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x03ee00);
- core_exec_instr(0x136800);
- cd_fram->excvaddr = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x03d100);
- core_exec_instr(0x136800);
- cd_fram->excsave1 = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x03f500);
-
- /*
- * save
- * a0, a1, a2, a3, a4, a5, a6, a7
- */
- core_exec_instr(0x136800);
- cd_fram->areg[0] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136810);
- cd_fram->areg[1] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136820);
- cd_fram->areg[2] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136830);
- cd_fram->areg[3] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136840);
- cd_fram->areg[4] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136850);
- cd_fram->areg[5] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136860);
- cd_fram->areg[6] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136870);
- cd_fram->areg[7] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- /*
- * save
- * a8, a9, a10, a11, a12, a13, a14, a15
- */
- core_exec_instr(0x136880);
- cd_fram->areg[8] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136890);
- cd_fram->areg[9] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368a0);
- cd_fram->areg[10] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368b0);
- cd_fram->areg[11] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368c0);
- cd_fram->areg[12] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368d0);
- cd_fram->areg[13] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368e0);
- cd_fram->areg[14] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368f0);
- cd_fram->areg[15] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x408020);
-
- /*
- * save
- * a16, a17, a18, a19, a20, a21, a22, a23
- */
- core_exec_instr(0x136880);
- cd_fram->areg[16] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136890);
- cd_fram->areg[17] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368a0);
- cd_fram->areg[18] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368b0);
- cd_fram->areg[19] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368c0);
- cd_fram->areg[20] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368d0);
- cd_fram->areg[21] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368e0);
- cd_fram->areg[22] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368f0);
- cd_fram->areg[23] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x408020);
-
- /*
- * save
- * a24, a25, a26, a27, a28, a29, a30, a31
- */
- core_exec_instr(0x136880);
- cd_fram->areg[24] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x136890);
- cd_fram->areg[25] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368a0);
- cd_fram->areg[26] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368b0);
- cd_fram->areg[27] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368c0);
- cd_fram->areg[28] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368d0);
- cd_fram->areg[29] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368e0);
- cd_fram->areg[30] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x1368f0);
- cd_fram->areg[31] = ocd_read(&ocd, TOPS_OCD_DDR);
-
- core_exec_instr(0x408040);
-
- core_exec_instr(0xf1e000);
-
- return 0;
-}
-
-static int __mtk_trm_mcu_core_dump(enum core_id core)
-{
- u32 rty = 0;
- int ret;
-
- ocd.base_offset = (core == CORE_MGMT) ? (0x0) : (0x5000 + (core * 0x4000));
-
- /* enable OCD */
- ocd_set(&ocd, TOPS_OCD_DCRSET, ENABLE_OCD);
-
- /* assert debug interrupt to core */
- ocd_set(&ocd, TOPS_OCD_DCRSET, DEBUG_INT);
-
-	/* wait for the core to enter the stopped state */
- while (!(ocd_read(&ocd, TOPS_OCD_DSR) & STOPPED)) {
- if (rty++ < TOPS_OCD_RETRY_TIMES) {
- usleep_range(10000, 15000);
- } else {
- TRM_ERR("wait core(%d) into stopped state timeout\n", core);
- ret = -1;
- goto out;
- }
- }
-
- /* deassert debug interrupt to core */
- ocd_set(&ocd, TOPS_OCD_DSR, DEBUG_PEND_HOST);
-
-	/* dump the core's registers and put the core back into the running state */
- ret = core_dump(&cd_frams[core]);
-
-out:
- return ret;
-}
-
-int mtk_trm_mcu_core_dump(void)
-{
- enum core_id core;
- int ret;
-
- ret = clk_prepare_enable(ocd.debugsys_clk);
- if (ret) {
- TRM_ERR("debugsys clk enable failed: %d\n", ret);
- goto out;
- }
-
- memset(cd_frams, 0, sizeof(cd_frams));
-
- for (core = CORE_OFFLOAD_0; core <= CORE_MGMT; core++) {
- ret = __mtk_trm_mcu_core_dump(core);
- if (ret)
- break;
- }
-
- clk_disable_unprepare(ocd.debugsys_clk);
-
-out:
- return ret;
-}
-
-static int mtk_tops_ocd_probe(struct platform_device *pdev)
-{
- struct resource *res = NULL;
- int ret;
-
- trm_dev = &pdev->dev;
-
- ocd.debugsys_clk = devm_clk_get(trm_dev, "debugsys");
- if (IS_ERR(ocd.debugsys_clk)) {
- TRM_ERR("get debugsys clk failed: %ld\n", PTR_ERR(ocd.debugsys_clk));
- return PTR_ERR(ocd.debugsys_clk);
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-ocd-base");
- if (!res)
- return -ENXIO;
-
- ocd.base = devm_ioremap(trm_dev, res->start, resource_size(res));
- if (!ocd.base)
- return -ENOMEM;
-
- ret = mtk_trm_debugfs_init();
- if (ret)
- return ret;
-
- ret = mtk_trm_fs_init();
- if (ret)
- return ret;
-
- TRM_INFO("tops-ocd init done\n");
-
- return 0;
-}
-
-static int mtk_tops_ocd_remove(struct platform_device *pdev)
-{
- mtk_trm_fs_deinit();
-
- mtk_trm_debugfs_deinit();
-
- return 0;
-}
-
-static struct of_device_id mtk_tops_ocd_match[] = {
- { .compatible = "mediatek,tops-ocd", },
- { },
-};
-
-static struct platform_driver mtk_tops_ocd_driver = {
- .probe = mtk_tops_ocd_probe,
- .remove = mtk_tops_ocd_remove,
- .driver = {
- .name = "mediatek,tops-ocd",
- .owner = THIS_MODULE,
- .of_match_table = mtk_tops_ocd_match,
- },
-};
-
-int __init mtk_tops_trm_mcu_init(void)
-{
- return platform_driver_register(&mtk_tops_ocd_driver);
-}
-
-void __exit mtk_tops_trm_mcu_exit(void)
-{
- platform_driver_unregister(&mtk_tops_ocd_driver);
-}
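core_exec_instr() above drives the Xtensa OCD block: it writes one debug instruction to DIR0EXEC, polls the DSR busy bit a bounded number of times, and then checks the exception bit. Below is a minimal userspace sketch of that write-then-poll structure against a fake register block; the register layout, bit values, and names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define DSR_EXEC_DONE (1u << 0)
#define DSR_EXEC_EXCE (1u << 1)
#define DSR_EXEC_BUSY (1u << 2)
#define POLL_RETRY    3

/* Fake register file standing in for the memory-mapped OCD window. */
static uint32_t fake_dsr;
static uint32_t fake_dir0exec;

static void reg_write(uint32_t *reg, uint32_t val) { *reg = val; }
static uint32_t reg_read(const uint32_t *reg) { return *reg; }

/*
 * Issue one debug instruction and wait for it to finish, mirroring the
 * write-then-bounded-poll structure of the removed core_exec_instr().
 */
static int exec_instr(uint32_t instr)
{
	unsigned int rty;

	reg_write(&fake_dir0exec, instr);

	for (rty = 0; reg_read(&fake_dsr) & DSR_EXEC_BUSY; rty++) {
		if (rty >= POLL_RETRY) {
			fprintf(stderr, "instruction 0x%x timed out\n", instr);
			return -1;
		}
		/* the driver sleeps 1-1.5 ms between retries here */
	}

	return (reg_read(&fake_dsr) & DSR_EXEC_EXCE) ? -1 : 0;
}

int main(void)
{
	fake_dsr = DSR_EXEC_DONE; /* idle: not busy, no exception */

	return exec_instr(0x136800) ? 1 : 0;
}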
diff --git a/feed/kernel/tops/src/trm.c b/feed/kernel/tops/src/trm.c
deleted file mode 100644
index 0b61f87..0000000
--- a/feed/kernel/tops/src/trm.c
+++ /dev/null
@@ -1,409 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>
- * Ren-Ting Wang <ren-ting.wang@mediatek.com>
- */
-
-#include <linux/debugfs.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/printk.h>
-#include <linux/relay.h>
-#include <linux/types.h>
-
-#include "tops/internal.h"
-#include "tops/mbox.h"
-#include "tops/mcu.h"
-#include "tops/netsys.h"
-#include "tops/trm-fs.h"
-#include "tops/trm-mcu.h"
-#include "tops/trm.h"
-
-#define TRM_HDR_LEN (sizeof(struct trm_header))
-
-#define RLY_DUMP_SUBBUF_DATA_MAX (RLY_DUMP_SUBBUF_SZ - TRM_HDR_LEN)
-
-struct tops_runtime_monitor {
- struct mailbox_dev mgmt_send_mdev;
- struct mailbox_dev offload_send_mdev[CORE_OFFLOAD_NUM];
-};
-
-struct trm_info {
- char name[TRM_CONFIG_NAME_MAX_LEN];
- u64 dump_time;
- u32 start_addr;
- u32 size;
- u32 rsn; /* TRM_RSN_* */
-};
-
-struct trm_header {
- struct trm_info info;
- u32 data_offset;
- u32 data_len;
- u8 last_frag;
-};
-
-struct device *trm_dev;
-
-static struct tops_runtime_monitor trm = {
- .mgmt_send_mdev = MBOX_SEND_MGMT_DEV(TRM),
- .offload_send_mdev = {
- [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, TRM),
- [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, TRM),
- [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, TRM),
- [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, TRM),
- },
-};
-static struct trm_hw_config *trm_hw_configs[__TRM_HARDWARE_MAX];
-struct mutex trm_lock;
-
-static inline void trm_hdr_init(struct trm_header *trm_hdr,
- struct trm_config *trm_cfg,
- u32 size,
- u64 dump_time,
- u32 dump_rsn)
-{
- if (unlikely(!trm_hdr || !trm_cfg))
- return;
-
- memset(trm_hdr, 0, TRM_HDR_LEN);
-
- strncpy(trm_hdr->info.name, trm_cfg->name, TRM_CONFIG_NAME_MAX_LEN);
- trm_hdr->info.name[TRM_CONFIG_NAME_MAX_LEN - 1] = '\0';
- trm_hdr->info.start_addr = trm_cfg->addr + trm_cfg->offset;
- trm_hdr->info.size = size;
- trm_hdr->info.dump_time = dump_time;
- trm_hdr->info.rsn = dump_rsn;
-}
-
-static inline int trm_cfg_sanity_check(struct trm_config *trm_cfg)
-{
- u32 start = trm_cfg->addr + trm_cfg->offset;
- u32 end = start + trm_cfg->size;
-
- if (start < trm_cfg->addr || end > trm_cfg->addr + trm_cfg->len)
- return -1;
-
- return 0;
-}
-
-static inline bool trm_cfg_is_core_dump_en(struct trm_config *trm_cfg)
-{
- return trm_cfg->flag & TRM_CONFIG_F_CORE_DUMP;
-}
-
-static inline bool trm_cfg_is_en(struct trm_config *trm_cfg)
-{
- return trm_cfg->flag & TRM_CONFIG_F_ENABLE;
-}
-
-static inline int __mtk_trm_cfg_setup(struct trm_config *trm_cfg,
- u32 offset, u32 size, u8 enable)
-{
- struct trm_config tmp = { 0 };
-
- if (!enable) {
- trm_cfg->flag &= ~TRM_CONFIG_F_ENABLE;
- } else {
- tmp.addr = trm_cfg->addr;
- tmp.len = trm_cfg->len;
- tmp.offset = offset;
- tmp.size = size;
-
- if (trm_cfg_sanity_check(&tmp))
- return -EINVAL;
-
- trm_cfg->offset = offset;
- trm_cfg->size = size;
- trm_cfg->flag |= TRM_CONFIG_F_ENABLE;
- }
-
- return 0;
-}
-
-int mtk_trm_cfg_setup(char *name, u32 offset, u32 size, u8 enable)
-{
- struct trm_hw_config *trm_hw_cfg;
- struct trm_config *trm_cfg;
- int ret = 0;
- u32 i, j;
-
- for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
- trm_hw_cfg = trm_hw_configs[i];
- if (unlikely(!trm_hw_cfg || !trm_hw_cfg->trm_cfgs))
- continue;
-
- for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
- trm_cfg = &trm_hw_cfg->trm_cfgs[j];
-
- if (!strncmp(trm_cfg->name, name, strlen(name))) {
- mutex_lock(&trm_lock);
-
- ret = __mtk_trm_cfg_setup(trm_cfg,
- offset,
- size,
- enable);
-
- mutex_unlock(&trm_lock);
- }
- }
- }
-
- return ret;
-}
-
-/* append the core dump (collected via OCD) at the bottom of the core-x-dtcm file */
-static inline void __mtk_trm_save_core_dump(struct trm_config *trm_cfg,
- void *dst,
- u32 *frag_len)
-{
- *frag_len -= CORE_DUMP_FRAME_LEN;
- memcpy(dst + *frag_len, &cd_frams[trm_cfg->core], CORE_DUMP_FRAME_LEN);
-}
-
-static int __mtk_trm_dump(struct trm_hw_config *trm_hw_cfg,
- struct trm_config *trm_cfg,
- u64 dump_time,
- u32 dump_rsn)
-{
- struct trm_header trm_hdr;
- u32 total = trm_cfg->size;
- u32 i = 0;
- u32 frag_len;
- u32 ofs;
- void *dst;
-
- /* reserve core dump frame len if core dump enabled */
- if (trm_cfg_is_core_dump_en(trm_cfg))
- total += CORE_DUMP_FRAME_LEN;
-
-	/* fill in trm information */
- trm_hdr_init(&trm_hdr, trm_cfg, total, dump_time, dump_rsn);
-
- while (total > 0) {
- if (total >= RLY_DUMP_SUBBUF_DATA_MAX) {
- frag_len = RLY_DUMP_SUBBUF_DATA_MAX;
- total -= RLY_DUMP_SUBBUF_DATA_MAX;
- } else {
- frag_len = total;
- total = 0;
- trm_hdr.last_frag = true;
- }
-
- trm_hdr.data_offset = i++ * RLY_DUMP_SUBBUF_DATA_MAX;
- trm_hdr.data_len = frag_len;
-
- dst = mtk_trm_fs_relay_reserve(frag_len + TRM_HDR_LEN);
- if (IS_ERR(dst))
- return PTR_ERR(dst);
-
- memcpy(dst, &trm_hdr, TRM_HDR_LEN);
- dst += TRM_HDR_LEN;
-
-		/* TODO: what if the core dump is split across two fragments? */
- if (trm_hdr.last_frag && trm_cfg_is_core_dump_en(trm_cfg))
- __mtk_trm_save_core_dump(trm_cfg, dst, &frag_len);
-
- ofs = trm_hdr.info.start_addr + trm_hdr.data_offset;
-
- /* let TRM HW write memory to destination */
- trm_hw_cfg->trm_hw_dump(dst, ofs, frag_len);
-
- mtk_trm_fs_relay_flush();
- }
-
- return 0;
-}
-
-static void trm_cpu_utilization_ret_handler(void *priv,
- struct mailbox_msg *msg)
-{
- u32 *cpu_utilization = priv;
-
- /*
-	 * msg1: ticks spent in the idle task
-	 * msg2: total ticks in this statistics period
- */
- if (msg->msg2 != 0)
- *cpu_utilization = (msg->msg2 - msg->msg1) * 100U / msg->msg2;
-}
-
-int mtk_trm_cpu_utilization(enum core_id core, u32 *cpu_utilization)
-{
- struct mailbox_dev *send_mdev;
- struct mailbox_msg msg;
- int ret;
-
- if (core > CORE_MGMT || !cpu_utilization)
- return -EINVAL;
-
- if (!mtk_tops_mcu_alive()) {
- TRM_ERR("mcu not alive\n");
- return -EAGAIN;
- }
-
- memset(&msg, 0, sizeof(struct mailbox_msg));
- msg.msg1 = TRM_CMD_TYPE_CPU_UTILIZATION;
-
- *cpu_utilization = 0;
-
- if (core == CORE_MGMT)
- send_mdev = &trm.mgmt_send_mdev;
- else
- send_mdev = &trm.offload_send_mdev[core];
-
- ret = mbox_send_msg(send_mdev,
- &msg,
- cpu_utilization,
- trm_cpu_utilization_ret_handler);
- if (ret) {
- TRM_ERR("send CPU_UTILIZATION cmd failed(%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-int mtk_trm_dump(u32 rsn)
-{
- u64 time = ktime_to_ns(ktime_get_real()) / 1000000000;
- struct trm_hw_config *trm_hw_cfg;
- struct trm_config *trm_cfg;
- int ret = 0;
- u32 i, j;
-
- if (!mtk_trm_fs_is_init())
- return -EINVAL;
-
- mutex_lock(&trm_lock);
-
- mtk_trm_mcu_core_dump();
-
- for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
- trm_hw_cfg = trm_hw_configs[i];
- if (unlikely(!trm_hw_cfg || !trm_hw_cfg->trm_hw_dump))
- continue;
-
- for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
- trm_cfg = &trm_hw_cfg->trm_cfgs[j];
- if (unlikely(!trm_cfg || !trm_cfg_is_en(trm_cfg)))
- continue;
-
- if (unlikely(trm_cfg_sanity_check(trm_cfg))) {
- TRM_ERR("trm %s: sanity check fail\n", trm_cfg->name);
- ret = -EINVAL;
- goto out;
- }
-
- ret = __mtk_trm_dump(trm_hw_cfg, trm_cfg, time, rsn);
- if (ret) {
- TRM_ERR("trm %s: trm dump fail: %d\n",
- trm_cfg->name, ret);
- goto out;
- }
- }
- }
-
- TRM_NOTICE("TOPS runtime monitor dump\n");
-
-out:
- mutex_unlock(&trm_lock);
-
- return ret;
-}
-
-static int mtk_tops_trm_register_mbox(void)
-{
- int ret;
- int i;
-
- ret = register_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
- if (ret) {
- TRM_ERR("register trm mgmt mbox send failed: %d\n", ret);
- return ret;
- }
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- ret = register_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
- if (ret) {
- TRM_ERR("register trm offload %d mbox send failed: %d\n",
- i, ret);
- goto err_unregister_offload_mbox;
- }
- }
-
- return ret;
-
-err_unregister_offload_mbox:
- for (i -= 1; i >= 0; i--)
- unregister_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
-
- unregister_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
-
- return ret;
-}
-
-static void mtk_tops_trm_unregister_mbox(void)
-{
- int i;
-
- unregister_mbox_dev(MBOX_SEND, &trm.mgmt_send_mdev);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++)
- unregister_mbox_dev(MBOX_SEND, &trm.offload_send_mdev[i]);
-}
-
-int __init mtk_tops_trm_init(void)
-{
- int ret;
-
- mutex_init(&trm_lock);
-
- ret = mtk_tops_trm_register_mbox();
- if (ret)
- return ret;
-
- return mtk_tops_trm_mcu_init();
-}
-
-void __exit mtk_tops_trm_exit(void)
-{
- mtk_tops_trm_unregister_mbox();
-
- mtk_tops_trm_mcu_exit();
-}
-
-int mtk_trm_hw_config_register(enum trm_hardware trm_hw,
- struct trm_hw_config *trm_hw_cfg)
-{
- if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
- return -ENODEV;
-
- if (unlikely(!trm_hw_cfg->cfg_len || !trm_hw_cfg->trm_hw_dump))
- return -EINVAL;
-
- if (trm_hw_configs[trm_hw])
- return -EBUSY;
-
- trm_hw_configs[trm_hw] = trm_hw_cfg;
-
- return 0;
-}
-
-void mtk_trm_hw_config_unregister(enum trm_hardware trm_hw,
- struct trm_hw_config *trm_hw_cfg)
-{
- if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
- return;
-
- if (trm_hw_configs[trm_hw] != trm_hw_cfg)
- return;
-
- trm_hw_configs[trm_hw] = NULL;
-}
diff --git a/feed/kernel/tops/src/wdt.c b/feed/kernel/tops/src/wdt.c
deleted file mode 100644
index b632d4e..0000000
--- a/feed/kernel/tops/src/wdt.c
+++ /dev/null
@@ -1,207 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
- *
- * Author: Alvin Kuo <alvin.kuog@mediatek.com>,
- */
-
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include "tops/internal.h"
-#include "tops/mbox.h"
-#include "tops/ser.h"
-#include "tops/trm.h"
-#include "tops/wdt.h"
-
-#define WDT_IRQ_STATUS 0x0140B0
-#define TOP_WDT_MODE 0x012000
-#define CLUST_WDT_MODE(x) (0x512000 + 0x100 * (x))
-
-#define WDT_MODE_KEY 0x22000000
-
-struct watchdog_hw {
- void __iomem *base;
- struct mailbox_dev mgmt_mdev;
- struct mailbox_dev offload_mdev[CORE_OFFLOAD_NUM];
-};
-
-static struct watchdog_hw wdt = {
- .mgmt_mdev = MBOX_SEND_MGMT_DEV(WDT),
- .offload_mdev = {
- [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, WDT),
- [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, WDT),
- [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, WDT),
- [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, WDT),
- },
-};
-
-static inline void wdt_write(u32 reg, u32 val)
-{
- writel(val, wdt.base + reg);
-}
-
-static inline void wdt_set(u32 reg, u32 mask)
-{
- setbits(wdt.base + reg, mask);
-}
-
-static inline void wdt_clr(u32 reg, u32 mask)
-{
- clrbits(wdt.base + reg, mask);
-}
-
-static inline void wdt_rmw(u32 reg, u32 mask, u32 val)
-{
- clrsetbits(wdt.base + reg, mask, val);
-}
-
-static inline u32 wdt_read(u32 reg)
-{
- return readl(wdt.base + reg);
-}
-
-static inline void wdt_irq_clr(u32 wdt_mode_reg)
-{
- wdt_set(wdt_mode_reg, WDT_MODE_KEY);
-}
-
-static void wdt_ser_callback(struct tops_ser_params *ser_params)
-{
- if (ser_params->type != TOPS_SER_WDT_TO)
- WARN_ON(1);
-
- mtk_trm_dump(ser_params->data.wdt.timeout_cores);
-}
-
-static void wdt_ser_mcmd_setup(struct tops_ser_params *ser_params,
- struct mcu_ctrl_cmd *mcmd)
-{
- mcmd->core_mask = (~ser_params->data.wdt.timeout_cores) & CORE_TOPS_MASK;
-}
-
-static irqreturn_t wdt_irq_handler(int irq, void *dev_id)
-{
- struct tops_ser_params ser_params = {
- .type = TOPS_SER_WDT_TO,
- .ser_callback = wdt_ser_callback,
- .ser_mcmd_setup = wdt_ser_mcmd_setup,
- };
- u32 status;
- u32 i;
-
- status = wdt_read(WDT_IRQ_STATUS);
- if (status) {
- ser_params.data.wdt.timeout_cores = status;
- mtk_tops_ser(&ser_params);
-
- /* clear wdt irq */
- if (status & BIT(CORE_MGMT))
- wdt_irq_clr(TOP_WDT_MODE);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++)
- if (status & BIT(i))
- wdt_irq_clr(CLUST_WDT_MODE(i));
- }
- TOPS_ERR("WDT Timeout: 0x%x\n", status);
-
- return IRQ_HANDLED;
-}
-
-int mtk_tops_wdt_trigger_timeout(enum core_id core)
-{
- struct mailbox_msg msg = {
- .msg1 = WDT_CMD_TRIGGER_TIMEOUT,
- };
-
- if (core == CORE_MGMT)
- mbox_send_msg_no_wait(&wdt.mgmt_mdev, &msg);
- else
- mbox_send_msg_no_wait(&wdt.offload_mdev[core], &msg);
-
- return 0;
-}
-
-static int mtk_tops_wdt_register_mbox(void)
-{
- int ret;
- int i;
-
- ret = register_mbox_dev(MBOX_SEND, &wdt.mgmt_mdev);
- if (ret) {
- TOPS_ERR("register wdt mgmt mbox send failed: %d\n", ret);
- return ret;
- }
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
- ret = register_mbox_dev(MBOX_SEND, &wdt.offload_mdev[i]);
- if (ret) {
- TOPS_ERR("register wdt offload %d mbox send failed: %d\n",
- i, ret);
- goto err_unregister_offload_mbox;
- }
- }
-
- return ret;
-
-err_unregister_offload_mbox:
- for (i -= 1; i >= 0; i--)
- unregister_mbox_dev(MBOX_SEND, &wdt.offload_mdev[i]);
-
- unregister_mbox_dev(MBOX_SEND, &wdt.mgmt_mdev);
-
- return ret;
-}
-
-static void mtk_tops_wdt_unregister_mbox(void)
-{
- int i;
-
- unregister_mbox_dev(MBOX_SEND, &wdt.mgmt_mdev);
-
- for (i = 0; i < CORE_OFFLOAD_NUM; i++)
- unregister_mbox_dev(MBOX_SEND, &wdt.offload_mdev[i]);
-}
-
-int mtk_tops_wdt_init(struct platform_device *pdev)
-{
- struct resource *res = NULL;
- int ret;
- int irq;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
- if (!res)
- return -ENXIO;
-
- wdt.base = devm_ioremap(tops_dev, res->start, resource_size(res));
- if (!wdt.base)
- return -ENOMEM;
-
- irq = platform_get_irq_byname(pdev, "wdt");
- if (irq < 0) {
- TOPS_ERR("get wdt irq failed\n");
- return irq;
- }
-
- ret = devm_request_irq(tops_dev, irq,
- wdt_irq_handler,
- IRQF_ONESHOT,
- pdev->name, NULL);
- if (ret) {
- TOPS_ERR("request wdt irq failed\n");
- return ret;
- }
-
- ret = mtk_tops_wdt_register_mbox();
- if (ret)
- return ret;
-
- return ret;
-}
-
-int mtk_tops_wdt_deinit(struct platform_device *pdev)
-{
- mtk_tops_wdt_unregister_mbox();
-
- return 0;
-}
diff --git a/feed/kernel/tops/tops.mk b/feed/kernel/tops/tops.mk
deleted file mode 100644
index e8ccd39..0000000
--- a/feed/kernel/tops/tops.mk
+++ /dev/null
@@ -1,28 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-or-later
-#
-# Copyright (C) 2023 Mediatek Inc. All Rights Reserved.
-# Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
-#
-
-EXTRA_KCONFIG+= \
- CONFIG_MTK_TOPS_SUPPORT=m \
- CONFIG_MTK_TOPS_GRE=$(CONFIG_MTK_TOPS_GRE) \
- CONFIG_MTK_TOPS_GRETAP=$(CONFIG_MTK_TOPS_GRETAP) \
- CONFIG_MTK_TOPS_L2TP=$(CONFIG_MTK_TOPS_L2TP) \
- CONFIG_MTK_TOPS_L2TP_V2=$(CONFIG_MTK_TOPS_L2TP_V2) \
- CONFIG_MTK_TOPS_PPTP=$(CONFIG_MTK_TOPS_PPTP) \
- CONFIG_MTK_TOPS_SECURE_FW=$(CONFIG_MTK_TOPS_SECURE_FW)
-
-EXTRA_CFLAGS+= \
- $(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=m,%,$(filter %=m,$(EXTRA_KCONFIG)))) \
- $(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=y,%,$(filter %=y,$(EXTRA_KCONFIG))))
-
-EXTRA_CFLAGS+= \
- -I$(LINUX_DIR)/drivers/net/ethernet/mediatek/ \
- -I$(LINUX_DIR)/drivers/dma/ \
- -I$(LINUX_DIR)/net/l2tp/ \
- -I$(KERNEL_BUILD_DIR)/pce/inc/ \
- -DCONFIG_TOPS_TNL_NUM=$(CONFIG_TOPS_TNL_NUM) \
- -DCONFIG_TOPS_TNL_MAP_BIT=$(CONFIG_TOPS_TNL_MAP_BIT) \
- -DCONFIG_TOPS_TNL_TYPE_NUM=$(CONFIG_TOPS_TNL_TYPE_NUM) \
- -Wall -Werror