blob: 9ccf9cedcd140cd4cdf8f108d1beac6e910e0e61 [file] [log] [blame]
developeree39bcf2023-06-16 08:03:30 +08001From 6ad9bd65769003ab526e504577e0f747eba14287 Mon Sep 17 00:00:00 2001
2From: Bo Jiao <Bo.Jiao@mediatek.com>
3Date: Wed, 22 Jun 2022 09:42:19 +0800
4Subject: [PATCH 1/8]
5 9990-mt7622-backport-nf-hw-offload-framework-and-upstream-hnat-plus-xt-FLOWOFFLOAD-update-v2
developer8cb3ac72022-07-04 10:55:14 +08006
7---
8 drivers/net/ethernet/mediatek/Makefile | 3 +-
developeree39bcf2023-06-16 08:03:30 +08009 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 28 +-
10 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 20 +-
11 drivers/net/ethernet/mediatek/mtk_ppe.c | 509 +++++++
12 drivers/net/ethernet/mediatek/mtk_ppe.h | 288 ++++
13 .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 214 +++
14 .../net/ethernet/mediatek/mtk_ppe_offload.c | 526 ++++++++
15 drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 144 ++
developer8cb3ac72022-07-04 10:55:14 +080016 drivers/net/ppp/ppp_generic.c | 22 +
17 drivers/net/ppp/pppoe.c | 24 +
developeree39bcf2023-06-16 08:03:30 +080018 include/linux/netdevice.h | 60 +
developer8cb3ac72022-07-04 10:55:14 +080019 include/linux/ppp_channel.h | 3 +
20 include/net/dsa.h | 10 +
21 include/net/flow_offload.h | 4 +
22 include/net/ip6_route.h | 5 +-
23 .../net/netfilter/ipv6/nf_conntrack_ipv6.h | 3 -
24 include/net/netfilter/nf_conntrack.h | 12 +
25 include/net/netfilter/nf_conntrack_acct.h | 11 +
developeree39bcf2023-06-16 08:03:30 +080026 include/net/netfilter/nf_flow_table.h | 264 +++-
developer8cb3ac72022-07-04 10:55:14 +080027 include/net/netns/conntrack.h | 6 +
28 .../linux/netfilter/nf_conntrack_common.h | 9 +-
29 include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h | 17 +
30 net/8021q/vlan_dev.c | 21 +
31 net/bridge/br_device.c | 49 +
32 net/bridge/br_private.h | 20 +
33 net/bridge/br_vlan.c | 55 +
34 net/core/dev.c | 46 +
35 net/dsa/dsa.c | 9 +
developeree39bcf2023-06-16 08:03:30 +080036 net/dsa/slave.c | 41 +-
developer8cb3ac72022-07-04 10:55:14 +080037 net/ipv4/netfilter/Kconfig | 4 +-
38 net/ipv6/ip6_output.c | 2 +-
39 net/ipv6/netfilter/Kconfig | 3 +-
40 net/ipv6/route.c | 22 +-
41 net/netfilter/Kconfig | 14 +-
42 net/netfilter/Makefile | 4 +-
43 net/netfilter/nf_conntrack_core.c | 20 +-
44 net/netfilter/nf_conntrack_proto_tcp.c | 4 +
45 net/netfilter/nf_conntrack_proto_udp.c | 4 +
46 net/netfilter/nf_conntrack_standalone.c | 34 +-
developeree39bcf2023-06-16 08:03:30 +080047 net/netfilter/nf_flow_table_core.c | 446 +++---
48 net/netfilter/nf_flow_table_ip.c | 455 ++++---
49 net/netfilter/nf_flow_table_offload.c | 1191 +++++++++++++++++
50 net/netfilter/xt_FLOWOFFLOAD.c | 719 ++++++++++
51 43 files changed, 4913 insertions(+), 432 deletions(-)
developer8cb3ac72022-07-04 10:55:14 +080052 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
53 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.h
54 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
55 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload.c
56 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_regs.h
57 create mode 100644 include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
58 create mode 100644 net/netfilter/nf_flow_table_offload.c
59 create mode 100644 net/netfilter/xt_FLOWOFFLOAD.c
60
61diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
developeree39bcf2023-06-16 08:03:30 +080062index 13c5b4e8f..0a6af99f1 100755
developer8cb3ac72022-07-04 10:55:14 +080063--- a/drivers/net/ethernet/mediatek/Makefile
64+++ b/drivers/net/ethernet/mediatek/Makefile
developeree39bcf2023-06-16 08:03:30 +080065@@ -4,5 +4,6 @@
developer8cb3ac72022-07-04 10:55:14 +080066 #
67
68 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
developer68838542022-10-03 23:42:21 +080069-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o
70+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
developer8cb3ac72022-07-04 10:55:14 +080071+ mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
72 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
73diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developeree39bcf2023-06-16 08:03:30 +080074index 2b21f7ed0..819d8a0be 100755
developer8cb3ac72022-07-04 10:55:14 +080075--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
76+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developeree39bcf2023-06-16 08:03:30 +080077@@ -3081,6 +3081,7 @@ static int mtk_open(struct net_device *d
developerdca0fde2022-12-14 11:40:35 +080078 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
79 int err, i;
80 struct device_node *phy_node;
developeree39bcf2023-06-16 08:03:30 +080081+ u32 gdm_config = MTK_GDMA_TO_PDMA;
developer8cb3ac72022-07-04 10:55:14 +080082
developeree39bcf2023-06-16 08:03:30 +080083 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
84 if (err) {
85@@ -3157,7 +3158,10 @@ static int mtk_open(struct net_device *d
86 if (!phy_node && eth->xgmii->regmap_sgmii[mac->id])
87 regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer8cb3ac72022-07-04 10:55:14 +080088
developerdca0fde2022-12-14 11:40:35 +080089- mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);
developeree39bcf2023-06-16 08:03:30 +080090+ if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
91+ gdm_config = MTK_GDMA_TO_PPE;
developer8cb3ac72022-07-04 10:55:14 +080092+
developerdca0fde2022-12-14 11:40:35 +080093+ mtk_gdm_config(eth, mac->id, gdm_config);
developer8cb3ac72022-07-04 10:55:14 +080094
developerdca0fde2022-12-14 11:40:35 +080095 return 0;
96 }
developeree39bcf2023-06-16 08:03:30 +080097@@ -3238,6 +3242,9 @@ static int mtk_stop(struct net_device *d
developer8cb3ac72022-07-04 10:55:14 +080098
99 mtk_dma_free(eth);
100
developeree39bcf2023-06-16 08:03:30 +0800101+ if (eth->soc->offload_version)
102+ mtk_ppe_stop(&eth->ppe);
developer8cb3ac72022-07-04 10:55:14 +0800103+
104 return 0;
105 }
106
developeree39bcf2023-06-16 08:03:30 +0800107@@ -3915,6 +3922,7 @@ static const struct net_device_ops mtk_n
developer8cb3ac72022-07-04 10:55:14 +0800108 #ifdef CONFIG_NET_POLL_CONTROLLER
109 .ndo_poll_controller = mtk_poll_controller,
110 #endif
111+ .ndo_setup_tc = mtk_eth_setup_tc,
112 };
113
114 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
developeree39bcf2023-06-16 08:03:30 +0800115@@ -4308,6 +4316,17 @@ static int mtk_probe(struct platform_dev
developer8cb3ac72022-07-04 10:55:14 +0800116 goto err_free_dev;
117 }
118
119+ if (eth->soc->offload_version) {
developeree39bcf2023-06-16 08:03:30 +0800120+ err = mtk_ppe_init(&eth->ppe, eth->dev,
121+ eth->base + MTK_ETH_PPE_BASE, 2);
122+ if (err)
123+ goto err_free_dev;
developer8cb3ac72022-07-04 10:55:14 +0800124+
125+ err = mtk_eth_offload_init(eth);
126+ if (err)
127+ goto err_free_dev;
128+ }
129+
130 for (i = 0; i < MTK_MAX_DEVS; i++) {
131 if (!eth->netdev[i])
132 continue;
developeree39bcf2023-06-16 08:03:30 +0800133@@ -4410,6 +4429,7 @@ static const struct mtk_soc_data mt2701_
developer8cb3ac72022-07-04 10:55:14 +0800134 .required_clks = MT7623_CLKS_BITMAP,
135 .required_pctl = true,
136 .has_sram = false,
developeree39bcf2023-06-16 08:03:30 +0800137+ .offload_version = 2,
138 .rss_num = 0,
developerdca0fde2022-12-14 11:40:35 +0800139 .txrx = {
140 .txd_size = sizeof(struct mtk_tx_dma),
developeree39bcf2023-06-16 08:03:30 +0800141@@ -4424,6 +4444,7 @@ static const struct mtk_soc_data mt7621_
developer8cb3ac72022-07-04 10:55:14 +0800142 .required_clks = MT7621_CLKS_BITMAP,
143 .required_pctl = false,
144 .has_sram = false,
developeree39bcf2023-06-16 08:03:30 +0800145+ .offload_version = 2,
146 .rss_num = 0,
developerdca0fde2022-12-14 11:40:35 +0800147 .txrx = {
148 .txd_size = sizeof(struct mtk_tx_dma),
developeree39bcf2023-06-16 08:03:30 +0800149@@ -4439,6 +4460,7 @@ static const struct mtk_soc_data mt7622_
developer8cb3ac72022-07-04 10:55:14 +0800150 .required_clks = MT7622_CLKS_BITMAP,
151 .required_pctl = false,
152 .has_sram = false,
153+ .offload_version = 2,
developeree39bcf2023-06-16 08:03:30 +0800154 .rss_num = 0,
developerdca0fde2022-12-14 11:40:35 +0800155 .txrx = {
156 .txd_size = sizeof(struct mtk_tx_dma),
developeree39bcf2023-06-16 08:03:30 +0800157@@ -4453,6 +4475,7 @@ static const struct mtk_soc_data mt7623_
developer8cb3ac72022-07-04 10:55:14 +0800158 .required_clks = MT7623_CLKS_BITMAP,
159 .required_pctl = true,
160 .has_sram = false,
developer7eb15dc2023-06-14 17:44:03 +0800161+ .offload_version = 2,
developeree39bcf2023-06-16 08:03:30 +0800162 .rss_num = 0,
developer7eb15dc2023-06-14 17:44:03 +0800163 .txrx = {
164 .txd_size = sizeof(struct mtk_tx_dma),
developer8cb3ac72022-07-04 10:55:14 +0800165diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
developeree39bcf2023-06-16 08:03:30 +0800166index b6380ffeb..349f98503 100755
developer8cb3ac72022-07-04 10:55:14 +0800167--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
168+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
169@@ -15,6 +15,8 @@
170 #include <linux/u64_stats_sync.h>
171 #include <linux/refcount.h>
172 #include <linux/phylink.h>
173+#include <linux/rhashtable.h>
174+#include "mtk_ppe.h"
175
176 #define MTK_QDMA_PAGE_SIZE 2048
177 #define MTK_MAX_RX_LENGTH 1536
developeree39bcf2023-06-16 08:03:30 +0800178@@ -37,7 +39,8 @@
developer8cb3ac72022-07-04 10:55:14 +0800179 NETIF_F_HW_VLAN_CTAG_TX | \
180 NETIF_F_SG | NETIF_F_TSO | \
181 NETIF_F_TSO6 | \
182- NETIF_F_IPV6_CSUM)
183+ NETIF_F_IPV6_CSUM |\
184+ NETIF_F_HW_TC)
185 #define MTK_SET_FEATURES (NETIF_F_LRO | \
186 NETIF_F_HW_VLAN_CTAG_RX)
187 #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
developeree39bcf2023-06-16 08:03:30 +0800188@@ -107,6 +110,7 @@
189 #define MTK_GDMA_TCS_EN BIT(21)
developer8cb3ac72022-07-04 10:55:14 +0800190 #define MTK_GDMA_UCS_EN BIT(20)
191 #define MTK_GDMA_TO_PDMA 0x0
192+#define MTK_GDMA_TO_PPE 0x4444
193 #define MTK_GDMA_DROP_ALL 0x7777
194
developeree39bcf2023-06-16 08:03:30 +0800195 /* Unicast Filter MAC Address Register - Low */
196@@ -547,6 +551,12 @@
developer8cb3ac72022-07-04 10:55:14 +0800197 #define RX_DMA_TCI(_x) ((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
198 #define RX_DMA_VPID(_x) (((_x) >> 16) & 0xffff)
199
200+/* QDMA descriptor rxd4 */
201+#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
202+#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
203+#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
204+#define MTK_RXD4_ALG GENMASK(31, 22)
205+
206 /* QDMA descriptor rxd4 */
207 #define RX_DMA_L4_VALID BIT(24)
208 #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
developeree39bcf2023-06-16 08:03:30 +0800209@@ -1158,6 +1168,7 @@ struct mtk_soc_data {
210 u32 caps;
211 u32 required_clks;
developer8cb3ac72022-07-04 10:55:14 +0800212 bool required_pctl;
213+ u8 offload_version;
214 netdev_features_t hw_features;
215 bool has_sram;
developeree39bcf2023-06-16 08:03:30 +0800216 };
217@@ -1271,6 +1282,9 @@ struct mtk_eth {
developer8cb3ac72022-07-04 10:55:14 +0800218 int ip_align;
219 spinlock_t syscfg0_lock;
220 struct timer_list mtk_dma_monitor_timer;
221+
developeree39bcf2023-06-16 08:03:30 +0800222+ struct mtk_ppe ppe;
developer8cb3ac72022-07-04 10:55:14 +0800223+ struct rhashtable flow_table;
224 };
225
226 /* struct mtk_mac - the structure that holds the info about the MACs of the
developeree39bcf2023-06-16 08:03:30 +0800227@@ -1319,4 +1333,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
228 void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id);
developer1fb19c92023-03-07 23:45:23 +0800229 int mtk_dump_usxgmii(struct regmap *pmap, char *name, u32 offset, u32 range);
developer8cb3ac72022-07-04 10:55:14 +0800230
231+int mtk_eth_offload_init(struct mtk_eth *eth);
232+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
233+ void *type_data);
developer1fb19c92023-03-07 23:45:23 +0800234 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
developer8cb3ac72022-07-04 10:55:14 +0800235diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
236new file mode 100644
developeree39bcf2023-06-16 08:03:30 +0800237index 000000000..66298e223
developer8cb3ac72022-07-04 10:55:14 +0800238--- /dev/null
239+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
developercbbf1b02023-09-06 10:24:04 +0800240@@ -0,0 +1,510 @@
developer8cb3ac72022-07-04 10:55:14 +0800241+// SPDX-License-Identifier: GPL-2.0-only
242+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
243+
244+#include <linux/kernel.h>
245+#include <linux/io.h>
246+#include <linux/iopoll.h>
247+#include <linux/etherdevice.h>
248+#include <linux/platform_device.h>
249+#include "mtk_ppe.h"
250+#include "mtk_ppe_regs.h"
251+
252+static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
253+{
254+ writel(val, ppe->base + reg);
255+}
256+
257+static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
258+{
259+ return readl(ppe->base + reg);
260+}
261+
262+static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
263+{
264+ u32 val;
265+
266+ val = ppe_r32(ppe, reg);
267+ val &= ~mask;
268+ val |= set;
269+ ppe_w32(ppe, reg, val);
270+
271+ return val;
272+}
273+
274+static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
275+{
276+ return ppe_m32(ppe, reg, 0, val);
277+}
278+
279+static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
280+{
281+ return ppe_m32(ppe, reg, val, 0);
282+}
283+
284+static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
285+{
286+ int ret;
287+ u32 val;
288+
289+ ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
290+ !(val & MTK_PPE_GLO_CFG_BUSY),
291+ 20, MTK_PPE_WAIT_TIMEOUT_US);
292+
293+ if (ret)
294+ dev_err(ppe->dev, "PPE table busy");
295+
296+ return ret;
297+}
298+
299+static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
300+{
301+ ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
302+ ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
303+}
304+
305+static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
306+{
307+ mtk_ppe_cache_clear(ppe);
308+
309+ ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
310+ enable * MTK_PPE_CACHE_CTL_EN);
311+}
312+
developeree39bcf2023-06-16 08:03:30 +0800313+static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
developer8cb3ac72022-07-04 10:55:14 +0800314+{
315+ u32 hv1, hv2, hv3;
316+ u32 hash;
317+
developeree39bcf2023-06-16 08:03:30 +0800318+ switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
319+ case MTK_PPE_PKT_TYPE_BRIDGE:
320+ hv1 = e->bridge.src_mac_lo;
321+ hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
322+ hv2 = e->bridge.src_mac_hi >> 16;
323+ hv2 ^= e->bridge.dest_mac_lo;
324+ hv3 = e->bridge.dest_mac_hi;
325+ break;
developer8cb3ac72022-07-04 10:55:14 +0800326+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
327+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
328+ hv1 = e->ipv4.orig.ports;
329+ hv2 = e->ipv4.orig.dest_ip;
330+ hv3 = e->ipv4.orig.src_ip;
331+ break;
332+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
333+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
334+ hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
335+ hv1 ^= e->ipv6.ports;
336+
337+ hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
338+ hv2 ^= e->ipv6.dest_ip[0];
339+
340+ hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
341+ hv3 ^= e->ipv6.src_ip[0];
342+ break;
343+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
344+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
345+ default:
346+ WARN_ON_ONCE(1);
347+ return MTK_PPE_HASH_MASK;
348+ }
349+
350+ hash = (hv1 & hv2) | ((~hv1) & hv3);
351+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
352+ hash ^= hv1 ^ hv2 ^ hv3;
353+ hash ^= hash >> 16;
developeree39bcf2023-06-16 08:03:30 +0800354+ hash <<= 1;
developer8cb3ac72022-07-04 10:55:14 +0800355+ hash &= MTK_PPE_ENTRIES - 1;
356+
357+ return hash;
358+}
359+
360+static inline struct mtk_foe_mac_info *
developeree39bcf2023-06-16 08:03:30 +0800361+mtk_foe_entry_l2(struct mtk_foe_entry *entry)
developer8cb3ac72022-07-04 10:55:14 +0800362+{
developeree39bcf2023-06-16 08:03:30 +0800363+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
developer8cb3ac72022-07-04 10:55:14 +0800364+
365+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
366+ return &entry->ipv6.l2;
367+
368+ return &entry->ipv4.l2;
369+}
370+
371+static inline u32 *
developeree39bcf2023-06-16 08:03:30 +0800372+mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
developer8cb3ac72022-07-04 10:55:14 +0800373+{
developeree39bcf2023-06-16 08:03:30 +0800374+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
developer8cb3ac72022-07-04 10:55:14 +0800375+
376+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
377+ return &entry->ipv6.ib2;
378+
379+ return &entry->ipv4.ib2;
380+}
381+
developeree39bcf2023-06-16 08:03:30 +0800382+int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
383+ u8 pse_port, u8 *src_mac, u8 *dest_mac)
developer8cb3ac72022-07-04 10:55:14 +0800384+{
385+ struct mtk_foe_mac_info *l2;
386+ u32 ports_pad, val;
387+
388+ memset(entry, 0, sizeof(*entry));
389+
developeree39bcf2023-06-16 08:03:30 +0800390+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
391+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
392+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
393+ MTK_FOE_IB1_BIND_TTL |
394+ MTK_FOE_IB1_BIND_CACHE;
395+ entry->ib1 = val;
developer8cb3ac72022-07-04 10:55:14 +0800396+
developeree39bcf2023-06-16 08:03:30 +0800397+ val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
398+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
399+ FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
developer8cb3ac72022-07-04 10:55:14 +0800400+
401+ if (is_multicast_ether_addr(dest_mac))
developeree39bcf2023-06-16 08:03:30 +0800402+ val |= MTK_FOE_IB2_MULTICAST;
developer8cb3ac72022-07-04 10:55:14 +0800403+
404+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
405+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
406+ entry->ipv4.orig.ports = ports_pad;
407+ if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
408+ entry->ipv6.ports = ports_pad;
409+
developeree39bcf2023-06-16 08:03:30 +0800410+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
developer8cb3ac72022-07-04 10:55:14 +0800411+ entry->ipv6.ib2 = val;
412+ l2 = &entry->ipv6.l2;
413+ } else {
414+ entry->ipv4.ib2 = val;
415+ l2 = &entry->ipv4.l2;
416+ }
417+
418+ l2->dest_mac_hi = get_unaligned_be32(dest_mac);
419+ l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
420+ l2->src_mac_hi = get_unaligned_be32(src_mac);
421+ l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
422+
423+ if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
424+ l2->etype = ETH_P_IPV6;
425+ else
426+ l2->etype = ETH_P_IP;
427+
428+ return 0;
429+}
430+
developeree39bcf2023-06-16 08:03:30 +0800431+int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
developer8cb3ac72022-07-04 10:55:14 +0800432+{
developeree39bcf2023-06-16 08:03:30 +0800433+ u32 *ib2 = mtk_foe_entry_ib2(entry);
434+ u32 val;
developer8cb3ac72022-07-04 10:55:14 +0800435+
developeree39bcf2023-06-16 08:03:30 +0800436+ val = *ib2;
437+ val &= ~MTK_FOE_IB2_DEST_PORT;
438+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
developer8cb3ac72022-07-04 10:55:14 +0800439+ *ib2 = val;
440+
441+ return 0;
442+}
443+
developeree39bcf2023-06-16 08:03:30 +0800444+int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
developer8cb3ac72022-07-04 10:55:14 +0800445+ __be32 src_addr, __be16 src_port,
446+ __be32 dest_addr, __be16 dest_port)
447+{
developeree39bcf2023-06-16 08:03:30 +0800448+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
developer8cb3ac72022-07-04 10:55:14 +0800449+ struct mtk_ipv4_tuple *t;
450+
451+ switch (type) {
452+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
453+ if (egress) {
454+ t = &entry->ipv4.new;
455+ break;
456+ }
457+ fallthrough;
458+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
459+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
460+ t = &entry->ipv4.orig;
461+ break;
462+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
463+ entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
464+ entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
465+ return 0;
466+ default:
467+ WARN_ON_ONCE(1);
468+ return -EINVAL;
469+ }
470+
471+ t->src_ip = be32_to_cpu(src_addr);
472+ t->dest_ip = be32_to_cpu(dest_addr);
473+
474+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
475+ return 0;
476+
477+ t->src_port = be16_to_cpu(src_port);
478+ t->dest_port = be16_to_cpu(dest_port);
479+
480+ return 0;
481+}
482+
developeree39bcf2023-06-16 08:03:30 +0800483+int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
developer8cb3ac72022-07-04 10:55:14 +0800484+ __be32 *src_addr, __be16 src_port,
485+ __be32 *dest_addr, __be16 dest_port)
486+{
developeree39bcf2023-06-16 08:03:30 +0800487+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
developer8cb3ac72022-07-04 10:55:14 +0800488+ u32 *src, *dest;
489+ int i;
490+
491+ switch (type) {
492+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
493+ src = entry->dslite.tunnel_src_ip;
494+ dest = entry->dslite.tunnel_dest_ip;
495+ break;
496+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
497+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
498+ entry->ipv6.src_port = be16_to_cpu(src_port);
499+ entry->ipv6.dest_port = be16_to_cpu(dest_port);
500+ fallthrough;
501+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
502+ src = entry->ipv6.src_ip;
503+ dest = entry->ipv6.dest_ip;
504+ break;
505+ default:
506+ WARN_ON_ONCE(1);
507+ return -EINVAL;
508+ }
509+
510+ for (i = 0; i < 4; i++)
511+ src[i] = be32_to_cpu(src_addr[i]);
512+ for (i = 0; i < 4; i++)
513+ dest[i] = be32_to_cpu(dest_addr[i]);
514+
515+ return 0;
516+}
517+
developeree39bcf2023-06-16 08:03:30 +0800518+int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
developer8cb3ac72022-07-04 10:55:14 +0800519+{
developeree39bcf2023-06-16 08:03:30 +0800520+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
developer8cb3ac72022-07-04 10:55:14 +0800521+
522+ l2->etype = BIT(port);
523+
developeree39bcf2023-06-16 08:03:30 +0800524+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
525+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
developer8cb3ac72022-07-04 10:55:14 +0800526+ else
527+ l2->etype |= BIT(8);
528+
developeree39bcf2023-06-16 08:03:30 +0800529+ entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
developer8cb3ac72022-07-04 10:55:14 +0800530+
531+ return 0;
532+}
533+
developeree39bcf2023-06-16 08:03:30 +0800534+int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
developer8cb3ac72022-07-04 10:55:14 +0800535+{
developeree39bcf2023-06-16 08:03:30 +0800536+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
developer8cb3ac72022-07-04 10:55:14 +0800537+
developeree39bcf2023-06-16 08:03:30 +0800538+ switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
developer8cb3ac72022-07-04 10:55:14 +0800539+ case 0:
developeree39bcf2023-06-16 08:03:30 +0800540+ entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
541+ FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
developer8cb3ac72022-07-04 10:55:14 +0800542+ l2->vlan1 = vid;
543+ return 0;
544+ case 1:
developeree39bcf2023-06-16 08:03:30 +0800545+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
developer8cb3ac72022-07-04 10:55:14 +0800546+ l2->vlan1 = vid;
547+ l2->etype |= BIT(8);
548+ } else {
549+ l2->vlan2 = vid;
developeree39bcf2023-06-16 08:03:30 +0800550+ entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
developer8cb3ac72022-07-04 10:55:14 +0800551+ }
552+ return 0;
553+ default:
554+ return -ENOSPC;
555+ }
556+}
557+
developeree39bcf2023-06-16 08:03:30 +0800558+int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
developer8cb3ac72022-07-04 10:55:14 +0800559+{
developeree39bcf2023-06-16 08:03:30 +0800560+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
developer8cb3ac72022-07-04 10:55:14 +0800561+
developeree39bcf2023-06-16 08:03:30 +0800562+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
563+ (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
developer8cb3ac72022-07-04 10:55:14 +0800564+ l2->etype = ETH_P_PPP_SES;
565+
developeree39bcf2023-06-16 08:03:30 +0800566+ entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
developer8cb3ac72022-07-04 10:55:14 +0800567+ l2->pppoe_id = sid;
568+
569+ return 0;
570+}
571+
572+static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
573+{
574+ return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
575+ FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
576+}
577+
developeree39bcf2023-06-16 08:03:30 +0800578+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
579+ u16 timestamp)
developer7eb15dc2023-06-14 17:44:03 +0800580+{
developer8cb3ac72022-07-04 10:55:14 +0800581+ struct mtk_foe_entry *hwe;
developeree39bcf2023-06-16 08:03:30 +0800582+ u32 hash;
developer7eb15dc2023-06-14 17:44:03 +0800583+
developeree39bcf2023-06-16 08:03:30 +0800584+ timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
585+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
586+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
developer7eb15dc2023-06-14 17:44:03 +0800587+
developeree39bcf2023-06-16 08:03:30 +0800588+ hash = mtk_ppe_hash_entry(entry);
589+ hwe = &ppe->foe_table[hash];
590+ if (!mtk_foe_entry_usable(hwe)) {
591+ hwe++;
592+ hash++;
developer7eb15dc2023-06-14 17:44:03 +0800593+
developeree39bcf2023-06-16 08:03:30 +0800594+ if (!mtk_foe_entry_usable(hwe))
595+ return -ENOSPC;
developer7eb15dc2023-06-14 17:44:03 +0800596+ }
597+
developeree39bcf2023-06-16 08:03:30 +0800598+ memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
developer8cb3ac72022-07-04 10:55:14 +0800599+ wmb();
600+ hwe->ib1 = entry->ib1;
601+
602+ dma_wmb();
603+
604+ mtk_ppe_cache_clear(ppe);
developer7eb15dc2023-06-14 17:44:03 +0800605+
developeree39bcf2023-06-16 08:03:30 +0800606+ return hash;
developer7eb15dc2023-06-14 17:44:03 +0800607+}
608+
developeree39bcf2023-06-16 08:03:30 +0800609+int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
610+ int version)
developer7eb15dc2023-06-14 17:44:03 +0800611+{
developeree39bcf2023-06-16 08:03:30 +0800612+ struct mtk_foe_entry *foe;
developer8cb3ac72022-07-04 10:55:14 +0800613+
614+ /* need to allocate a separate device, since it PPE DMA access is
615+ * not coherent.
616+ */
617+ ppe->base = base;
618+ ppe->dev = dev;
developeree39bcf2023-06-16 08:03:30 +0800619+ ppe->version = version;
developer8cb3ac72022-07-04 10:55:14 +0800620+
developeree39bcf2023-06-16 08:03:30 +0800621+ foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
developer8cb3ac72022-07-04 10:55:14 +0800622+ &ppe->foe_phys, GFP_KERNEL);
623+ if (!foe)
developeree39bcf2023-06-16 08:03:30 +0800624+ return -ENOMEM;
developer8cb3ac72022-07-04 10:55:14 +0800625+
626+ ppe->foe_table = foe;
627+
developeree39bcf2023-06-16 08:03:30 +0800628+ mtk_ppe_debugfs_init(ppe);
developer7eb15dc2023-06-14 17:44:03 +0800629+
developeree39bcf2023-06-16 08:03:30 +0800630+ return 0;
developer8cb3ac72022-07-04 10:55:14 +0800631+}
632+
633+static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
634+{
635+ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
636+ int i, k;
637+
developeree39bcf2023-06-16 08:03:30 +0800638+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
developer8cb3ac72022-07-04 10:55:14 +0800639+
640+ if (!IS_ENABLED(CONFIG_SOC_MT7621))
641+ return;
642+
643+ /* skip all entries that cross the 1024 byte boundary */
developeree39bcf2023-06-16 08:03:30 +0800644+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
645+ for (k = 0; k < ARRAY_SIZE(skip); k++)
646+ ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
developer8cb3ac72022-07-04 10:55:14 +0800647+}
648+
developeree39bcf2023-06-16 08:03:30 +0800649+int mtk_ppe_start(struct mtk_ppe *ppe)
developer8cb3ac72022-07-04 10:55:14 +0800650+{
651+ u32 val;
652+
653+ mtk_ppe_init_foe_table(ppe);
654+ ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
655+
656+ val = MTK_PPE_TB_CFG_ENTRY_80B |
657+ MTK_PPE_TB_CFG_AGE_NON_L4 |
658+ MTK_PPE_TB_CFG_AGE_UNBIND |
659+ MTK_PPE_TB_CFG_AGE_TCP |
660+ MTK_PPE_TB_CFG_AGE_UDP |
661+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
662+ FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
663+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
664+ FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
665+ MTK_PPE_KEEPALIVE_DISABLE) |
666+ FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
667+ FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
668+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
669+ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
670+ MTK_PPE_ENTRIES_SHIFT);
671+ ppe_w32(ppe, MTK_PPE_TB_CFG, val);
672+
673+ ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
674+ MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
675+
676+ mtk_ppe_cache_enable(ppe, true);
677+
developeree39bcf2023-06-16 08:03:30 +0800678+ val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
679+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
680+ MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
developer8cb3ac72022-07-04 10:55:14 +0800681+ MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
682+ MTK_PPE_FLOW_CFG_IP6_6RD |
683+ MTK_PPE_FLOW_CFG_IP4_NAT |
684+ MTK_PPE_FLOW_CFG_IP4_NAPT |
685+ MTK_PPE_FLOW_CFG_IP4_DSLITE |
developeree39bcf2023-06-16 08:03:30 +0800686+ MTK_PPE_FLOW_CFG_L2_BRIDGE |
developer8cb3ac72022-07-04 10:55:14 +0800687+ MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
688+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
689+
690+ val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
691+ FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
692+ ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
693+
developeree39bcf2023-06-16 08:03:30 +0800694+ val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 30) |
developer8cb3ac72022-07-04 10:55:14 +0800695+ FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
696+ ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
697+
698+ val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
developeree39bcf2023-06-16 08:03:30 +0800699+ FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 30);
developer8cb3ac72022-07-04 10:55:14 +0800700+ ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
701+
702+ val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
703+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
704+
705+ val = MTK_PPE_BIND_LIMIT1_FULL |
706+ FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
707+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
708+
709+ val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
710+ FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
711+ ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
712+
713+ /* enable PPE */
714+ val = MTK_PPE_GLO_CFG_EN |
715+ MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
716+ MTK_PPE_GLO_CFG_IP4_CS_DROP |
developercbbf1b02023-09-06 10:24:04 +0800717+ MTK_PPE_GLO_CFG_MCAST_TB_EN |
developer8cb3ac72022-07-04 10:55:14 +0800718+ MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
719+ ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
720+
721+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
722+
developeree39bcf2023-06-16 08:03:30 +0800723+ return 0;
developer8cb3ac72022-07-04 10:55:14 +0800724+}
725+
726+int mtk_ppe_stop(struct mtk_ppe *ppe)
727+{
728+ u32 val;
729+ int i;
730+
developeree39bcf2023-06-16 08:03:30 +0800731+ for (i = 0; i < MTK_PPE_ENTRIES; i++)
732+ ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
733+ MTK_FOE_STATE_INVALID);
developer8cb3ac72022-07-04 10:55:14 +0800734+
735+ mtk_ppe_cache_enable(ppe, false);
736+
737+ /* disable offload engine */
738+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
739+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
740+
741+ /* disable aging */
742+ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
743+ MTK_PPE_TB_CFG_AGE_UNBIND |
744+ MTK_PPE_TB_CFG_AGE_TCP |
745+ MTK_PPE_TB_CFG_AGE_UDP |
746+ MTK_PPE_TB_CFG_AGE_TCP_FIN;
747+ ppe_clear(ppe, MTK_PPE_TB_CFG, val);
748+
749+ return mtk_ppe_wait_busy(ppe);
750+}
751diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
752new file mode 100644
developeree39bcf2023-06-16 08:03:30 +0800753index 000000000..242fb8f2a
developer8cb3ac72022-07-04 10:55:14 +0800754--- /dev/null
755+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
developeree39bcf2023-06-16 08:03:30 +0800756@@ -0,0 +1,288 @@
developer8cb3ac72022-07-04 10:55:14 +0800757+// SPDX-License-Identifier: GPL-2.0-only
758+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
759+
760+#ifndef __MTK_PPE_H
761+#define __MTK_PPE_H
762+
763+#include <linux/kernel.h>
764+#include <linux/bitfield.h>
developeree39bcf2023-06-16 08:03:30 +0800765+
766+#define MTK_ETH_PPE_BASE 0xc00
developer8cb3ac72022-07-04 10:55:14 +0800767+
768+#define MTK_PPE_ENTRIES_SHIFT 3
769+#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
770+#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
771+#define MTK_PPE_WAIT_TIMEOUT_US 1000000
772+
773+#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
774+#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
775+#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24)
776+
777+#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
778+#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15)
779+#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16)
780+#define MTK_FOE_IB1_BIND_PPPOE BIT(19)
781+#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20)
782+#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21)
783+#define MTK_FOE_IB1_BIND_CACHE BIT(22)
784+#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
785+#define MTK_FOE_IB1_BIND_TTL BIT(24)
786+
787+#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25)
788+#define MTK_FOE_IB1_STATE GENMASK(29, 28)
789+#define MTK_FOE_IB1_UDP BIT(30)
790+#define MTK_FOE_IB1_STATIC BIT(31)
791+
792+enum {
793+ MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
794+ MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
795+ MTK_PPE_PKT_TYPE_BRIDGE = 2,
796+ MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3,
797+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
798+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
799+ MTK_PPE_PKT_TYPE_IPV6_6RD = 7,
800+};
801+
802+#define MTK_FOE_IB2_QID GENMASK(3, 0)
803+#define MTK_FOE_IB2_PSE_QOS BIT(4)
804+#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
805+#define MTK_FOE_IB2_MULTICAST BIT(8)
806+
developeree39bcf2023-06-16 08:03:30 +0800807+#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
808+#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
809+#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
developer8cb3ac72022-07-04 10:55:14 +0800810+
811+#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
812+
813+#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
814+
815+#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
816+
developeree39bcf2023-06-16 08:03:30 +0800817+#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
818+#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
819+#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
developer8cb3ac72022-07-04 10:55:14 +0800820+
821+enum {
822+ MTK_FOE_STATE_INVALID,
823+ MTK_FOE_STATE_UNBIND,
824+ MTK_FOE_STATE_BIND,
825+ MTK_FOE_STATE_FIN
826+};
827+
828+struct mtk_foe_mac_info {
829+ u16 vlan1;
830+ u16 etype;
831+
832+ u32 dest_mac_hi;
833+
834+ u16 vlan2;
835+ u16 dest_mac_lo;
836+
837+ u32 src_mac_hi;
838+
839+ u16 pppoe_id;
840+ u16 src_mac_lo;
841+};
842+
843+struct mtk_foe_bridge {
developeree39bcf2023-06-16 08:03:30 +0800844+ u32 dest_mac_hi;
845+
846+ u16 src_mac_lo;
847+ u16 dest_mac_lo;
developer8cb3ac72022-07-04 10:55:14 +0800848+
developeree39bcf2023-06-16 08:03:30 +0800849+ u32 src_mac_hi;
developer8cb3ac72022-07-04 10:55:14 +0800850+
851+ u32 ib2;
852+
developeree39bcf2023-06-16 08:03:30 +0800853+ u32 _rsv[5];
854+
855+ u32 udf_tsid;
developer8cb3ac72022-07-04 10:55:14 +0800856+ struct mtk_foe_mac_info l2;
857+};
858+
859+struct mtk_ipv4_tuple {
860+ u32 src_ip;
861+ u32 dest_ip;
862+ union {
863+ struct {
864+ u16 dest_port;
865+ u16 src_port;
866+ };
867+ struct {
868+ u8 protocol;
869+ u8 _pad[3]; /* fill with 0xa5a5a5 */
870+ };
871+ u32 ports;
872+ };
873+};
874+
875+struct mtk_foe_ipv4 {
876+ struct mtk_ipv4_tuple orig;
877+
878+ u32 ib2;
879+
880+ struct mtk_ipv4_tuple new;
881+
882+ u16 timestamp;
883+ u16 _rsv0[3];
884+
885+ u32 udf_tsid;
886+
887+ struct mtk_foe_mac_info l2;
888+};
889+
890+struct mtk_foe_ipv4_dslite {
891+ struct mtk_ipv4_tuple ip4;
892+
893+ u32 tunnel_src_ip[4];
894+ u32 tunnel_dest_ip[4];
895+
896+ u8 flow_label[3];
897+ u8 priority;
898+
899+ u32 udf_tsid;
900+
901+ u32 ib2;
902+
903+ struct mtk_foe_mac_info l2;
904+};
905+
906+struct mtk_foe_ipv6 {
907+ u32 src_ip[4];
908+ u32 dest_ip[4];
909+
910+ union {
911+ struct {
912+ u8 protocol;
913+ u8 _pad[3]; /* fill with 0xa5a5a5 */
914+ }; /* 3-tuple */
915+ struct {
916+ u16 dest_port;
917+ u16 src_port;
918+ }; /* 5-tuple */
919+ u32 ports;
920+ };
921+
922+ u32 _rsv[3];
923+
924+ u32 udf;
925+
926+ u32 ib2;
927+ struct mtk_foe_mac_info l2;
928+};
929+
930+struct mtk_foe_ipv6_6rd {
931+ u32 src_ip[4];
932+ u32 dest_ip[4];
933+ u16 dest_port;
934+ u16 src_port;
935+
936+ u32 tunnel_src_ip;
937+ u32 tunnel_dest_ip;
938+
939+ u16 hdr_csum;
940+ u8 dscp;
941+ u8 ttl;
942+
943+ u8 flag;
944+ u8 pad;
945+ u8 per_flow_6rd_id;
946+ u8 pad2;
947+
948+ u32 ib2;
949+ struct mtk_foe_mac_info l2;
950+};
951+
952+struct mtk_foe_entry {
953+ u32 ib1;
954+
955+ union {
956+ struct mtk_foe_bridge bridge;
957+ struct mtk_foe_ipv4 ipv4;
958+ struct mtk_foe_ipv4_dslite dslite;
959+ struct mtk_foe_ipv6 ipv6;
960+ struct mtk_foe_ipv6_6rd ipv6_6rd;
developeree39bcf2023-06-16 08:03:30 +0800961+ u32 data[19];
developer8cb3ac72022-07-04 10:55:14 +0800962+ };
963+};
964+
965+enum {
966+ MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02,
967+ MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03,
968+ MTK_PPE_CPU_REASON_NO_FLOW = 0x07,
969+ MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08,
970+ MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09,
971+ MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a,
972+ MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b,
973+ MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c,
974+ MTK_PPE_CPU_REASON_UN_HIT = 0x0d,
975+ MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e,
976+ MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
977+ MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10,
978+ MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11,
979+ MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12,
980+ MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13,
981+ MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14,
982+ MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15,
983+ MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16,
984+ MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17,
985+ MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18,
986+ MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19,
987+ MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a,
988+ MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b,
989+ MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c,
990+ MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e,
991+ MTK_PPE_CPU_REASON_INVALID = 0x1f,
992+};
993+
994+struct mtk_ppe {
995+ struct device *dev;
996+ void __iomem *base;
997+ int version;
998+
developeree39bcf2023-06-16 08:03:30 +0800999+ struct mtk_foe_entry *foe_table;
developer8cb3ac72022-07-04 10:55:14 +08001000+ dma_addr_t foe_phys;
1001+
1002+ void *acct_table;
1003+};
1004+
developeree39bcf2023-06-16 08:03:30 +08001005+int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
1006+ int version);
1007+int mtk_ppe_start(struct mtk_ppe *ppe);
developer8cb3ac72022-07-04 10:55:14 +08001008+int mtk_ppe_stop(struct mtk_ppe *ppe);
1009+
1010+static inline void
developeree39bcf2023-06-16 08:03:30 +08001011+mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
developer8cb3ac72022-07-04 10:55:14 +08001012+{
developeree39bcf2023-06-16 08:03:30 +08001013+ ppe->foe_table[hash].ib1 = 0;
1014+ dma_wmb();
1015+}
developer8cb3ac72022-07-04 10:55:14 +08001016+
developeree39bcf2023-06-16 08:03:30 +08001017+static inline int
1018+mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
1019+{
1020+ u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
developer8cb3ac72022-07-04 10:55:14 +08001021+
developeree39bcf2023-06-16 08:03:30 +08001022+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
1023+ return -1;
developer7eb15dc2023-06-14 17:44:03 +08001024+
developeree39bcf2023-06-16 08:03:30 +08001025+ return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
developer8cb3ac72022-07-04 10:55:14 +08001026+}
1027+
developeree39bcf2023-06-16 08:03:30 +08001028+int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
1029+ u8 pse_port, u8 *src_mac, u8 *dest_mac);
1030+int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
1031+int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
developer8cb3ac72022-07-04 10:55:14 +08001032+ __be32 src_addr, __be16 src_port,
1033+ __be32 dest_addr, __be16 dest_port);
developeree39bcf2023-06-16 08:03:30 +08001034+int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
developer8cb3ac72022-07-04 10:55:14 +08001035+ __be32 *src_addr, __be16 src_port,
1036+ __be32 *dest_addr, __be16 dest_port);
developeree39bcf2023-06-16 08:03:30 +08001037+int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
1038+int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
1039+int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
1040+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
1041+ u16 timestamp);
1042+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
developer8cb3ac72022-07-04 10:55:14 +08001043+
1044+#endif
1045diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1046new file mode 100644
developeree39bcf2023-06-16 08:03:30 +08001047index 000000000..d4b482340
developer8cb3ac72022-07-04 10:55:14 +08001048--- /dev/null
1049+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
developeree39bcf2023-06-16 08:03:30 +08001050@@ -0,0 +1,214 @@
developer8cb3ac72022-07-04 10:55:14 +08001051+// SPDX-License-Identifier: GPL-2.0-only
1052+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
1053+
1054+#include <linux/kernel.h>
1055+#include <linux/debugfs.h>
1056+#include "mtk_eth_soc.h"
1057+
1058+struct mtk_flow_addr_info
1059+{
1060+ void *src, *dest;
1061+ u16 *src_port, *dest_port;
1062+ bool ipv6;
1063+};
1064+
1065+static const char *mtk_foe_entry_state_str(int state)
1066+{
1067+ static const char * const state_str[] = {
1068+ [MTK_FOE_STATE_INVALID] = "INV",
1069+ [MTK_FOE_STATE_UNBIND] = "UNB",
1070+ [MTK_FOE_STATE_BIND] = "BND",
1071+ [MTK_FOE_STATE_FIN] = "FIN",
1072+ };
1073+
1074+ if (state >= ARRAY_SIZE(state_str) || !state_str[state])
1075+ return "UNK";
1076+
1077+ return state_str[state];
1078+}
1079+
1080+static const char *mtk_foe_pkt_type_str(int type)
1081+{
1082+ static const char * const type_str[] = {
1083+ [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
1084+ [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
developeree39bcf2023-06-16 08:03:30 +08001085+ [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
developer8cb3ac72022-07-04 10:55:14 +08001086+ [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
1087+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
1088+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
1089+ [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
1090+ };
1091+
1092+ if (type >= ARRAY_SIZE(type_str) || !type_str[type])
1093+ return "UNKNOWN";
1094+
1095+ return type_str[type];
1096+}
1097+
1098+static void
1099+mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
1100+{
1101+ u32 n_addr[4];
1102+ int i;
1103+
1104+ if (!ipv6) {
1105+ seq_printf(m, "%pI4h", addr);
1106+ return;
1107+ }
1108+
1109+ for (i = 0; i < ARRAY_SIZE(n_addr); i++)
1110+ n_addr[i] = htonl(addr[i]);
1111+ seq_printf(m, "%pI6", n_addr);
1112+}
1113+
1114+static void
1115+mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
1116+{
1117+ mtk_print_addr(m, ai->src, ai->ipv6);
1118+ if (ai->src_port)
1119+ seq_printf(m, ":%d", *ai->src_port);
1120+ seq_printf(m, "->");
1121+ mtk_print_addr(m, ai->dest, ai->ipv6);
1122+ if (ai->dest_port)
1123+ seq_printf(m, ":%d", *ai->dest_port);
1124+}
1125+
1126+static int
1127+mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
1128+{
1129+ struct mtk_ppe *ppe = m->private;
1130+ int i;
1131+
1132+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
developeree39bcf2023-06-16 08:03:30 +08001133+ struct mtk_foe_entry *entry = &ppe->foe_table[i];
developer8cb3ac72022-07-04 10:55:14 +08001134+ struct mtk_foe_mac_info *l2;
1135+ struct mtk_flow_addr_info ai = {};
1136+ unsigned char h_source[ETH_ALEN];
1137+ unsigned char h_dest[ETH_ALEN];
1138+ int type, state;
1139+ u32 ib2;
1140+
1141+
1142+ state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
1143+ if (!state)
1144+ continue;
1145+
1146+ if (bind && state != MTK_FOE_STATE_BIND)
1147+ continue;
1148+
1149+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
1150+ seq_printf(m, "%05x %s %7s", i,
1151+ mtk_foe_entry_state_str(state),
1152+ mtk_foe_pkt_type_str(type));
1153+
1154+ switch (type) {
1155+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
1156+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
1157+ ai.src_port = &entry->ipv4.orig.src_port;
1158+ ai.dest_port = &entry->ipv4.orig.dest_port;
1159+ fallthrough;
1160+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
1161+ ai.src = &entry->ipv4.orig.src_ip;
1162+ ai.dest = &entry->ipv4.orig.dest_ip;
1163+ break;
1164+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
1165+ ai.src_port = &entry->ipv6.src_port;
1166+ ai.dest_port = &entry->ipv6.dest_port;
1167+ fallthrough;
1168+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
1169+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
1170+ ai.src = &entry->ipv6.src_ip;
1171+ ai.dest = &entry->ipv6.dest_ip;
1172+ ai.ipv6 = true;
1173+ break;
1174+ }
1175+
1176+ seq_printf(m, " orig=");
1177+ mtk_print_addr_info(m, &ai);
1178+
1179+ switch (type) {
1180+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
1181+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
1182+ ai.src_port = &entry->ipv4.new.src_port;
1183+ ai.dest_port = &entry->ipv4.new.dest_port;
1184+ fallthrough;
1185+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
1186+ ai.src = &entry->ipv4.new.src_ip;
1187+ ai.dest = &entry->ipv4.new.dest_ip;
1188+ seq_printf(m, " new=");
1189+ mtk_print_addr_info(m, &ai);
1190+ break;
1191+ }
1192+
1193+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
1194+ l2 = &entry->ipv6.l2;
1195+ ib2 = entry->ipv6.ib2;
1196+ } else {
1197+ l2 = &entry->ipv4.l2;
1198+ ib2 = entry->ipv4.ib2;
1199+ }
1200+
1201+ *((__be32 *)h_source) = htonl(l2->src_mac_hi);
1202+ *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
1203+ *((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
1204+ *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
1205+
1206+ seq_printf(m, " eth=%pM->%pM etype=%04x"
developeree39bcf2023-06-16 08:03:30 +08001207+ " vlan=%d,%d ib1=%08x ib2=%08x\n",
developer8cb3ac72022-07-04 10:55:14 +08001208+ h_source, h_dest, ntohs(l2->etype),
developeree39bcf2023-06-16 08:03:30 +08001209+ l2->vlan1, l2->vlan2, entry->ib1, ib2);
developer8cb3ac72022-07-04 10:55:14 +08001210+ }
1211+
1212+ return 0;
1213+}
1214+
1215+static int
1216+mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
1217+{
1218+ return mtk_ppe_debugfs_foe_show(m, private, false);
1219+}
1220+
1221+static int
1222+mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
1223+{
1224+ return mtk_ppe_debugfs_foe_show(m, private, true);
1225+}
1226+
1227+static int
1228+mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
1229+{
1230+ return single_open(file, mtk_ppe_debugfs_foe_show_all,
1231+ inode->i_private);
1232+}
1233+
1234+static int
1235+mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
1236+{
1237+ return single_open(file, mtk_ppe_debugfs_foe_show_bind,
1238+ inode->i_private);
1239+}
1240+
developeree39bcf2023-06-16 08:03:30 +08001241+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
developer8cb3ac72022-07-04 10:55:14 +08001242+{
1243+ static const struct file_operations fops_all = {
1244+ .open = mtk_ppe_debugfs_foe_open_all,
1245+ .read = seq_read,
1246+ .llseek = seq_lseek,
1247+ .release = single_release,
1248+ };
developeree39bcf2023-06-16 08:03:30 +08001249+
developer8cb3ac72022-07-04 10:55:14 +08001250+ static const struct file_operations fops_bind = {
1251+ .open = mtk_ppe_debugfs_foe_open_bind,
1252+ .read = seq_read,
1253+ .llseek = seq_lseek,
1254+ .release = single_release,
1255+ };
developer7eb15dc2023-06-14 17:44:03 +08001256+
developeree39bcf2023-06-16 08:03:30 +08001257+ struct dentry *root;
developer7eb15dc2023-06-14 17:44:03 +08001258+
developeree39bcf2023-06-16 08:03:30 +08001259+ root = debugfs_create_dir("mtk_ppe", NULL);
developer8cb3ac72022-07-04 10:55:14 +08001260+ debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
1261+ debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
1262+
1263+ return 0;
1264+}
1265diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1266new file mode 100644
developeree39bcf2023-06-16 08:03:30 +08001267index 000000000..4294f0c74
developer8cb3ac72022-07-04 10:55:14 +08001268--- /dev/null
1269+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
developeree39bcf2023-06-16 08:03:30 +08001270@@ -0,0 +1,535 @@
developer8cb3ac72022-07-04 10:55:14 +08001271+// SPDX-License-Identifier: GPL-2.0-only
1272+/*
1273+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
1274+ */
1275+
1276+#include <linux/if_ether.h>
1277+#include <linux/rhashtable.h>
1278+#include <linux/ip.h>
1279+#include <linux/ipv6.h>
1280+#include <net/flow_offload.h>
1281+#include <net/pkt_cls.h>
1282+#include <net/dsa.h>
1283+#include "mtk_eth_soc.h"
1284+
1285+struct mtk_flow_data {
1286+ struct ethhdr eth;
1287+
1288+ union {
1289+ struct {
1290+ __be32 src_addr;
1291+ __be32 dst_addr;
1292+ } v4;
1293+
1294+ struct {
1295+ struct in6_addr src_addr;
1296+ struct in6_addr dst_addr;
1297+ } v6;
1298+ };
1299+
1300+ __be16 src_port;
1301+ __be16 dst_port;
1302+
1303+ struct {
1304+ u16 id;
1305+ __be16 proto;
1306+ u8 num;
1307+ } vlan;
1308+ struct {
1309+ u16 sid;
1310+ u8 num;
1311+ } pppoe;
1312+};
1313+
developeree39bcf2023-06-16 08:03:30 +08001314+struct mtk_flow_entry {
1315+ struct rhash_head node;
1316+ unsigned long cookie;
1317+ u16 hash;
1318+};
1319+
developer8cb3ac72022-07-04 10:55:14 +08001320+static const struct rhashtable_params mtk_flow_ht_params = {
1321+ .head_offset = offsetof(struct mtk_flow_entry, node),
1322+ .key_offset = offsetof(struct mtk_flow_entry, cookie),
1323+ .key_len = sizeof(unsigned long),
1324+ .automatic_shrinking = true,
1325+};
1326+
developeree39bcf2023-06-16 08:03:30 +08001327+static u32
1328+mtk_eth_timestamp(struct mtk_eth *eth)
1329+{
1330+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
1331+}
1332+
developer8cb3ac72022-07-04 10:55:14 +08001333+static int
developeree39bcf2023-06-16 08:03:30 +08001334+mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
1335+ bool egress)
developer8cb3ac72022-07-04 10:55:14 +08001336+{
developeree39bcf2023-06-16 08:03:30 +08001337+ return mtk_foe_entry_set_ipv4_tuple(foe, egress,
developer8cb3ac72022-07-04 10:55:14 +08001338+ data->v4.src_addr, data->src_port,
1339+ data->v4.dst_addr, data->dst_port);
1340+}
1341+
1342+static int
developeree39bcf2023-06-16 08:03:30 +08001343+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
developer8cb3ac72022-07-04 10:55:14 +08001344+{
developeree39bcf2023-06-16 08:03:30 +08001345+ return mtk_foe_entry_set_ipv6_tuple(foe,
developer8cb3ac72022-07-04 10:55:14 +08001346+ data->v6.src_addr.s6_addr32, data->src_port,
1347+ data->v6.dst_addr.s6_addr32, data->dst_port);
1348+}
1349+
1350+static void
1351+mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
1352+{
1353+ void *dest = eth + act->mangle.offset;
1354+ const void *src = &act->mangle.val;
1355+
1356+ if (act->mangle.offset > 8)
1357+ return;
1358+
1359+ if (act->mangle.mask == 0xffff) {
1360+ src += 2;
1361+ dest += 2;
1362+ }
1363+
1364+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
1365+}
1366+
developeree39bcf2023-06-16 08:03:30 +08001367+
developer8cb3ac72022-07-04 10:55:14 +08001368+static int
1369+mtk_flow_mangle_ports(const struct flow_action_entry *act,
1370+ struct mtk_flow_data *data)
1371+{
1372+ u32 val = ntohl(act->mangle.val);
1373+
1374+ switch (act->mangle.offset) {
1375+ case 0:
1376+ if (act->mangle.mask == ~htonl(0xffff))
1377+ data->dst_port = cpu_to_be16(val);
1378+ else
1379+ data->src_port = cpu_to_be16(val >> 16);
1380+ break;
1381+ case 2:
1382+ data->dst_port = cpu_to_be16(val);
1383+ break;
1384+ default:
1385+ return -EINVAL;
1386+ }
1387+
1388+ return 0;
1389+}
1390+
1391+static int
1392+mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
1393+ struct mtk_flow_data *data)
1394+{
1395+ __be32 *dest;
1396+
1397+ switch (act->mangle.offset) {
1398+ case offsetof(struct iphdr, saddr):
1399+ dest = &data->v4.src_addr;
1400+ break;
1401+ case offsetof(struct iphdr, daddr):
1402+ dest = &data->v4.dst_addr;
1403+ break;
1404+ default:
1405+ return -EINVAL;
1406+ }
1407+
1408+ memcpy(dest, &act->mangle.val, sizeof(u32));
1409+
1410+ return 0;
1411+}
1412+
1413+static int
1414+mtk_flow_get_dsa_port(struct net_device **dev)
1415+{
1416+#if IS_ENABLED(CONFIG_NET_DSA)
1417+ struct dsa_port *dp;
1418+
1419+ dp = dsa_port_from_netdev(*dev);
1420+ if (IS_ERR(dp))
1421+ return -ENODEV;
1422+
1423+ if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
1424+ return -ENODEV;
1425+
1426+ *dev = dp->cpu_dp->master;
1427+
1428+ return dp->index;
1429+#else
1430+ return -ENODEV;
1431+#endif
1432+}
1433+
1434+static int
1435+mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
developeree39bcf2023-06-16 08:03:30 +08001436+ struct net_device *dev)
developer8cb3ac72022-07-04 10:55:14 +08001437+{
developeree39bcf2023-06-16 08:03:30 +08001438+ int pse_port, dsa_port;
developer8cb3ac72022-07-04 10:55:14 +08001439+
1440+ dsa_port = mtk_flow_get_dsa_port(&dev);
developeree39bcf2023-06-16 08:03:30 +08001441+ if (dsa_port >= 0)
1442+ mtk_foe_entry_set_dsa(foe, dsa_port);
developer8cb3ac72022-07-04 10:55:14 +08001443+
1444+ if (dev == eth->netdev[0])
developeree39bcf2023-06-16 08:03:30 +08001445+ pse_port = PSE_GDM1_PORT;
developer8cb3ac72022-07-04 10:55:14 +08001446+ else if (dev == eth->netdev[1])
developeree39bcf2023-06-16 08:03:30 +08001447+ pse_port = PSE_GDM2_PORT;
1448+ else
1449+ return -EOPNOTSUPP;
developer7eb15dc2023-06-14 17:44:03 +08001450+
developeree39bcf2023-06-16 08:03:30 +08001451+ mtk_foe_entry_set_pse_port(foe, pse_port);
developer8cb3ac72022-07-04 10:55:14 +08001452+
1453+ return 0;
1454+}
1455+
1456+static int
developeree39bcf2023-06-16 08:03:30 +08001457+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
developer8cb3ac72022-07-04 10:55:14 +08001458+{
1459+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1460+ struct flow_action_entry *act;
1461+ struct mtk_flow_data data = {};
1462+ struct mtk_foe_entry foe;
1463+ struct net_device *odev = NULL;
1464+ struct mtk_flow_entry *entry;
1465+ int offload_type = 0;
1466+ u16 addr_type = 0;
developeree39bcf2023-06-16 08:03:30 +08001467+ u32 timestamp;
developer8cb3ac72022-07-04 10:55:14 +08001468+ u8 l4proto = 0;
1469+ int err = 0;
developeree39bcf2023-06-16 08:03:30 +08001470+ int hash;
developer8cb3ac72022-07-04 10:55:14 +08001471+ int i;
1472+
1473+ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
1474+ return -EEXIST;
1475+
1476+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
1477+ struct flow_match_meta match;
1478+
1479+ flow_rule_match_meta(rule, &match);
1480+ } else {
1481+ return -EOPNOTSUPP;
1482+ }
1483+
1484+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
1485+ struct flow_match_control match;
1486+
1487+ flow_rule_match_control(rule, &match);
1488+ addr_type = match.key->addr_type;
1489+ } else {
1490+ return -EOPNOTSUPP;
1491+ }
1492+
1493+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
1494+ struct flow_match_basic match;
1495+
1496+ flow_rule_match_basic(rule, &match);
1497+ l4proto = match.key->ip_proto;
1498+ } else {
1499+ return -EOPNOTSUPP;
1500+ }
1501+
1502+ flow_action_for_each(i, act, &rule->action) {
1503+ switch (act->id) {
1504+ case FLOW_ACTION_MANGLE:
1505+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
1506+ mtk_flow_offload_mangle_eth(act, &data.eth);
1507+ break;
1508+ case FLOW_ACTION_REDIRECT:
1509+ odev = act->dev;
1510+ break;
1511+ case FLOW_ACTION_CSUM:
1512+ break;
1513+ case FLOW_ACTION_VLAN_PUSH:
1514+ if (data.vlan.num == 1 ||
1515+ act->vlan.proto != htons(ETH_P_8021Q))
1516+ return -EOPNOTSUPP;
1517+
1518+ data.vlan.id = act->vlan.vid;
1519+ data.vlan.proto = act->vlan.proto;
1520+ data.vlan.num++;
1521+ break;
1522+ case FLOW_ACTION_VLAN_POP:
1523+ break;
1524+ case FLOW_ACTION_PPPOE_PUSH:
1525+ if (data.pppoe.num == 1)
1526+ return -EOPNOTSUPP;
1527+
1528+ data.pppoe.sid = act->pppoe.sid;
1529+ data.pppoe.num++;
1530+ break;
1531+ default:
1532+ return -EOPNOTSUPP;
1533+ }
1534+ }
1535+
developeree39bcf2023-06-16 08:03:30 +08001536+ switch (addr_type) {
1537+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1538+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1539+ break;
1540+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1541+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1542+ break;
1543+ default:
1544+ return -EOPNOTSUPP;
1545+ }
1546+
developer8cb3ac72022-07-04 10:55:14 +08001547+ if (!is_valid_ether_addr(data.eth.h_source) ||
1548+ !is_valid_ether_addr(data.eth.h_dest))
1549+ return -EINVAL;
1550+
developeree39bcf2023-06-16 08:03:30 +08001551+ err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
1552+ data.eth.h_source,
1553+ data.eth.h_dest);
developer8cb3ac72022-07-04 10:55:14 +08001554+ if (err)
1555+ return err;
1556+
1557+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1558+ struct flow_match_ports ports;
1559+
1560+ flow_rule_match_ports(rule, &ports);
1561+ data.src_port = ports.key->src;
1562+ data.dst_port = ports.key->dst;
developeree39bcf2023-06-16 08:03:30 +08001563+ } else {
developer8cb3ac72022-07-04 10:55:14 +08001564+ return -EOPNOTSUPP;
1565+ }
1566+
1567+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1568+ struct flow_match_ipv4_addrs addrs;
1569+
1570+ flow_rule_match_ipv4_addrs(rule, &addrs);
1571+
1572+ data.v4.src_addr = addrs.key->src;
1573+ data.v4.dst_addr = addrs.key->dst;
1574+
developeree39bcf2023-06-16 08:03:30 +08001575+ mtk_flow_set_ipv4_addr(&foe, &data, false);
developer8cb3ac72022-07-04 10:55:14 +08001576+ }
1577+
1578+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1579+ struct flow_match_ipv6_addrs addrs;
1580+
1581+ flow_rule_match_ipv6_addrs(rule, &addrs);
1582+
1583+ data.v6.src_addr = addrs.key->src;
1584+ data.v6.dst_addr = addrs.key->dst;
1585+
developeree39bcf2023-06-16 08:03:30 +08001586+ mtk_flow_set_ipv6_addr(&foe, &data);
developer8cb3ac72022-07-04 10:55:14 +08001587+ }
1588+
1589+ flow_action_for_each(i, act, &rule->action) {
1590+ if (act->id != FLOW_ACTION_MANGLE)
1591+ continue;
1592+
1593+ switch (act->mangle.htype) {
1594+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1595+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1596+ err = mtk_flow_mangle_ports(act, &data);
1597+ break;
1598+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
1599+ err = mtk_flow_mangle_ipv4(act, &data);
1600+ break;
1601+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
1602+ /* handled earlier */
1603+ break;
1604+ default:
1605+ return -EOPNOTSUPP;
1606+ }
1607+
1608+ if (err)
1609+ return err;
1610+ }
1611+
1612+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
developeree39bcf2023-06-16 08:03:30 +08001613+ err = mtk_flow_set_ipv4_addr(&foe, &data, true);
developer8cb3ac72022-07-04 10:55:14 +08001614+ if (err)
1615+ return err;
1616+ }
1617+
1618+ if (data.vlan.num == 1) {
1619+ if (data.vlan.proto != htons(ETH_P_8021Q))
1620+ return -EOPNOTSUPP;
1621+
developeree39bcf2023-06-16 08:03:30 +08001622+ mtk_foe_entry_set_vlan(&foe, data.vlan.id);
developer8cb3ac72022-07-04 10:55:14 +08001623+ }
1624+ if (data.pppoe.num == 1)
developeree39bcf2023-06-16 08:03:30 +08001625+ mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
developer8cb3ac72022-07-04 10:55:14 +08001626+
developeree39bcf2023-06-16 08:03:30 +08001627+ err = mtk_flow_set_output_device(eth, &foe, odev);
developer8cb3ac72022-07-04 10:55:14 +08001628+ if (err)
1629+ return err;
1630+
1631+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1632+ if (!entry)
1633+ return -ENOMEM;
1634+
1635+ entry->cookie = f->cookie;
developeree39bcf2023-06-16 08:03:30 +08001636+ timestamp = mtk_eth_timestamp(eth);
1637+ hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
1638+ if (hash < 0) {
1639+ err = hash;
developer8cb3ac72022-07-04 10:55:14 +08001640+ goto free;
developeree39bcf2023-06-16 08:03:30 +08001641+ }
developer8cb3ac72022-07-04 10:55:14 +08001642+
developeree39bcf2023-06-16 08:03:30 +08001643+ entry->hash = hash;
developer8cb3ac72022-07-04 10:55:14 +08001644+ err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
1645+ mtk_flow_ht_params);
1646+ if (err < 0)
developeree39bcf2023-06-16 08:03:30 +08001647+ goto clear_flow;
developer8cb3ac72022-07-04 10:55:14 +08001648+
1649+ return 0;
developeree39bcf2023-06-16 08:03:30 +08001650+clear_flow:
1651+ mtk_foe_entry_clear(&eth->ppe, hash);
developer8cb3ac72022-07-04 10:55:14 +08001652+free:
1653+ kfree(entry);
1654+ return err;
1655+}
1656+
1657+static int
1658+mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
1659+{
1660+ struct mtk_flow_entry *entry;
1661+
1662+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
1663+ mtk_flow_ht_params);
1664+ if (!entry)
1665+ return -ENOENT;
1666+
developeree39bcf2023-06-16 08:03:30 +08001667+ mtk_foe_entry_clear(&eth->ppe, entry->hash);
developer8cb3ac72022-07-04 10:55:14 +08001668+ rhashtable_remove_fast(&eth->flow_table, &entry->node,
1669+ mtk_flow_ht_params);
1670+ kfree(entry);
1671+
1672+ return 0;
1673+}
1674+
1675+static int
1676+mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1677+{
1678+ struct mtk_flow_entry *entry;
developeree39bcf2023-06-16 08:03:30 +08001679+ int timestamp;
1680+ u32 idle;
developer8cb3ac72022-07-04 10:55:14 +08001681+
1682+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
1683+ mtk_flow_ht_params);
1684+ if (!entry)
1685+ return -ENOENT;
1686+
developeree39bcf2023-06-16 08:03:30 +08001687+ timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
1688+ if (timestamp < 0)
1689+ return -ETIMEDOUT;
1690+
1691+ idle = mtk_eth_timestamp(eth) - timestamp;
developer8cb3ac72022-07-04 10:55:14 +08001692+ f->stats.lastused = jiffies - idle * HZ;
1693+
1694+ return 0;
1695+}
1696+
1697+static DEFINE_MUTEX(mtk_flow_offload_mutex);
1698+
developeree39bcf2023-06-16 08:03:30 +08001699+static int
1700+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
developer8cb3ac72022-07-04 10:55:14 +08001701+{
developeree39bcf2023-06-16 08:03:30 +08001702+ struct flow_cls_offload *cls = type_data;
1703+ struct net_device *dev = cb_priv;
1704+ struct mtk_mac *mac = netdev_priv(dev);
1705+ struct mtk_eth *eth = mac->hw;
developer8cb3ac72022-07-04 10:55:14 +08001706+ int err;
1707+
developeree39bcf2023-06-16 08:03:30 +08001708+ if (!tc_can_offload(dev))
1709+ return -EOPNOTSUPP;
1710+
1711+ if (type != TC_SETUP_CLSFLOWER)
1712+ return -EOPNOTSUPP;
1713+
developer8cb3ac72022-07-04 10:55:14 +08001714+ mutex_lock(&mtk_flow_offload_mutex);
1715+ switch (cls->command) {
1716+ case FLOW_CLS_REPLACE:
developeree39bcf2023-06-16 08:03:30 +08001717+ err = mtk_flow_offload_replace(eth, cls);
developer8cb3ac72022-07-04 10:55:14 +08001718+ break;
1719+ case FLOW_CLS_DESTROY:
1720+ err = mtk_flow_offload_destroy(eth, cls);
1721+ break;
1722+ case FLOW_CLS_STATS:
1723+ err = mtk_flow_offload_stats(eth, cls);
1724+ break;
1725+ default:
1726+ err = -EOPNOTSUPP;
1727+ break;
1728+ }
1729+ mutex_unlock(&mtk_flow_offload_mutex);
1730+
1731+ return err;
1732+}
1733+
1734+static int
1735+mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1736+{
1737+ struct mtk_mac *mac = netdev_priv(dev);
1738+ struct mtk_eth *eth = mac->hw;
1739+ static LIST_HEAD(block_cb_list);
1740+ struct flow_block_cb *block_cb;
1741+ flow_setup_cb_t *cb;
developeree39bcf2023-06-16 08:03:30 +08001742+ int err = 0;
developer207b39d2022-10-07 15:57:16 +08001743+
developeree39bcf2023-06-16 08:03:30 +08001744+ if (!eth->ppe.foe_table)
developer8cb3ac72022-07-04 10:55:14 +08001745+ return -EOPNOTSUPP;
1746+
1747+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1748+ return -EOPNOTSUPP;
1749+
1750+ cb = mtk_eth_setup_tc_block_cb;
1751+ f->driver_block_list = &block_cb_list;
1752+
1753+ switch (f->command) {
1754+ case FLOW_BLOCK_BIND:
1755+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
1756+ if (block_cb) {
1757+ flow_block_cb_incref(block_cb);
developeree39bcf2023-06-16 08:03:30 +08001758+ goto unlock;
developer8cb3ac72022-07-04 10:55:14 +08001759+ }
1760+ block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
developeree39bcf2023-06-16 08:03:30 +08001761+ if (IS_ERR(block_cb)) {
1762+ err = PTR_ERR(block_cb);
1763+ goto unlock;
1764+ }
developer8cb3ac72022-07-04 10:55:14 +08001765+
1766+ flow_block_cb_add(block_cb, f);
1767+ list_add_tail(&block_cb->driver_list, &block_cb_list);
developeree39bcf2023-06-16 08:03:30 +08001768+ break;
developer8cb3ac72022-07-04 10:55:14 +08001769+ case FLOW_BLOCK_UNBIND:
1770+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
developeree39bcf2023-06-16 08:03:30 +08001771+ if (!block_cb) {
1772+ err = -ENOENT;
1773+ goto unlock;
1774+ }
developer8cb3ac72022-07-04 10:55:14 +08001775+
developeree39bcf2023-06-16 08:03:30 +08001776+ if (flow_block_cb_decref(block_cb)) {
developer8cb3ac72022-07-04 10:55:14 +08001777+ flow_block_cb_remove(block_cb, f);
1778+ list_del(&block_cb->driver_list);
1779+ }
developeree39bcf2023-06-16 08:03:30 +08001780+ break;
developer8cb3ac72022-07-04 10:55:14 +08001781+ default:
developeree39bcf2023-06-16 08:03:30 +08001782+ err = -EOPNOTSUPP;
1783+ break;
developer8cb3ac72022-07-04 10:55:14 +08001784+ }
developeree39bcf2023-06-16 08:03:30 +08001785+
1786+unlock:
1787+ return err;
developer8cb3ac72022-07-04 10:55:14 +08001788+}
1789+
1790+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
1791+ void *type_data)
1792+{
developeree39bcf2023-06-16 08:03:30 +08001793+ if (type == TC_SETUP_FT)
developer8cb3ac72022-07-04 10:55:14 +08001794+ return mtk_eth_setup_tc_block(dev, type_data);
developeree39bcf2023-06-16 08:03:30 +08001795+
1796+ return -EOPNOTSUPP;
developer8cb3ac72022-07-04 10:55:14 +08001797+}
1798+
1799+int mtk_eth_offload_init(struct mtk_eth *eth)
1800+{
developeree39bcf2023-06-16 08:03:30 +08001801+ if (!eth->ppe.foe_table)
1802+ return 0;
1803+
developer8cb3ac72022-07-04 10:55:14 +08001804+ return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
1805+}
1806diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
1807new file mode 100644
developeree39bcf2023-06-16 08:03:30 +08001808index 000000000..0c45ea090
developer8cb3ac72022-07-04 10:55:14 +08001809--- /dev/null
1810+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
developeree39bcf2023-06-16 08:03:30 +08001811@@ -0,0 +1,144 @@
developer8cb3ac72022-07-04 10:55:14 +08001812+// SPDX-License-Identifier: GPL-2.0-only
1813+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
1814+
1815+#ifndef __MTK_PPE_REGS_H
1816+#define __MTK_PPE_REGS_H
1817+
1818+#define MTK_PPE_GLO_CFG 0x200
1819+#define MTK_PPE_GLO_CFG_EN BIT(0)
1820+#define MTK_PPE_GLO_CFG_TSID_EN BIT(1)
1821+#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2)
1822+#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3)
1823+#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4)
1824+#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5)
1825+#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6)
1826+#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7)
1827+#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8)
1828+#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9)
1829+#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10)
1830+#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11)
1831+#define MTK_PPE_GLO_CFG_MCAST_ENTRIES	GENMASK(13, 12)
1832+#define MTK_PPE_GLO_CFG_BUSY BIT(31)
1833+
1834+#define MTK_PPE_FLOW_CFG 0x204
1835+#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
1836+#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
1837+#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
1838+#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9)
1839+#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10)
1840+#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12)
1841+#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13)
1842+#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14)
1843+#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15)
1844+#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16)
1845+#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17)
1846+#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18)
1847+#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19)
1848+#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20)
1849+
1850+#define MTK_PPE_IP_PROTO_CHK 0x208
1851+#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0)
1852+#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16)
1853+
1854+#define MTK_PPE_TB_CFG 0x21c
1855+#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0)
1856+#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3)
1857+#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4)
1858+#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6)
1859+#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7)
1860+#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8)
1861+#define MTK_PPE_TB_CFG_AGE_TCP BIT(9)
1862+#define MTK_PPE_TB_CFG_AGE_UDP BIT(10)
1863+#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11)
1864+#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12)
1865+#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
1866+#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
1867+#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
1868+
1869+enum {
1870+ MTK_PPE_SCAN_MODE_DISABLED,
1871+ MTK_PPE_SCAN_MODE_CHECK_AGE,
1872+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE,
1873+};
1874+
1875+enum {
1876+ MTK_PPE_KEEPALIVE_DISABLE,
1877+ MTK_PPE_KEEPALIVE_UNICAST_CPU,
1878+ MTK_PPE_KEEPALIVE_DUP_CPU = 3,
1879+};
1880+
1881+enum {
1882+ MTK_PPE_SEARCH_MISS_ACTION_DROP,
1883+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2,
1884+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3,
1885+};
1886+
1887+#define MTK_PPE_TB_BASE 0x220
1888+
1889+#define MTK_PPE_TB_USED 0x224
1890+#define MTK_PPE_TB_USED_NUM GENMASK(13, 0)
1891+
1892+#define MTK_PPE_BIND_RATE 0x228
1893+#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0)
1894+#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16)
1895+
1896+#define MTK_PPE_BIND_LIMIT0 0x22c
1897+#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0)
1898+#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16)
1899+
1900+#define MTK_PPE_BIND_LIMIT1 0x230
1901+#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0)
1902+#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16)
1903+
1904+#define MTK_PPE_KEEPALIVE 0x234
1905+#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0)
1906+#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16)
1907+#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24)
1908+
1909+#define MTK_PPE_UNBIND_AGE 0x238
1910+#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16)
1911+#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0)
1912+
1913+#define MTK_PPE_BIND_AGE0 0x23c
1914+#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
1915+#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
1916+
1917+#define MTK_PPE_BIND_AGE1 0x240
1918+#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
1919+#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
1920+
1921+#define MTK_PPE_HASH_SEED 0x244
1922+
1923+#define MTK_PPE_DEFAULT_CPU_PORT 0x248
1924+#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
1925+
1926+#define MTK_PPE_MTU_DROP 0x308
1927+
1928+#define MTK_PPE_VLAN_MTU0 0x30c
1929+#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0)
1930+#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16)
1931+
1932+#define MTK_PPE_VLAN_MTU1 0x310
1933+#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0)