blob: 936f979567b3c2b0feca615c3097ba14e82a9b03 [file] [log] [blame]
developer8cb3ac72022-07-04 10:55:14 +08001From 6ad9bd65769003ab526e504577e0f747eba14287 Mon Sep 17 00:00:00 2001
2From: Bo Jiao <Bo.Jiao@mediatek.com>
3Date: Wed, 22 Jun 2022 09:42:19 +0800
4Subject: [PATCH 1/8]
5 9990-mt7622-backport-nf-hw-offload-framework-and-upstream-hnat-plus-xt-FLOWOFFLOAD-update-v2
6
7---
8 drivers/net/ethernet/mediatek/Makefile | 3 +-
9 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 28 +-
10 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 20 +-
11 drivers/net/ethernet/mediatek/mtk_ppe.c | 509 +++++++
12 drivers/net/ethernet/mediatek/mtk_ppe.h | 288 ++++
13 .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 214 +++
14 .../net/ethernet/mediatek/mtk_ppe_offload.c | 526 ++++++++
15 drivers/net/ethernet/mediatek/mtk_ppe_regs.h | 144 ++
16 drivers/net/ppp/ppp_generic.c | 22 +
17 drivers/net/ppp/pppoe.c | 24 +
18 include/linux/netdevice.h | 60 +
19 include/linux/ppp_channel.h | 3 +
20 include/net/dsa.h | 10 +
21 include/net/flow_offload.h | 4 +
22 include/net/ip6_route.h | 5 +-
23 .../net/netfilter/ipv6/nf_conntrack_ipv6.h | 3 -
24 include/net/netfilter/nf_conntrack.h | 12 +
25 include/net/netfilter/nf_conntrack_acct.h | 11 +
26 include/net/netfilter/nf_flow_table.h | 264 +++-
27 include/net/netns/conntrack.h | 6 +
28 .../linux/netfilter/nf_conntrack_common.h | 9 +-
29 include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h | 17 +
30 net/8021q/vlan_dev.c | 21 +
31 net/bridge/br_device.c | 49 +
32 net/bridge/br_private.h | 20 +
33 net/bridge/br_vlan.c | 55 +
34 net/core/dev.c | 46 +
35 net/dsa/dsa.c | 9 +
36 net/dsa/slave.c | 41 +-
37 net/ipv4/netfilter/Kconfig | 4 +-
38 net/ipv6/ip6_output.c | 2 +-
39 net/ipv6/netfilter/Kconfig | 3 +-
40 net/ipv6/route.c | 22 +-
41 net/netfilter/Kconfig | 14 +-
42 net/netfilter/Makefile | 4 +-
43 net/netfilter/nf_conntrack_core.c | 20 +-
44 net/netfilter/nf_conntrack_proto_tcp.c | 4 +
45 net/netfilter/nf_conntrack_proto_udp.c | 4 +
46 net/netfilter/nf_conntrack_standalone.c | 34 +-
47 net/netfilter/nf_flow_table_core.c | 446 +++---
48 net/netfilter/nf_flow_table_ip.c | 455 ++++---
49 net/netfilter/nf_flow_table_offload.c | 1191 +++++++++++++++++
50 net/netfilter/xt_FLOWOFFLOAD.c | 719 ++++++++++
51 43 files changed, 4913 insertions(+), 432 deletions(-)
52 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.c
53 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe.h
54 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
55 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_offload.c
56 create mode 100644 drivers/net/ethernet/mediatek/mtk_ppe_regs.h
57 create mode 100644 include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
58 create mode 100644 net/netfilter/nf_flow_table_offload.c
59 create mode 100644 net/netfilter/xt_FLOWOFFLOAD.c
60
61diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
62index 13c5b4e8f..0a6af99f1 100755
63--- a/drivers/net/ethernet/mediatek/Makefile
64+++ b/drivers/net/ethernet/mediatek/Makefile
65@@ -4,5 +4,6 @@
66 #
67
68 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
developer68838542022-10-03 23:42:21 +080069-mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o
70+mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
developer8cb3ac72022-07-04 10:55:14 +080071+ mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
72 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
73diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
74index 2b21f7ed0..819d8a0be 100755
75--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
76+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
developerdca0fde2022-12-14 11:40:35 +080077@@ -3081,6 +3081,7 @@ static int mtk_open(struct net_device *d
78 struct mtk_phylink_priv *phylink_priv = &mac->phylink_priv;
79 int err, i;
80 struct device_node *phy_node;
81+ u32 gdm_config = MTK_GDMA_TO_PDMA;
developer8cb3ac72022-07-04 10:55:14 +080082
developerdca0fde2022-12-14 11:40:35 +080083 err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
84 if (err) {
85@@ -3157,7 +3158,10 @@ static int mtk_open(struct net_device *d
86 if (!phy_node && eth->xgmii->regmap_sgmii[mac->id])
87 regmap_write(eth->xgmii->regmap_sgmii[mac->id], SGMSYS_QPHY_PWR_STATE_CTRL, 0);
developer8cb3ac72022-07-04 10:55:14 +080088
developerdca0fde2022-12-14 11:40:35 +080089- mtk_gdm_config(eth, mac->id, MTK_GDMA_TO_PDMA);
90+ if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
developer8cb3ac72022-07-04 10:55:14 +080091+ gdm_config = MTK_GDMA_TO_PPE;
92+
developerdca0fde2022-12-14 11:40:35 +080093+ mtk_gdm_config(eth, mac->id, gdm_config);
developer8cb3ac72022-07-04 10:55:14 +080094
developerdca0fde2022-12-14 11:40:35 +080095 return 0;
96 }
97@@ -3238,6 +3242,9 @@ static int mtk_stop(struct net_device *d
developer8cb3ac72022-07-04 10:55:14 +080098
99 mtk_dma_free(eth);
100
101+ if (eth->soc->offload_version)
102+ mtk_ppe_stop(&eth->ppe);
103+
104 return 0;
105 }
106
developerdca0fde2022-12-14 11:40:35 +0800107@@ -3915,6 +3922,7 @@ static const struct net_device_ops mtk_n
developer8cb3ac72022-07-04 10:55:14 +0800108 #ifdef CONFIG_NET_POLL_CONTROLLER
109 .ndo_poll_controller = mtk_poll_controller,
110 #endif
111+ .ndo_setup_tc = mtk_eth_setup_tc,
112 };
113
114 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
developerdca0fde2022-12-14 11:40:35 +0800115@@ -4308,6 +4316,17 @@ static int mtk_probe(struct platform_dev
developer8cb3ac72022-07-04 10:55:14 +0800116 goto err_free_dev;
117 }
118
119+ if (eth->soc->offload_version) {
120+ err = mtk_ppe_init(&eth->ppe, eth->dev,
121+ eth->base + MTK_ETH_PPE_BASE, 2);
122+ if (err)
123+ goto err_free_dev;
124+
125+ err = mtk_eth_offload_init(eth);
126+ if (err)
127+ goto err_free_dev;
128+ }
129+
130 for (i = 0; i < MTK_MAX_DEVS; i++) {
131 if (!eth->netdev[i])
132 continue;
developerdca0fde2022-12-14 11:40:35 +0800133@@ -4410,6 +4429,7 @@ static const struct mtk_soc_data mt2701_
developer8cb3ac72022-07-04 10:55:14 +0800134 .required_clks = MT7623_CLKS_BITMAP,
135 .required_pctl = true,
136 .has_sram = false,
137+ .offload_version = 2,
developerdca0fde2022-12-14 11:40:35 +0800138 .txrx = {
139 .txd_size = sizeof(struct mtk_tx_dma),
140 .rxd_size = sizeof(struct mtk_rx_dma),
141@@ -4424,6 +4444,7 @@ static const struct mtk_soc_data mt7621_
developer8cb3ac72022-07-04 10:55:14 +0800142 .required_clks = MT7621_CLKS_BITMAP,
143 .required_pctl = false,
144 .has_sram = false,
145+ .offload_version = 2,
developerdca0fde2022-12-14 11:40:35 +0800146 .txrx = {
147 .txd_size = sizeof(struct mtk_tx_dma),
148 .rxd_size = sizeof(struct mtk_rx_dma),
149@@ -4439,6 +4460,7 @@ static const struct mtk_soc_data mt7622_
developer8cb3ac72022-07-04 10:55:14 +0800150 .required_clks = MT7622_CLKS_BITMAP,
151 .required_pctl = false,
152 .has_sram = false,
153+ .offload_version = 2,
developerdca0fde2022-12-14 11:40:35 +0800154 .txrx = {
155 .txd_size = sizeof(struct mtk_tx_dma),
156 .rxd_size = sizeof(struct mtk_rx_dma),
157@@ -4453,6 +4475,7 @@ static const struct mtk_soc_data mt7623_
developer8cb3ac72022-07-04 10:55:14 +0800158 .required_clks = MT7623_CLKS_BITMAP,
159 .required_pctl = true,
160 .has_sram = false,
161+ .offload_version = 2,
developerdca0fde2022-12-14 11:40:35 +0800162 .txrx = {
163 .txd_size = sizeof(struct mtk_tx_dma),
164 .rxd_size = sizeof(struct mtk_rx_dma),
developer8cb3ac72022-07-04 10:55:14 +0800165diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
166index b6380ffeb..349f98503 100755
167--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
168+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
169@@ -15,6 +15,8 @@
170 #include <linux/u64_stats_sync.h>
171 #include <linux/refcount.h>
172 #include <linux/phylink.h>
173+#include <linux/rhashtable.h>
174+#include "mtk_ppe.h"
175
176 #define MTK_QDMA_PAGE_SIZE 2048
177 #define MTK_MAX_RX_LENGTH 1536
178@@ -37,7 +39,8 @@
179 NETIF_F_HW_VLAN_CTAG_TX | \
180 NETIF_F_SG | NETIF_F_TSO | \
181 NETIF_F_TSO6 | \
182- NETIF_F_IPV6_CSUM)
183+ NETIF_F_IPV6_CSUM |\
184+ NETIF_F_HW_TC)
185 #define MTK_SET_FEATURES (NETIF_F_LRO | \
186 NETIF_F_HW_VLAN_CTAG_RX)
187 #define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
188@@ -107,6 +110,7 @@
189 #define MTK_GDMA_TCS_EN BIT(21)
190 #define MTK_GDMA_UCS_EN BIT(20)
191 #define MTK_GDMA_TO_PDMA 0x0
192+#define MTK_GDMA_TO_PPE 0x4444
193 #define MTK_GDMA_DROP_ALL 0x7777
194
195 /* Unicast Filter MAC Address Register - Low */
196@@ -547,6 +551,12 @@
197 #define RX_DMA_TCI(_x) ((_x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
198 #define RX_DMA_VPID(_x) (((_x) >> 16) & 0xffff)
199
200+/* QDMA descriptor rxd4 */
201+#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
202+#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
203+#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
204+#define MTK_RXD4_ALG GENMASK(31, 22)
205+
206 /* QDMA descriptor rxd4 */
207 #define RX_DMA_L4_VALID BIT(24)
208 #define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
209@@ -1158,6 +1168,7 @@ struct mtk_soc_data {
210 u32 caps;
211 u32 required_clks;
212 bool required_pctl;
213+ u8 offload_version;
214 netdev_features_t hw_features;
215 bool has_sram;
216 };
217@@ -1271,6 +1282,9 @@ struct mtk_eth {
218 int ip_align;
219 spinlock_t syscfg0_lock;
220 struct timer_list mtk_dma_monitor_timer;
221+
222+ struct mtk_ppe ppe;
223+ struct rhashtable flow_table;
224 };
225
226 /* struct mtk_mac - the structure that holds the info about the MACs of the
developer1fb19c92023-03-07 23:45:23 +0800227@@ -1319,4 +1333,7 @@ int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
228 void mtk_usxgmii_reset(struct mtk_xgmii *ss, int mac_id);
229 int mtk_dump_usxgmii(struct regmap *pmap, char *name, u32 offset, u32 range);
developer8cb3ac72022-07-04 10:55:14 +0800230
231+int mtk_eth_offload_init(struct mtk_eth *eth);
232+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
233+ void *type_data);
developer1fb19c92023-03-07 23:45:23 +0800234 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
developer8cb3ac72022-07-04 10:55:14 +0800235diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
236new file mode 100644
237index 000000000..66298e223
238--- /dev/null
239+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
240@@ -0,0 +1,509 @@
241+// SPDX-License-Identifier: GPL-2.0-only
242+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
243+
244+#include <linux/kernel.h>
245+#include <linux/io.h>
246+#include <linux/iopoll.h>
247+#include <linux/etherdevice.h>
248+#include <linux/platform_device.h>
249+#include "mtk_ppe.h"
250+#include "mtk_ppe_regs.h"
251+
252+static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
253+{
254+ writel(val, ppe->base + reg);
255+}
256+
257+static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
258+{
259+ return readl(ppe->base + reg);
260+}
261+
262+static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
263+{
264+ u32 val;
265+
266+ val = ppe_r32(ppe, reg);
267+ val &= ~mask;
268+ val |= set;
269+ ppe_w32(ppe, reg, val);
270+
271+ return val;
272+}
273+
274+static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
275+{
276+ return ppe_m32(ppe, reg, 0, val);
277+}
278+
279+static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
280+{
281+ return ppe_m32(ppe, reg, val, 0);
282+}
283+
284+static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
285+{
286+ int ret;
287+ u32 val;
288+
289+ ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
290+ !(val & MTK_PPE_GLO_CFG_BUSY),
291+ 20, MTK_PPE_WAIT_TIMEOUT_US);
292+
293+ if (ret)
294+ dev_err(ppe->dev, "PPE table busy");
295+
296+ return ret;
297+}
298+
299+static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
300+{
301+ ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
302+ ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
303+}
304+
305+static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
306+{
307+ mtk_ppe_cache_clear(ppe);
308+
309+ ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
310+ enable * MTK_PPE_CACHE_CTL_EN);
311+}
312+
313+static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
314+{
315+ u32 hv1, hv2, hv3;
316+ u32 hash;
317+
318+ switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
319+ case MTK_PPE_PKT_TYPE_BRIDGE:
320+ hv1 = e->bridge.src_mac_lo;
321+ hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
322+ hv2 = e->bridge.src_mac_hi >> 16;
323+ hv2 ^= e->bridge.dest_mac_lo;
324+ hv3 = e->bridge.dest_mac_hi;
325+ break;
326+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
327+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
328+ hv1 = e->ipv4.orig.ports;
329+ hv2 = e->ipv4.orig.dest_ip;
330+ hv3 = e->ipv4.orig.src_ip;
331+ break;
332+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
333+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
334+ hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
335+ hv1 ^= e->ipv6.ports;
336+
337+ hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
338+ hv2 ^= e->ipv6.dest_ip[0];
339+
340+ hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
341+ hv3 ^= e->ipv6.src_ip[0];
342+ break;
343+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
344+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
345+ default:
346+ WARN_ON_ONCE(1);
347+ return MTK_PPE_HASH_MASK;
348+ }
349+
350+ hash = (hv1 & hv2) | ((~hv1) & hv3);
351+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
352+ hash ^= hv1 ^ hv2 ^ hv3;
353+ hash ^= hash >> 16;
354+ hash <<= 1;
355+ hash &= MTK_PPE_ENTRIES - 1;
356+
357+ return hash;
358+}
359+
360+static inline struct mtk_foe_mac_info *
361+mtk_foe_entry_l2(struct mtk_foe_entry *entry)
362+{
363+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
364+
365+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
366+ return &entry->ipv6.l2;
367+
368+ return &entry->ipv4.l2;
369+}
370+
371+static inline u32 *
372+mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
373+{
374+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
375+
376+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
377+ return &entry->ipv6.ib2;
378+
379+ return &entry->ipv4.ib2;
380+}
381+
382+int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
383+ u8 pse_port, u8 *src_mac, u8 *dest_mac)
384+{
385+ struct mtk_foe_mac_info *l2;
386+ u32 ports_pad, val;
387+
388+ memset(entry, 0, sizeof(*entry));
389+
390+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
391+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
392+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
393+ MTK_FOE_IB1_BIND_TTL |
394+ MTK_FOE_IB1_BIND_CACHE;
395+ entry->ib1 = val;
396+
397+ val = FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) |
398+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f) |
399+ FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port);
400+
401+ if (is_multicast_ether_addr(dest_mac))
402+ val |= MTK_FOE_IB2_MULTICAST;
403+
404+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
405+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
406+ entry->ipv4.orig.ports = ports_pad;
407+ if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
408+ entry->ipv6.ports = ports_pad;
409+
410+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
411+ entry->ipv6.ib2 = val;
412+ l2 = &entry->ipv6.l2;
413+ } else {
414+ entry->ipv4.ib2 = val;
415+ l2 = &entry->ipv4.l2;
416+ }
417+
418+ l2->dest_mac_hi = get_unaligned_be32(dest_mac);
419+ l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
420+ l2->src_mac_hi = get_unaligned_be32(src_mac);
421+ l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
422+
423+ if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
424+ l2->etype = ETH_P_IPV6;
425+ else
426+ l2->etype = ETH_P_IP;
427+
428+ return 0;
429+}
430+
431+int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port)
432+{
433+ u32 *ib2 = mtk_foe_entry_ib2(entry);
434+ u32 val;
435+
436+ val = *ib2;
437+ val &= ~MTK_FOE_IB2_DEST_PORT;
438+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
439+ *ib2 = val;
440+
441+ return 0;
442+}
443+
444+int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool egress,
445+ __be32 src_addr, __be16 src_port,
446+ __be32 dest_addr, __be16 dest_port)
447+{
448+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
449+ struct mtk_ipv4_tuple *t;
450+
451+ switch (type) {
452+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
453+ if (egress) {
454+ t = &entry->ipv4.new;
455+ break;
456+ }
457+ fallthrough;
458+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
459+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
460+ t = &entry->ipv4.orig;
461+ break;
462+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
463+ entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
464+ entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
465+ return 0;
466+ default:
467+ WARN_ON_ONCE(1);
468+ return -EINVAL;
469+ }
470+
471+ t->src_ip = be32_to_cpu(src_addr);
472+ t->dest_ip = be32_to_cpu(dest_addr);
473+
474+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
475+ return 0;
476+
477+ t->src_port = be16_to_cpu(src_port);
478+ t->dest_port = be16_to_cpu(dest_port);
479+
480+ return 0;
481+}
482+
483+int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
484+ __be32 *src_addr, __be16 src_port,
485+ __be32 *dest_addr, __be16 dest_port)
486+{
487+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
488+ u32 *src, *dest;
489+ int i;
490+
491+ switch (type) {
492+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
493+ src = entry->dslite.tunnel_src_ip;
494+ dest = entry->dslite.tunnel_dest_ip;
495+ break;
496+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
497+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
498+ entry->ipv6.src_port = be16_to_cpu(src_port);
499+ entry->ipv6.dest_port = be16_to_cpu(dest_port);
500+ fallthrough;
501+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
502+ src = entry->ipv6.src_ip;
503+ dest = entry->ipv6.dest_ip;
504+ break;
505+ default:
506+ WARN_ON_ONCE(1);
507+ return -EINVAL;
508+ }
509+
510+ for (i = 0; i < 4; i++)
511+ src[i] = be32_to_cpu(src_addr[i]);
512+ for (i = 0; i < 4; i++)
513+ dest[i] = be32_to_cpu(dest_addr[i]);
514+
515+ return 0;
516+}
517+
518+int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port)
519+{
520+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
521+
522+ l2->etype = BIT(port);
523+
524+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER))
525+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
526+ else
527+ l2->etype |= BIT(8);
528+
529+ entry->ib1 &= ~MTK_FOE_IB1_BIND_VLAN_TAG;
530+
531+ return 0;
532+}
533+
534+int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid)
535+{
536+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
537+
538+ switch (FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, entry->ib1)) {
539+ case 0:
540+ entry->ib1 |= MTK_FOE_IB1_BIND_VLAN_TAG |
541+ FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
542+ l2->vlan1 = vid;
543+ return 0;
544+ case 1:
545+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG)) {
546+ l2->vlan1 = vid;
547+ l2->etype |= BIT(8);
548+ } else {
549+ l2->vlan2 = vid;
550+ entry->ib1 += FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, 1);
551+ }
552+ return 0;
553+ default:
554+ return -ENOSPC;
555+ }
556+}
557+
558+int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
559+{
560+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
561+
562+ if (!(entry->ib1 & MTK_FOE_IB1_BIND_VLAN_LAYER) ||
563+ (entry->ib1 & MTK_FOE_IB1_BIND_VLAN_TAG))
564+ l2->etype = ETH_P_PPP_SES;
565+
566+ entry->ib1 |= MTK_FOE_IB1_BIND_PPPOE;
567+ l2->pppoe_id = sid;
568+
569+ return 0;
570+}
571+
572+static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
573+{
574+ return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
575+ FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
576+}
577+
578+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
579+ u16 timestamp)
580+{
581+ struct mtk_foe_entry *hwe;
582+ u32 hash;
583+
584+ timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
585+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
586+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
587+
588+ hash = mtk_ppe_hash_entry(entry);
589+ hwe = &ppe->foe_table[hash];
590+ if (!mtk_foe_entry_usable(hwe)) {
591+ hwe++;
592+ hash++;
593+
594+ if (!mtk_foe_entry_usable(hwe))
595+ return -ENOSPC;
596+ }
597+
598+ memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
599+ wmb();
600+ hwe->ib1 = entry->ib1;
601+
602+ dma_wmb();
603+
604+ mtk_ppe_cache_clear(ppe);
605+
606+ return hash;
607+}
608+
609+int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
610+ int version)
611+{
612+ struct mtk_foe_entry *foe;
613+
614+ /* need to allocate a separate device, since it PPE DMA access is
615+ * not coherent.
616+ */
617+ ppe->base = base;
618+ ppe->dev = dev;
619+ ppe->version = version;
620+
621+ foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
622+ &ppe->foe_phys, GFP_KERNEL);
623+ if (!foe)
624+ return -ENOMEM;
625+
626+ ppe->foe_table = foe;
627+
628+ mtk_ppe_debugfs_init(ppe);
629+
630+ return 0;
631+}
632+
633+static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
634+{
635+ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
636+ int i, k;
637+
638+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
639+
640+ if (!IS_ENABLED(CONFIG_SOC_MT7621))
641+ return;
642+
643+ /* skip all entries that cross the 1024 byte boundary */
644+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128)
645+ for (k = 0; k < ARRAY_SIZE(skip); k++)
646+ ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
647+}
648+
649+int mtk_ppe_start(struct mtk_ppe *ppe)
650+{
651+ u32 val;
652+
653+ mtk_ppe_init_foe_table(ppe);
654+ ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
655+
656+ val = MTK_PPE_TB_CFG_ENTRY_80B |
657+ MTK_PPE_TB_CFG_AGE_NON_L4 |
658+ MTK_PPE_TB_CFG_AGE_UNBIND |
659+ MTK_PPE_TB_CFG_AGE_TCP |
660+ MTK_PPE_TB_CFG_AGE_UDP |
661+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
662+ FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
663+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
664+ FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
665+ MTK_PPE_KEEPALIVE_DISABLE) |
666+ FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
667+ FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
668+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
669+ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
670+ MTK_PPE_ENTRIES_SHIFT);
671+ ppe_w32(ppe, MTK_PPE_TB_CFG, val);
672+
673+ ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
674+ MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
675+
676+ mtk_ppe_cache_enable(ppe, true);
677+
678+ val = MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
679+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG |
680+ MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
681+ MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
682+ MTK_PPE_FLOW_CFG_IP6_6RD |
683+ MTK_PPE_FLOW_CFG_IP4_NAT |
684+ MTK_PPE_FLOW_CFG_IP4_NAPT |
685+ MTK_PPE_FLOW_CFG_IP4_DSLITE |
686+ MTK_PPE_FLOW_CFG_L2_BRIDGE |
687+ MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
688+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
689+
690+ val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
691+ FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
692+ ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
693+
developere71ba072023-01-06 09:34:01 +0800694+ val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 30) |
developer8cb3ac72022-07-04 10:55:14 +0800695+ FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
696+ ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
697+
698+ val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
developere71ba072023-01-06 09:34:01 +0800699+ FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 30);
developer8cb3ac72022-07-04 10:55:14 +0800700+ ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
701+
702+ val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
703+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
704+
705+ val = MTK_PPE_BIND_LIMIT1_FULL |
706+ FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
707+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
708+
709+ val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
710+ FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
711+ ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
712+
713+ /* enable PPE */
714+ val = MTK_PPE_GLO_CFG_EN |
715+ MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
716+ MTK_PPE_GLO_CFG_IP4_CS_DROP |
717+ MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
718+ ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
719+
720+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
721+
722+ return 0;
723+}
724+
725+int mtk_ppe_stop(struct mtk_ppe *ppe)
726+{
727+ u32 val;
728+ int i;
729+
730+ for (i = 0; i < MTK_PPE_ENTRIES; i++)
731+ ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
732+ MTK_FOE_STATE_INVALID);
733+
734+ mtk_ppe_cache_enable(ppe, false);
735+
736+ /* disable offload engine */
737+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
738+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
739+
740+ /* disable aging */
741+ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
742+ MTK_PPE_TB_CFG_AGE_UNBIND |
743+ MTK_PPE_TB_CFG_AGE_TCP |
744+ MTK_PPE_TB_CFG_AGE_UDP |
745+ MTK_PPE_TB_CFG_AGE_TCP_FIN;
746+ ppe_clear(ppe, MTK_PPE_TB_CFG, val);
747+
748+ return mtk_ppe_wait_busy(ppe);
749+}
750diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
751new file mode 100644
752index 000000000..242fb8f2a
753--- /dev/null
754+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
755@@ -0,0 +1,288 @@
756+// SPDX-License-Identifier: GPL-2.0-only
757+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
758+
759+#ifndef __MTK_PPE_H
760+#define __MTK_PPE_H
761+
762+#include <linux/kernel.h>
763+#include <linux/bitfield.h>
764+
765+#define MTK_ETH_PPE_BASE 0xc00
766+
767+#define MTK_PPE_ENTRIES_SHIFT 3
768+#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
769+#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
770+#define MTK_PPE_WAIT_TIMEOUT_US 1000000
771+
772+#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
773+#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
774+#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24)
775+
776+#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
777+#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15)
778+#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16)
779+#define MTK_FOE_IB1_BIND_PPPOE BIT(19)
780+#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20)
781+#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21)
782+#define MTK_FOE_IB1_BIND_CACHE BIT(22)
783+#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
784+#define MTK_FOE_IB1_BIND_TTL BIT(24)
785+
786+#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25)
787+#define MTK_FOE_IB1_STATE GENMASK(29, 28)
788+#define MTK_FOE_IB1_UDP BIT(30)
789+#define MTK_FOE_IB1_STATIC BIT(31)
790+
791+enum {
792+ MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
793+ MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
794+ MTK_PPE_PKT_TYPE_BRIDGE = 2,
795+ MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3,
796+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
797+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
798+ MTK_PPE_PKT_TYPE_IPV6_6RD = 7,
799+};
800+
801+#define MTK_FOE_IB2_QID GENMASK(3, 0)
802+#define MTK_FOE_IB2_PSE_QOS BIT(4)
803+#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
804+#define MTK_FOE_IB2_MULTICAST BIT(8)
805+
806+#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
807+#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
808+#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
809+
810+#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
811+
812+#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
813+
814+#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
815+
816+#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
817+#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
818+#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
819+
820+enum {
821+ MTK_FOE_STATE_INVALID,
822+ MTK_FOE_STATE_UNBIND,
823+ MTK_FOE_STATE_BIND,
824+ MTK_FOE_STATE_FIN
825+};
826+
827+struct mtk_foe_mac_info {
828+ u16 vlan1;
829+ u16 etype;
830+
831+ u32 dest_mac_hi;
832+
833+ u16 vlan2;
834+ u16 dest_mac_lo;
835+
836+ u32 src_mac_hi;
837+
838+ u16 pppoe_id;
839+ u16 src_mac_lo;
840+};
841+
842+struct mtk_foe_bridge {
843+ u32 dest_mac_hi;
844+
845+ u16 src_mac_lo;
846+ u16 dest_mac_lo;
847+
848+ u32 src_mac_hi;
849+
850+ u32 ib2;
851+
852+ u32 _rsv[5];
853+
854+ u32 udf_tsid;
855+ struct mtk_foe_mac_info l2;
856+};
857+
858+struct mtk_ipv4_tuple {
859+ u32 src_ip;
860+ u32 dest_ip;
861+ union {
862+ struct {
863+ u16 dest_port;
864+ u16 src_port;
865+ };
866+ struct {
867+ u8 protocol;
868+ u8 _pad[3]; /* fill with 0xa5a5a5 */
869+ };
870+ u32 ports;
871+ };
872+};
873+
874+struct mtk_foe_ipv4 {
875+ struct mtk_ipv4_tuple orig;
876+
877+ u32 ib2;
878+
879+ struct mtk_ipv4_tuple new;
880+
881+ u16 timestamp;
882+ u16 _rsv0[3];
883+
884+ u32 udf_tsid;
885+
886+ struct mtk_foe_mac_info l2;
887+};
888+
889+struct mtk_foe_ipv4_dslite {
890+ struct mtk_ipv4_tuple ip4;
891+
892+ u32 tunnel_src_ip[4];
893+ u32 tunnel_dest_ip[4];
894+
895+ u8 flow_label[3];
896+ u8 priority;
897+
898+ u32 udf_tsid;
899+
900+ u32 ib2;
901+
902+ struct mtk_foe_mac_info l2;
903+};
904+
905+struct mtk_foe_ipv6 {
906+ u32 src_ip[4];
907+ u32 dest_ip[4];
908+
909+ union {
910+ struct {
911+ u8 protocol;
912+ u8 _pad[3]; /* fill with 0xa5a5a5 */
913+ }; /* 3-tuple */
914+ struct {
915+ u16 dest_port;
916+ u16 src_port;
917+ }; /* 5-tuple */
918+ u32 ports;
919+ };
920+
921+ u32 _rsv[3];
922+
923+ u32 udf;
924+
925+ u32 ib2;
926+ struct mtk_foe_mac_info l2;
927+};
928+
929+struct mtk_foe_ipv6_6rd {
930+ u32 src_ip[4];
931+ u32 dest_ip[4];
932+ u16 dest_port;
933+ u16 src_port;
934+
935+ u32 tunnel_src_ip;
936+ u32 tunnel_dest_ip;
937+
938+ u16 hdr_csum;
939+ u8 dscp;
940+ u8 ttl;
941+
942+ u8 flag;
943+ u8 pad;
944+ u8 per_flow_6rd_id;
945+ u8 pad2;
946+
947+ u32 ib2;
948+ struct mtk_foe_mac_info l2;
949+};
950+
951+struct mtk_foe_entry {
952+ u32 ib1;
953+
954+ union {
955+ struct mtk_foe_bridge bridge;
956+ struct mtk_foe_ipv4 ipv4;
957+ struct mtk_foe_ipv4_dslite dslite;
958+ struct mtk_foe_ipv6 ipv6;
959+ struct mtk_foe_ipv6_6rd ipv6_6rd;
960+ u32 data[19];
961+ };
962+};
963+
964+enum {
965+ MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02,
966+ MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03,
967+ MTK_PPE_CPU_REASON_NO_FLOW = 0x07,
968+ MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08,
969+ MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09,
970+ MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a,
971+ MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b,
972+ MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c,
973+ MTK_PPE_CPU_REASON_UN_HIT = 0x0d,
974+ MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e,
975+ MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
976+ MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10,
977+ MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11,
978+ MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12,
979+ MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13,
980+ MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14,
981+ MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15,
982+ MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16,
983+ MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17,
984+ MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18,
985+ MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19,
986+ MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a,
987+ MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b,
988+ MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c,
989+ MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e,
990+ MTK_PPE_CPU_REASON_INVALID = 0x1f,
991+};
992+
993+struct mtk_ppe {
994+ struct device *dev;
995+ void __iomem *base;
996+ int version;
997+
998+ struct mtk_foe_entry *foe_table;
999+ dma_addr_t foe_phys;
1000+
1001+ void *acct_table;
1002+};
1003+
1004+int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
1005+ int version);
1006+int mtk_ppe_start(struct mtk_ppe *ppe);
1007+int mtk_ppe_stop(struct mtk_ppe *ppe);
1008+
1009+static inline void
1010+mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
1011+{
1012+ ppe->foe_table[hash].ib1 = 0;
1013+ dma_wmb();
1014+}
1015+
1016+static inline int
1017+mtk_foe_entry_timestamp(struct mtk_ppe *ppe, u16 hash)
1018+{
1019+ u32 ib1 = READ_ONCE(ppe->foe_table[hash].ib1);
1020+
1021+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND)
1022+ return -1;
1023+
1024+ return FIELD_GET(MTK_FOE_IB1_BIND_TIMESTAMP, ib1);
1025+}
1026+
1027+int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
1028+ u8 pse_port, u8 *src_mac, u8 *dest_mac);
1029+int mtk_foe_entry_set_pse_port(struct mtk_foe_entry *entry, u8 port);
1030+int mtk_foe_entry_set_ipv4_tuple(struct mtk_foe_entry *entry, bool orig,
1031+ __be32 src_addr, __be16 src_port,
1032+ __be32 dest_addr, __be16 dest_port);
1033+int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
1034+ __be32 *src_addr, __be16 src_port,
1035+ __be32 *dest_addr, __be16 dest_port);
1036+int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
1037+int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
1038+int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
1039+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
1040+ u16 timestamp);
1041+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
1042+
1043+#endif
1044diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1045new file mode 100644
1046index 000000000..d4b482340
1047--- /dev/null
1048+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1049@@ -0,0 +1,214 @@
1050+// SPDX-License-Identifier: GPL-2.0-only
1051+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
1052+
1053+#include <linux/kernel.h>
1054+#include <linux/debugfs.h>
1055+#include "mtk_eth_soc.h"
1056+
1057+struct mtk_flow_addr_info
1058+{
1059+ void *src, *dest;
1060+ u16 *src_port, *dest_port;
1061+ bool ipv6;
1062+};
1063+
1064+static const char *mtk_foe_entry_state_str(int state)
1065+{
1066+ static const char * const state_str[] = {
1067+ [MTK_FOE_STATE_INVALID] = "INV",
1068+ [MTK_FOE_STATE_UNBIND] = "UNB",
1069+ [MTK_FOE_STATE_BIND] = "BND",
1070+ [MTK_FOE_STATE_FIN] = "FIN",
1071+ };
1072+
1073+ if (state >= ARRAY_SIZE(state_str) || !state_str[state])
1074+ return "UNK";
1075+
1076+ return state_str[state];
1077+}
1078+
1079+static const char *mtk_foe_pkt_type_str(int type)
1080+{
1081+ static const char * const type_str[] = {
1082+ [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
1083+ [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
1084+ [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
1085+ [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
1086+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
1087+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
1088+ [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
1089+ };
1090+
1091+ if (type >= ARRAY_SIZE(type_str) || !type_str[type])
1092+ return "UNKNOWN";
1093+
1094+ return type_str[type];
1095+}
1096+
1097+static void
1098+mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
1099+{
1100+ u32 n_addr[4];
1101+ int i;
1102+
1103+ if (!ipv6) {
1104+ seq_printf(m, "%pI4h", addr);
1105+ return;
1106+ }
1107+
1108+ for (i = 0; i < ARRAY_SIZE(n_addr); i++)
1109+ n_addr[i] = htonl(addr[i]);
1110+ seq_printf(m, "%pI6", n_addr);
1111+}
1112+
1113+static void
1114+mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
1115+{
1116+ mtk_print_addr(m, ai->src, ai->ipv6);
1117+ if (ai->src_port)
1118+ seq_printf(m, ":%d", *ai->src_port);
1119+ seq_printf(m, "->");
1120+ mtk_print_addr(m, ai->dest, ai->ipv6);
1121+ if (ai->dest_port)
1122+ seq_printf(m, ":%d", *ai->dest_port);
1123+}
1124+
1125+static int
1126+mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
1127+{
1128+ struct mtk_ppe *ppe = m->private;
1129+ int i;
1130+
1131+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
1132+ struct mtk_foe_entry *entry = &ppe->foe_table[i];
1133+ struct mtk_foe_mac_info *l2;
1134+ struct mtk_flow_addr_info ai = {};
1135+ unsigned char h_source[ETH_ALEN];
1136+ unsigned char h_dest[ETH_ALEN];
1137+ int type, state;
1138+ u32 ib2;
1139+
1140+
1141+ state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
1142+ if (!state)
1143+ continue;
1144+
1145+ if (bind && state != MTK_FOE_STATE_BIND)
1146+ continue;
1147+
1148+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
1149+ seq_printf(m, "%05x %s %7s", i,
1150+ mtk_foe_entry_state_str(state),
1151+ mtk_foe_pkt_type_str(type));
1152+
1153+ switch (type) {
1154+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
1155+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
1156+ ai.src_port = &entry->ipv4.orig.src_port;
1157+ ai.dest_port = &entry->ipv4.orig.dest_port;
1158+ fallthrough;
1159+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
1160+ ai.src = &entry->ipv4.orig.src_ip;
1161+ ai.dest = &entry->ipv4.orig.dest_ip;
1162+ break;
1163+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
1164+ ai.src_port = &entry->ipv6.src_port;
1165+ ai.dest_port = &entry->ipv6.dest_port;
1166+ fallthrough;
1167+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
1168+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
1169+ ai.src = &entry->ipv6.src_ip;
1170+ ai.dest = &entry->ipv6.dest_ip;
1171+ ai.ipv6 = true;
1172+ break;
1173+ }
1174+
1175+ seq_printf(m, " orig=");
1176+ mtk_print_addr_info(m, &ai);
1177+
1178+ switch (type) {
1179+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
1180+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
1181+ ai.src_port = &entry->ipv4.new.src_port;
1182+ ai.dest_port = &entry->ipv4.new.dest_port;
1183+ fallthrough;
1184+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
1185+ ai.src = &entry->ipv4.new.src_ip;
1186+ ai.dest = &entry->ipv4.new.dest_ip;
1187+ seq_printf(m, " new=");
1188+ mtk_print_addr_info(m, &ai);
1189+ break;
1190+ }
1191+
1192+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
1193+ l2 = &entry->ipv6.l2;
1194+ ib2 = entry->ipv6.ib2;
1195+ } else {
1196+ l2 = &entry->ipv4.l2;
1197+ ib2 = entry->ipv4.ib2;
1198+ }
1199+
1200+ *((__be32 *)h_source) = htonl(l2->src_mac_hi);
1201+ *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
1202+ *((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
1203+ *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
1204+
1205+ seq_printf(m, " eth=%pM->%pM etype=%04x"
1206+ " vlan=%d,%d ib1=%08x ib2=%08x\n",
1207+ h_source, h_dest, ntohs(l2->etype),
1208+ l2->vlan1, l2->vlan2, entry->ib1, ib2);
1209+ }
1210+
1211+ return 0;
1212+}
1213+
1214+static int
1215+mtk_ppe_debugfs_foe_show_all(struct seq_file *m, void *private)
1216+{
1217+ return mtk_ppe_debugfs_foe_show(m, private, false);
1218+}
1219+
1220+static int
1221+mtk_ppe_debugfs_foe_show_bind(struct seq_file *m, void *private)
1222+{
1223+ return mtk_ppe_debugfs_foe_show(m, private, true);
1224+}
1225+
1226+static int
1227+mtk_ppe_debugfs_foe_open_all(struct inode *inode, struct file *file)
1228+{
1229+ return single_open(file, mtk_ppe_debugfs_foe_show_all,
1230+ inode->i_private);
1231+}
1232+
1233+static int
1234+mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
1235+{
1236+ return single_open(file, mtk_ppe_debugfs_foe_show_bind,
1237+ inode->i_private);
1238+}
1239+
1240+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
1241+{
1242+ static const struct file_operations fops_all = {
1243+ .open = mtk_ppe_debugfs_foe_open_all,
1244+ .read = seq_read,
1245+ .llseek = seq_lseek,
1246+ .release = single_release,
1247+ };
1248+
1249+ static const struct file_operations fops_bind = {
1250+ .open = mtk_ppe_debugfs_foe_open_bind,
1251+ .read = seq_read,
1252+ .llseek = seq_lseek,
1253+ .release = single_release,
1254+ };
1255+
1256+ struct dentry *root;
1257+
1258+ root = debugfs_create_dir("mtk_ppe", NULL);
1259+ debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
1260+ debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
1261+
1262+ return 0;
1263+}
1264diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1265new file mode 100644
1266index 000000000..4294f0c74
1267--- /dev/null
1268+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
developer207b39d2022-10-07 15:57:16 +08001269@@ -0,0 +1,541 @@
developer8cb3ac72022-07-04 10:55:14 +08001270+// SPDX-License-Identifier: GPL-2.0-only
1271+/*
1272+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
1273+ */
1274+
1275+#include <linux/if_ether.h>
1276+#include <linux/rhashtable.h>
1277+#include <linux/ip.h>
1278+#include <linux/ipv6.h>
1279+#include <net/flow_offload.h>
1280+#include <net/pkt_cls.h>
1281+#include <net/dsa.h>
1282+#include "mtk_eth_soc.h"
1283+
1284+struct mtk_flow_data {
1285+ struct ethhdr eth;
1286+
1287+ union {
1288+ struct {
1289+ __be32 src_addr;
1290+ __be32 dst_addr;
1291+ } v4;
1292+
1293+ struct {
1294+ struct in6_addr src_addr;
1295+ struct in6_addr dst_addr;
1296+ } v6;
1297+ };
1298+
1299+ __be16 src_port;
1300+ __be16 dst_port;
1301+
1302+ struct {
1303+ u16 id;
1304+ __be16 proto;
1305+ u8 num;
1306+ } vlan;
1307+ struct {
1308+ u16 sid;
1309+ u8 num;
1310+ } pppoe;
1311+};
1312+
1313+struct mtk_flow_entry {
1314+ struct rhash_head node;
1315+ unsigned long cookie;
1316+ u16 hash;
1317+};
1318+
1319+static const struct rhashtable_params mtk_flow_ht_params = {
1320+ .head_offset = offsetof(struct mtk_flow_entry, node),
1321+ .key_offset = offsetof(struct mtk_flow_entry, cookie),
1322+ .key_len = sizeof(unsigned long),
1323+ .automatic_shrinking = true,
1324+};
1325+
1326+static u32
1327+mtk_eth_timestamp(struct mtk_eth *eth)
1328+{
1329+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
1330+}
1331+
1332+static int
1333+mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
1334+ bool egress)
1335+{
1336+ return mtk_foe_entry_set_ipv4_tuple(foe, egress,
1337+ data->v4.src_addr, data->src_port,
1338+ data->v4.dst_addr, data->dst_port);
1339+}
1340+
1341+static int
1342+mtk_flow_set_ipv6_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data)
1343+{
1344+ return mtk_foe_entry_set_ipv6_tuple(foe,
1345+ data->v6.src_addr.s6_addr32, data->src_port,
1346+ data->v6.dst_addr.s6_addr32, data->dst_port);
1347+}
1348+
1349+static void
1350+mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
1351+{
1352+ void *dest = eth + act->mangle.offset;
1353+ const void *src = &act->mangle.val;
1354+
1355+ if (act->mangle.offset > 8)
1356+ return;
1357+
1358+ if (act->mangle.mask == 0xffff) {
1359+ src += 2;
1360+ dest += 2;
1361+ }
1362+
1363+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
1364+}
1365+
1366+
1367+static int
1368+mtk_flow_mangle_ports(const struct flow_action_entry *act,
1369+ struct mtk_flow_data *data)
1370+{
1371+ u32 val = ntohl(act->mangle.val);
1372+
1373+ switch (act->mangle.offset) {
1374+ case 0:
1375+ if (act->mangle.mask == ~htonl(0xffff))
1376+ data->dst_port = cpu_to_be16(val);
1377+ else
1378+ data->src_port = cpu_to_be16(val >> 16);
1379+ break;
1380+ case 2:
1381+ data->dst_port = cpu_to_be16(val);
1382+ break;
1383+ default:
1384+ return -EINVAL;
1385+ }
1386+
1387+ return 0;
1388+}
1389+
1390+static int
1391+mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
1392+ struct mtk_flow_data *data)
1393+{
1394+ __be32 *dest;
1395+
1396+ switch (act->mangle.offset) {
1397+ case offsetof(struct iphdr, saddr):
1398+ dest = &data->v4.src_addr;
1399+ break;
1400+ case offsetof(struct iphdr, daddr):
1401+ dest = &data->v4.dst_addr;
1402+ break;
1403+ default:
1404+ return -EINVAL;
1405+ }
1406+
1407+ memcpy(dest, &act->mangle.val, sizeof(u32));
1408+
1409+ return 0;
1410+}
1411+
1412+static int
1413+mtk_flow_get_dsa_port(struct net_device **dev)
1414+{
1415+#if IS_ENABLED(CONFIG_NET_DSA)
1416+ struct dsa_port *dp;
1417+
1418+ dp = dsa_port_from_netdev(*dev);
1419+ if (IS_ERR(dp))
1420+ return -ENODEV;
1421+
1422+ if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
1423+ return -ENODEV;
1424+
1425+ *dev = dp->cpu_dp->master;
1426+
1427+ return dp->index;
1428+#else
1429+ return -ENODEV;
1430+#endif
1431+}
1432+
1433+static int
1434+mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
1435+ struct net_device *dev)
1436+{
1437+ int pse_port, dsa_port;
1438+
1439+ dsa_port = mtk_flow_get_dsa_port(&dev);
1440+ if (dsa_port >= 0)
1441+ mtk_foe_entry_set_dsa(foe, dsa_port);
1442+
1443+ if (dev == eth->netdev[0])
developerc693c152022-12-02 09:38:46 +08001444+ pse_port = PSE_GDM1_PORT;
developer8cb3ac72022-07-04 10:55:14 +08001445+ else if (dev == eth->netdev[1])
developerc693c152022-12-02 09:38:46 +08001446+ pse_port = PSE_GDM2_PORT;
developer8cb3ac72022-07-04 10:55:14 +08001447+ else
1448+ return -EOPNOTSUPP;
1449+
1450+ mtk_foe_entry_set_pse_port(foe, pse_port);
1451+
1452+ return 0;
1453+}
1454+
1455+static int
1456+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1457+{
1458+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1459+ struct flow_action_entry *act;
1460+ struct mtk_flow_data data = {};
1461+ struct mtk_foe_entry foe;
1462+ struct net_device *odev = NULL;
1463+ struct mtk_flow_entry *entry;
1464+ int offload_type = 0;
1465+ u16 addr_type = 0;
1466+ u32 timestamp;
1467+ u8 l4proto = 0;
1468+ int err = 0;
1469+ int hash;
1470+ int i;
1471+
1472+ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
1473+ return -EEXIST;
1474+
1475+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
1476+ struct flow_match_meta match;
1477+
1478+ flow_rule_match_meta(rule, &match);
1479+ } else {
1480+ return -EOPNOTSUPP;
1481+ }
1482+
1483+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
1484+ struct flow_match_control match;
1485+
1486+ flow_rule_match_control(rule, &match);
1487+ addr_type = match.key->addr_type;
1488+ } else {
1489+ return -EOPNOTSUPP;
1490+ }
1491+
1492+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
1493+ struct flow_match_basic match;
1494+
1495+ flow_rule_match_basic(rule, &match);
1496+ l4proto = match.key->ip_proto;
1497+ } else {
1498+ return -EOPNOTSUPP;
1499+ }
1500+
1501+ flow_action_for_each(i, act, &rule->action) {
1502+ switch (act->id) {
1503+ case FLOW_ACTION_MANGLE:
1504+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
1505+ mtk_flow_offload_mangle_eth(act, &data.eth);
1506+ break;
1507+ case FLOW_ACTION_REDIRECT:
1508+ odev = act->dev;
1509+ break;
1510+ case FLOW_ACTION_CSUM:
1511+ break;
1512+ case FLOW_ACTION_VLAN_PUSH:
1513+ if (data.vlan.num == 1 ||
1514+ act->vlan.proto != htons(ETH_P_8021Q))
1515+ return -EOPNOTSUPP;
1516+
1517+ data.vlan.id = act->vlan.vid;
1518+ data.vlan.proto = act->vlan.proto;
1519+ data.vlan.num++;
1520+ break;
1521+ case FLOW_ACTION_VLAN_POP:
1522+ break;
1523+ case FLOW_ACTION_PPPOE_PUSH:
1524+ if (data.pppoe.num == 1)
1525+ return -EOPNOTSUPP;
1526+
1527+ data.pppoe.sid = act->pppoe.sid;
1528+ data.pppoe.num++;
1529+ break;
1530+ default:
1531+ return -EOPNOTSUPP;
1532+ }
1533+ }
1534+
1535+ switch (addr_type) {
1536+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1537+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1538+ break;
1539+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1540+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1541+ break;
1542+ default:
1543+ return -EOPNOTSUPP;
1544+ }
1545+
1546+ if (!is_valid_ether_addr(data.eth.h_source) ||
1547+ !is_valid_ether_addr(data.eth.h_dest))
1548+ return -EINVAL;
1549+
1550+ err = mtk_foe_entry_prepare(&foe, offload_type, l4proto, 0,
1551+ data.eth.h_source,
1552+ data.eth.h_dest);
1553+ if (err)
1554+ return err;
1555+
1556+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1557+ struct flow_match_ports ports;
1558+
1559+ flow_rule_match_ports(rule, &ports);
1560+ data.src_port = ports.key->src;
1561+ data.dst_port = ports.key->dst;
1562+ } else {
1563+ return -EOPNOTSUPP;
1564+ }
1565+
1566+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1567+ struct flow_match_ipv4_addrs addrs;
1568+
1569+ flow_rule_match_ipv4_addrs(rule, &addrs);
1570+
1571+ data.v4.src_addr = addrs.key->src;
1572+ data.v4.dst_addr = addrs.key->dst;
1573+
1574+ mtk_flow_set_ipv4_addr(&foe, &data, false);
1575+ }
1576+
1577+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1578+ struct flow_match_ipv6_addrs addrs;
1579+
1580+ flow_rule_match_ipv6_addrs(rule, &addrs);
1581+
1582+ data.v6.src_addr = addrs.key->src;
1583+ data.v6.dst_addr = addrs.key->dst;
1584+
1585+ mtk_flow_set_ipv6_addr(&foe, &data);
1586+ }
1587+
1588+ flow_action_for_each(i, act, &rule->action) {
1589+ if (act->id != FLOW_ACTION_MANGLE)
1590+ continue;
1591+
1592+ switch (act->mangle.htype) {
1593+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1594+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1595+ err = mtk_flow_mangle_ports(act, &data);
1596+ break;
1597+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
1598+ err = mtk_flow_mangle_ipv4(act, &data);
1599+ break;
1600+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
1601+ /* handled earlier */
1602+ break;
1603+ default:
1604+ return -EOPNOTSUPP;
1605+ }
1606+
1607+ if (err)
1608+ return err;
1609+ }
1610+
1611+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1612+ err = mtk_flow_set_ipv4_addr(&foe, &data, true);
1613+ if (err)
1614+ return err;
1615+ }
1616+
1617+ if (data.vlan.num == 1) {
1618+ if (data.vlan.proto != htons(ETH_P_8021Q))
1619+ return -EOPNOTSUPP;
1620+
1621+ mtk_foe_entry_set_vlan(&foe, data.vlan.id);
1622+ }
1623+ if (data.pppoe.num == 1)
1624+ mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
1625+
1626+ err = mtk_flow_set_output_device(eth, &foe, odev);
1627+ if (err)
1628+ return err;
1629+
1630+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1631+ if (!entry)
1632+ return -ENOMEM;
1633+
1634+ entry->cookie = f->cookie;
1635+ timestamp = mtk_eth_timestamp(eth);
1636+ hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
1637+ if (hash < 0) {
1638+ err = hash;
1639+ goto free;
1640+ }
1641+
1642+ entry->hash = hash;
1643+ err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
1644+ mtk_flow_ht_params);
1645+ if (err < 0)
1646+ goto clear_flow;
1647+
1648+ return 0;
1649+clear_flow:
1650+ mtk_foe_entry_clear(&eth->ppe, hash);
1651+free:
1652+ kfree(entry);
1653+ return err;
1654+}
1655+
1656+static int
1657+mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
1658+{
1659+ struct mtk_flow_entry *entry;
1660+
1661+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
1662+ mtk_flow_ht_params);
1663+ if (!entry)
1664+ return -ENOENT;
1665+
1666+ mtk_foe_entry_clear(&eth->ppe, entry->hash);
1667+ rhashtable_remove_fast(&eth->flow_table, &entry->node,
1668+ mtk_flow_ht_params);
1669+ kfree(entry);
1670+
1671+ return 0;
1672+}
1673+
1674+static int
1675+mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1676+{
1677+ struct mtk_flow_entry *entry;
1678+ int timestamp;
1679+ u32 idle;
1680+
1681+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
1682+ mtk_flow_ht_params);
1683+ if (!entry)
1684+ return -ENOENT;
1685+
1686+ timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
1687+ if (timestamp < 0)
1688+ return -ETIMEDOUT;
1689+
1690+ idle = mtk_eth_timestamp(eth) - timestamp;
1691+ f->stats.lastused = jiffies - idle * HZ;
1692+
1693+ return 0;
1694+}
1695+
1696+static DEFINE_MUTEX(mtk_flow_offload_mutex);
1697+
1698+static int
1699+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
1700+{
1701+ struct flow_cls_offload *cls = type_data;
1702+ struct net_device *dev = cb_priv;
1703+ struct mtk_mac *mac = netdev_priv(dev);
1704+ struct mtk_eth *eth = mac->hw;
1705+ int err;
1706+
1707+ if (!tc_can_offload(dev))
1708+ return -EOPNOTSUPP;
1709+
1710+ if (type != TC_SETUP_CLSFLOWER)
1711+ return -EOPNOTSUPP;
1712+
1713+ mutex_lock(&mtk_flow_offload_mutex);
1714+ switch (cls->command) {
1715+ case FLOW_CLS_REPLACE:
1716+ err = mtk_flow_offload_replace(eth, cls);
1717+ break;
1718+ case FLOW_CLS_DESTROY:
1719+ err = mtk_flow_offload_destroy(eth, cls);
1720+ break;
1721+ case FLOW_CLS_STATS:
1722+ err = mtk_flow_offload_stats(eth, cls);
1723+ break;
1724+ default:
1725+ err = -EOPNOTSUPP;
1726+ break;
1727+ }
1728+ mutex_unlock(&mtk_flow_offload_mutex);
1729+
1730+ return err;
1731+}
1732+
1733+static int
1734+mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1735+{
1736+ struct mtk_mac *mac = netdev_priv(dev);
1737+ struct mtk_eth *eth = mac->hw;
developer207b39d2022-10-07 15:57:16 +08001738+ struct nf_flowtable *flowtable;
developer8cb3ac72022-07-04 10:55:14 +08001739+ static LIST_HEAD(block_cb_list);
1740+ struct flow_block_cb *block_cb;
1741+ flow_setup_cb_t *cb;
developer207b39d2022-10-07 15:57:16 +08001742+ int err = 0;
1743+
1744+ flowtable = container_of(f->block, struct nf_flowtable, flow_block);
developer8cb3ac72022-07-04 10:55:14 +08001745+
1746+ if (!eth->ppe.foe_table)
1747+ return -EOPNOTSUPP;
1748+
1749+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1750+ return -EOPNOTSUPP;
1751+
1752+ cb = mtk_eth_setup_tc_block_cb;
1753+ f->driver_block_list = &block_cb_list;
1754+
developer207b39d2022-10-07 15:57:16 +08001755+ down_write(&flowtable->flow_block_lock);
1756+
developer8cb3ac72022-07-04 10:55:14 +08001757+ switch (f->command) {
1758+ case FLOW_BLOCK_BIND:
1759+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
1760+ if (block_cb) {
1761+ flow_block_cb_incref(block_cb);
developer207b39d2022-10-07 15:57:16 +08001762+ goto unlock;
developer8cb3ac72022-07-04 10:55:14 +08001763+ }
1764+ block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
developer207b39d2022-10-07 15:57:16 +08001765+ if (IS_ERR(block_cb)) {
1766+ err = PTR_ERR(block_cb);
1767+ goto unlock;
1768+ }
developer8cb3ac72022-07-04 10:55:14 +08001769+
1770+ flow_block_cb_add(block_cb, f);
1771+ list_add_tail(&block_cb->driver_list, &block_cb_list);
developer207b39d2022-10-07 15:57:16 +08001772+ break;
developer8cb3ac72022-07-04 10:55:14 +08001773+ case FLOW_BLOCK_UNBIND:
1774+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
developer207b39d2022-10-07 15:57:16 +08001775+ if (!block_cb) {
1776+ err = -ENOENT;
1777+ goto unlock;
1778+ }
developer8cb3ac72022-07-04 10:55:14 +08001779+
1780+ if (flow_block_cb_decref(block_cb)) {
1781+ flow_block_cb_remove(block_cb, f);
1782+ list_del(&block_cb->driver_list);
1783+ }
developer207b39d2022-10-07 15:57:16 +08001784+ break;
developer8cb3ac72022-07-04 10:55:14 +08001785+ default:
developer207b39d2022-10-07 15:57:16 +08001786+ err = -EOPNOTSUPP;
1787+ break;
developer8cb3ac72022-07-04 10:55:14 +08001788+ }
developer207b39d2022-10-07 15:57:16 +08001789+
1790+unlock:
1791+ up_write(&flowtable->flow_block_lock);
1792+ return err;
developer8cb3ac72022-07-04 10:55:14 +08001793+}
1794+
1795+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
1796+ void *type_data)
1797+{
1798+ if (type == TC_SETUP_FT)
1799+ return mtk_eth_setup_tc_block(dev, type_data);
1800+
1801+ return -EOPNOTSUPP;
1802+}
1803+
1804+int mtk_eth_offload_init(struct mtk_eth *eth)
1805+{
1806+ if (!eth->ppe.foe_table)
1807+ return 0;
1808+
1809+ return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
1810+}
1811diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
1812new file mode 100644
1813index 000000000..0c45ea090
1814--- /dev/null
1815+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
1816@@ -0,0 +1,144 @@
1817+// SPDX-License-Identifier: GPL-2.0-only
1818+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
1819+
1820+#ifndef __MTK_PPE_REGS_H
1821+#define __MTK_PPE_REGS_H
1822+
1823+#define MTK_PPE_GLO_CFG 0x200
1824+#define MTK_PPE_GLO_CFG_EN BIT(0)
1825+#define MTK_PPE_GLO_CFG_TSID_EN BIT(1)
1826+#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2)
1827+#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3)
1828+#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4)
1829+#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5)
1830+#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6)
1831+#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7)
1832+#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8)
1833+#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9)
1834+#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10)
1835+#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11)
1836+#define MTK_PPE_GLO_CFG_MCAST_ENTRIES	GENMASK(13, 12)
1837+#define MTK_PPE_GLO_CFG_BUSY BIT(31)
1838+
1839+#define MTK_PPE_FLOW_CFG 0x204
1840+#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
1841+#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
1842+#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
1843+#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9)
1844+#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10)
1845+#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12)
1846+#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13)
1847+#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14)
1848+#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15)
1849+#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16)
1850+#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17)
1851+#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18)
1852+#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19)
1853+#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20)
1854+
1855+#define MTK_PPE_IP_PROTO_CHK 0x208
1856+#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0)
1857+#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16)
1858+
1859+#define MTK_PPE_TB_CFG 0x21c
1860+#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0)
1861+#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3)
1862+#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4)
1863+#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6)
1864+#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7)
1865+#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8)
1866+#define MTK_PPE_TB_CFG_AGE_TCP BIT(9)
1867+#define MTK_PPE_TB_CFG_AGE_UDP BIT(10)
1868+#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11)
1869+#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12)
1870+#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
1871+#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
1872+#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
1873+
1874+enum {
1875+ MTK_PPE_SCAN_MODE_DISABLED,
1876+ MTK_PPE_SCAN_MODE_CHECK_AGE,
1877+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE,
1878+};
1879+
1880+enum {
1881+ MTK_PPE_KEEPALIVE_DISABLE,
1882+ MTK_PPE_KEEPALIVE_UNICAST_CPU,
1883+ MTK_PPE_KEEPALIVE_DUP_CPU = 3,
1884+};
1885+
1886+enum {
1887+ MTK_PPE_SEARCH_MISS_ACTION_DROP,
1888+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2,
1889+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3,
1890+};
1891+
1892+#define MTK_PPE_TB_BASE 0x220
1893+
1894+#define MTK_PPE_TB_USED 0x224
1895+#define MTK_PPE_TB_USED_NUM GENMASK(13, 0)
1896+
1897+#define MTK_PPE_BIND_RATE 0x228
1898+#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0)
1899+#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16)
1900+
1901+#define MTK_PPE_BIND_LIMIT0 0x22c
1902+#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0)
1903+#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16)
1904+
1905+#define MTK_PPE_BIND_LIMIT1 0x230
1906+#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0)
1907+#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16)
1908+
1909+#define MTK_PPE_KEEPALIVE 0x234
1910+#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0)
1911+#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16)
1912+#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24)
1913+
1914+#define MTK_PPE_UNBIND_AGE 0x238
1915+#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16)
1916+#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0)
1917+
1918+#define MTK_PPE_BIND_AGE0 0x23c
1919+#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
1920+#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
1921+
1922+#define MTK_PPE_BIND_AGE1 0x240
1923+#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
1924+#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
1925+
1926+#define MTK_PPE_HASH_SEED 0x244
1927+
1928+#define MTK_PPE_DEFAULT_CPU_PORT 0x248
1929+#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
1930+
1931+#define MTK_PPE_MTU_DROP 0x308
1932+
1933+#define MTK_PPE_VLAN_MTU0 0x30c
1934+#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0)
1935+#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16)
1936+
1937+#define MTK_PPE_VLAN_MTU1 0x310
1938+#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0)
1939+#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16)
1940+
1941+#define MTK_PPE_VPM_TPID 0x318
1942+
1943+#define MTK_PPE_CACHE_CTL 0x320
1944+#define MTK_PPE_CACHE_CTL_EN BIT(0)
1945+#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4)
1946+#define MTK_PPE_CACHE_CTL_REQ BIT(8)
1947+#define MTK_PPE_CACHE_CTL_CLEAR BIT(9)
1948+#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12)
1949+
1950+#define MTK_PPE_MIB_CFG 0x334
1951+#define MTK_PPE_MIB_CFG_EN BIT(0)
1952+#define MTK_PPE_MIB_CFG_RD_CLR BIT(1)
1953+
1954+#define MTK_PPE_MIB_TB_BASE 0x338
1955+
1956+#define MTK_PPE_MIB_CACHE_CTL 0x350
1957+#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
1958+#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
1959+
1960+#endif
1961diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
1962index a085213dc..813e30495 100644
1963--- a/drivers/net/ppp/ppp_generic.c
1964+++ b/drivers/net/ppp/ppp_generic.c
1965@@ -1378,12 +1378,34 @@ static void ppp_dev_priv_destructor(struct net_device *dev)
1966 ppp_destroy_interface(ppp);
1967 }
1968
1969+static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
1970+ struct net_device_path *path)
1971+{
1972+ struct ppp *ppp = netdev_priv(ctx->dev);
1973+ struct ppp_channel *chan;
1974+ struct channel *pch;
1975+
1976+ if (ppp->flags & SC_MULTILINK)
1977+ return -EOPNOTSUPP;
1978+
1979+ if (list_empty(&ppp->channels))
1980+ return -ENODEV;
1981+
1982+ pch = list_first_entry(&ppp->channels, struct channel, clist);
1983+ chan = pch->chan;
1984+ if (!chan->ops->fill_forward_path)
1985+ return -EOPNOTSUPP;
1986+
1987+ return chan->ops->fill_forward_path(ctx, path, chan);
1988+}
1989+
1990 static const struct net_device_ops ppp_netdev_ops = {
1991 .ndo_init = ppp_dev_init,
1992 .ndo_uninit = ppp_dev_uninit,
1993 .ndo_start_xmit = ppp_start_xmit,
1994 .ndo_do_ioctl = ppp_net_ioctl,
1995 .ndo_get_stats64 = ppp_get_stats64,
1996+ .ndo_fill_forward_path = ppp_fill_forward_path,
1997 };
1998
1999 static struct device_type ppp_type = {
2000diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
2001index 087b01684..7a8c246ab 100644
2002--- a/drivers/net/ppp/pppoe.c
2003+++ b/drivers/net/ppp/pppoe.c
2004@@ -974,8 +974,32 @@ static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb)
2005 return __pppoe_xmit(sk, skb);
2006 }
2007
2008+static int pppoe_fill_forward_path(struct net_device_path_ctx *ctx,
2009+ struct net_device_path *path,
2010+ const struct ppp_channel *chan)
2011+{
2012+ struct sock *sk = (struct sock *)chan->private;
2013+ struct pppox_sock *po = pppox_sk(sk);
2014+ struct net_device *dev = po->pppoe_dev;
2015+
2016+ if (sock_flag(sk, SOCK_DEAD) ||
2017+ !(sk->sk_state & PPPOX_CONNECTED) || !dev)
2018+ return -1;
2019+
2020+ path->type = DEV_PATH_PPPOE;
2021+ path->encap.proto = htons(ETH_P_PPP_SES);
2022+ path->encap.id = be16_to_cpu(po->num);
2023+ memcpy(path->encap.h_dest, po->pppoe_pa.remote, ETH_ALEN);
2024+ memcpy(ctx->daddr, po->pppoe_pa.remote, ETH_ALEN);
2025+ path->dev = ctx->dev;
2026+ ctx->dev = dev;
2027+
2028+ return 0;
2029+}
2030+
2031 static const struct ppp_channel_ops pppoe_chan_ops = {
2032 .start_xmit = pppoe_xmit,
2033+ .fill_forward_path = pppoe_fill_forward_path,
2034 };
2035
2036 static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
2037diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2038index 38af42bf8..9f64504ac 100644
2039--- a/include/linux/netdevice.h
2040+++ b/include/linux/netdevice.h
2041@@ -829,6 +829,59 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
2042 struct sk_buff *skb,
2043 struct net_device *sb_dev);
2044
2045+enum net_device_path_type {
2046+ DEV_PATH_ETHERNET = 0,
2047+ DEV_PATH_VLAN,
2048+ DEV_PATH_BRIDGE,
2049+ DEV_PATH_PPPOE,
2050+ DEV_PATH_DSA,
2051+};
2052+
2053+struct net_device_path {
2054+ enum net_device_path_type type;
2055+ const struct net_device *dev;
2056+ union {
2057+ struct {
2058+ u16 id;
2059+ __be16 proto;
2060+ u8 h_dest[ETH_ALEN];
2061+ } encap;
2062+ struct {
2063+ enum {
2064+ DEV_PATH_BR_VLAN_KEEP,
2065+ DEV_PATH_BR_VLAN_TAG,
2066+ DEV_PATH_BR_VLAN_UNTAG,
2067+ DEV_PATH_BR_VLAN_UNTAG_HW,
2068+ } vlan_mode;
2069+ u16 vlan_id;
2070+ __be16 vlan_proto;
2071+ } bridge;
2072+ struct {
2073+ int port;
2074+ u16 proto;
2075+ } dsa;
2076+ };
2077+};
2078+
2079+#define NET_DEVICE_PATH_STACK_MAX 5
2080+#define NET_DEVICE_PATH_VLAN_MAX 2
2081+
2082+struct net_device_path_stack {
2083+ int num_paths;
2084+ struct net_device_path path[NET_DEVICE_PATH_STACK_MAX];
2085+};
2086+
2087+struct net_device_path_ctx {
2088+ const struct net_device *dev;
2089+ u8 daddr[ETH_ALEN];
2090+
2091+ int num_vlans;
2092+ struct {
2093+ u16 id;
2094+ __be16 proto;
2095+ } vlan[NET_DEVICE_PATH_VLAN_MAX];
2096+};
2097+
2098 enum tc_setup_type {
2099 TC_SETUP_QDISC_MQPRIO,
2100 TC_SETUP_CLSU32,
2101@@ -844,6 +897,7 @@ enum tc_setup_type {
2102 TC_SETUP_ROOT_QDISC,
2103 TC_SETUP_QDISC_GRED,
2104 TC_SETUP_QDISC_TAPRIO,
2105+ TC_SETUP_FT,
2106 };
2107
2108 /* These structures hold the attributes of bpf state that are being passed
2109@@ -1239,6 +1293,8 @@ struct tlsdev_ops;
2110 * Get devlink port instance associated with a given netdev.
2111 * Called with a reference on the netdevice and devlink locks only,
2112 * rtnl_lock is not held.
2113+ * int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path);
2114+ * Get the forwarding path to reach the real device from the HW destination address
2115 */
2116 struct net_device_ops {
2117 int (*ndo_init)(struct net_device *dev);
2118@@ -1436,6 +1492,8 @@ struct net_device_ops {
2119 int (*ndo_xsk_wakeup)(struct net_device *dev,
2120 u32 queue_id, u32 flags);
2121 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
2122+ int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx,
2123+ struct net_device_path *path);
2124 };
2125
2126 /**
2127@@ -2661,6 +2719,8 @@ void dev_remove_offload(struct packet_offload *po);
2128
2129 int dev_get_iflink(const struct net_device *dev);
2130 int dev_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb);
2131+int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
2132+ struct net_device_path_stack *stack);
2133 struct net_device *__dev_get_by_flags(struct net *net, unsigned short flags,
2134 unsigned short mask);
2135 struct net_device *dev_get_by_name(struct net *net, const char *name);
2136diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
2137index 98966064e..91f9a9283 100644
2138--- a/include/linux/ppp_channel.h
2139+++ b/include/linux/ppp_channel.h
2140@@ -28,6 +28,9 @@ struct ppp_channel_ops {
2141 int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
2142 /* Handle an ioctl call that has come in via /dev/ppp. */
2143 int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
2144+ int (*fill_forward_path)(struct net_device_path_ctx *,
2145+ struct net_device_path *,
2146+ const struct ppp_channel *);
2147 };
2148
2149 struct ppp_channel {
2150diff --git a/include/net/dsa.h b/include/net/dsa.h
2151index 05f66d487..cafc74218 100644
2152--- a/include/net/dsa.h
2153+++ b/include/net/dsa.h
2154@@ -561,6 +561,8 @@ struct dsa_switch_ops {
2155 struct sk_buff *skb);
2156 };
2157
2158+struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
2159+
2160 struct dsa_switch_driver {
2161 struct list_head list;
2162 const struct dsa_switch_ops *ops;
2163@@ -653,6 +655,14 @@ static inline int call_dsa_notifiers(unsigned long val, struct net_device *dev,
2164 #define BRCM_TAG_GET_PORT(v) ((v) >> 8)
2165 #define BRCM_TAG_GET_QUEUE(v) ((v) & 0xff)
2166
2167+#if IS_ENABLED(CONFIG_NET_DSA)
2168+bool dsa_slave_dev_check(const struct net_device *dev);
2169+#else
2170+static inline bool dsa_slave_dev_check(const struct net_device *dev)
2171+{
2172+ return false;
2173+}
2174+#endif
2175
2176 netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
2177 int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
2178diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
2179index c6f7bd22d..59b873653 100644
2180--- a/include/net/flow_offload.h
2181+++ b/include/net/flow_offload.h
2182@@ -138,6 +138,7 @@ enum flow_action_id {
2183 FLOW_ACTION_MPLS_PUSH,
2184 FLOW_ACTION_MPLS_POP,
2185 FLOW_ACTION_MPLS_MANGLE,
2186+ FLOW_ACTION_PPPOE_PUSH,
2187 NUM_FLOW_ACTIONS,
2188 };
2189
2190@@ -213,6 +214,9 @@ struct flow_action_entry {
2191 u8 bos;
2192 u8 ttl;
2193 } mpls_mangle;
2194+ struct { /* FLOW_ACTION_PPPOE_PUSH */
2195+ u16 sid;
2196+ } pppoe;
2197 };
2198 };
2199
2200diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
2201index 2c739fc75..89ab8f180 100644
2202--- a/include/net/ip6_route.h
2203+++ b/include/net/ip6_route.h
2204@@ -314,12 +314,13 @@ static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *
2205 !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws);
2206 }
2207
2208-static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
2209+static inline unsigned int ip6_dst_mtu_maybe_forward(const struct dst_entry *dst,
2210+ bool forwarding)
2211 {
2212 struct inet6_dev *idev;
2213 unsigned int mtu;
2214
2215- if (dst_metric_locked(dst, RTAX_MTU)) {
2216+ if (!forwarding || dst_metric_locked(dst, RTAX_MTU)) {
2217 mtu = dst_metric_raw(dst, RTAX_MTU);
2218 if (mtu)
2219 goto out;
2220diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
2221index 7b3c873f8..e95483192 100644
2222--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
2223+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
2224@@ -4,7 +4,4 @@
2225
2226 extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
2227
2228-#include <linux/sysctl.h>
2229-extern struct ctl_table nf_ct_ipv6_sysctl_table[];
2230-
2231 #endif /* _NF_CONNTRACK_IPV6_H*/
2232diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
2233index 90690e37a..ce0bc3e62 100644
2234--- a/include/net/netfilter/nf_conntrack.h
2235+++ b/include/net/netfilter/nf_conntrack.h
2236@@ -279,6 +279,18 @@ static inline bool nf_ct_should_gc(const struct nf_conn *ct)
2237 !nf_ct_is_dying(ct);
2238 }
2239
2240+#define NF_CT_DAY (86400 * HZ)
2241+
2242+/* Set an arbitrary timeout large enough not to ever expire, this save
2243+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
2244+ * nf_ct_is_expired().
2245+ */
2246+static inline void nf_ct_offload_timeout(struct nf_conn *ct)
2247+{
2248+ if (nf_ct_expires(ct) < NF_CT_DAY / 2)
2249+ WRITE_ONCE(ct->timeout, nfct_time_stamp + NF_CT_DAY);
2250+}
2251+
2252 struct kernel_param;
2253
2254 int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp);
2255diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
2256index f7a060c6e..7f44a7715 100644
2257--- a/include/net/netfilter/nf_conntrack_acct.h
2258+++ b/include/net/netfilter/nf_conntrack_acct.h
2259@@ -65,6 +65,17 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
2260 #endif
2261 }
2262
2263+void nf_ct_acct_add(struct nf_conn *ct, u32 dir, unsigned int packets,
2264+ unsigned int bytes);
2265+
2266+static inline void nf_ct_acct_update(struct nf_conn *ct, u32 dir,
2267+ unsigned int bytes)
2268+{
2269+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
2270+ nf_ct_acct_add(ct, dir, 1, bytes);
2271+#endif
2272+}
2273+
2274 void nf_conntrack_acct_pernet_init(struct net *net);
2275
2276 int nf_conntrack_acct_init(void);
2277diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
developerb7c46752022-07-04 19:51:38 +08002278index 68d7fc92..7cf89767 100644
developer8cb3ac72022-07-04 10:55:14 +08002279--- a/include/net/netfilter/nf_flow_table.h
2280+++ b/include/net/netfilter/nf_flow_table.h
2281@@ -8,31 +8,99 @@
2282 #include <linux/rcupdate.h>
2283 #include <linux/netfilter.h>
2284 #include <linux/netfilter/nf_conntrack_tuple_common.h>
2285+#include <net/flow_offload.h>
2286 #include <net/dst.h>
2287+#include <linux/if_pppox.h>
2288+#include <linux/ppp_defs.h>
2289
2290 struct nf_flowtable;
2291+struct nf_flow_rule;
2292+struct flow_offload;
2293+enum flow_offload_tuple_dir;
2294+
2295+struct nf_flow_key {
2296+ struct flow_dissector_key_meta meta;
2297+ struct flow_dissector_key_control control;
2298+ struct flow_dissector_key_control enc_control;
2299+ struct flow_dissector_key_basic basic;
2300+ struct flow_dissector_key_vlan vlan;
2301+ struct flow_dissector_key_vlan cvlan;
2302+ union {
2303+ struct flow_dissector_key_ipv4_addrs ipv4;
2304+ struct flow_dissector_key_ipv6_addrs ipv6;
2305+ };
2306+ struct flow_dissector_key_keyid enc_key_id;
2307+ union {
2308+ struct flow_dissector_key_ipv4_addrs enc_ipv4;
2309+ struct flow_dissector_key_ipv6_addrs enc_ipv6;
2310+ };
2311+ struct flow_dissector_key_tcp tcp;
2312+ struct flow_dissector_key_ports tp;
2313+} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
2314+
2315+struct nf_flow_match {
2316+ struct flow_dissector dissector;
2317+ struct nf_flow_key key;
2318+ struct nf_flow_key mask;
2319+};
2320+
2321+struct nf_flow_rule {
2322+ struct nf_flow_match match;
2323+ struct flow_rule *rule;
2324+};
2325
2326 struct nf_flowtable_type {
2327 struct list_head list;
2328 int family;
2329 int (*init)(struct nf_flowtable *ft);
2330+ int (*setup)(struct nf_flowtable *ft,
2331+ struct net_device *dev,
2332+ enum flow_block_command cmd);
2333+ int (*action)(struct net *net,
2334+ const struct flow_offload *flow,
2335+ enum flow_offload_tuple_dir dir,
2336+ struct nf_flow_rule *flow_rule);
2337 void (*free)(struct nf_flowtable *ft);
2338 nf_hookfn *hook;
2339 struct module *owner;
2340 };
2341
2342+enum nf_flowtable_flags {
2343+ NF_FLOWTABLE_HW_OFFLOAD = 0x1, /* NFT_FLOWTABLE_HW_OFFLOAD */
2344+ NF_FLOWTABLE_COUNTER = 0x2, /* NFT_FLOWTABLE_COUNTER */
2345+};
2346+
2347 struct nf_flowtable {
2348 struct list_head list;
2349 struct rhashtable rhashtable;
2350+ int priority;
2351 const struct nf_flowtable_type *type;
2352 struct delayed_work gc_work;
2353+ unsigned int flags;
2354+ struct flow_block flow_block;
2355+ struct rw_semaphore flow_block_lock; /* Guards flow_block */
2356+ possible_net_t net;
2357 };
2358
2359+static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
2360+{
2361+ return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
2362+}
2363+
2364 enum flow_offload_tuple_dir {
2365 FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
2366 FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
2367- FLOW_OFFLOAD_DIR_MAX = IP_CT_DIR_MAX
2368 };
2369+#define FLOW_OFFLOAD_DIR_MAX IP_CT_DIR_MAX
2370+
2371+enum flow_offload_xmit_type {
2372+ FLOW_OFFLOAD_XMIT_UNSPEC = 0,
2373+ FLOW_OFFLOAD_XMIT_NEIGH,
2374+ FLOW_OFFLOAD_XMIT_XFRM,
2375+ FLOW_OFFLOAD_XMIT_DIRECT,
2376+};
2377+
2378+#define NF_FLOW_TABLE_ENCAP_MAX 2
2379
2380 struct flow_offload_tuple {
2381 union {
developerb7c46752022-07-04 19:51:38 +08002382@@ -52,13 +120,30 @@ struct flow_offload_tuple {
developer8cb3ac72022-07-04 10:55:14 +08002383
2384 u8 l3proto;
2385 u8 l4proto;
2386- u8 dir;
2387+ struct {
2388+ u16 id;
2389+ __be16 proto;
2390+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
2391
2392- u16 mtu;
2393+ /* All members above are keys for lookups, see flow_offload_hash(). */
2394+ struct { } __hash;
2395
developerb7c46752022-07-04 19:51:38 +08002396- struct {
2397- struct dst_entry *dst_cache;
2398- u32 dst_cookie;
developer8cb3ac72022-07-04 10:55:14 +08002399+ u8 dir:2,
2400+ xmit_type:2,
2401+ encap_num:2,
2402+ in_vlan_ingress:2;
2403+ u16 mtu;
2404+ union {
2405+ struct {
2406+ struct dst_entry *dst_cache;
2407+ u32 dst_cookie;
2408+ };
2409+ struct {
2410+ u32 ifidx;
2411+ u32 hw_ifidx;
2412+ u8 h_source[ETH_ALEN];
2413+ u8 h_dest[ETH_ALEN];
2414+ } out;
developerb7c46752022-07-04 19:51:38 +08002415 };
developer8cb3ac72022-07-04 10:55:14 +08002416 };
2417
developerec862f42023-03-23 13:08:45 +08002418@@ -67,52 +152,140 @@ struct flow_offload_tuple_rhash {
developer8cb3ac72022-07-04 10:55:14 +08002419 struct flow_offload_tuple tuple;
2420 };
2421
2422-#define FLOW_OFFLOAD_SNAT 0x1
2423-#define FLOW_OFFLOAD_DNAT 0x2
2424-#define FLOW_OFFLOAD_DYING 0x4
2425-#define FLOW_OFFLOAD_TEARDOWN 0x8
2426+enum nf_flow_flags {
2427+ NF_FLOW_SNAT,
2428+ NF_FLOW_DNAT,
2429+ NF_FLOW_TEARDOWN,
2430+ NF_FLOW_HW,
developerec862f42023-03-23 13:08:45 +08002431+ NF_FLOW_HW_ACCT_DYING,
developer8cb3ac72022-07-04 10:55:14 +08002432+ NF_FLOW_HW_DYING,
2433+ NF_FLOW_HW_DEAD,
2434+ NF_FLOW_HW_PENDING,
2435+};
2436+
2437+enum flow_offload_type {
2438+ NF_FLOW_OFFLOAD_UNSPEC = 0,
2439+ NF_FLOW_OFFLOAD_ROUTE,
2440+};
2441
2442 struct flow_offload {
2443 struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
2444- u32 flags;
2445- union {
2446- /* Your private driver data here. */
2447- u32 timeout;
2448- };
2449+ struct nf_conn *ct;
2450+ unsigned long flags;
2451+ u16 type;
2452+ u32 timeout;
2453+ struct rcu_head rcu_head;
2454 };
2455
2456 #define NF_FLOW_TIMEOUT (30 * HZ)
2457+#define nf_flowtable_time_stamp (u32)jiffies
2458+
2459+unsigned long flow_offload_get_timeout(struct flow_offload *flow);
2460+
2461+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
2462+{
2463+ return (__s32)(timeout - nf_flowtable_time_stamp);
2464+}
2465
2466 struct nf_flow_route {
2467 struct {
2468- struct dst_entry *dst;
2469+ struct dst_entry *dst;
2470+ struct {
2471+ u32 ifindex;
2472+ struct {
2473+ u16 id;
2474+ __be16 proto;
2475+ } encap[NF_FLOW_TABLE_ENCAP_MAX];
2476+ u8 num_encaps:2,
2477+ ingress_vlans:2;
2478+ } in;
2479+ struct {
2480+ u32 ifindex;
2481+ u32 hw_ifindex;
2482+ u8 h_source[ETH_ALEN];
2483+ u8 h_dest[ETH_ALEN];
2484+ } out;
2485+ enum flow_offload_xmit_type xmit_type;
2486 } tuple[FLOW_OFFLOAD_DIR_MAX];
2487 };
2488
2489-struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
2490- struct nf_flow_route *route);
2491+struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
2492 void flow_offload_free(struct flow_offload *flow);
2493
2494+static inline int
2495+nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
2496+ flow_setup_cb_t *cb, void *cb_priv)
2497+{
2498+ struct flow_block *block = &flow_table->flow_block;
2499+ struct flow_block_cb *block_cb;
2500+ int err = 0;
2501+
2502+ down_write(&flow_table->flow_block_lock);
2503+ block_cb = flow_block_cb_lookup(block, cb, cb_priv);
2504+ if (block_cb) {
2505+ err = -EEXIST;
2506+ goto unlock;
2507+ }
2508+