From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 2 Jun 2022 15:32:07 +0800
Subject: [PATCH 4/8] 9993-add-wed

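Add Wireless Ethernet Dispatch (WED) support for MT7622: a new mtk_wed
core driver with debugfs support and register definitions, device tree
nodes for the two WED blocks (plus the hifsys and pcie-mirror syscons),
a separate dma_dev pointer in mtk_eth so WED can take over DMA mapping
for the ethernet rings, and PPE/flow-offload extensions (software L2
bridge flows and WDMA forwarding info) used to bind offloaded flows to
the WLAN path.

For reference, a minimal sketch of the consumer side, assuming the node
labels added by the mt7622.dtsi hunks below:

	&eth {
		mediatek,cci-control = <&cci_control2>;
		mediatek,wed = <&wed0>, <&wed1>;
		mediatek,pcie-mirror = <&pcie_mirror>;
		mediatek,hifsys = <&hifsys>;
		dma-coherent;
	};
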
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 arch/arm64/boot/dts/mediatek/mt7622.dtsi | 32 +-
 drivers/net/ethernet/mediatek/Kconfig | 4 +
 drivers/net/ethernet/mediatek/Makefile | 5 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 136 ++-
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14 +-
 drivers/net/ethernet/mediatek/mtk_ppe.c | 373 +++++++-
 drivers/net/ethernet/mediatek/mtk_ppe.h | 89 +-
 .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 4 +-
 .../net/ethernet/mediatek/mtk_ppe_offload.c | 167 +++-
 drivers/net/ethernet/mediatek/mtk_wed.c | 876 ++++++++++++++++++
 drivers/net/ethernet/mediatek/mtk_wed.h | 135 +++
 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 175 ++++
 drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8 +
 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251 +++++
 include/linux/netdevice.h | 7 +
 include/linux/soc/mediatek/mtk_wed.h | 131 +++
 net/core/dev.c | 4 +
 17 files changed, 2283 insertions(+), 128 deletions(-)
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
 create mode 100644 include/linux/soc/mediatek/mtk_wed.h

diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 369e01389..d0fbc367e 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -338,7 +338,7 @@
 };

 cci_control2: slave-if@5000 {
- compatible = "arm,cci-400-ctrl-if";
+ compatible = "arm,cci-400-ctrl-if", "syscon";
 interface-type = "ace";
 reg = <0x5000 0x1000>;
 };
@@ -920,6 +920,11 @@
 };
 };

+ hifsys: syscon@1af00000 {
+ compatible = "mediatek,mt7622-hifsys", "syscon";
+ reg = <0 0x1af00000 0 0x70>;
+ };
+
 ethsys: syscon@1b000000 {
 compatible = "mediatek,mt7622-ethsys",
 "syscon";
@@ -938,6 +943,26 @@
 #dma-cells = <1>;
 };

+ pcie_mirror: pcie-mirror@10000400 {
+ compatible = "mediatek,mt7622-pcie-mirror",
+ "syscon";
+ reg = <0 0x10000400 0 0x10>;
+ };
+
+ wed0: wed@1020a000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020a000 0 0x1000>;
+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ wed1: wed@1020b000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020b000 0 0x1000>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
+ };
+
 eth: ethernet@1b100000 {
 compatible = "mediatek,mt7622-eth",
 "mediatek,mt2701-eth",
@@ -964,6 +989,11 @@
 power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
 mediatek,ethsys = <&ethsys>;
 mediatek,sgmiisys = <&sgmiisys>;
+ mediatek,cci-control = <&cci_control2>;
+ mediatek,wed = <&wed0>, <&wed1>;
+ mediatek,pcie-mirror = <&pcie_mirror>;
+ mediatek,hifsys = <&hifsys>;
+ dma-coherent;
 #address-cells = <1>;
 #size-cells = <0>;
 status = "disabled";
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
old mode 100755
new mode 100644
index 42e6b38d2..8ab6615a3
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK

 if NET_VENDOR_MEDIATEK

+config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+
 config NET_MEDIATEK_SOC
 tristate "MediaTek SoC Gigabit Ethernet support"
 select PHYLINK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
old mode 100755
new mode 100644
index 0a6af99f1..3528f1b3c
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -6,4 +6,9 @@
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
 mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
old mode 100755
new mode 100644
index 819d8a0be..2121335a1
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -20,12 +21,14 @@
 #include <linux/pinctrl/devinfo.h>
 #include <linux/phylink.h>
 #include <linux/gpio/consumer.h>
+#include <linux/bitfield.h>
 #include <net/dsa.h>

 #include "mtk_eth_soc.h"
 #include "mtk_eth_dbg.h"
 #include "mtk_eth_reset.h"
 #include "mtk_hnat/hnat.h"
+#include "mtk_wed.h"

 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
 #include "mtk_hnat/nf_hnat_mtk.h"
@@ -1116,7 +1119,7 @@ static int mtk_init_fq_dma(struct mtk_et
 int i;

 if (!eth->soc->has_sram) {
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
 cnt * soc->txrx.txd_size,
 &eth->phy_scratch_ring,
 GFP_KERNEL);
@@ -1134,10 +1137,10 @@ static int mtk_init_fq_dma(struct mtk_et
 if (unlikely(!eth->scratch_head))
 return -ENOMEM;

- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 return -ENOMEM;

 phy_ring_tail = eth->phy_scratch_ring +
@@ -1201,26 +1204,26 @@ static void mtk_tx_unmap(struct mtk_eth
 {
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr0),
 dma_unmap_len(tx_buf, dma_len0),
 DMA_TO_DEVICE);
 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr0),
 dma_unmap_len(tx_buf, dma_len0),
 DMA_TO_DEVICE);
 }
 } else {
 if (dma_unmap_len(tx_buf, dma_len0)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr0),
 dma_unmap_len(tx_buf, dma_len0),
 DMA_TO_DEVICE);
 }

 if (dma_unmap_len(tx_buf, dma_len1)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr1),
 dma_unmap_len(tx_buf, dma_len1),
 DMA_TO_DEVICE);
@@ -1454,9 +1457,9 @@ static int mtk_tx_map(struct sk_buff *sk
 itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 memset(itx_buf, 0, sizeof(*itx_buf));

- txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
 DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
 return -ENOMEM;

 mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
@@ -1497,10 +1500,10 @@ static int mtk_tx_map(struct sk_buff *sk
 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
 !(frag_size - txd_info.size);
- txd_info.addr = skb_frag_dma_map(eth->dev, frag,
+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
 offset, txd_info.size,
 DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
 goto err_dma;

 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
@@ -1737,6 +1740,7 @@ static int mtk_poll_rx(struct napi_struc
 struct net_device *netdev = NULL;
 unsigned int pktlen;
 dma_addr_t dma_addr = 0;
+ u32 hash, reason;
 int mac = 0;

 if (eth->hwlro)
@@ -1787,12 +1791,12 @@ static int mtk_poll_rx(struct napi_struc
 netdev->stats.rx_dropped++;
 goto release_desc;
 }
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
 new_data + NET_SKB_PAD +
 eth->ip_align,
 ring->buf_size,
 DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
 skb_free_frag(new_data);
 netdev->stats.rx_dropped++;
 goto release_desc;
@@ -1801,7 +1805,7 @@ static int mtk_poll_rx(struct napi_struc
 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;

- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
 (u64)(trxd.rxd1 | addr64),
 ring->buf_size, DMA_FROM_DEVICE);

@@ -1827,6 +1831,17 @@ static int mtk_poll_rx(struct napi_struc
 skb_checksum_none_assert(skb);
 skb->protocol = eth_type_trans(skb, netdev);

+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY) {
+ hash = jhash_1word(hash, 0);
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ }
+
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe, skb,
+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
 MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
@@ -2120,7 +2135,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 goto no_tx_mem;

 if (!eth->soc->has_sram)
- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 &ring->phys, GFP_KERNEL);
 else {
 ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
@@ -2154,7 +2169,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 * descriptors in ring->dma_pdma.
 */
 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 &ring->phys_pdma, GFP_KERNEL);
 if (!ring->dma_pdma)
 goto no_tx_mem;
@@ -2215,14 +2230,14 @@ static void mtk_tx_clean(struct mtk_eth
 }

 if (!eth->soc->has_sram && ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 MTK_DMA_SIZE * soc->txrx.txd_size,
 ring->dma, ring->phys);
 ring->dma = NULL;
 }

 if (ring->dma_pdma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 MTK_DMA_SIZE * soc->txrx.txd_size,
 ring->dma_pdma, ring->phys_pdma);
 ring->dma_pdma = NULL;
@@ -2267,7 +2282,7 @@ static int mtk_rx_alloc(struct mtk_eth *

 if ((!eth->soc->has_sram) || (eth->soc->has_sram
 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
- ring->dma = dma_alloc_coherent(eth->dev,
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
 rx_dma_size * eth->soc->txrx.rxd_size,
 &ring->phys, GFP_KERNEL);
 else {
@@ -2284,11 +2299,11 @@ static int mtk_rx_alloc(struct mtk_eth *
 for (i = 0; i < rx_dma_size; i++) {
 struct mtk_rx_dma_v2 *rxd;

- dma_addr_t dma_addr = dma_map_single(eth->dev,
+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
 ring->data[i] + NET_SKB_PAD + eth->ip_align,
 ring->buf_size,
 DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 return -ENOMEM;

 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
@@ -2360,7 +2375,7 @@ static void mtk_rx_clean(struct mtk_eth
 MTK_8GB_ADDRESSING)) ?
 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;

- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
 (u64)(rxd->rxd1 | addr64),
 ring->buf_size,
 DMA_FROM_DEVICE);
@@ -2374,7 +2389,7 @@ static void mtk_rx_clean(struct mtk_eth
 return;

 if (ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 ring->dma_size * eth->soc->txrx.rxd_size,
 ring->dma,
 ring->phys);
@@ -2861,7 +2876,7 @@ static void mtk_dma_free(struct mtk_eth
 if (eth->netdev[i])
 netdev_reset_queue(eth->netdev[i]);
 if ( !eth->soc->has_sram && eth->scratch_ring) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 MTK_DMA_SIZE * soc->txrx.txd_size,
 eth->scratch_ring, eth->phy_scratch_ring);
 eth->scratch_ring = NULL;
@@ -3243,7 +3258,7 @@ static int mtk_stop(struct net_device *d
 mtk_dma_free(eth);

 if (eth->soc->offload_version)
- mtk_ppe_stop(&eth->ppe);
+ mtk_ppe_stop(eth->ppe);

 return 0;
 }
@@ -3320,6 +3335,8 @@ static int mtk_napi_init(struct mtk_eth

 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
 {
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
 int i, ret = 0;
 u32 val;

@@ -3338,6 +3355,10 @@ static int mtk_hw_init(struct mtk_eth *e
 goto err_disable_pm;
 }

+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 ret = device_reset(eth->dev);
 if (ret) {
@@ -4091,6 +4112,35 @@ free_netdev:
 return err;
 }

+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 struct device_node *mac_np;
@@ -4104,6 +4154,7 @@ static int mtk_probe(struct platform_dev
 eth->soc = of_device_get_match_data(&pdev->dev);

 eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
 eth->base = devm_platform_ioremap_resource(pdev, 0);
 if (IS_ERR(eth->base))
 return PTR_ERR(eth->base);
@@ -4176,6 +4227,16 @@ static int mtk_probe(struct platform_dev
 }
 }

+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
+
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,cci-control");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
 GFP_KERNEL);
@@ -4217,6 +4278,22 @@ static int mtk_probe(struct platform_dev
 }
 }

+ for (i = 0;; i++) {
+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ static const u32 wdma_regs[] = {
+ MTK_WDMA0_BASE,
+ MTK_WDMA1_BASE
+ };
+ void __iomem *wdma;
+
+ if (!np || i >= ARRAY_SIZE(wdma_regs))
+ break;
+
+ wdma = eth->base + wdma_regs[i];
+ mtk_wed_add_hw(np, eth, wdma, i);
+ }
+
 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
 eth->irq[i] = eth->irq[0];
@@ -4320,10 +4397,11 @@ static int mtk_probe(struct platform_dev
 }

 if (eth->soc->offload_version) {
- err = mtk_ppe_init(&eth->ppe, eth->dev,
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
+ if (!eth->ppe) {
+ err = -ENOMEM;
 goto err_free_dev;
+ }

 err = mtk_eth_offload_init(eth);
 if (err)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
old mode 100755
new mode 100644
index 349f98503..b52378bd6
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -549,6 +549,9 @@
 #define RX_DMA_SPORT_MASK 0x7
 #define RX_DMA_SPORT_MASK_V2 0xf

+#define MTK_WDMA0_BASE 0x2800
+#define MTK_WDMA1_BASE 0x2c00
+
 /* QDMA descriptor txd4 */
 #define TX_DMA_CHKSUM (0x7 << 29)
 #define TX_DMA_TSO BIT(28)
@@ -773,6 +776,12 @@
 #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28


+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
+
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
 #define SGMSYS_PCS_CONTROL_1 0x0
@@ -1520,6 +1529,7 @@ struct mtk_phylink_priv {
 /* struct mtk_eth - This is the main datasructure for holding the state
 * of the driver
 * @dev: The device pointer
+ * @dma_dev: The device pointer used for dma mapping/alloc
 * @base: The mapped register i/o base
 * @page_lock: Make sure that register operations are atomic
 * @tx_irq__lock: Make sure that IRQ register operations are atomic
@@ -1554,6 +1564,7 @@ struct mtk_phylink_priv {

 struct mtk_eth {
 struct device *dev;
+ struct device *dma_dev;
 void __iomem *base;
 void __iomem *sram_base;
 spinlock_t page_lock;
@@ -1596,7 +1607,7 @@ struct mtk_eth {
 spinlock_t syscfg0_lock;
 struct timer_list mtk_dma_monitor_timer;

- struct mtk_ppe ppe;
+ struct mtk_ppe *ppe;
 struct rhashtable flow_table;
 };

@@ -1655,6 +1666,7 @@ void ethsys_reset(struct mtk_eth *eth, u
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);

 int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id);
 int mtk_usxgmii_init(struct mtk_xgmii *ss, struct device_node *r);

diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
old mode 100644
new mode 100755
index 66298e223..3d75c22be
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
 #include <linux/iopoll.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"

+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
 {
 writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
 return ppe_m32(ppe, reg, val, 0);
 }

+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
 {
 int ret;
@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
 u32 hash;

 switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
- case MTK_PPE_PKT_TYPE_BRIDGE:
- hv1 = e->bridge.src_mac_lo;
- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
- hv2 = e->bridge.src_mac_hi >> 16;
- hv2 ^= e->bridge.dest_mac_lo;
- hv3 = e->bridge.dest_mac_hi;
- break;
 case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
 case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
 hv1 = e->ipv4.orig.ports;
@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
 {
 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
+
 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 return &entry->ipv6.l2;

@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
 {
 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
+
 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 return &entry->ipv6.ib2;

@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
 if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
 entry->ipv6.ports = ports_pad;

- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
 entry->ipv6.ib2 = val;
 l2 = &entry->ipv6.l2;
 } else {
@@ -329,32 +351,168 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
 return 0;
 }

+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+ return 0;
+}
+
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
 FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }

-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+ dma_wmb();
+ mtk_ppe_cache_clear(ppe);
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u16 timestamp;
+ u16 now;
+
+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+ if (timestamp > now)
+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
+ struct mtk_flow_entry *cur;
 struct mtk_foe_entry *hwe;
- u32 hash;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
+
+ hwe = &ppe->foe_table[cur->hash];
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry *hwe;
+ struct mtk_foe_entry foe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
+
+ if (entry->hash == 0xffff)
+ goto out;
+
+ hwe = &ppe->foe_table[entry->hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ if (!mtk_flow_entry_match(entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_foe_entry *hwe;
+ u16 timestamp;

+ timestamp = mtk_eth_timestamp(ppe->eth);
 timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
 entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
 entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

- hash = mtk_ppe_hash_entry(entry);
 hwe = &ppe->foe_table[hash];
- if (!mtk_foe_entry_usable(hwe)) {
- hwe++;
- hash++;
-
- if (!mtk_foe_entry_usable(hwe))
- return -ENOSPC;
- }
-
 memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
 wmb();
 hwe->ib1 = entry->ib1;
@@ -362,32 +519,201 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 dma_wmb();

 mtk_ppe_cache_clear(ppe);
+}

- return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(&entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+ GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = &ppe->foe_table[hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(&foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
 }

-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ struct hlist_head *head = &ppe->foe_flow[hash / 4];
+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (hash >= MTK_PPE_ENTRIES)
+ goto out;
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ if (!skb)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ tag += 4;
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
 int version)
 {
+ struct device *dev = eth->dev;
 struct mtk_foe_entry *foe;
+ struct mtk_ppe *ppe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

 /* need to allocate a separate device, since it PPE DMA access is
 * not coherent.
 */
 ppe->base = base;
+ ppe->eth = eth;
 ppe->dev = dev;
 ppe->version = version;

 foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
 &ppe->foe_phys, GFP_KERNEL);
 if (!foe)
- return -ENOMEM;
+ return NULL;

 ppe->foe_table = foe;

 mtk_ppe_debugfs_init(ppe);

- return 0;
+ return ppe;
 }

 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
 static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
 int i, k;

- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));

 if (!IS_ENABLED(CONFIG_SOC_MT7621))
 return;
@@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 MTK_PPE_FLOW_CFG_IP4_NAT |
 MTK_PPE_FLOW_CFG_IP4_NAPT |
 MTK_PPE_FLOW_CFG_IP4_DSLITE |
- MTK_PPE_FLOW_CFG_L2_BRIDGE |
 MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
 ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2a..1f5cf1c9a 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,6 +6,7 @@

 #include <linux/kernel.h>
 #include <linux/bitfield.h>
+#include <linux/rhashtable.h>

 #define MTK_ETH_PPE_BASE 0xc00

@@ -48,9 +49,9 @@ enum {
 #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
 #define MTK_FOE_IB2_MULTICAST BIT(8)

-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)

 #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)

@@ -58,9 +59,9 @@ enum {

 #define MTK_FOE_IB2_DSCP GENMASK(31, 24)

-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)

 enum {
 MTK_FOE_STATE_INVALID,
@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
 u16 src_mac_lo;
 };

+/* software-only entry type */
 struct mtk_foe_bridge {
- u32 dest_mac_hi;
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 vlan;

- u16 src_mac_lo;
- u16 dest_mac_lo;
-
- u32 src_mac_hi;
+ struct {} key_end;

 u32 ib2;

- u32 _rsv[5];
-
- u32 udf_tsid;
 struct mtk_foe_mac_info l2;
 };

@@ -235,7 +233,37 @@ enum {
 MTK_PPE_CPU_REASON_INVALID = 0x1f,
 };

+enum {
+ MTK_FLOW_TYPE_L4,
+ MTK_FLOW_TYPE_L2,
+ MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+ union {
+ struct hlist_node list;
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+ };
+ };
+ u8 type;
+ s8 wed_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+ struct {
+ struct mtk_flow_entry *base_flow;
+ struct hlist_node list;
+ struct {} end;
+ } l2_data;
+ };
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
 struct mtk_ppe {
+ struct mtk_eth *eth;
 struct device *dev;
 void __iomem *base;
 int version;
@@ -243,19 +271,35 @@ struct mtk_ppe {
 struct mtk_foe_entry *foe_table;
 dma_addr_t foe_phys;

+ u16 foe_check_time[MTK_PPE_ENTRIES];
+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
+ struct rhashtable l2_flows;
+
 void *acct_table;
 };

-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
 int mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);

+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
 static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
- ppe->foe_table[hash].ib1 = 0;
- dma_wmb();
+ u16 now, diff;
+
+ if (!ppe)
+ return;
+
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ __mtk_ppe_check_skb(ppe, skb, hash);
 }

 static inline int
@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp);
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);

 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b482340..a591ab1fd 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
 static const char * const type_str[] = {
 [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
 [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
 [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
@@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
 struct dentry *root;

 root = debugfs_create_dir("mtk_ppe", NULL);
+ if (!root)
+ return -ENOMEM;
+
 debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
 debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);

diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 4294f0c74..d4a012608 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -11,6 +11,7 @@
 #include <net/pkt_cls.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
+#include "mtk_wed.h"

 struct mtk_flow_data {
 struct ethhdr eth;
@@ -30,6 +31,8 @@ struct mtk_flow_data {
 __be16 src_port;
 __be16 dst_port;

+ u16 vlan_in;
+
 struct {
 u16 id;
 __be16 proto;
@@ -41,12 +44,6 @@ struct mtk_flow_data {
 } pppoe;
 };

-struct mtk_flow_entry {
- struct rhash_head node;
- unsigned long cookie;
- u16 hash;
-};
-
 static const struct rhashtable_params mtk_flow_ht_params = {
 .head_offset = offsetof(struct mtk_flow_entry, node),
 .key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
 .automatic_shrinking = true,
 };

-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
 static int
 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
 bool egress)
@@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
 memcpy(dest, src, act->mangle.mask ? 2 : 4);
 }

+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+ struct net_device_path_ctx ctx = {
+ .dev = dev,
+ };
+ struct net_device_path path = {};
+
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
+ if (!dev->netdev_ops->ndo_fill_forward_path)
+ return -1;
+
+ memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+ return -1;
+
+ if (path.type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->wdma_idx = path.mtk_wdma.wdma_idx;
+ info->queue = path.mtk_wdma.queue;
+ info->bss = path.mtk_wdma.bss;
+ info->wcid = path.mtk_wdma.wcid;
+
+ return 0;
+}
+

 static int
 mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)

 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev)
+ struct net_device *dev, const u8 *dest_mac,
+ int *wed_index)
 {
+ struct mtk_wdma_info info = {};
 int pse_port, dsa_port;

+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+ info.wcid);
+ pse_port = PSE_PPE0_PORT;
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
+
 dsa_port = mtk_flow_get_dsa_port(&dev);
 if (dsa_port >= 0)
 mtk_foe_entry_set_dsa(foe, dsa_port);
@@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 else
 return -EOPNOTSUPP;

+out:
 mtk_foe_entry_set_pse_port(foe, pse_port);

 return 0;
@@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 struct net_device *odev = NULL;
 struct mtk_flow_entry *entry;
 int offload_type = 0;
+ int wed_index = -1;
 u16 addr_type = 0;
- u32 timestamp;
 u8 l4proto = 0;
 int err = 0;
- int hash;
 int i;

 if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 return -EOPNOTSUPP;
 }

+ switch (addr_type) {
+ case 0:
+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan_in = match.key->vlan_id;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
 flow_action_for_each(i, act, &rule->action) {
 switch (act->id) {
 case FLOW_ACTION_MANGLE:
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
 if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
 mtk_flow_offload_mangle_eth(act, &data.eth);
 break;
@@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 }
 }

- switch (addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
- break;
- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
 if (!is_valid_ether_addr(data.eth.h_source) ||
 !is_valid_ether_addr(data.eth.h_dest))
 return -EINVAL;
@@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
 struct flow_match_ports ports;

+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
 flow_rule_match_ports(rule, &ports);
 data.src_port = ports.key->src;
 data.dst_port = ports.key->dst;
- } else {
+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
 return -EOPNOTSUPP;
 }

@@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (act->id != FLOW_ACTION_MANGLE)
 continue;

+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
 switch (act->mangle.htype) {
 case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
 case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 return err;
 }

+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ foe.bridge.vlan = data.vlan_in;
+
 if (data.vlan.num == 1) {
 if (data.vlan.proto != htons(ETH_P_8021Q))
 return -EOPNOTSUPP;
@@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (data.pppoe.num == 1)
 mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

- err = mtk_flow_set_output_device(eth, &foe, odev);
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
 if (err)
 return err;

+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 if (!entry)
 return -ENOMEM;

 entry->cookie = f->cookie;
- timestamp = mtk_eth_timestamp(eth);
- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
- if (hash < 0) {
- err = hash;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+
+ if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
 goto free;
- }

- entry->hash = hash;
 err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
 mtk_flow_ht_params);
 if (err < 0)
- goto clear_flow;
+ goto clear;

 return 0;
-clear_flow:
- mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+ mtk_foe_entry_clear(eth->ppe, entry);
 free:
 kfree(entry);
+ if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);
 return err;
 }

@@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (!entry)
 return -ENOENT;

- mtk_foe_entry_clear(&eth->ppe, entry->hash);
+ mtk_foe_entry_clear(eth->ppe, entry);
 rhashtable_remove_fast(&eth->flow_table, &entry->node,
 mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+ mtk_wed_flow_remove(entry->wed_index);
 kfree(entry);

 return 0;
@@ -406,7 +477,6 @@ static int
 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 {
 struct mtk_flow_entry *entry;
- int timestamp;
 u32 idle;

 entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (!entry)
 return -ENOENT;

- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
- if (timestamp < 0)
- return -ETIMEDOUT;
-
- idle = mtk_eth_timestamp(eth) - timestamp;
+ idle = mtk_foe_entry_idle_time(eth->ppe, entry);
 f->stats.lastused = jiffies - idle * HZ;

 return 0;
@@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 struct flow_block_cb *block_cb;
 flow_setup_cb_t *cb;

- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
 return -EOPNOTSUPP;

 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 void *type_data)
 {
- if (type == TC_SETUP_FT)
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
 return mtk_eth_setup_tc_block(dev, type_data);
-
- return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
 }

 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
 return 0;

 return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
1579diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
1580new file mode 100644
1581index 000000000..ea1cbdf1a
1582--- /dev/null
1583+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
1584@@ -0,0 +1,876 @@
1585+// SPDX-License-Identifier: GPL-2.0-only
1586+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1587+
1588+#include <linux/kernel.h>
1589+#include <linux/slab.h>
1590+#include <linux/module.h>
1591+#include <linux/bitfield.h>
1592+#include <linux/dma-mapping.h>
1593+#include <linux/skbuff.h>
1594+#include <linux/of_platform.h>
1595+#include <linux/of_address.h>
1596+#include <linux/mfd/syscon.h>
1597+#include <linux/debugfs.h>
1598+#include <linux/iopoll.h>
1599+#include <linux/soc/mediatek/mtk_wed.h>
1600+#include "mtk_eth_soc.h"
1601+#include "mtk_wed_regs.h"
1602+#include "mtk_wed.h"
1603+#include "mtk_ppe.h"
1604+
1605+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
1606+
1607+#define MTK_WED_PKT_SIZE 1900
1608+#define MTK_WED_BUF_SIZE 2048
1609+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1610+
1611+#define MTK_WED_TX_RING_SIZE 2048
1612+#define MTK_WED_WDMA_RING_SIZE 1024
1613+
1614+static struct mtk_wed_hw *hw_list[2];
1615+static DEFINE_MUTEX(hw_lock);
1616+
1617+static void
1618+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1619+{
1620+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
1621+}
1622+
1623+static void
1624+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1625+{
1626+ return wed_m32(dev, reg, 0, mask);
1627+}
1628+
1629+static void
1630+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
1631+{
1632+ return wed_m32(dev, reg, mask, 0);
1633+}
1634+
1635+static void
1636+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1637+{
1638+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
1639+}
1640+
1641+static void
1642+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1643+{
1644+ wdma_m32(dev, reg, 0, mask);
1645+}
1646+
1647+static u32
1648+mtk_wed_read_reset(struct mtk_wed_device *dev)
1649+{
1650+ return wed_r32(dev, MTK_WED_RESET);
1651+}
1652+
1653+static void
1654+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
1655+{
1656+ u32 status;
1657+
1658+ wed_w32(dev, MTK_WED_RESET, mask);
1659+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
1660+ !(status & mask), 0, 1000))
1661+ WARN_ON_ONCE(1);
1662+}
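
mtk_wed_reset() sets a self-clearing reset bit and waits with readx_poll_timeout() for the hardware to drop it, warning on timeout. The standalone sketch below models that poll pattern; the fake register that clears itself after a few reads stands in for the WED block:

#include <stdio.h>
#include <stdint.h>

static uint32_t reset_reg;
static int reads_left;

/* Fake hardware: the reset bit self-clears after a few polls. */
static uint32_t read_reset(void)
{
	if (reads_left && --reads_left == 0)
		reset_reg = 0;
	return reset_reg;
}

/* Rough equivalent of the readx_poll_timeout() call above. */
static int poll_reset_clear(uint32_t mask, int max_polls)
{
	while (max_polls--) {
		if (!(read_reset() & mask))
			return 0;
	}
	return -1; /* timed out; the driver WARNs in this case */
}

int main(void)
{
	reset_reg = 1u << 31; /* MTK_WED_RESET_WED-style bit */
	reads_left = 3;
	printf("reset %s\n", poll_reset_clear(1u << 31, 1000) ?
	       "timed out" : "completed");
	return 0;
}
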
1663+
1664+static struct mtk_wed_hw *
1665+mtk_wed_assign(struct mtk_wed_device *dev)
1666+{
1667+ struct mtk_wed_hw *hw;
1668+
1669+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
1670+ if (!hw || hw->wed_dev)
1671+ return NULL;
1672+
1673+ hw->wed_dev = dev;
1674+ return hw;
1675+}
1676+
1677+static int
1678+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
1679+{
1680+ struct mtk_wdma_desc *desc;
1681+ dma_addr_t desc_phys;
1682+ void **page_list;
1683+ int token = dev->wlan.token_start;
1684+ int ring_size;
1685+ int n_pages;
1686+ int i, page_idx;
1687+
1688+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
1689+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
1690+
1691+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
1692+ if (!page_list)
1693+ return -ENOMEM;
1694+
1695+ dev->buf_ring.size = ring_size;
1696+ dev->buf_ring.pages = page_list;
1697+
1698+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
1699+ &desc_phys, GFP_KERNEL);
1700+ if (!desc)
1701+ return -ENOMEM;
1702+
1703+ dev->buf_ring.desc = desc;
1704+ dev->buf_ring.desc_phys = desc_phys;
1705+
1706+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
1707+ dma_addr_t page_phys, buf_phys;
1708+ struct page *page;
1709+ void *buf;
1710+ int s;
1711+
1712+ page = __dev_alloc_pages(GFP_KERNEL, 0);
1713+ if (!page)
1714+ return -ENOMEM;
1715+
1716+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
1717+ DMA_BIDIRECTIONAL);
1718+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
1719+ __free_page(page);
1720+ return -ENOMEM;
1721+ }
1722+
1723+ page_list[page_idx++] = page;
1724+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
1725+ DMA_BIDIRECTIONAL);
1726+
1727+ buf = page_to_virt(page);
1728+ buf_phys = page_phys;
1729+
1730+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
1731+ u32 txd_size;
1732+
1733+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
1734+
1735+ desc->buf0 = buf_phys;
1736+ desc->buf1 = buf_phys + txd_size;
1737+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
1738+ txd_size) |
1739+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
1740+ MTK_WED_BUF_SIZE - txd_size) |
1741+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
1742+ desc->info = 0;
1743+ desc++;
1744+
1745+ buf += MTK_WED_BUF_SIZE;
1746+ buf_phys += MTK_WED_BUF_SIZE;
1747+ }
1748+
1749+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
1750+ DMA_BIDIRECTIONAL);
1751+ }
1752+
1753+ return 0;
1754+}
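
The sizing logic above rounds the requested buffer count down to a multiple of MTK_WED_BUF_PER_PAGE (two 2048-byte slots per 4 KiB page), so descriptor i always maps to page i / 2 at a MTK_WED_BUF_SIZE-aligned offset. A standalone sketch of that arithmetic (the nbuf value is invented):

#include <stdio.h>

#define PAGE_SIZE		4096
#define MTK_WED_BUF_SIZE	2048
#define MTK_WED_BUF_PER_PAGE	(PAGE_SIZE / 2048)

int main(void)
{
	int nbuf = 1027; /* arbitrary request from the WLAN driver */
	int ring_size = nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	int n_pages = ring_size / MTK_WED_BUF_PER_PAGE;

	printf("nbuf=%d -> ring_size=%d, n_pages=%d\n",
	       nbuf, ring_size, n_pages);

	/* Descriptor i lives in page i / BUF_PER_PAGE at a
	 * MTK_WED_BUF_SIZE-aligned offset, exactly as the nested
	 * loops in mtk_wed_buffer_alloc() walk it.
	 */
	for (int i = 0; i < 4; i++)
		printf("desc %d -> page %d, offset %d\n", i,
		       i / MTK_WED_BUF_PER_PAGE,
		       (i % MTK_WED_BUF_PER_PAGE) * MTK_WED_BUF_SIZE);
	return 0;
}
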
1755+
1756+static void
1757+mtk_wed_free_buffer(struct mtk_wed_device *dev)
1758+{
1759+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
1760+ void **page_list = dev->buf_ring.pages;
1761+ int page_idx;
1762+ int i;
1763+
1764+ if (!page_list)
1765+ return;
1766+
1767+ if (!desc)
1768+ goto free_pagelist;
1769+
1770+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
1771+ void *page = page_list[page_idx++];
1772+
1773+ if (!page)
1774+ break;
1775+
1776+ dma_unmap_page(dev->hw->dev, desc[i].buf0,
1777+ PAGE_SIZE, DMA_BIDIRECTIONAL);
1778+ __free_page(page);
1779+ }
1780+
1781+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
1782+ desc, dev->buf_ring.desc_phys);
1783+
1784+free_pagelist:
1785+ kfree(page_list);
1786+}
1787+
1788+static void
1789+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
1790+{
1791+ if (!ring->desc)
1792+ return;
1793+
1794+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
1795+ ring->desc, ring->desc_phys);
1796+}
1797+
1798+static void
1799+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
1800+{
1801+ int i;
1802+
1803+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
1804+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
1805+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1806+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
1807+}
1808+
1809+static void
1810+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
1811+{
1812+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1813+
1814+ if (!dev->hw->num_flows)
1815+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1816+
1817+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
1818+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
1819+}
1820+
1821+static void
1822+mtk_wed_stop(struct mtk_wed_device *dev)
1823+{
1824+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
1825+ mtk_wed_set_ext_int(dev, false);
1826+
1827+ wed_clr(dev, MTK_WED_CTRL,
1828+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1829+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1830+ MTK_WED_CTRL_WED_TX_BM_EN |
1831+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1832+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
1833+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
1834+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
1835+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
1836+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1837+
1838+ wed_clr(dev, MTK_WED_GLO_CFG,
1839+ MTK_WED_GLO_CFG_TX_DMA_EN |
1840+ MTK_WED_GLO_CFG_RX_DMA_EN);
1841+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1842+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1843+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1844+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1845+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1846+}
1847+
1848+static void
1849+mtk_wed_detach(struct mtk_wed_device *dev)
1850+{
1851+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
1852+ struct mtk_wed_hw *hw = dev->hw;
1853+
1854+ mutex_lock(&hw_lock);
1855+
1856+ mtk_wed_stop(dev);
1857+
1858+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1859+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1860+
1861+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1862+
1863+ mtk_wed_free_buffer(dev);
1864+ mtk_wed_free_tx_rings(dev);
1865+
1866+ if (of_dma_is_coherent(wlan_node))
1867+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1868+ BIT(hw->index), BIT(hw->index));
1869+
1870+ if (!hw_list[!hw->index]->wed_dev &&
1871+ hw->eth->dma_dev != hw->eth->dev)
1872+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
1873+
1874+ memset(dev, 0, sizeof(*dev));
1875+ module_put(THIS_MODULE);
1876+
1877+ hw->wed_dev = NULL;
1878+ mutex_unlock(&hw_lock);
1879+}
1880+
1881+static void
1882+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1883+{
1884+ u32 mask, set;
1885+ u32 offset;
1886+
1887+ mtk_wed_stop(dev);
1888+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1889+
1890+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1891+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1892+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1893+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1894+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1895+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1896+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1897+
1898+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
1899+
1900+ offset = dev->hw->index ? 0x04000400 : 0;
1901+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1902+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1903+
1904+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
1905+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1906+}
1907+
1908+static void
1909+mtk_wed_hw_init(struct mtk_wed_device *dev)
1910+{
1911+ if (dev->init_done)
1912+ return;
1913+
1914+ dev->init_done = true;
1915+ mtk_wed_set_ext_int(dev, false);
1916+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1917+ MTK_WED_TX_BM_CTRL_PAUSE |
1918+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
1919+ dev->buf_ring.size / 128) |
1920+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
1921+ MTK_WED_TX_RING_SIZE / 256));
1922+
1923+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1924+
1925+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1926+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1927+ dev->wlan.token_start) |
1928+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1929+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1930+
1931+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1932+
1933+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1934+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
1935+ MTK_WED_TX_BM_DYN_THR_HI);
1936+
1937+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1938+
1939+ wed_set(dev, MTK_WED_CTRL,
1940+ MTK_WED_CTRL_WED_TX_BM_EN |
1941+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1942+
1943+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1944+}
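
The buffer-manager programming above works in fixed-size groups: valid groups of 128 buffers, reserved groups of 256 TX ring slots, and a token ID window of exactly nbuf tokens starting at token_start. A standalone sketch of the derived values, with made-up WLAN parameters:

#include <stdio.h>

#define MTK_WED_TX_RING_SIZE 2048

int main(void)
{
	int nbuf = 1024, token_start = 0x8000; /* invented WLAN params */
	int buf_ring_size = nbuf; /* already a multiple of BUF_PER_PAGE */

	printf("VLD_GRP_NUM = %d\n", buf_ring_size / 128);
	printf("RSV_GRP_NUM = %d\n", MTK_WED_TX_RING_SIZE / 256);
	printf("TKID range  = 0x%04x..0x%04x\n",
	       token_start, token_start + nbuf - 1);
	return 0;
}
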
1945+
1946+static void
1947+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
1948+{
1949+ int i;
1950+
1951+ for (i = 0; i < size; i++) {
1952+ desc[i].buf0 = 0;
1953+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
1954+ desc[i].buf1 = 0;
1955+ desc[i].info = 0;
1956+ }
1957+}
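
Resetting a ring hands every descriptor back to software with only DMA_DONE set, so neither side sees stale buffers. The sketch below mirrors that loop in plain C; the kernel version additionally wraps the ctrl store in cpu_to_le32(), which is dropped here:

#include <stdio.h>
#include <stdint.h>

#define MTK_WDMA_DESC_CTRL_DMA_DONE (1u << 31)

struct mtk_wdma_desc {
	uint32_t buf0, ctrl, buf1, info;
};

static void ring_reset(struct mtk_wdma_desc *desc, int size)
{
	for (int i = 0; i < size; i++) {
		desc[i].buf0 = 0;
		desc[i].ctrl = MTK_WDMA_DESC_CTRL_DMA_DONE;
		desc[i].buf1 = 0;
		desc[i].info = 0;
	}
}

int main(void)
{
	struct mtk_wdma_desc ring[4];

	ring_reset(ring, 4);
	printf("desc0 ctrl=%08x (DMA_DONE set)\n", ring[0].ctrl);
	return 0;
}
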
1958+
1959+static u32
1960+mtk_wed_check_busy(struct mtk_wed_device *dev)
1961+{
1962+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
1963+ return true;
1964+
1965+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
1966+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
1967+ return true;
1968+
1969+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
1970+ return true;
1971+
1972+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
1973+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1974+ return true;
1975+
1976+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
1977+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1978+ return true;
1979+
1980+ if (wed_r32(dev, MTK_WED_CTRL) &
1981+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
1982+ return true;
1983+
1984+ return false;
1985+}
1986+
1987+static int
1988+mtk_wed_poll_busy(struct mtk_wed_device *dev)
1989+{
1990+ int sleep = 15000;
1991+ int timeout = 100 * sleep;
1992+ u32 val;
1993+
1994+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1995+ timeout, false, dev);
1996+}
1997+
1998+static void
1999+mtk_wed_reset_dma(struct mtk_wed_device *dev)
2000+{
2001+ bool busy = false;
2002+ u32 val;
2003+ int i;
2004+
2005+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
2006+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
2007+
2008+ if (!desc)
2009+ continue;
2010+
2011+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
2012+ }
2013+
2014+ if (mtk_wed_poll_busy(dev))
2015+ busy = mtk_wed_check_busy(dev);
2016+
2017+ if (busy) {
2018+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
2019+ } else {
2020+ wed_w32(dev, MTK_WED_RESET_IDX,
2021+ MTK_WED_RESET_IDX_TX |
2022+ MTK_WED_RESET_IDX_RX);
2023+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
2024+ }
2025+
2026+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
2027+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
2028+
2029+ if (busy) {
2030+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
2031+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
2032+ } else {
2033+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
2034+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
2035+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
2036+
2037+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2038+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2039+
2040+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
2041+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2042+ }
2043+
2044+ for (i = 0; i < 100; i++) {
2045+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
2046+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
2047+ break;
2048+ }
2049+
2050+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
2051+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
2052+
2053+ if (busy) {
2054+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
2055+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
2056+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
2057+ } else {
2058+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
2059+ MTK_WED_WPDMA_RESET_IDX_TX |
2060+ MTK_WED_WPDMA_RESET_IDX_RX);
2061+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
2062+ }
2063+
2065+
2066+static int
2067+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
2068+ int size)
2069+{
2070+ ring->desc = dma_alloc_coherent(dev->hw->dev,
2071+ size * sizeof(*ring->desc),
2072+ &ring->desc_phys, GFP_KERNEL);
2073+ if (!ring->desc)
2074+ return -ENOMEM;
2075+
2076+ ring->size = size;
2077+ mtk_wed_ring_reset(ring->desc, size);
2078+
2079+ return 0;
2080+}
2081+
2082+static int
2083+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
2084+{
2085+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
2086+
2087+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
2088+ return -ENOMEM;
2089+
2090+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2091+ wdma->desc_phys);
2092+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2093+ size);
2094+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2095+
2096+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2097+ wdma->desc_phys);
2098+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2099+ size);
2100+
2101+ return 0;
2102+}
2103+
2104+static void
2105+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
2106+{
2107+ u32 wdma_mask;
2108+ u32 val;
2109+ int i;
2110+
2111+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
2112+ if (!dev->tx_wdma[i].desc)
2113+ mtk_wed_wdma_ring_setup(dev, i, 16);
2114+
2115+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
2116+
2117+ mtk_wed_hw_init(dev);
2118+
2119+ wed_set(dev, MTK_WED_CTRL,
2120+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
2121+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
2122+ MTK_WED_CTRL_WED_TX_BM_EN |
2123+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
2124+
2125+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
2126+
2127+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
2128+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
2129+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
2130+
2131+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
2132+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
2133+
2134+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
2135+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
2136+
2137+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
2138+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
2139+
2140+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
2141+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
2142+
2143+ wed_set(dev, MTK_WED_GLO_CFG,
2144+ MTK_WED_GLO_CFG_TX_DMA_EN |
2145+ MTK_WED_GLO_CFG_RX_DMA_EN);
2146+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
2147+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
2148+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
2149+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2150+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
2151+
2152+ mtk_wed_set_ext_int(dev, true);
2153+ val = dev->wlan.wpdma_phys |
2154+ MTK_PCIE_MIRROR_MAP_EN |
2155+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
2156+
2157+ if (dev->hw->index)
2158+ val |= BIT(1);
2159+ val |= BIT(0);
2160+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
2161+
2162+ dev->running = true;
2163+}
2164+
2165+static int
2166+mtk_wed_attach(struct mtk_wed_device *dev)
2167+ __releases(RCU)
2168+{
2169+ struct mtk_wed_hw *hw;
2170+ int ret = 0;
2171+
2172+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
2173+ "mtk_wed_attach without holding the RCU read lock");
2174+
2175+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
2176+ !try_module_get(THIS_MODULE))
2177+ ret = -ENODEV;
2178+
2179+ rcu_read_unlock();
2180+
2181+ if (ret)
2182+ return ret;
2183+
2184+ mutex_lock(&hw_lock);
2185+
2186+ hw = mtk_wed_assign(dev);
2187+ if (!hw) {
2188+ module_put(THIS_MODULE);
2189+ ret = -ENODEV;
2190+ goto out;
2191+ }
2192+
2193+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
2194+
2195+ dev->hw = hw;
2196+ dev->dev = hw->dev;
2197+ dev->irq = hw->irq;
2198+ dev->wdma_idx = hw->index;
2199+
2200+ if (hw->eth->dma_dev == hw->eth->dev &&
2201+ of_dma_is_coherent(hw->eth->dev->of_node))
2202+ mtk_eth_set_dma_device(hw->eth, hw->dev);
2203+
2204+ ret = mtk_wed_buffer_alloc(dev);
2205+ if (ret) {
2206+ mtk_wed_detach(dev);
2207+ goto out;
2208+ }
2209+
2210+ mtk_wed_hw_init_early(dev);
2211+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
2212+
2213+out:
2214+ mutex_unlock(&hw_lock);
2215+
2216+ return ret;
2217+}
2218+
2219+static int
2220+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2221+{
2222+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
2223+
2224+ /*
2225+ * Tx ring redirection:
2226+ * Instead of configuring the WLAN PDMA TX ring directly, the DMA
2227+ * ring allocated by the WLAN driver is configured into the WED
2228+ * MTK_WED_RING_TX(n) registers.
2229+ *
2230+ * The WED driver posts its own DMA ring as the WLAN PDMA TX ring
2231+ * and configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
2232+ * It is filled with packets picked up from the WED TX ring and
2233+ * from WDMA RX.
2234+ */
2235+
2236+ BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
2237+
2238+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
2239+ return -ENOMEM;
2240+
2241+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
2242+ return -ENOMEM;
2243+
2244+ ring->reg_base = MTK_WED_RING_TX(idx);
2245+ ring->wpdma = regs;
2246+
2247+ /* WED -> WPDMA */
2248+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
2249+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
2250+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
2251+
2252+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
2253+ ring->desc_phys);
2254+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
2255+ MTK_WED_TX_RING_SIZE);
2256+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2257+
2258+ return 0;
2259+}
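
In other words, one WED-owned descriptor ring is programmed twice: once into the registers the WLAN side polls (the wpdma_tx_w32() writes) and once into WED's own MTK_WED_WPDMA_RING_TX(n) shadow. The toy model below shows just that double bookkeeping; the register files and names are invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Toy register files; indices 0/1/2 = BASE/COUNT/CPU_IDX. */
static uint32_t wlan_visible[3]; /* what the WLAN side polls */
static uint32_t wed_shadow[3];   /* WED's own copy of the config */

static void tx_ring_setup(uint32_t desc_phys, uint32_t count)
{
	/* WED -> WPDMA: publish the WED-owned ring to the WLAN side */
	wlan_visible[0] = desc_phys;
	wlan_visible[1] = count;
	wlan_visible[2] = 0;

	/* ...and keep the same view in WED's own registers */
	wed_shadow[0] = desc_phys;
	wed_shadow[1] = count;
	wed_shadow[2] = 0;
}

int main(void)
{
	tx_ring_setup(0x40001000, 2048); /* made-up ring address */
	printf("wlan base=%08x wed base=%08x\n",
	       wlan_visible[0], wed_shadow[0]);
	return 0;
}
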
2260+
2261+static int
2262+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2263+{
2264+ struct mtk_wed_ring *ring = &dev->txfree_ring;
2265+ int i;
2266+
2267+ /*
2268+ * For txfree event handling, the same DMA ring is shared between WED
2269+ * and WLAN. The WLAN driver accesses the ring index registers through
2270+ * WED.
2271+ */
2272+ ring->reg_base = MTK_WED_RING_RX(1);
2273+ ring->wpdma = regs;
2274+
2275+ for (i = 0; i < 12; i += 4) {
2276+ u32 val = readl(regs + i);
2277+
2278+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
2279+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
2280+ }
2281+
2282+ return 0;
2283+}
2284+
2285+static u32
2286+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2287+{
2288+ u32 val;
2289+
2290+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2291+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2292+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2293+ if (!dev->hw->num_flows)
2294+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2295+ if (val && net_ratelimit())
2296+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
2297+
2298+ val = wed_r32(dev, MTK_WED_INT_STATUS);
2299+ val &= mask;
2300+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
2301+
2302+ return val;
2303+}
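
mtk_wed_irq_get() follows the write-1-to-clear convention: read the status register, write the masked value straight back to acknowledge those bits, and return only what the caller asked about. A standalone sketch of the idiom:

#include <stdio.h>
#include <stdint.h>

static uint32_t int_status = 0x13; /* pretend three events fired */

static uint32_t irq_get(uint32_t mask)
{
	uint32_t val = int_status & mask;

	int_status &= ~val; /* write-1-to-clear, modelled directly */
	return val;
}

int main(void)
{
	printf("first  read: %02x\n", irq_get(0x03)); /* 03 */
	printf("second read: %02x\n", irq_get(0x03)); /* 00: acked */
	printf("unmasked bit still pending: %02x\n", int_status); /* 10 */
	return 0;
}
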
2304+
2305+static void
2306+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
2307+{
2308+ if (!dev->running)
2309+ return;
2310+
2311+ mtk_wed_set_ext_int(dev, !!mask);
2312+ wed_w32(dev, MTK_WED_INT_MASK, mask);
2313+}
2314+
2315+int mtk_wed_flow_add(int index)
2316+{
2317+ struct mtk_wed_hw *hw = hw_list[index];
2318+ int ret;
2319+
2320+ if (!hw || !hw->wed_dev)
2321+ return -ENODEV;
2322+
2323+ if (hw->num_flows) {
2324+ hw->num_flows++;
2325+ return 0;
2326+ }
2327+
2328+ mutex_lock(&hw_lock);
2329+ if (!hw->wed_dev) {
2330+ ret = -ENODEV;
2331+ goto out;
2332+ }
2333+
2334+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
2335+ if (!ret)
2336+ hw->num_flows++;
2337+ mtk_wed_set_ext_int(hw->wed_dev, true);
2338+
2339+out:
2340+ mutex_unlock(&hw_lock);
2341+
2342+ return ret;
2343+}
2344+
2345+void mtk_wed_flow_remove(int index)
2346+{
2347+ struct mtk_wed_hw *hw = hw_list[index];
2348+
2349+ if (!hw)
2350+ return;
2351+
2352+ if (--hw->num_flows)
2353+ return;
2354+
2355+ mutex_lock(&hw_lock);
2356+ if (!hw->wed_dev)
2357+ goto out;
2358+
2359+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
2360+ mtk_wed_set_ext_int(hw->wed_dev, true);
2361+
2362+out:
2363+ mutex_unlock(&hw_lock);
2364+}
2365+
2366+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2367+ void __iomem *wdma, int index)
2368+{
2369+ static const struct mtk_wed_ops wed_ops = {
2370+ .attach = mtk_wed_attach,
2371+ .tx_ring_setup = mtk_wed_tx_ring_setup,
2372+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2373+ .start = mtk_wed_start,
2374+ .stop = mtk_wed_stop,
2375+ .reset_dma = mtk_wed_reset_dma,
2376+ .reg_read = wed_r32,
2377+ .reg_write = wed_w32,
2378+ .irq_get = mtk_wed_irq_get,
2379+ .irq_set_mask = mtk_wed_irq_set_mask,
2380+ .detach = mtk_wed_detach,
2381+ };
2382+ struct device_node *eth_np = eth->dev->of_node;
2383+ struct platform_device *pdev;
2384+ struct mtk_wed_hw *hw;
2385+ struct regmap *regs;
2386+ int irq;
2387+
2388+ if (!np)
2389+ return;
2390+
2391+ pdev = of_find_device_by_node(np);
2392+ if (!pdev)
2393+ return;
2394+
2395+ get_device(&pdev->dev);
2396+ irq = platform_get_irq(pdev, 0);
2397+ if (irq < 0)
2398+ return;
2399+
2400+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
2401+ if (IS_ERR(regs))
2402+ return;
2403+
2404+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
2405+
2406+ mutex_lock(&hw_lock);
2407+
2408+ if (WARN_ON(hw_list[index]))
2409+ goto unlock;
2410+
2411+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
2412+ hw->node = np;
2413+ hw->regs = regs;
2414+ hw->eth = eth;
2415+ hw->dev = &pdev->dev;
2416+ hw->wdma = wdma;
2417+ hw->index = index;
2418+ hw->irq = irq;
2419+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2420+ "mediatek,pcie-mirror");
2421+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2422+ "mediatek,hifsys");
2423+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
2424+ kfree(hw);
2425+ goto unlock;
2426+ }
2427+
2428+ if (!index) {
2429+ regmap_write(hw->mirror, 0, 0);
2430+ regmap_write(hw->mirror, 4, 0);
2431+ }
2432+ mtk_wed_hw_add_debugfs(hw);
2433+
2434+ hw_list[index] = hw;
2435+
2436+unlock:
2437+ mutex_unlock(&hw_lock);
2438+}
2439+
2440+void mtk_wed_exit(void)
2441+{
2442+ int i;
2443+
2444+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
2445+
2446+ synchronize_rcu();
2447+
2448+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
2449+ struct mtk_wed_hw *hw;
2450+
2451+ hw = hw_list[i];
2452+ if (!hw)
2453+ continue;
2454+
2455+ hw_list[i] = NULL;
2456+ debugfs_remove(hw->debugfs_dir);
2457+ put_device(hw->dev);
2458+ kfree(hw);
2459+ }
2460+}
2461diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2462new file mode 100644
2463index 000000000..981ec613f
2464--- /dev/null
2465+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2466@@ -0,0 +1,135 @@
2467+/* SPDX-License-Identifier: GPL-2.0-only */
2468+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2469+
2470+#ifndef __MTK_WED_PRIV_H
2471+#define __MTK_WED_PRIV_H
2472+
2473+#include <linux/soc/mediatek/mtk_wed.h>
2474+#include <linux/debugfs.h>
2475+#include <linux/regmap.h>
2476+#include <linux/netdevice.h>
2477+
2478+struct mtk_eth;
2479+
2480+struct mtk_wed_hw {
2481+ struct device_node *node;
2482+ struct mtk_eth *eth;
2483+ struct regmap *regs;
2484+ struct regmap *hifsys;
2485+ struct device *dev;
2486+ void __iomem *wdma;
2487+ struct regmap *mirror;
2488+ struct dentry *debugfs_dir;
2489+ struct mtk_wed_device *wed_dev;
2490+ u32 debugfs_reg;
2491+ u32 num_flows;
2492+ char dirname[5];
2493+ int irq;
2494+ int index;
2495+};
2496+
2497+struct mtk_wdma_info {
2498+ u8 wdma_idx;
2499+ u8 queue;
2500+ u16 wcid;
2501+ u8 bss;
2502+};
2503+
2504+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2505+static inline void
2506+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2507+{
2508+ regmap_write(dev->hw->regs, reg, val);
2509+}
2510+
2511+static inline u32
2512+wed_r32(struct mtk_wed_device *dev, u32 reg)
2513+{
2514+ unsigned int val;
2515+
2516+ regmap_read(dev->hw->regs, reg, &val);
2517+
2518+ return val;
2519+}
2520+
2521+static inline void
2522+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2523+{
2524+ writel(val, dev->hw->wdma + reg);
2525+}
2526+
2527+static inline u32
2528+wdma_r32(struct mtk_wed_device *dev, u32 reg)
2529+{
2530+ return readl(dev->hw->wdma + reg);
2531+}
2532+
2533+static inline u32
2534+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
2535+{
2536+ if (!dev->tx_ring[ring].wpdma)
2537+ return 0;
2538+
2539+ return readl(dev->tx_ring[ring].wpdma + reg);
2540+}
2541+
2542+static inline void
2543+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
2544+{
2545+ if (!dev->tx_ring[ring].wpdma)
2546+ return;
2547+
2548+ writel(val, dev->tx_ring[ring].wpdma + reg);
2549+}
2550+
2551+static inline u32
2552+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
2553+{
2554+ if (!dev->txfree_ring.wpdma)
2555+ return 0;
2556+
2557+ return readl(dev->txfree_ring.wpdma + reg);
2558+}
2559+
2560+static inline void
2561+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2562+{
2563+ if (!dev->txfree_ring.wpdma)
2564+ return;
2565+
2566+ writel(val, dev->txfree_ring.wpdma + reg);
2567+}
2568+
2569+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2570+ void __iomem *wdma, int index);
2571+void mtk_wed_exit(void);
2572+int mtk_wed_flow_add(int index);
2573+void mtk_wed_flow_remove(int index);
2574+#else
2575+static inline void
2576+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2577+ void __iomem *wdma, int index)
2578+{
2579+}
2580+static inline void
2581+mtk_wed_exit(void)
2582+{
2583+}
2584+static inline int mtk_wed_flow_add(int index)
2585+{
2586+ return -EINVAL;
2587+}
2588+static inline void mtk_wed_flow_remove(int index)
2589+{
2590+}
2591+#endif
2592+
2593+#ifdef CONFIG_DEBUG_FS
2594+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
2595+#else
2596+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2597+{
2598+}
2599+#endif
2600+
2601+#endif
2602diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2603new file mode 100644
2604index 000000000..a81d3fd1a
2605--- /dev/null
2606+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2607@@ -0,0 +1,175 @@
2608+// SPDX-License-Identifier: GPL-2.0-only
2609+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2610+
2611+#include <linux/seq_file.h>
2612+#include "mtk_wed.h"
2613+#include "mtk_wed_regs.h"
2614+
2615+struct reg_dump {
2616+ const char *name;
2617+ u16 offset;
2618+ u8 type;
2619+ u8 base;
2620+};
2621+
2622+enum {
2623+ DUMP_TYPE_STRING,
2624+ DUMP_TYPE_WED,
2625+ DUMP_TYPE_WDMA,
2626+ DUMP_TYPE_WPDMA_TX,
2627+ DUMP_TYPE_WPDMA_TXFREE,
2628+};
2629+
2630+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2631+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2632+#define DUMP_RING(_prefix, _base, ...) \
2633+ { _prefix " BASE", _base, __VA_ARGS__ }, \
2634+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2635+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
2636+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2637+
2638+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2639+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2640+
2641+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2642+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
2643+
2644+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
2645+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
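
DUMP_REG() leans on # stringification so the table names itself, and DUMP_RING() expands to four entries at the fixed BASE/CNT/CIDX/DIDX offsets (0x0/0x4/0x8/0xc). A compilable miniature of the expansion, with a hypothetical ring base:

#include <stdio.h>

struct reg_dump { const char *name; unsigned int offset; };

#define MTK_DEMO_RING 0x300 /* hypothetical base, for illustration */

#define DUMP_RING(_prefix, _base)		\
	{ _prefix " BASE", _base },		\
	{ _prefix " CNT",  (_base) + 0x4 },	\
	{ _prefix " CIDX", (_base) + 0x8 },	\
	{ _prefix " DIDX", (_base) + 0xc }

int main(void)
{
	static const struct reg_dump regs[] = {
		DUMP_RING("DEMO_RING", MTK_DEMO_RING),
	};

	for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("%-16s 0x%03x\n", regs[i].name, regs[i].offset);
	return 0;
}
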
2646+
2647+static void
2648+print_reg_val(struct seq_file *s, const char *name, u32 val)
2649+{
2650+ seq_printf(s, "%-32s %08x\n", name, val);
2651+}
2652+
2653+static void
2654+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2655+ const struct reg_dump *regs, int n_regs)
2656+{
2657+ const struct reg_dump *cur;
2658+ u32 val;
2659+
2660+ for (cur = regs; cur < &regs[n_regs]; cur++) {
2661+ switch (cur->type) {
2662+ case DUMP_TYPE_STRING:
2663+ seq_printf(s, "%s======== %s:\n",
2664+ cur > regs ? "\n" : "",
2665+ cur->name);
2666+ continue;
2667+ case DUMP_TYPE_WED:
2668+ val = wed_r32(dev, cur->offset);
2669+ break;
2670+ case DUMP_TYPE_WDMA:
2671+ val = wdma_r32(dev, cur->offset);
2672+ break;
2673+ case DUMP_TYPE_WPDMA_TX:
2674+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2675+ break;
2676+ case DUMP_TYPE_WPDMA_TXFREE:
2677+ val = wpdma_txfree_r32(dev, cur->offset);
2678+ break;
2679+ }
2680+ print_reg_val(s, cur->name, val);
2681+ }
2682+}
2683+
2685+static int
2686+wed_txinfo_show(struct seq_file *s, void *data)
2687+{
2688+ static const struct reg_dump regs[] = {
2689+ DUMP_STR("WED TX"),
2690+ DUMP_WED(WED_TX_MIB(0)),
2691+ DUMP_WED_RING(WED_RING_TX(0)),
2692+
2693+ DUMP_WED(WED_TX_MIB(1)),
2694+ DUMP_WED_RING(WED_RING_TX(1)),
2695+
2696+ DUMP_STR("WPDMA TX"),
2697+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
2698+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
2699+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
2700+
2701+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
2702+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
2703+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
2704+
2705+ DUMP_STR("WPDMA TX"),
2706+ DUMP_WPDMA_TX_RING(0),
2707+ DUMP_WPDMA_TX_RING(1),
2708+
2709+ DUMP_STR("WED WDMA RX"),
2710+ DUMP_WED(WED_WDMA_RX_MIB(0)),
2711+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
2712+ DUMP_WED(WED_WDMA_RX_THRES(0)),
2713+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
2714+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
2715+
2716+ DUMP_WED(WED_WDMA_RX_MIB(1)),
2717+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
2718+ DUMP_WED(WED_WDMA_RX_THRES(1)),
2719+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
2720+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
2721+
2722+ DUMP_STR("WDMA RX"),
2723+ DUMP_WDMA(WDMA_GLO_CFG),
2724+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
2725+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
2726+ };
2727+ struct mtk_wed_hw *hw = s->private;
2728+ struct mtk_wed_device *dev = hw->wed_dev;
2729+
2730+ if (!dev)
2731+ return 0;
2732+
2733+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2734+
2735+ return 0;
2736+}
2737+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2738+
2739+
2740+static int
2741+mtk_wed_reg_set(void *data, u64 val)
2742+{
2743+ struct mtk_wed_hw *hw = data;
2744+
2745+ regmap_write(hw->regs, hw->debugfs_reg, val);
2746+
2747+ return 0;
2748+}
2749+
2750+static int
2751+mtk_wed_reg_get(void *data, u64 *val)
2752+{
2753+ struct mtk_wed_hw *hw = data;
2754+ unsigned int regval;
2755+ int ret;
2756+
2757+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
2758+ if (ret)
2759+ return ret;
2760+
2761+ *val = regval;
2762+
2763+ return 0;
2764+}
2765+
2766+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2767+ "0x%08llx\n");
2768+
2769+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2770+{
2771+ struct dentry *dir;
2772+
2773+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
2774+ dir = debugfs_create_dir(hw->dirname, NULL);
2775+ if (!dir)
2776+ return;
2777+
2778+ hw->debugfs_dir = dir;
2779+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2780+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2781+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2782+}
2783diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2784new file mode 100644
2785index 000000000..a5d9d8a5b
2786--- /dev/null
2787+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2788@@ -0,0 +1,8 @@
2789+// SPDX-License-Identifier: GPL-2.0-only
2790+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2791+
2792+#include <linux/kernel.h>
2793+#include <linux/soc/mediatek/mtk_wed.h>
2794+
2795+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2796+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
2797diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2798new file mode 100644
2799index 000000000..0a0465ea5
2800--- /dev/null
2801+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2802@@ -0,0 +1,251 @@
2803+/* SPDX-License-Identifier: GPL-2.0-only */
2804+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2805+
2806+#ifndef __MTK_WED_REGS_H
2807+#define __MTK_WED_REGS_H
2808+
2809+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2810+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2811+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
2812+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2813+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2814+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2815+
2816+struct mtk_wdma_desc {
2817+ __le32 buf0;
2818+ __le32 ctrl;
2819+ __le32 buf1;
2820+ __le32 info;
2821+} __packed __aligned(4);
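
The ctrl word of a descriptor packs both segment lengths plus the LAST_SEG flags; mtk_wed_buffer_alloc() fills it with FIELD_PREP() against these GENMASK() definitions. A userspace re-implementation of that packing (the GENMASK/FIELD_PREP stand-ins assume a GCC/Clang builtin and 32-bit masks):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-ins for the kernel's GENMASK()/FIELD_PREP(). */
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define MTK_WDMA_DESC_CTRL_LEN1		GENMASK(14, 0)
#define MTK_WDMA_DESC_CTRL_LAST_SEG1	(1u << 15)
#define MTK_WDMA_DESC_CTRL_LEN0		GENMASK(29, 16)

int main(void)
{
	uint32_t txd_size = 128;	    /* hypothetical TXD length */
	uint32_t buf_len = 2048 - txd_size; /* MTK_WED_BUF_SIZE remainder */
	uint32_t ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
			FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, buf_len) |
			MTK_WDMA_DESC_CTRL_LAST_SEG1;

	printf("ctrl = %08x\n", ctrl); /* 00808780 for these inputs */
	return 0;
}
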
2822+
2823+#define MTK_WED_RESET 0x008
2824+#define MTK_WED_RESET_TX_BM BIT(0)
2825+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
2826+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
2827+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
2828+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
2829+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
2830+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2831+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2832+#define MTK_WED_RESET_WED BIT(31)
2833+
2834+#define MTK_WED_CTRL 0x00c
2835+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
2836+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
2837+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
2838+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
2839+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
2840+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2841+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2842+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2843+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2844+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2845+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2846+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2847+
2848+#define MTK_WED_EXT_INT_STATUS 0x020
2849+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
2850+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
2851+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
2852+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
2853+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
2854+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2855+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2856+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2857+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2858+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2859+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
2860+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
2861+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
2862+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
2863+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
2864+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2865+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2866+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2867+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2868+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2869+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2870+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
2871+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
2872+
2873+#define MTK_WED_EXT_INT_MASK 0x028
2874+
2875+#define MTK_WED_STATUS 0x060
2876+#define MTK_WED_STATUS_TX GENMASK(15, 8)
2877+
2878+#define MTK_WED_TX_BM_CTRL 0x080
2879+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
2880+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
2881+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
2882+
2883+#define MTK_WED_TX_BM_BASE 0x084
2884+
2885+#define MTK_WED_TX_BM_TKID 0x088
2886+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
2887+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
2888+
2889+#define MTK_WED_TX_BM_BUF_LEN 0x08c
2890+
2891+#define MTK_WED_TX_BM_INTF 0x09c
2892+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
2893+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
2894+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
2895+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
2896+
2897+#define MTK_WED_TX_BM_DYN_THR 0x0a0
2898+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
2899+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
2900+
2901+#define MTK_WED_INT_STATUS 0x200
2902+#define MTK_WED_INT_MASK 0x204
2903+
2904+#define MTK_WED_GLO_CFG 0x208
2905+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
2906+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
2907+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
2908+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
2909+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2910+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
2911+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
2912+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2913+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2914+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2915+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2916+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2917+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2918+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
2919+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2920+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
2921+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
2922+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
2923+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
2924+
2925+#define MTK_WED_RESET_IDX 0x20c
2926+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
2927+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
2928+
2929+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
2930+
2931+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
2932+
2933+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2934+
2935+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
2936+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2937+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2938+
2939+#define MTK_WED_WPDMA_GLO_CFG 0x508
2940+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
2941+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
2942+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
2943+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2944+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2945+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2946+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
2947+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2948+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2949+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2950+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2951+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2952+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2953+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
2954+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2955+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
2956+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
2957+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
2958+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
2959+
2960+#define MTK_WED_WPDMA_RESET_IDX 0x50c
2961+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
2962+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
2963+
2964+#define MTK_WED_WPDMA_INT_CTRL 0x520
2965+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
2966+
2967+#define MTK_WED_WPDMA_INT_MASK 0x524
2968+
2969+#define MTK_WED_PCIE_CFG_BASE 0x560
2970+
2971+#define MTK_WED_PCIE_INT_TRIGGER 0x570
2972+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
2973+
2974+#define MTK_WED_WPDMA_CFG_BASE 0x580
2975+
2976+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2977+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2978+
2979+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2980+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2981+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2982+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2983+
2984+#define MTK_WED_WDMA_GLO_CFG 0xa04
2985+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2986+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2987+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2988+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2989+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2990+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
2991+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
2992+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
2993+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
2994+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
2995+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
2996+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
2997+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
2998+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
2999+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
3000+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
3001+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
3002+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
3003+
3004+#define MTK_WED_WDMA_RESET_IDX 0xa08
3005+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
3006+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
3007+
3008+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
3009+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3010+
3011+#define MTK_WED_WDMA_INT_CTRL 0xa2c
3012+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
3013+
3014+#define MTK_WED_WDMA_OFFSET0 0xaa4
3015+#define MTK_WED_WDMA_OFFSET1 0xaa8
3016+
3017+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
3018+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
3019+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
3020+
3021+#define MTK_WED_RING_OFS_BASE 0x00
3022+#define MTK_WED_RING_OFS_COUNT 0x04
3023+#define MTK_WED_RING_OFS_CPU_IDX 0x08
3024+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
3025+
3026+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
3027+
3028+#define MTK_WDMA_GLO_CFG 0x204
3029+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
3030+
3031+#define MTK_WDMA_RESET_IDX 0x208
3032+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
3033+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
3034+
3035+#define MTK_WDMA_INT_MASK 0x228
3036+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
3037+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
3038+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
3039+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
3040+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
3041+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
3042+
3043+#define MTK_WDMA_INT_GRP1 0x250
3044+#define MTK_WDMA_INT_GRP2 0x254
3045+
3046+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3047+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3048+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3049+
3050+/* DMA channel mapping */
3051+#define HIFSYS_DMA_AG_MAP 0x008
3052+
3053+#endif
3054diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3055index 9f64504ac..35998b1a7 100644
3056--- a/include/linux/netdevice.h
3057+++ b/include/linux/netdevice.h
3058@@ -835,6 +835,7 @@ enum net_device_path_type {
3059 DEV_PATH_BRIDGE,
3060 DEV_PATH_PPPOE,
3061 DEV_PATH_DSA,
3062+ DEV_PATH_MTK_WDMA,
3063 };
3064
3065 struct net_device_path {
3066@@ -860,6 +861,12 @@ struct net_device_path {
3067 int port;
3068 u16 proto;
3069 } dsa;
3070+ struct {
3071+ u8 wdma_idx;
3072+ u8 queue;
3073+ u16 wcid;
3074+ u8 bss;
3075+ } mtk_wdma;
3076 };
3077 };
3078
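
DEV_PATH_MTK_WDMA lets a MediaTek WLAN driver terminate a dev_fill_forward_path() walk at its WDMA endpoint by filling the new union member from its .ndo_fill_forward_path callback. Below is a trimmed-down standalone sketch of what such a fill looks like; the struct is reduced to the relevant member and the station numbers are invented:

#include <stdio.h>

/* Trimmed-down model of the new path type and union member. */
enum path_type { DEV_PATH_ETHERNET, DEV_PATH_MTK_WDMA };

struct net_device_path {
	enum path_type type;
	struct {
		unsigned char wdma_idx;
		unsigned char queue;
		unsigned short wcid;
		unsigned char bss;
	} mtk_wdma;
};

/* Shape of what a WLAN driver's ndo_fill_forward_path would do;
 * the station numbers are invented for the example.
 */
static void fill_wdma_path(struct net_device_path *path)
{
	path->type = DEV_PATH_MTK_WDMA;
	path->mtk_wdma.wdma_idx = 0; /* which WED/WDMA instance */
	path->mtk_wdma.queue = 0;
	path->mtk_wdma.wcid = 7;     /* station table index */
	path->mtk_wdma.bss = 1;      /* BSS index */
}

int main(void)
{
	struct net_device_path path;

	fill_wdma_path(&path);
	printf("wdma%u q%u wcid=%u bss=%u\n", path.mtk_wdma.wdma_idx,
	       path.mtk_wdma.queue, path.mtk_wdma.wcid, path.mtk_wdma.bss);
	return 0;
}
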
3079diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3080new file mode 100644
3081index 000000000..7e00cca06
3082--- /dev/null
3083+++ b/include/linux/soc/mediatek/mtk_wed.h
3084@@ -0,0 +1,131 @@
3085+#ifndef __MTK_WED_H
3086+#define __MTK_WED_H
3087+
3088+#include <linux/kernel.h>
3089+#include <linux/rcupdate.h>
3090+#include <linux/regmap.h>
3091+#include <linux/pci.h>
3092+
3093+#define MTK_WED_TX_QUEUES 2
3094+
3095+struct mtk_wed_hw;
3096+struct mtk_wdma_desc;
3097+
3098+struct mtk_wed_ring {
3099+ struct mtk_wdma_desc *desc;
3100+ dma_addr_t desc_phys;
3101+ int size;
3102+
3103+ u32 reg_base;
3104+ void __iomem *wpdma;
3105+};
3106+
3107+struct mtk_wed_device {
3108+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3109+ const struct mtk_wed_ops *ops;
3110+ struct device *dev;
3111+ struct mtk_wed_hw *hw;
3112+ bool init_done, running;
3113+ int wdma_idx;
3114+ int irq;
3115+
3116+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3117+ struct mtk_wed_ring txfree_ring;
3118+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3119+
3120+ struct {
3121+ int size;
3122+ void **pages;
3123+ struct mtk_wdma_desc *desc;
3124+ dma_addr_t desc_phys;
3125+ } buf_ring;
3126+
3127+ /* filled by driver: */
3128+ struct {
3129+ struct pci_dev *pci_dev;
3130+
3131+ u32 wpdma_phys;
3132+
3133+ u16 token_start;
3134+ unsigned int nbuf;
3135+
3136+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3137+ int (*offload_enable)(struct mtk_wed_device *wed);
3138+ void (*offload_disable)(struct mtk_wed_device *wed);
3139+ } wlan;
3140+#endif
3141+};
3142+
3143+struct mtk_wed_ops {
3144+ int (*attach)(struct mtk_wed_device *dev);
3145+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
3146+ void __iomem *regs);
3147+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3148+ void __iomem *regs);
3149+ void (*detach)(struct mtk_wed_device *dev);
3150+
3151+ void (*stop)(struct mtk_wed_device *dev);
3152+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
3153+ void (*reset_dma)(struct mtk_wed_device *dev);
3154+
3155+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
3156+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
3157+
3158+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3159+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3160+};
3161+
3162+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3163+
3164+static inline int
3165+mtk_wed_device_attach(struct mtk_wed_device *dev)
3166+{
3167+ int ret = -ENODEV;
3168+
3169+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3170+ rcu_read_lock();
3171+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
3172+ if (dev->ops)
3173+ ret = dev->ops->attach(dev);
3174+ else
3175+ rcu_read_unlock();
3176+
3177+ if (ret)
3178+ dev->ops = NULL;
3179+#endif
3180+
3181+ return ret;
3182+}
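
Note the contract here: the caller-side helper takes the RCU read lock and dereferences mtk_soc_wed_ops, and it is ops->attach() (mtk_wed_attach() above) that drops the lock once the ops pointer no longer needs to stay live. The standalone model below captures only the publish/lookup half with C11 atomics, eliding the RCU grace-period and module-refcount details:

#include <stdio.h>
#include <stdatomic.h>

struct wed_ops { int (*attach)(void); };

static int demo_attach(void) { return 0; }
static const struct wed_ops demo_ops = { .attach = demo_attach };

/* Stands in for the RCU-protected mtk_soc_wed_ops pointer. */
static _Atomic(const struct wed_ops *) soc_wed_ops;

static int device_attach(void)
{
	const struct wed_ops *ops = atomic_load(&soc_wed_ops);

	if (!ops)
		return -1; /* WED core not loaded: fall back, no offload */
	return ops->attach();
}

int main(void)
{
	printf("before publish: %d\n", device_attach()); /* -1 */
	atomic_store(&soc_wed_ops, &demo_ops); /* as mtk_wed_add_hw() does */
	printf("after publish:  %d\n", device_attach()); /* 0 */
	return 0;
}
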
3183+
3184+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3185+#define mtk_wed_device_active(_dev) !!(_dev)->ops
3186+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3187+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
3188+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
3189+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3190+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3191+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
3192+#define mtk_wed_device_reg_read(_dev, _reg) \
3193+ (_dev)->ops->reg_read(_dev, _reg)
3194+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
3195+ (_dev)->ops->reg_write(_dev, _reg, _val)
3196+#define mtk_wed_device_irq_get(_dev, _mask) \
3197+ (_dev)->ops->irq_get(_dev, _mask)
3198+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
3199+ (_dev)->ops->irq_set_mask(_dev, _mask)
3200+#else
3201+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3202+{
3203+ return false;
3204+}
3205+#define mtk_wed_device_detach(_dev) do {} while (0)
3206+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
3207+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3208+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
3209+#define mtk_wed_device_reg_read(_dev, _reg) 0
3210+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3211+#define mtk_wed_device_irq_get(_dev, _mask) 0
3212+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3213+#endif
3214+
3215+#endif
3216diff --git a/net/core/dev.c b/net/core/dev.c
3217index 4f0edb218..031ac7c6f 100644
3218--- a/net/core/dev.c
3219+++ b/net/core/dev.c
3220@@ -675,6 +675,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3221 if (WARN_ON_ONCE(last_dev == ctx.dev))
3222 return -1;
3223 }
3224+
3225+ if (!ctx.dev)
3226+ return ret;
3227+
3228 path = dev_fwd_path(stack);
3229 if (!path)
3230 return -1;
3231--
32322.18.0
3233