1From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 2 Jun 2022 15:32:07 +0800
4Subject: [PATCH 4/8] 9993-add-wed
5
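Add Wireless Ethernet Dispatch (WED) support for MT7622: hook up the
wed0/wed1, hifsys and pcie_mirror syscon nodes in the device tree and
mark the ethernet node dma-coherent; give mtk_eth_soc a separate dma_dev
pointer that can be switched at runtime via mtk_eth_set_dma_device();
rework the PPE code around software flow entries, including L2 bridge
flows and WDMA destinations; and add the mtk_wed driver together with
its debugfs interface, ops module and register definitions.
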
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7622.dtsi | 32 +-
9 drivers/net/ethernet/mediatek/Kconfig | 4 +
10 drivers/net/ethernet/mediatek/Makefile | 5 +
11 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 136 ++-
12 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14 +-
13 drivers/net/ethernet/mediatek/mtk_ppe.c | 373 +++++++-
14 drivers/net/ethernet/mediatek/mtk_ppe.h | 89 +-
15 .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 4 +-
16 .../net/ethernet/mediatek/mtk_ppe_offload.c | 167 +++-
17 drivers/net/ethernet/mediatek/mtk_wed.c | 876 ++++++++++++++++++
18 drivers/net/ethernet/mediatek/mtk_wed.h | 135 +++
19 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 175 ++++
20 drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8 +
21 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251 +++++
22 include/linux/netdevice.h | 7 +
23 include/linux/soc/mediatek/mtk_wed.h | 131 +++
24 net/core/dev.c | 4 +
25 17 files changed, 2283 insertions(+), 128 deletions(-)
26 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig
27 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile
28 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
29 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
30 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c
31 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
32 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
33 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
34 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
35 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
36 create mode 100644 include/linux/soc/mediatek/mtk_wed.h
37
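Note (below the cut, so not part of the commit message): the offload path
in mtk_ppe_offload.c resolves the WED/WDMA target by calling the WLAN
driver's ndo_fill_forward_path() and expecting a DEV_PATH_MTK_WDMA
result; see mtk_flow_get_wdma_info() further down. A minimal sketch of
such a callback follows. Only DEV_PATH_MTK_WDMA and the mtk_wdma fields
come from this patch; the function name and the index values are
illustrative placeholders.

    static int wlan_fill_forward_path(struct net_device_path_ctx *ctx,
                                      struct net_device_path *path)
    {
        /* Hypothetical WLAN driver hook: a real driver would look up
         * the station for ctx->daddr and fill in its own indices.
         */
        path->type = DEV_PATH_MTK_WDMA;
        path->dev = ctx->dev;
        path->mtk_wdma.wdma_idx = 0;  /* WED/WDMA instance (wed0) */
        path->mtk_wdma.queue = 0;     /* target WDMA TX ring */
        path->mtk_wdma.bss = 0;       /* BSS index for the FoE entry */
        path->mtk_wdma.wcid = 1;      /* station (WCID) table index */

        return 0;
    }

mtk_flow_set_output_device() then programs the FoE entry through
mtk_foe_entry_set_wdma() and steers it to PSE port 3, while
mtk_wed_flow_add()/mtk_wed_flow_remove() track the per-WED flow count.
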
38diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
39index 369e01389..d0fbc367e 100644
40--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
41+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
42@@ -338,7 +338,7 @@
43 };
44
45 cci_control2: slave-if@5000 {
46- compatible = "arm,cci-400-ctrl-if";
47+ compatible = "arm,cci-400-ctrl-if", "syscon";
48 interface-type = "ace";
49 reg = <0x5000 0x1000>;
50 };
51@@ -920,6 +920,11 @@
52 };
53 };
54
55+ hifsys: syscon@1af00000 {
56+ compatible = "mediatek,mt7622-hifsys", "syscon";
57+ reg = <0 0x1af00000 0 0x70>;
58+ };
59+
60 ethsys: syscon@1b000000 {
61 compatible = "mediatek,mt7622-ethsys",
62 "syscon";
63@@ -938,6 +943,26 @@
64 #dma-cells = <1>;
65 };
66
67+ pcie_mirror: pcie-mirror@10000400 {
68+ compatible = "mediatek,mt7622-pcie-mirror",
69+ "syscon";
70+ reg = <0 0x10000400 0 0x10>;
71+ };
72+
73+ wed0: wed@1020a000 {
74+ compatible = "mediatek,mt7622-wed",
75+ "syscon";
76+ reg = <0 0x1020a000 0 0x1000>;
77+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
78+ };
79+
80+ wed1: wed@1020b000 {
81+ compatible = "mediatek,mt7622-wed",
82+ "syscon";
83+ reg = <0 0x1020b000 0 0x1000>;
84+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
85+ };
86+
87 eth: ethernet@1b100000 {
88 compatible = "mediatek,mt7622-eth",
89 "mediatek,mt2701-eth",
90@@ -964,6 +989,11 @@
91 power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
92 mediatek,ethsys = <&ethsys>;
93 mediatek,sgmiisys = <&sgmiisys>;
94+ mediatek,cci-control = <&cci_control2>;
95+ mediatek,wed = <&wed0>, <&wed1>;
96+ mediatek,pcie-mirror = <&pcie_mirror>;
97+ mediatek,hifsys = <&hifsys>;
98+ dma-coherent;
99 #address-cells = <1>;
100 #size-cells = <0>;
101 status = "disabled";
102diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
103old mode 100755
104new mode 100644
105index 42e6b38d2..8ab6615a3
106--- a/drivers/net/ethernet/mediatek/Kconfig
107+++ b/drivers/net/ethernet/mediatek/Kconfig
108@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
109
110 if NET_VENDOR_MEDIATEK
111
112+config NET_MEDIATEK_SOC_WED
113+ depends on ARCH_MEDIATEK || COMPILE_TEST
114+ def_bool NET_MEDIATEK_SOC != n
115+
116 config NET_MEDIATEK_SOC
117 tristate "MediaTek SoC Gigabit Ethernet support"
118 select PHYLINK
119diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
120old mode 100755
121new mode 100644
122index 0a6af99f1..3528f1b3c
123--- a/drivers/net/ethernet/mediatek/Makefile
124+++ b/drivers/net/ethernet/mediatek/Makefile
125@@ -6,4 +6,9 @@
126 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
127 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
128 mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
129+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
130+ifdef CONFIG_DEBUG_FS
131+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
132+endif
133+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
134 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
135diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
136old mode 100755
137new mode 100644
138index 819d8a0be..2121335a1
139--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
140+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
141@@ -9,6 +9,7 @@
142 #include <linux/of_device.h>
143 #include <linux/of_mdio.h>
144 #include <linux/of_net.h>
145+#include <linux/of_address.h>
146 #include <linux/mfd/syscon.h>
147 #include <linux/regmap.h>
148 #include <linux/clk.h>
149@@ -19,13 +20,15 @@
150 #include <linux/interrupt.h>
151 #include <linux/pinctrl/devinfo.h>
152 #include <linux/phylink.h>
153 #include <linux/gpio/consumer.h>
154+#include <linux/bitfield.h>
155 #include <net/dsa.h>
156
157 #include "mtk_eth_soc.h"
158 #include "mtk_eth_dbg.h"
159 #include "mtk_eth_reset.h"
160 #include "mtk_hnat/hnat.h"
161+#include "mtk_wed.h"
162
163 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
164 #include "mtk_hnat/nf_hnat_mtk.h"
165@@ -850,7 +853,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
166 int i;
167
168 if (!eth->soc->has_sram) {
169- eth->scratch_ring = dma_alloc_coherent(eth->dev,
170+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
171 cnt * soc->txrx.txd_size,
172 &eth->phy_scratch_ring,
173 GFP_ATOMIC);
174@@ -866,10 +869,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
175 if (unlikely(!eth->scratch_head))
176 return -ENOMEM;
177
178- dma_addr = dma_map_single(eth->dev,
179+ dma_addr = dma_map_single(eth->dma_dev,
180 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
181 DMA_FROM_DEVICE);
182- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
183+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
184 return -ENOMEM;
185
186 phy_ring_tail = eth->phy_scratch_ring +
187@@ -933,26 +936,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
188 {
189 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
190 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
191- dma_unmap_single(eth->dev,
192+ dma_unmap_single(eth->dma_dev,
193 dma_unmap_addr(tx_buf, dma_addr0),
194 dma_unmap_len(tx_buf, dma_len0),
195 DMA_TO_DEVICE);
196 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
197- dma_unmap_page(eth->dev,
198+ dma_unmap_page(eth->dma_dev,
199 dma_unmap_addr(tx_buf, dma_addr0),
200 dma_unmap_len(tx_buf, dma_len0),
201 DMA_TO_DEVICE);
202 }
203 } else {
204 if (dma_unmap_len(tx_buf, dma_len0)) {
205- dma_unmap_page(eth->dev,
206+ dma_unmap_page(eth->dma_dev,
207 dma_unmap_addr(tx_buf, dma_addr0),
208 dma_unmap_len(tx_buf, dma_len0),
209 DMA_TO_DEVICE);
210 }
211
212 if (dma_unmap_len(tx_buf, dma_len1)) {
213- dma_unmap_page(eth->dev,
214+ dma_unmap_page(eth->dma_dev,
215 dma_unmap_addr(tx_buf, dma_addr1),
216 dma_unmap_len(tx_buf, dma_len1),
217 DMA_TO_DEVICE);
218@@ -1017,9 +1020,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
219 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
220 memset(itx_buf, 0, sizeof(*itx_buf));
221
222- txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
223+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
224 DMA_TO_DEVICE);
225- if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
226+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
227 return -ENOMEM;
228
229 WRITE_ONCE(itxd->txd1, mapped_addr);
230@@ -1114,10 +1117,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
231 txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
232 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
233 !(frag_size - txd_info.size);
234- txd_info.addr = skb_frag_dma_map(eth->dev, frag,
235+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
236 offset, txd_info.size,
237 DMA_TO_DEVICE);
238- if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
239+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
240 goto err_dma;
241
242 mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
243@@ -1384,6 +1387,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
244 struct net_device *netdev;
245 unsigned int pktlen;
246 dma_addr_t dma_addr;
247+ u32 hash, reason;
248 int mac = 0;
249
250 if (eth->hwlro)
251@@ -1427,22 +1431,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
252 netdev->stats.rx_dropped++;
253 goto release_desc;
254 }
255- dma_addr = dma_map_single(eth->dev,
256+ dma_addr = dma_map_single(eth->dma_dev,
257 new_data + NET_SKB_PAD +
258 eth->ip_align,
259 ring->buf_size,
260 DMA_FROM_DEVICE);
261- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
262+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
263 skb_free_frag(new_data);
264 netdev->stats.rx_dropped++;
265 goto release_desc;
266 }
267
268 addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
269 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
270
271- dma_unmap_single(eth->dev,
272+ dma_unmap_single(eth->dma_dev,
273 (u64)(trxd.rxd1 | addr64),
274 ring->buf_size, DMA_FROM_DEVICE);
275
276 /* receive data */
277@@ -1463,6 +1467,17 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
278 skb_checksum_none_assert(skb);
279 skb->protocol = eth_type_trans(skb, netdev);
280
281+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
282+ if (hash != MTK_RXD4_FOE_ENTRY) {
283+ hash = jhash_1word(hash, 0);
284+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
285+ }
286+
287+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
288+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
289+ mtk_ppe_check_skb(eth->ppe, skb,
290+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
291+
292 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
293 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
294 if (trxd.rxd3 & RX_DMA_VTAG_V2)
295@@ -1748,7 +1763,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
296 goto no_tx_mem;
297
298 if (!eth->soc->has_sram)
299- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
300+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
301 &ring->phys, GFP_KERNEL);
302 else {
303 ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
304@@ -1780,6 +1795,6 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
305 * descriptors in ring->dma_pdma.
306 */
307 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
308- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
309+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
310 &ring->phys_pdma, GFP_KERNEL);
311 if (!ring->dma_pdma)
312@@ -1839,6 +1854,6 @@ static void mtk_tx_clean(struct mtk_eth *eth)
313 }
314
315 if (!eth->soc->has_sram && ring->dma) {
316- dma_free_coherent(eth->dev,
317+ dma_free_coherent(eth->dma_dev,
318 MTK_DMA_SIZE * soc->txrx.txd_size,
319 ring->dma, ring->phys);
320@@ -1847,6 +1862,6 @@ static void mtk_tx_clean(struct mtk_eth *eth)
321 }
322
323 if (ring->dma_pdma) {
324- dma_free_coherent(eth->dev,
325+ dma_free_coherent(eth->dma_dev,
326 MTK_DMA_SIZE * soc->txrx.txd_size,
327 ring->dma_pdma, ring->phys_pdma);
328@@ -1892,7 +1907,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
329
330 if ((!eth->soc->has_sram) || (eth->soc->has_sram
331 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
332- ring->dma = dma_alloc_coherent(eth->dev,
333+ ring->dma = dma_alloc_coherent(eth->dma_dev,
334 rx_dma_size * eth->soc->txrx.rxd_size,
335 &ring->phys, GFP_KERNEL);
336 else {
337@@ -1907,13 +1922,13 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
developer8cb3ac72022-07-04 10:55:14 +0800338 return -ENOMEM;
339
340 for (i = 0; i < rx_dma_size; i++) {
341 struct mtk_rx_dma_v2 *rxd;
342
343- dma_addr_t dma_addr = dma_map_single(eth->dev,
344+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
345 ring->data[i] + NET_SKB_PAD + eth->ip_align,
346 ring->buf_size,
347 DMA_FROM_DEVICE);
348- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
349+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
350 return -ENOMEM;
351
352 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
353@@ -1968,7 +1983,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
354 MTK_8GB_ADDRESSING)) ?
355 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
356
357- dma_unmap_single(eth->dev,
358+ dma_unmap_single(eth->dma_dev,
359 (u64)(rxd->rxd1 | addr64),
360 ring->buf_size,
361 DMA_FROM_DEVICE);
362@@ -1982,7 +1997,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
363 return;
364
365 if (ring->dma) {
366- dma_free_coherent(eth->dev,
367+ dma_free_coherent(eth->dma_dev,
368 ring->dma_size * eth->soc->txrx.rxd_size,
369 ring->dma,
370 ring->phys);
371@@ -2462,6 +2477,6 @@ static void mtk_dma_free(struct mtk_eth *eth)
372 if (eth->netdev[i])
373 netdev_reset_queue(eth->netdev[i]);
374 if ( !eth->soc->has_sram && eth->scratch_ring) {
375- dma_free_coherent(eth->dev,
376+ dma_free_coherent(eth->dma_dev,
377 MTK_DMA_SIZE * soc->txrx.txd_size,
378 eth->scratch_ring, eth->phy_scratch_ring);
379@@ -2661,7 +2676,7 @@ static int mtk_open(struct net_device *dev)
380 if (err)
381 return err;
382
383- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
384+ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
385 gdm_config = MTK_GDMA_TO_PPE;
386
387 mtk_gdm_config(eth, gdm_config);
388@@ -2778,7 +2793,7 @@ static int mtk_stop(struct net_device *dev)
389 mtk_dma_free(eth);
390
391 if (eth->soc->offload_version)
392- mtk_ppe_stop(&eth->ppe);
393+ mtk_ppe_stop(eth->ppe);
394
395 return 0;
396 }
397@@ -2855,6 +2870,8 @@ static int mtk_napi_init(struct mtk_eth *eth)
398
399 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
400 {
401+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
402+ ETHSYS_DMA_AG_MAP_PPE;
403 int i, ret = 0;
404
405 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
406@@ -2872,6 +2889,10 @@ static int mtk_hw_init(struct mtk_eth *eth, u32 type)
407 goto err_disable_pm;
408 }
409
410+ if (eth->ethsys)
411+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
412+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
413+
414 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
415 ret = device_reset(eth->dev);
416 if (ret) {
417@@ -3501,6 +3522,35 @@ free_netdev:
418 return err;
419 }
420
421+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
422+{
423+ struct net_device *dev, *tmp;
424+ LIST_HEAD(dev_list);
425+ int i;
426+
427+ rtnl_lock();
428+
429+ for (i = 0; i < MTK_MAC_COUNT; i++) {
430+ dev = eth->netdev[i];
431+
432+ if (!dev || !(dev->flags & IFF_UP))
433+ continue;
434+
435+ list_add_tail(&dev->close_list, &dev_list);
436+ }
437+
438+ dev_close_many(&dev_list, false);
439+
440+ eth->dma_dev = dma_dev;
441+
442+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
443+ list_del_init(&dev->close_list);
444+ dev_open(dev, NULL);
445+ }
446+
447+ rtnl_unlock();
448+}
449+
450 static int mtk_probe(struct platform_device *pdev)
451 {
452 struct device_node *mac_np;
453@@ -3514,6 +3564,7 @@ static int mtk_probe(struct platform_device *pdev)
454 eth->soc = of_device_get_match_data(&pdev->dev);
455
456 eth->dev = &pdev->dev;
457+ eth->dma_dev = &pdev->dev;
458 eth->base = devm_platform_ioremap_resource(pdev, 0);
459 if (IS_ERR(eth->base))
460 return PTR_ERR(eth->base);
461@@ -3567,6 +3618,16 @@ static int mtk_probe(struct platform_device *pdev)
462 }
463 }
464
465+ if (of_dma_is_coherent(pdev->dev.of_node)) {
466+ struct regmap *cci;
467+
468+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
469+ "mediatek,cci-control");
470+ /* enable CPU/bus coherency */
471+ if (!IS_ERR(cci))
472+ regmap_write(cci, 0, 3);
473+ }
474+
475 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
476 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
477 GFP_KERNEL);
478@@ -3589,6 +3650,22 @@ static int mtk_probe(struct platform_device *pdev)
479 }
480 }
481
482+ for (i = 0;; i++) {
483+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
484+ "mediatek,wed", i);
485+ static const u32 wdma_regs[] = {
486+ MTK_WDMA0_BASE,
487+ MTK_WDMA1_BASE
488+ };
489+ void __iomem *wdma;
490+
491+ if (!np || i >= ARRAY_SIZE(wdma_regs))
492+ break;
493+
494+ wdma = eth->base + wdma_regs[i];
495+ mtk_wed_add_hw(np, eth, wdma, i);
496+ }
497+
498 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
499 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
500 eth->irq[i] = eth->irq[0];
501@@ -3692,10 +3769,11 @@ static int mtk_probe(struct platform_device *pdev)
502 }
503
504 if (eth->soc->offload_version) {
505- err = mtk_ppe_init(&eth->ppe, eth->dev,
506- eth->base + MTK_ETH_PPE_BASE, 2);
507- if (err)
508+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
509+ if (!eth->ppe) {
510+ err = -ENOMEM;
511 goto err_free_dev;
512+ }
513
514 err = mtk_eth_offload_init(eth);
515 if (err)
516diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
517old mode 100755
518new mode 100644
519index 349f98503..b52378bd6
520--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
521+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
522@@ -517,6 +517,9 @@
523 #define RX_DMA_SPORT_MASK 0x7
524 #endif
525
526+#define MTK_WDMA0_BASE 0x2800
527+#define MTK_WDMA1_BASE 0x2c00
528+
529 /* QDMA descriptor txd4 */
530 #define TX_DMA_CHKSUM (0x7 << 29)
531 #define TX_DMA_TSO BIT(28)
532@@ -704,6 +707,12 @@
533 #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
534
535
536+/* ethernet dma channel agent map */
537+#define ETHSYS_DMA_AG_MAP 0x408
538+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
539+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
540+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
541+
542 /* SGMII subsystem config registers */
543 /* Register to auto-negotiation restart */
544 #define SGMSYS_PCS_CONTROL_1 0x0
545@@ -1209,6 +1218,7 @@ struct mtk_reset_event {
546 /* struct mtk_eth - This is the main datasructure for holding the state
547 * of the driver
548 * @dev: The device pointer
549+ * @dma_dev: The device pointer used for dma mapping/alloc
550 * @base: The mapped register i/o base
551 * @page_lock: Make sure that register operations are atomic
552 * @tx_irq__lock: Make sure that IRQ register operations are atomic
553@@ -1243,6 +1253,7 @@ struct mtk_reset_event {
554
555 struct mtk_eth {
556 struct device *dev;
557+ struct device *dma_dev;
558 void __iomem *base;
559 spinlock_t page_lock;
560 spinlock_t tx_irq_lock;
561@@ -1283,7 +1294,7 @@ struct mtk_eth {
562 spinlock_t syscfg0_lock;
563 struct timer_list mtk_dma_monitor_timer;
564
565- struct mtk_ppe ppe;
566+ struct mtk_ppe *ppe;
567 struct rhashtable flow_table;
568 };
569
570@@ -1336,5 +1347,6 @@ void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
571 int mtk_eth_offload_init(struct mtk_eth *eth);
572 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
573 void *type_data);
574+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
575
576 #endif /* MTK_ETH_H */
577diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
578old mode 100644
579new mode 100755
580index 66298e223..3d75c22be
581--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
582+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
583@@ -6,9 +6,22 @@
584 #include <linux/iopoll.h>
585 #include <linux/etherdevice.h>
586 #include <linux/platform_device.h>
587+#include <linux/if_ether.h>
588+#include <linux/if_vlan.h>
589+#include <net/dsa.h>
590+#include "mtk_eth_soc.h"
591 #include "mtk_ppe.h"
592 #include "mtk_ppe_regs.h"
593
594+static DEFINE_SPINLOCK(ppe_lock);
595+
596+static const struct rhashtable_params mtk_flow_l2_ht_params = {
597+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
598+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
599+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
600+ .automatic_shrinking = true,
601+};
602+
603 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
604 {
605 writel(val, ppe->base + reg);
606@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
607 return ppe_m32(ppe, reg, val, 0);
608 }
609
610+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
611+{
612+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
613+}
614+
615 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
616 {
617 int ret;
618@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
619 u32 hash;
620
621 switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
622- case MTK_PPE_PKT_TYPE_BRIDGE:
623- hv1 = e->bridge.src_mac_lo;
624- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
625- hv2 = e->bridge.src_mac_hi >> 16;
626- hv2 ^= e->bridge.dest_mac_lo;
627- hv3 = e->bridge.dest_mac_hi;
628- break;
629 case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
630 case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
631 hv1 = e->ipv4.orig.ports;
632@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
633 {
634 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
635
636+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
637+ return &entry->bridge.l2;
638+
639 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
640 return &entry->ipv6.l2;
641
642@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
643 {
644 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
645
646+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
647+ return &entry->bridge.ib2;
648+
649 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
650 return &entry->ipv6.ib2;
651
652@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
653 if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
654 entry->ipv6.ports = ports_pad;
655
656- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
657+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
658+ ether_addr_copy(entry->bridge.src_mac, src_mac);
659+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
660+ entry->bridge.ib2 = val;
661+ l2 = &entry->bridge.l2;
662+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
663 entry->ipv6.ib2 = val;
664 l2 = &entry->ipv6.l2;
665 } else {
666@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
667 return 0;
668 }
669
670+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
671+ int bss, int wcid)
672+{
673+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
674+ u32 *ib2 = mtk_foe_entry_ib2(entry);
675+
676+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
677+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
678+ if (wdma_idx)
679+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
680+
681+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
682+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
683+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
684+
685+ return 0;
686+}
687+
688 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
689 {
690 return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
691 FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
692 }
693
694-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
695- u16 timestamp)
696+static bool
697+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
698+{
699+ int type, len;
700+
701+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
702+ return false;
703+
704+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
705+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
706+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
707+ else
708+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
709+
710+ return !memcmp(&entry->data.data, &data->data, len - 4);
711+}
712+
713+static void
714+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
715+{
716+ struct hlist_head *head;
717+ struct hlist_node *tmp;
718+
719+ if (entry->type == MTK_FLOW_TYPE_L2) {
720+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
721+ mtk_flow_l2_ht_params);
722+
723+ head = &entry->l2_flows;
724+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
725+ __mtk_foe_entry_clear(ppe, entry);
726+ return;
727+ }
728+
729+ hlist_del_init(&entry->list);
730+ if (entry->hash != 0xffff) {
731+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
732+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
733+ MTK_FOE_STATE_INVALID);
734+ dma_wmb();
735+ }
736+ entry->hash = 0xffff;
737+
738+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
739+ return;
740+
741+ hlist_del_init(&entry->l2_data.list);
742+ kfree(entry);
743+}
744+
745+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
746+{
747+ u16 timestamp;
748+ u16 now;
749+
750+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
751+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
752+
753+ if (timestamp > now)
754+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
755+ else
756+ return now - timestamp;
757+}
758+
759+static void
760+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
761 {
762+ struct mtk_flow_entry *cur;
763 struct mtk_foe_entry *hwe;
764- u32 hash;
765+ struct hlist_node *tmp;
766+ int idle;
767+
768+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
769+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
770+ int cur_idle;
771+ u32 ib1;
772+
773+ hwe = &ppe->foe_table[cur->hash];
774+ ib1 = READ_ONCE(hwe->ib1);
775+
776+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
777+ cur->hash = 0xffff;
778+ __mtk_foe_entry_clear(ppe, cur);
779+ continue;
780+ }
781+
782+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
783+ if (cur_idle >= idle)
784+ continue;
785+
786+ idle = cur_idle;
787+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
788+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
789+ }
790+}
791+
792+static void
793+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
794+{
795+ struct mtk_foe_entry *hwe;
796+ struct mtk_foe_entry foe;
797+
798+ spin_lock_bh(&ppe_lock);
799+
800+ if (entry->type == MTK_FLOW_TYPE_L2) {
801+ mtk_flow_entry_update_l2(ppe, entry);
802+ goto out;
803+ }
804+
805+ if (entry->hash == 0xffff)
806+ goto out;
807+
808+ hwe = &ppe->foe_table[entry->hash];
809+ memcpy(&foe, hwe, sizeof(foe));
810+ if (!mtk_flow_entry_match(entry, &foe)) {
811+ entry->hash = 0xffff;
812+ goto out;
813+ }
814+
815+ entry->data.ib1 = foe.ib1;
816+
817+out:
818+ spin_unlock_bh(&ppe_lock);
819+}
820+
821+static void
822+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
823+ u16 hash)
824+{
825+ struct mtk_foe_entry *hwe;
826+ u16 timestamp;
827
828+ timestamp = mtk_eth_timestamp(ppe->eth);
829 timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
830 entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
831 entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
832
833- hash = mtk_ppe_hash_entry(entry);
834 hwe = &ppe->foe_table[hash];
835- if (!mtk_foe_entry_usable(hwe)) {
836- hwe++;
837- hash++;
838-
839- if (!mtk_foe_entry_usable(hwe))
840- return -ENOSPC;
841- }
842-
843 memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
844 wmb();
845 hwe->ib1 = entry->ib1;
846@@ -362,32 +519,198 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
847 dma_wmb();
848
849 mtk_ppe_cache_clear(ppe);
850+}
851
852- return hash;
853+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
854+{
855+ spin_lock_bh(&ppe_lock);
856+ __mtk_foe_entry_clear(ppe, entry);
857+ spin_unlock_bh(&ppe_lock);
858+}
859+
860+static int
861+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
862+{
863+ entry->type = MTK_FLOW_TYPE_L2;
864+
865+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
866+ mtk_flow_l2_ht_params);
867+}
868+
869+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
870+{
871+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
872+ u32 hash;
873+
874+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
875+ return mtk_foe_entry_commit_l2(ppe, entry);
876+
877+ hash = mtk_ppe_hash_entry(&entry->data);
878+ entry->hash = 0xffff;
879+ spin_lock_bh(&ppe_lock);
880+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]);
881+ spin_unlock_bh(&ppe_lock);
882+
883+ return 0;
884+}
885+
886+static void
887+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
888+ u16 hash)
889+{
890+ struct mtk_flow_entry *flow_info;
891+ struct mtk_foe_entry foe, *hwe;
892+ struct mtk_foe_mac_info *l2;
893+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
894+ int type;
895+
896+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
897+ GFP_ATOMIC);
898+ if (!flow_info)
899+ return;
900+
901+ flow_info->l2_data.base_flow = entry;
902+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
903+ flow_info->hash = hash;
904+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]);
905+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
906+
907+ hwe = &ppe->foe_table[hash];
908+ memcpy(&foe, hwe, sizeof(foe));
909+ foe.ib1 &= ib1_mask;
910+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
911+
912+ l2 = mtk_foe_entry_l2(&foe);
913+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
914+
915+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
916+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
917+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
918+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
919+ l2->etype = ETH_P_IPV6;
920+
921+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
922+
923+ __mtk_foe_entry_commit(ppe, &foe, hash);
924 }
925
926-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
927+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
928+{
929+ struct hlist_head *head = &ppe->foe_flow[hash / 4];
930+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
931+ struct mtk_flow_entry *entry;
932+ struct mtk_foe_bridge key = {};
933+ struct hlist_node *n;
934+ struct ethhdr *eh;
935+ bool found = false;
936+ u8 *tag;
937+
938+ spin_lock_bh(&ppe_lock);
939+
940+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
941+ goto out;
942+
943+ hlist_for_each_entry_safe(entry, n, head, list) {
944+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
945+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
946+ MTK_FOE_STATE_BIND))
947+ continue;
948+
949+ entry->hash = 0xffff;
950+ __mtk_foe_entry_clear(ppe, entry);
951+ continue;
952+ }
953+
954+ if (found || !mtk_flow_entry_match(entry, hwe)) {
955+ if (entry->hash != 0xffff)
956+ entry->hash = 0xffff;
957+ continue;
958+ }
959+
960+ entry->hash = hash;
961+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
962+ found = true;
963+ }
964+
965+ if (found)
966+ goto out;
967+
968+ if (!skb)
969+ goto out;
970+
971+ eh = eth_hdr(skb);
972+ ether_addr_copy(key.dest_mac, eh->h_dest);
973+ ether_addr_copy(key.src_mac, eh->h_source);
974+ tag = skb->data - 2;
975+ key.vlan = 0;
976+ switch (skb->protocol) {
977+#if IS_ENABLED(CONFIG_NET_DSA)
978+ case htons(ETH_P_XDSA):
979+ if (!netdev_uses_dsa(skb->dev) ||
980+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
981+ goto out;
982+
983+ tag += 4;
984+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
985+ break;
986+
987+ fallthrough;
988+#endif
989+ case htons(ETH_P_8021Q):
990+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
991+ break;
992+ default:
993+ break;
994+ }
995+
996+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
997+ if (!entry)
998+ goto out;
999+
1000+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
1001+
1002+out:
1003+ spin_unlock_bh(&ppe_lock);
1004+}
1005+
1006+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
1007+{
1008+ mtk_flow_entry_update(ppe, entry);
1009+
1010+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
1011+}
1012+
1013+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
1014 int version)
1015 {
1016+ struct device *dev = eth->dev;
1017 struct mtk_foe_entry *foe;
1018+ struct mtk_ppe *ppe;
1019+
1020+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
1021+ if (!ppe)
1022+ return NULL;
1023+
1024+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
1025
1026 /* need to allocate a separate device, since it PPE DMA access is
1027 * not coherent.
1028 */
1029 ppe->base = base;
1030+ ppe->eth = eth;
1031 ppe->dev = dev;
1032 ppe->version = version;
1033
1034 foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
1035 &ppe->foe_phys, GFP_KERNEL);
1036 if (!foe)
1037- return -ENOMEM;
1038+ return NULL;
1039
1040 ppe->foe_table = foe;
1041
1042 mtk_ppe_debugfs_init(ppe);
1043
1044- return 0;
1045+ return ppe;
1046 }
1047
1048 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
1049@@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
1050 static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
1051 int i, k;
1052
1053- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
1054+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
1055
1056 if (!IS_ENABLED(CONFIG_SOC_MT7621))
1057 return;
1058@@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
1059 MTK_PPE_FLOW_CFG_IP4_NAT |
1060 MTK_PPE_FLOW_CFG_IP4_NAPT |
1061 MTK_PPE_FLOW_CFG_IP4_DSLITE |
1062- MTK_PPE_FLOW_CFG_L2_BRIDGE |
1063 MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
1064 ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
1065
1066diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
1067index 242fb8f2a..1f5cf1c9a 100644
1068--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
1069+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
1070@@ -6,6 +6,7 @@
1071
1072 #include <linux/kernel.h>
1073 #include <linux/bitfield.h>
1074+#include <linux/rhashtable.h>
1075
1076 #define MTK_ETH_PPE_BASE 0xc00
1077
1078@@ -48,9 +49,9 @@ enum {
1079 #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
1080 #define MTK_FOE_IB2_MULTICAST BIT(8)
1081
1082-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
1083-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
1084-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
1085+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
1086+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
1087+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
1088
1089 #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
1090
1091@@ -58,9 +59,9 @@ enum {
1092
1093 #define MTK_FOE_IB2_DSCP GENMASK(31, 24)
1094
1095-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
1096-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
1097-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
1098+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
1099+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
1100+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
1101
1102 enum {
1103 MTK_FOE_STATE_INVALID,
1104@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
1105 u16 src_mac_lo;
1106 };
1107
1108+/* software-only entry type */
1109 struct mtk_foe_bridge {
1110- u32 dest_mac_hi;
1111+ u8 dest_mac[ETH_ALEN];
1112+ u8 src_mac[ETH_ALEN];
1113+ u16 vlan;
1114
1115- u16 src_mac_lo;
1116- u16 dest_mac_lo;
1117-
1118- u32 src_mac_hi;
1119+ struct {} key_end;
1120
1121 u32 ib2;
1122
1123- u32 _rsv[5];
1124-
1125- u32 udf_tsid;
1126 struct mtk_foe_mac_info l2;
1127 };
1128
1129@@ -235,7 +233,37 @@ enum {
1130 MTK_PPE_CPU_REASON_INVALID = 0x1f,
1131 };
1132
1133+enum {
1134+ MTK_FLOW_TYPE_L4,
1135+ MTK_FLOW_TYPE_L2,
1136+ MTK_FLOW_TYPE_L2_SUBFLOW,
1137+};
1138+
1139+struct mtk_flow_entry {
1140+ union {
1141+ struct hlist_node list;
1142+ struct {
1143+ struct rhash_head l2_node;
1144+ struct hlist_head l2_flows;
1145+ };
1146+ };
1147+ u8 type;
1148+ s8 wed_index;
1149+ u16 hash;
1150+ union {
1151+ struct mtk_foe_entry data;
1152+ struct {
1153+ struct mtk_flow_entry *base_flow;
1154+ struct hlist_node list;
1155+ struct {} end;
1156+ } l2_data;
1157+ };
1158+ struct rhash_head node;
1159+ unsigned long cookie;
1160+};
1161+
1162 struct mtk_ppe {
1163+ struct mtk_eth *eth;
1164 struct device *dev;
1165 void __iomem *base;
1166 int version;
1167@@ -243,19 +271,35 @@ struct mtk_ppe {
1168 struct mtk_foe_entry *foe_table;
1169 dma_addr_t foe_phys;
1170
1171+ u16 foe_check_time[MTK_PPE_ENTRIES];
1172+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
1173+
1174+ struct rhashtable l2_flows;
1175+
1176 void *acct_table;
1177 };
1178
1179-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
1180- int version);
1181+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
1182 int mtk_ppe_start(struct mtk_ppe *ppe);
1183 int mtk_ppe_stop(struct mtk_ppe *ppe);
1184
1185+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
1186+
1187 static inline void
1188-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
1189+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
1190 {
1191- ppe->foe_table[hash].ib1 = 0;
1192- dma_wmb();
1193+ u16 now, diff;
1194+
1195+ if (!ppe)
1196+ return;
1197+
1198+ now = (u16)jiffies;
1199+ diff = now - ppe->foe_check_time[hash];
1200+ if (diff < HZ / 10)
1201+ return;
1202+
1203+ ppe->foe_check_time[hash] = now;
1204+ __mtk_ppe_check_skb(ppe, skb, hash);
1205 }
1206
1207 static inline int
1208@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
1209 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
1210 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
1211 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
1212-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
1213- u16 timestamp);
1214+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
1215+ int bss, int wcid);
1216+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1217+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1218+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1219 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
1220
1221 #endif
1222diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1223index d4b482340..a591ab1fd 100644
1224--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1225+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1226@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
1227 static const char * const type_str[] = {
1228 [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
1229 [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
1230- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
1231 [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
1232 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
1233 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
1234@@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
1235 struct dentry *root;
1236
1237 root = debugfs_create_dir("mtk_ppe", NULL);
1238+ if (!root)
1239+ return -ENOMEM;
1240+
1241 debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
1242 debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
1243
1244diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1245index 4294f0c74..d4a012608 100644
1246--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1247+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1248@@ -11,6 +11,7 @@
1249 #include <net/pkt_cls.h>
1250 #include <net/dsa.h>
1251 #include "mtk_eth_soc.h"
1252+#include "mtk_wed.h"
1253
1254 struct mtk_flow_data {
1255 struct ethhdr eth;
1256@@ -30,6 +31,8 @@ struct mtk_flow_data {
1257 __be16 src_port;
1258 __be16 dst_port;
1259
1260+ u16 vlan_in;
1261+
1262 struct {
1263 u16 id;
1264 __be16 proto;
1265@@ -41,12 +44,6 @@ struct mtk_flow_data {
1266 } pppoe;
1267 };
1268
1269-struct mtk_flow_entry {
1270- struct rhash_head node;
1271- unsigned long cookie;
1272- u16 hash;
1273-};
1274-
1275 static const struct rhashtable_params mtk_flow_ht_params = {
1276 .head_offset = offsetof(struct mtk_flow_entry, node),
1277 .key_offset = offsetof(struct mtk_flow_entry, cookie),
1278@@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
1279 .automatic_shrinking = true,
1280 };
1281
1282-static u32
1283-mtk_eth_timestamp(struct mtk_eth *eth)
1284-{
1285- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
1286-}
1287-
1288 static int
1289 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
1290 bool egress)
1291@@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
1292 memcpy(dest, src, act->mangle.mask ? 2 : 4);
1293 }
1294
1295+static int
1296+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
1297+{
1298+ struct net_device_path_ctx ctx = {
1299+ .dev = dev,
1300+ };
1301+ struct net_device_path path = {};
1302+
1303+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
1304+ return -1;
1305+
1306+ if (!dev->netdev_ops->ndo_fill_forward_path)
1307+ return -1;
1308+
1309+ memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
1310+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
1311+ return -1;
1312+
1313+ if (path.type != DEV_PATH_MTK_WDMA)
1314+ return -1;
1315+
1316+ info->wdma_idx = path.mtk_wdma.wdma_idx;
1317+ info->queue = path.mtk_wdma.queue;
1318+ info->bss = path.mtk_wdma.bss;
1319+ info->wcid = path.mtk_wdma.wcid;
1320+
1321+ return 0;
1322+}
1323+
1324
1325 static int
1326 mtk_flow_mangle_ports(const struct flow_action_entry *act,
1327@@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
1328
1329 static int
1330 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
1331- struct net_device *dev)
1332+ struct net_device *dev, const u8 *dest_mac,
1333+ int *wed_index)
1334 {
1335+ struct mtk_wdma_info info = {};
1336 int pse_port, dsa_port;
1337
1338+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
1339+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
1340+ info.wcid);
1341+ pse_port = 3;
1342+ *wed_index = info.wdma_idx;
1343+ goto out;
1344+ }
1345+
1346 dsa_port = mtk_flow_get_dsa_port(&dev);
1347 if (dsa_port >= 0)
1348 mtk_foe_entry_set_dsa(foe, dsa_port);
1349@@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
1350 else
1351 return -EOPNOTSUPP;
1352
1353+out:
1354 mtk_foe_entry_set_pse_port(foe, pse_port);
1355
1356 return 0;
1357@@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1358 struct net_device *odev = NULL;
1359 struct mtk_flow_entry *entry;
1360 int offload_type = 0;
1361+ int wed_index = -1;
1362 u16 addr_type = 0;
1363- u32 timestamp;
1364 u8 l4proto = 0;
1365 int err = 0;
1366- int hash;
1367 int i;
1368
1369 if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
1370@@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1371 return -EOPNOTSUPP;
1372 }
1373
1374+ switch (addr_type) {
1375+ case 0:
1376+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
1377+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1378+ struct flow_match_eth_addrs match;
1379+
1380+ flow_rule_match_eth_addrs(rule, &match);
1381+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
1382+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
1383+ } else {
1384+ return -EOPNOTSUPP;
1385+ }
1386+
1387+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1388+ struct flow_match_vlan match;
1389+
1390+ flow_rule_match_vlan(rule, &match);
1391+
1392+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
1393+ return -EOPNOTSUPP;
1394+
1395+ data.vlan_in = match.key->vlan_id;
1396+ }
1397+ break;
1398+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1399+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1400+ break;
1401+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1402+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1403+ break;
1404+ default:
1405+ return -EOPNOTSUPP;
1406+ }
1407+
1408 flow_action_for_each(i, act, &rule->action) {
1409 switch (act->id) {
1410 case FLOW_ACTION_MANGLE:
1411+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1412+ return -EOPNOTSUPP;
1413 if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
1414 mtk_flow_offload_mangle_eth(act, &data.eth);
1415 break;
1416@@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1417 }
1418 }
1419
1420- switch (addr_type) {
1421- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1422- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1423- break;
1424- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1425- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1426- break;
1427- default:
1428- return -EOPNOTSUPP;
1429- }
1430-
1431 if (!is_valid_ether_addr(data.eth.h_source) ||
1432 !is_valid_ether_addr(data.eth.h_dest))
1433 return -EINVAL;
1434@@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1435 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1436 struct flow_match_ports ports;
1437
1438+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1439+ return -EOPNOTSUPP;
1440+
1441 flow_rule_match_ports(rule, &ports);
1442 data.src_port = ports.key->src;
1443 data.dst_port = ports.key->dst;
1444- } else {
1445+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
1446 return -EOPNOTSUPP;
1447 }
1448
1449@@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1450 if (act->id != FLOW_ACTION_MANGLE)
1451 continue;
1452
1453+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1454+ return -EOPNOTSUPP;
1455+
1456 switch (act->mangle.htype) {
1457 case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1458 case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1459@@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1460 return err;
1461 }
1462
1463+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1464+ foe.bridge.vlan = data.vlan_in;
1465+
1466 if (data.vlan.num == 1) {
1467 if (data.vlan.proto != htons(ETH_P_8021Q))
1468 return -EOPNOTSUPP;
1469@@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1470 if (data.pppoe.num == 1)
1471 mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
1472
1473- err = mtk_flow_set_output_device(eth, &foe, odev);
1474+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
1475+ &wed_index);
1476 if (err)
1477 return err;
1478
1479+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
1480+ return err;
1481+
1482 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1483 if (!entry)
1484 return -ENOMEM;
1485
1486 entry->cookie = f->cookie;
1487- timestamp = mtk_eth_timestamp(eth);
1488- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
1489- if (hash < 0) {
1490- err = hash;
1491+ memcpy(&entry->data, &foe, sizeof(entry->data));
1492+ entry->wed_index = wed_index;
1493+
1494+ if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
1495 goto free;
1496- }
1497
1498- entry->hash = hash;
1499 err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
1500 mtk_flow_ht_params);
1501 if (err < 0)
1502- goto clear_flow;
1503+ goto clear;
1504
1505 return 0;
1506-clear_flow:
1507- mtk_foe_entry_clear(&eth->ppe, hash);
1508+
1509+clear:
1510+ mtk_foe_entry_clear(eth->ppe, entry);
1511 free:
1512 kfree(entry);
1513+ if (wed_index >= 0)
1514+ mtk_wed_flow_remove(wed_index);
1515 return err;
1516 }
1517
1518@@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
1519 if (!entry)
1520 return -ENOENT;
1521
1522- mtk_foe_entry_clear(&eth->ppe, entry->hash);
1523+ mtk_foe_entry_clear(eth->ppe, entry);
1524 rhashtable_remove_fast(&eth->flow_table, &entry->node,
1525 mtk_flow_ht_params);
1526+ if (entry->wed_index >= 0)
1527+ mtk_wed_flow_remove(entry->wed_index);
1528 kfree(entry);
1529
1530 return 0;
1531@@ -406,7 +477,6 @@ static int
1532 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1533 {
1534 struct mtk_flow_entry *entry;
1535- int timestamp;
1536 u32 idle;
1537
1538 entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
1539@@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1540 if (!entry)
1541 return -ENOENT;
1542
1543- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
1544- if (timestamp < 0)
1545- return -ETIMEDOUT;
1546-
1547- idle = mtk_eth_timestamp(eth) - timestamp;
1548+ idle = mtk_foe_entry_idle_time(eth->ppe, entry);
1549 f->stats.lastused = jiffies - idle * HZ;
1550
1551 return 0;
1552@@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1553 struct flow_block_cb *block_cb;
1554 flow_setup_cb_t *cb;
1555
1556- if (!eth->ppe.foe_table)
1557+ if (!eth->ppe || !eth->ppe->foe_table)
1558 return -EOPNOTSUPP;
1559
1560 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1561@@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1562 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
1563 void *type_data)
1564 {
1565- if (type == TC_SETUP_FT)
1566+ switch (type) {
1567+ case TC_SETUP_BLOCK:
1568+ case TC_SETUP_FT:
1569 return mtk_eth_setup_tc_block(dev, type_data);
1570-
1571- return -EOPNOTSUPP;
1572+ default:
1573+ return -EOPNOTSUPP;
1574+ }
1575 }
1576
1577 int mtk_eth_offload_init(struct mtk_eth *eth)
1578 {
1579- if (!eth->ppe.foe_table)
1580+ if (!eth->ppe || !eth->ppe->foe_table)
1581 return 0;
1582
1583 return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
1584diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
1585new file mode 100644
1586index 000000000..ea1cbdf1a
1587--- /dev/null
1588+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
1589@@ -0,0 +1,876 @@
1590+// SPDX-License-Identifier: GPL-2.0-only
1591+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1592+
1593+#include <linux/kernel.h>
1594+#include <linux/slab.h>
1595+#include <linux/module.h>
1596+#include <linux/bitfield.h>
1597+#include <linux/dma-mapping.h>
1598+#include <linux/skbuff.h>
1599+#include <linux/of_platform.h>
1600+#include <linux/of_address.h>
1601+#include <linux/mfd/syscon.h>
1602+#include <linux/debugfs.h>
1603+#include <linux/iopoll.h>
1604+#include <linux/soc/mediatek/mtk_wed.h>
1605+#include "mtk_eth_soc.h"
1606+#include "mtk_wed_regs.h"
1607+#include "mtk_wed.h"
1608+#include "mtk_ppe.h"
1609+
1610+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
1611+
1612+#define MTK_WED_PKT_SIZE 1900
1613+#define MTK_WED_BUF_SIZE 2048
1614+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1615+
1616+#define MTK_WED_TX_RING_SIZE 2048
1617+#define MTK_WED_WDMA_RING_SIZE 1024
1618+
1619+static struct mtk_wed_hw *hw_list[2];
1620+static DEFINE_MUTEX(hw_lock);
1621+
1622+static void
1623+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1624+{
1625+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
1626+}
1627+
1628+static void
1629+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1630+{
1631+ return wed_m32(dev, reg, 0, mask);
1632+}
1633+
1634+static void
1635+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
1636+{
1637+ return wed_m32(dev, reg, mask, 0);
1638+}
1639+
1640+static void
1641+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1642+{
1643+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
1644+}
1645+
1646+static void
1647+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1648+{
1649+ wdma_m32(dev, reg, 0, mask);
1650+}
1651+
1652+static u32
1653+mtk_wed_read_reset(struct mtk_wed_device *dev)
1654+{
1655+ return wed_r32(dev, MTK_WED_RESET);
1656+}
1657+
1658+static void
1659+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
1660+{
1661+ u32 status;
1662+
1663+ wed_w32(dev, MTK_WED_RESET, mask);
1664+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
1665+ !(status & mask), 0, 1000))
1666+ WARN_ON_ONCE(1);
1667+}
1668+
1669+static struct mtk_wed_hw *
1670+mtk_wed_assign(struct mtk_wed_device *dev)
1671+{
1672+ struct mtk_wed_hw *hw;
1673+
1674+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
1675+ if (!hw || hw->wed_dev)
1676+ return NULL;
1677+
1678+ hw->wed_dev = dev;
1679+ return hw;
1680+}
1681+
1682+static int
1683+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
1684+{
1685+ struct mtk_wdma_desc *desc;
1686+ dma_addr_t desc_phys;
1687+ void **page_list;
1688+ int token = dev->wlan.token_start;
1689+ int ring_size;
1690+ int n_pages;
1691+ int i, page_idx;
1692+
1693+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
1694+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
1695+
1696+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
1697+ if (!page_list)
1698+ return -ENOMEM;
1699+
1700+ dev->buf_ring.size = ring_size;
1701+ dev->buf_ring.pages = page_list;
1702+
1703+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
1704+ &desc_phys, GFP_KERNEL);
1705+ if (!desc)
1706+ return -ENOMEM;
1707+
1708+ dev->buf_ring.desc = desc;
1709+ dev->buf_ring.desc_phys = desc_phys;
1710+
1711+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
1712+ dma_addr_t page_phys, buf_phys;
1713+ struct page *page;
1714+ void *buf;
1715+ int s;
1716+
1717+ page = __dev_alloc_pages(GFP_KERNEL, 0);
1718+ if (!page)
1719+ return -ENOMEM;
1720+
1721+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
1722+ DMA_BIDIRECTIONAL);
1723+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
1724+ __free_page(page);
1725+ return -ENOMEM;
1726+ }
1727+
1728+ page_list[page_idx++] = page;
1729+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
1730+ DMA_BIDIRECTIONAL);
1731+
1732+ buf = page_to_virt(page);
1733+ buf_phys = page_phys;
1734+
1735+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
1736+ u32 txd_size;
1737+
1738+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
1739+
1740+ desc->buf0 = buf_phys;
1741+ desc->buf1 = buf_phys + txd_size;
1742+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
1743+ txd_size) |
1744+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
1745+ MTK_WED_BUF_SIZE - txd_size) |
1746+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
1747+ desc->info = 0;
1748+ desc++;
1749+
1750+ buf += MTK_WED_BUF_SIZE;
1751+ buf_phys += MTK_WED_BUF_SIZE;
1752+ }
1753+
1754+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
1755+ DMA_BIDIRECTIONAL);
1756+ }
1757+
1758+ return 0;
1759+}
1760+
1761+static void
1762+mtk_wed_free_buffer(struct mtk_wed_device *dev)
1763+{
1764+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
1765+ void **page_list = dev->buf_ring.pages;
1766+ int page_idx;
1767+ int i;
1768+
1769+ if (!page_list)
1770+ return;
1771+
1772+ if (!desc)
1773+ goto free_pagelist;
1774+
1775+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
1776+ void *page = page_list[page_idx++];
1777+
1778+ if (!page)
1779+ break;
1780+
1781+ dma_unmap_page(dev->hw->dev, desc[i].buf0,
1782+ PAGE_SIZE, DMA_BIDIRECTIONAL);
1783+ __free_page(page);
1784+ }
1785+
1786+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
1787+ desc, dev->buf_ring.desc_phys);
1788+
1789+free_pagelist:
1790+ kfree(page_list);
1791+}
1792+
1793+static void
1794+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
1795+{
1796+ if (!ring->desc)
1797+ return;
1798+
1799+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
1800+ ring->desc, ring->desc_phys);
1801+}
1802+
1803+static void
1804+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
1805+{
1806+ int i;
1807+
1808+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
1809+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
1810+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1811+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
1812+}
1813+
1814+static void
1815+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
1816+{
1817+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1818+
1819+ if (!dev->hw->num_flows)
1820+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1821+
1822+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
1823+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
1824+}
1825+
1826+static void
1827+mtk_wed_stop(struct mtk_wed_device *dev)
1828+{
1829+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
1830+ mtk_wed_set_ext_int(dev, false);
1831+
1832+ wed_clr(dev, MTK_WED_CTRL,
1833+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1834+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1835+ MTK_WED_CTRL_WED_TX_BM_EN |
1836+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1837+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
1838+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
1839+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
1840+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
1841+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1842+
1843+ wed_clr(dev, MTK_WED_GLO_CFG,
1844+ MTK_WED_GLO_CFG_TX_DMA_EN |
1845+ MTK_WED_GLO_CFG_RX_DMA_EN);
1846+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1847+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1848+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1849+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1850+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1851+}
1852+
1853+static void
1854+mtk_wed_detach(struct mtk_wed_device *dev)
1855+{
1856+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
1857+ struct mtk_wed_hw *hw = dev->hw;
1858+
1859+ mutex_lock(&hw_lock);
1860+
1861+ mtk_wed_stop(dev);
1862+
1863+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1864+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1865+
1866+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1867+
1868+ mtk_wed_free_buffer(dev);
1869+ mtk_wed_free_tx_rings(dev);
1870+
1871+ if (of_dma_is_coherent(wlan_node))
1872+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1873+ BIT(hw->index), BIT(hw->index));
1874+
1875+	if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
1876+	    hw->eth->dma_dev != hw->eth->dev)
1877+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
1878+
1879+ memset(dev, 0, sizeof(*dev));
1880+ module_put(THIS_MODULE);
1881+
1882+ hw->wed_dev = NULL;
1883+ mutex_unlock(&hw_lock);
1884+}
1885+
1886+static void
1887+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1888+{
1889+ u32 mask, set;
1890+ u32 offset;
1891+
1892+ mtk_wed_stop(dev);
1893+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1894+
1895+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1896+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1897+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1898+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1899+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1900+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1901+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1902+
1903+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
1904+
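+	/* MTK_WED_WDMA_OFFSET0/1 pack per-block WDMA register offsets as
+	 * 16-bit fields; the second WED instance's WDMA block appears to sit
+	 * 0x400 higher, hence the 0x04000400 adjustment. The values come from
+	 * the reference code and are not publicly documented.
+	 */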
1905+ offset = dev->hw->index ? 0x04000400 : 0;
1906+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1907+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1908+
1909+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
1910+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1911+}
1912+
1913+static void
1914+mtk_wed_hw_init(struct mtk_wed_device *dev)
1915+{
1916+ if (dev->init_done)
1917+ return;
1918+
1919+ dev->init_done = true;
1920+ mtk_wed_set_ext_int(dev, false);
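+	/* Program the TX buffer manager. Judging by the divisors used here,
+	 * buffers are managed in groups of 128, with one reserve group per
+	 * 256 TX ring entries (group sizes inferred, not documented).
+	 */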
1921+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1922+ MTK_WED_TX_BM_CTRL_PAUSE |
1923+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
1924+ dev->buf_ring.size / 128) |
1925+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
1926+ MTK_WED_TX_RING_SIZE / 256));
1927+
1928+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1929+
1930+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1931+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1932+ dev->wlan.token_start) |
1933+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1934+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1935+
1936+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1937+
1938+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1939+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
1940+ MTK_WED_TX_BM_DYN_THR_HI);
1941+
1942+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1943+
1944+ wed_set(dev, MTK_WED_CTRL,
1945+ MTK_WED_CTRL_WED_TX_BM_EN |
1946+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1947+
1948+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1949+}
1950+
1951+static void
1952+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
1953+{
1954+ int i;
1955+
1956+ for (i = 0; i < size; i++) {
1957+ desc[i].buf0 = 0;
1958+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
1959+ desc[i].buf1 = 0;
1960+ desc[i].info = 0;
1961+ }
1962+}
1963+
1964+static bool
1965+mtk_wed_check_busy(struct mtk_wed_device *dev)
1966+{
1967+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
1968+ return true;
1969+
1970+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
1971+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
1972+ return true;
1973+
1974+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
1975+ return true;
1976+
1977+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
1978+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1979+ return true;
1980+
1981+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
1982+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1983+ return true;
1984+
1985+ if (wed_r32(dev, MTK_WED_CTRL) &
1986+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
1987+ return true;
1988+
1989+ return false;
1990+}
1991+
1992+static int
1993+mtk_wed_poll_busy(struct mtk_wed_device *dev)
1994+{
1995+ int sleep = 15000;
1996+ int timeout = 100 * sleep;
1997+ u32 val;
1998+
1999+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
2000+ timeout, false, dev);
2001+}
2002+
2003+static void
2004+mtk_wed_reset_dma(struct mtk_wed_device *dev)
2005+{
2006+ bool busy = false;
2007+ u32 val;
2008+ int i;
2009+
2010+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
2011+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
2012+
2013+ if (!desc)
2014+ continue;
2015+
2016+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
2017+ }
2018+
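+	/* If the engines fail to drain, fall back to hard block resets;
+	 * otherwise a lightweight ring-index reset is sufficient.
+	 */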
2019+ if (mtk_wed_poll_busy(dev))
2020+ busy = mtk_wed_check_busy(dev);
2021+
2022+ if (busy) {
2023+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
2024+ } else {
2025+ wed_w32(dev, MTK_WED_RESET_IDX,
2026+ MTK_WED_RESET_IDX_TX |
2027+ MTK_WED_RESET_IDX_RX);
2028+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
2029+ }
2030+
2031+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
2032+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
2033+
2034+ if (busy) {
2035+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
2036+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
2037+ } else {
2038+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
2039+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
2040+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
2041+
2042+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2043+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2044+
2045+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
2046+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2047+ }
2048+
2049+ for (i = 0; i < 100; i++) {
2050+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
2051+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
2052+ break;
2053+ }
2054+
2055+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
2056+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
2057+
2058+ if (busy) {
2059+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
2060+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
2061+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
2062+ } else {
2063+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
2064+ MTK_WED_WPDMA_RESET_IDX_TX |
2065+ MTK_WED_WPDMA_RESET_IDX_RX);
2066+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
2067+ }
2068+
2069+}
2070+
2071+static int
2072+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
2073+ int size)
2074+{
2075+ ring->desc = dma_alloc_coherent(dev->hw->dev,
2076+ size * sizeof(*ring->desc),
2077+ &ring->desc_phys, GFP_KERNEL);
2078+ if (!ring->desc)
2079+ return -ENOMEM;
2080+
2081+ ring->size = size;
2082+ mtk_wed_ring_reset(ring->desc, size);
2083+
2084+ return 0;
2085+}
2086+
2087+static int
2088+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
2089+{
2090+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
2091+
2092+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
2093+ return -ENOMEM;
2094+
2095+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2096+ wdma->desc_phys);
2097+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2098+ size);
2099+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2100+
2101+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2102+ wdma->desc_phys);
2103+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2104+ size);
2105+
2106+ return 0;
2107+}
2108+
2109+static void
2110+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
2111+{
2112+ u32 wdma_mask;
2113+ u32 val;
2114+ int i;
2115+
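+	/* Any WDMA ring the WLAN driver did not claim through
+	 * mtk_wed_tx_ring_setup() gets a small 16-entry configuration so
+	 * WDMA always sees a valid RX ring.
+	 */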
2116+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
2117+ if (!dev->tx_wdma[i].desc)
2118+ mtk_wed_wdma_ring_setup(dev, i, 16);
2119+
2120+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
2121+
2122+ mtk_wed_hw_init(dev);
2123+
2124+ wed_set(dev, MTK_WED_CTRL,
2125+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
2126+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
2127+ MTK_WED_CTRL_WED_TX_BM_EN |
2128+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
2129+
2130+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
2131+
2132+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
2133+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
2134+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
2135+
2136+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
2137+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
2138+
2139+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
2140+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
2141+
2142+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
2143+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
2144+
2145+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
2146+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
2147+
2148+ wed_set(dev, MTK_WED_GLO_CFG,
2149+ MTK_WED_GLO_CFG_TX_DMA_EN |
2150+ MTK_WED_GLO_CFG_RX_DMA_EN);
2151+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
2152+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
2153+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
2154+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2155+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
2156+
2157+ mtk_wed_set_ext_int(dev, true);
2158+ val = dev->wlan.wpdma_phys |
2159+ MTK_PCIE_MIRROR_MAP_EN |
2160+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
2161+
2162+ if (dev->hw->index)
2163+ val |= BIT(1);
2164+ val |= BIT(0);
2165+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
2166+
2167+ dev->running = true;
2168+}
2169+
2170+static int
2171+mtk_wed_attach(struct mtk_wed_device *dev)
2172+ __releases(RCU)
2173+{
2174+ struct mtk_wed_hw *hw;
2175+ int ret = 0;
2176+
2177+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
2178+ "mtk_wed_attach without holding the RCU read lock");
2179+
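+	/* The caller looked up mtk_soc_wed_ops under rcu_read_lock(); pin
+	 * this module before dropping the lock so the ops cannot go away
+	 * while the attach proceeds.
+	 */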
2180+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
2181+ !try_module_get(THIS_MODULE))
2182+ ret = -ENODEV;
2183+
2184+ rcu_read_unlock();
2185+
2186+ if (ret)
2187+ return ret;
2188+
2189+ mutex_lock(&hw_lock);
2190+
2191+ hw = mtk_wed_assign(dev);
2192+ if (!hw) {
2193+ module_put(THIS_MODULE);
2194+ ret = -ENODEV;
2195+ goto out;
2196+ }
2197+
2198+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
2199+
2200+ dev->hw = hw;
2201+ dev->dev = hw->dev;
2202+ dev->irq = hw->irq;
2203+ dev->wdma_idx = hw->index;
2204+
2205+ if (hw->eth->dma_dev == hw->eth->dev &&
2206+ of_dma_is_coherent(hw->eth->dev->of_node))
2207+ mtk_eth_set_dma_device(hw->eth, hw->dev);
2208+
2209+ ret = mtk_wed_buffer_alloc(dev);
2210+ if (ret) {
2211+ mtk_wed_detach(dev);
2212+ goto out;
2213+ }
2214+
2215+ mtk_wed_hw_init_early(dev);
2216+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
2217+
2218+out:
2219+ mutex_unlock(&hw_lock);
2220+
2221+ return ret;
2222+}
2223+
2224+static int
2225+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2226+{
2227+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
2228+
2229+	/*
2230+	 * Tx ring redirection:
2231+	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA
2232+	 * ring allocated by the WLAN driver is configured into the WED
2233+	 * MTK_WED_RING_TX(n) registers.
2234+	 *
2235+	 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring
2236+	 * and configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
2237+	 * That ring is filled with packets picked up from the WED TX ring
2238+	 * and from WDMA RX.
2239+	 */
2240+
2241+	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
2242+
2243+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
2244+ return -ENOMEM;
2245+
2246+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
2247+ return -ENOMEM;
2248+
2249+ ring->reg_base = MTK_WED_RING_TX(idx);
2250+ ring->wpdma = regs;
2251+
2252+ /* WED -> WPDMA */
2253+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
2254+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
2255+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
2256+
2257+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
2258+ ring->desc_phys);
2259+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
2260+ MTK_WED_TX_RING_SIZE);
2261+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2262+
2263+ return 0;
2264+}
2265+
2266+static int
2267+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2268+{
2269+ struct mtk_wed_ring *ring = &dev->txfree_ring;
2270+ int i;
2271+
2272+ /*
2273+ * For txfree event handling, the same DMA ring is shared between WED
2274+ * and WLAN. The WLAN driver accesses the ring index registers through
2275+	 * WED.
2276+ */
2277+ ring->reg_base = MTK_WED_RING_RX(1);
2278+ ring->wpdma = regs;
2279+
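+	/* Mirror the first three ring words (BASE, COUNT, CPU_IDX) from the
+	 * WLAN ring into both the WED RX(1) ring and its WPDMA view.
+	 */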
2280+ for (i = 0; i < 12; i += 4) {
2281+ u32 val = readl(regs + i);
2282+
2283+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
2284+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
2285+ }
2286+
2287+ return 0;
2288+}
2289+
2290+static u32
2291+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2292+{
2293+ u32 val;
2294+
2295+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2296+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2297+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2298+ if (!dev->hw->num_flows)
2299+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2300+ if (val && net_ratelimit())
2301+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
2302+
2303+ val = wed_r32(dev, MTK_WED_INT_STATUS);
2304+ val &= mask;
2305+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
2306+
2307+ return val;
2308+}
2309+
2310+static void
2311+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
2312+{
2313+ if (!dev->running)
2314+ return;
2315+
2316+ mtk_wed_set_ext_int(dev, !!mask);
2317+ wed_w32(dev, MTK_WED_INT_MASK, mask);
2318+}
2319+
2320+int mtk_wed_flow_add(int index)
2321+{
2322+ struct mtk_wed_hw *hw = hw_list[index];
2323+ int ret;
2324+
2325+ if (!hw || !hw->wed_dev)
2326+ return -ENODEV;
2327+
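+	/* Only the first flow toggles the WLAN driver's offload hook; later
+	 * flows just bump the count. Note that this fast path runs outside
+	 * hw_lock.
+	 */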
2328+ if (hw->num_flows) {
2329+ hw->num_flows++;
2330+ return 0;
2331+ }
2332+
2333+ mutex_lock(&hw_lock);
2334+ if (!hw->wed_dev) {
2335+ ret = -ENODEV;
2336+ goto out;
2337+ }
2338+
2339+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
2340+ if (!ret)
2341+ hw->num_flows++;
2342+ mtk_wed_set_ext_int(hw->wed_dev, true);
2343+
2344+out:
2345+ mutex_unlock(&hw_lock);
2346+
2347+ return ret;
2348+}
2349+
2350+void mtk_wed_flow_remove(int index)
2351+{
2352+ struct mtk_wed_hw *hw = hw_list[index];
2353+
2354+ if (!hw)
2355+ return;
2356+
2357+ if (--hw->num_flows)
2358+ return;
2359+
2360+ mutex_lock(&hw_lock);
2361+ if (!hw->wed_dev)
2362+ goto out;
2363+
2364+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
2365+ mtk_wed_set_ext_int(hw->wed_dev, true);
2366+
2367+out:
2368+ mutex_unlock(&hw_lock);
2369+}
2370+
2371+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2372+ void __iomem *wdma, int index)
2373+{
2374+ static const struct mtk_wed_ops wed_ops = {
2375+ .attach = mtk_wed_attach,
2376+ .tx_ring_setup = mtk_wed_tx_ring_setup,
2377+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2378+ .start = mtk_wed_start,
2379+ .stop = mtk_wed_stop,
2380+ .reset_dma = mtk_wed_reset_dma,
2381+ .reg_read = wed_r32,
2382+ .reg_write = wed_w32,
2383+ .irq_get = mtk_wed_irq_get,
2384+ .irq_set_mask = mtk_wed_irq_set_mask,
2385+ .detach = mtk_wed_detach,
2386+ };
2387+ struct device_node *eth_np = eth->dev->of_node;
2388+ struct platform_device *pdev;
2389+ struct mtk_wed_hw *hw;
2390+ struct regmap *regs;
2391+ int irq;
2392+
2393+ if (!np)
2394+ return;
2395+
2396+ pdev = of_find_device_by_node(np);
2397+ if (!pdev)
2398+ return;
2399+
2400+ get_device(&pdev->dev);
2401+ irq = platform_get_irq(pdev, 0);
2402+ if (irq < 0)
2403+ return;
2404+
2405+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
2406+	if (IS_ERR(regs))
2407+ return;
2408+
2409+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
2410+
2411+ mutex_lock(&hw_lock);
2412+
2413+ if (WARN_ON(hw_list[index]))
2414+ goto unlock;
2415+
2416+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
2417+ hw->node = np;
2418+ hw->regs = regs;
2419+ hw->eth = eth;
2420+ hw->dev = &pdev->dev;
2421+ hw->wdma = wdma;
2422+ hw->index = index;
2423+ hw->irq = irq;
2424+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2425+ "mediatek,pcie-mirror");
2426+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2427+ "mediatek,hifsys");
2428+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
2429+ kfree(hw);
2430+ goto unlock;
2431+ }
2432+
2433+ if (!index) {
2434+ regmap_write(hw->mirror, 0, 0);
2435+ regmap_write(hw->mirror, 4, 0);
2436+ }
2437+ mtk_wed_hw_add_debugfs(hw);
2438+
2439+ hw_list[index] = hw;
2440+
2441+unlock:
2442+ mutex_unlock(&hw_lock);
2443+}
2444+
2445+void mtk_wed_exit(void)
2446+{
2447+ int i;
2448+
2449+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
2450+
2451+ synchronize_rcu();
2452+
2453+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
2454+ struct mtk_wed_hw *hw;
2455+
2456+ hw = hw_list[i];
2457+ if (!hw)
2458+ continue;
2459+
2460+ hw_list[i] = NULL;
2461+ debugfs_remove(hw->debugfs_dir);
2462+ put_device(hw->dev);
2463+ kfree(hw);
2464+ }
2465+}
2466diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2467new file mode 100644
2468index 000000000..981ec613f
2469--- /dev/null
2470+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2471@@ -0,0 +1,135 @@
2472+// SPDX-License-Identifier: GPL-2.0-only
2473+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2474+
2475+#ifndef __MTK_WED_PRIV_H
2476+#define __MTK_WED_PRIV_H
2477+
2478+#include <linux/soc/mediatek/mtk_wed.h>
2479+#include <linux/debugfs.h>
2480+#include <linux/regmap.h>
2481+#include <linux/netdevice.h>
2482+
2483+struct mtk_eth;
2484+
2485+struct mtk_wed_hw {
2486+ struct device_node *node;
2487+ struct mtk_eth *eth;
2488+ struct regmap *regs;
2489+ struct regmap *hifsys;
2490+ struct device *dev;
2491+ void __iomem *wdma;
2492+ struct regmap *mirror;
2493+ struct dentry *debugfs_dir;
2494+ struct mtk_wed_device *wed_dev;
2495+ u32 debugfs_reg;
2496+ u32 num_flows;
2497+ char dirname[5];
2498+ int irq;
2499+ int index;
2500+};
2501+
2502+struct mtk_wdma_info {
2503+ u8 wdma_idx;
2504+ u8 queue;
2505+ u16 wcid;
2506+ u8 bss;
2507+};
2508+
2509+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2510+static inline void
2511+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2512+{
2513+ regmap_write(dev->hw->regs, reg, val);
2514+}
2515+
2516+static inline u32
2517+wed_r32(struct mtk_wed_device *dev, u32 reg)
2518+{
2519+ unsigned int val;
2520+
2521+ regmap_read(dev->hw->regs, reg, &val);
2522+
2523+ return val;
2524+}
2525+
2526+static inline void
2527+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2528+{
2529+ writel(val, dev->hw->wdma + reg);
2530+}
2531+
2532+static inline u32
2533+wdma_r32(struct mtk_wed_device *dev, u32 reg)
2534+{
2535+ return readl(dev->hw->wdma + reg);
2536+}
2537+
2538+static inline u32
2539+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
2540+{
2541+ if (!dev->tx_ring[ring].wpdma)
2542+ return 0;
2543+
2544+ return readl(dev->tx_ring[ring].wpdma + reg);
2545+}
2546+
2547+static inline void
2548+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
2549+{
2550+ if (!dev->tx_ring[ring].wpdma)
2551+ return;
2552+
2553+ writel(val, dev->tx_ring[ring].wpdma + reg);
2554+}
2555+
2556+static inline u32
2557+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
2558+{
2559+ if (!dev->txfree_ring.wpdma)
2560+ return 0;
2561+
2562+ return readl(dev->txfree_ring.wpdma + reg);
2563+}
2564+
2565+static inline void
2566+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2567+{
2568+ if (!dev->txfree_ring.wpdma)
2569+ return;
2570+
2571+ writel(val, dev->txfree_ring.wpdma + reg);
2572+}
2573+
2574+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2575+ void __iomem *wdma, int index);
2576+void mtk_wed_exit(void);
2577+int mtk_wed_flow_add(int index);
2578+void mtk_wed_flow_remove(int index);
2579+#else
2580+static inline void
2581+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2582+ void __iomem *wdma, int index)
2583+{
2584+}
2585+static inline void
2586+mtk_wed_exit(void)
2587+{
2588+}
2589+static inline int mtk_wed_flow_add(int index)
2590+{
2591+ return -EINVAL;
2592+}
2593+static inline void mtk_wed_flow_remove(int index)
2594+{
2595+}
2596+#endif
2597+
2598+#ifdef CONFIG_DEBUG_FS
2599+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
2600+#else
2601+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2602+{
2603+}
2604+#endif
2605+
2606+#endif
2607diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2608new file mode 100644
2609index 000000000..a81d3fd1a
2610--- /dev/null
2611+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2612@@ -0,0 +1,175 @@
2613+// SPDX-License-Identifier: GPL-2.0-only
2614+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2615+
2616+#include <linux/seq_file.h>
2617+#include "mtk_wed.h"
2618+#include "mtk_wed_regs.h"
2619+
2620+struct reg_dump {
2621+ const char *name;
2622+ u16 offset;
2623+ u8 type;
2624+ u8 base;
2625+};
2626+
2627+enum {
2628+ DUMP_TYPE_STRING,
2629+ DUMP_TYPE_WED,
2630+ DUMP_TYPE_WDMA,
2631+ DUMP_TYPE_WPDMA_TX,
2632+ DUMP_TYPE_WPDMA_TXFREE,
2633+};
2634+
2635+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2636+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2637+#define DUMP_RING(_prefix, _base, ...) \
2638+ { _prefix " BASE", _base, __VA_ARGS__ }, \
2639+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2640+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
2641+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2642+
2643+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2644+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2645+
2646+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2647+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
2648+
2649+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
2650+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
2651+
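+/* Each debugfs file is described by a table of reg_dump entries rendered by
+ * dump_wed_regs(); DUMP_RING expands to the four standard ring words
+ * (BASE/CNT/CIDX/DIDX).
+ */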
2652+static void
2653+print_reg_val(struct seq_file *s, const char *name, u32 val)
2654+{
2655+ seq_printf(s, "%-32s %08x\n", name, val);
2656+}
2657+
2658+static void
2659+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2660+ const struct reg_dump *regs, int n_regs)
2661+{
2662+ const struct reg_dump *cur;
2663+ u32 val;
2664+
2665+ for (cur = regs; cur < &regs[n_regs]; cur++) {
2666+ switch (cur->type) {
2667+ case DUMP_TYPE_STRING:
2668+ seq_printf(s, "%s======== %s:\n",
2669+ cur > regs ? "\n" : "",
2670+ cur->name);
2671+ continue;
2672+ case DUMP_TYPE_WED:
2673+ val = wed_r32(dev, cur->offset);
2674+ break;
2675+ case DUMP_TYPE_WDMA:
2676+ val = wdma_r32(dev, cur->offset);
2677+ break;
2678+ case DUMP_TYPE_WPDMA_TX:
2679+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2680+ break;
2681+ case DUMP_TYPE_WPDMA_TXFREE:
2682+ val = wpdma_txfree_r32(dev, cur->offset);
2683+ break;
2684+ }
2685+ print_reg_val(s, cur->name, val);
2686+ }
2687+}
2688+
2689+
2690+static int
2691+wed_txinfo_show(struct seq_file *s, void *data)
2692+{
2693+ static const struct reg_dump regs[] = {
2694+ DUMP_STR("WED TX"),
2695+ DUMP_WED(WED_TX_MIB(0)),
2696+ DUMP_WED_RING(WED_RING_TX(0)),
2697+
2698+ DUMP_WED(WED_TX_MIB(1)),
2699+ DUMP_WED_RING(WED_RING_TX(1)),
2700+
2701+ DUMP_STR("WPDMA TX"),
2702+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
2703+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
2704+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
2705+
2706+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
2707+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
2708+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
2709+
2710+ DUMP_STR("WPDMA TX"),
2711+ DUMP_WPDMA_TX_RING(0),
2712+ DUMP_WPDMA_TX_RING(1),
2713+
2714+ DUMP_STR("WED WDMA RX"),
2715+ DUMP_WED(WED_WDMA_RX_MIB(0)),
2716+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
2717+ DUMP_WED(WED_WDMA_RX_THRES(0)),
2718+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
2719+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
2720+
2721+ DUMP_WED(WED_WDMA_RX_MIB(1)),
2722+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
2723+ DUMP_WED(WED_WDMA_RX_THRES(1)),
2724+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
2725+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
2726+
2727+ DUMP_STR("WDMA RX"),
2728+ DUMP_WDMA(WDMA_GLO_CFG),
2729+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
2730+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
2731+ };
2732+ struct mtk_wed_hw *hw = s->private;
2733+ struct mtk_wed_device *dev = hw->wed_dev;
2734+
2735+ if (!dev)
2736+ return 0;
2737+
2738+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2739+
2740+ return 0;
2741+}
2742+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2743+
2744+
2745+static int
2746+mtk_wed_reg_set(void *data, u64 val)
2747+{
2748+ struct mtk_wed_hw *hw = data;
2749+
2750+ regmap_write(hw->regs, hw->debugfs_reg, val);
2751+
2752+ return 0;
2753+}
2754+
2755+static int
2756+mtk_wed_reg_get(void *data, u64 *val)
2757+{
2758+ struct mtk_wed_hw *hw = data;
2759+ unsigned int regval;
2760+ int ret;
2761+
2762+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
2763+ if (ret)
2764+ return ret;
2765+
2766+ *val = regval;
2767+
2768+ return 0;
2769+}
2770+
2771+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2772+ "0x%08llx\n");
2773+
2774+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2775+{
2776+ struct dentry *dir;
2777+
2778+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
2779+ dir = debugfs_create_dir(hw->dirname, NULL);
2780+ if (!dir)
2781+ return;
2782+
2783+ hw->debugfs_dir = dir;
2784+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2785+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2786+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2787+}
2788diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2789new file mode 100644
2790index 000000000..a5d9d8a5b
2791--- /dev/null
2792+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2793@@ -0,0 +1,8 @@
2794+// SPDX-License-Identifier: GPL-2.0-only
2795+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2796+
2797+#include <linux/kernel.h>
2798+#include <linux/soc/mediatek/mtk_wed.h>
2799+
2800+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2801+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
2802diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2803new file mode 100644
2804index 000000000..0a0465ea5
2805--- /dev/null
2806+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2807@@ -0,0 +1,251 @@
2808+// SPDX-License-Identifier: GPL-2.0-only
2809+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2810+
2811+#ifndef __MTK_WED_REGS_H
2812+#define __MTK_WED_REGS_H
2813+
2814+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2815+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2816+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
2817+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2818+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2819+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2820+
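+/* WDMA hardware descriptor: buf0/buf1 hold the DMA addresses of up to two
+ * segments, ctrl the segment lengths and flags defined above, and info
+ * extra hardware-specific metadata.
+ */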
2821+struct mtk_wdma_desc {
2822+ __le32 buf0;
2823+ __le32 ctrl;
2824+ __le32 buf1;
2825+ __le32 info;
2826+} __packed __aligned(4);
2827+
2828+#define MTK_WED_RESET 0x008
2829+#define MTK_WED_RESET_TX_BM BIT(0)
2830+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
2831+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
2832+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
2833+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
2834+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
2835+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2836+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2837+#define MTK_WED_RESET_WED BIT(31)
2838+
2839+#define MTK_WED_CTRL 0x00c
2840+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
2841+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
2842+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
2843+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
2844+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
2845+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2846+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2847+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2848+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2849+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2850+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2851+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2852+
2853+#define MTK_WED_EXT_INT_STATUS 0x020
2854+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
2855+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
2856+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
2857+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
2858+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
2859+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2860+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2861+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2862+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2863+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2864+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
2865+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
2866+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
2867+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
2868+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
2869+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2870+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2871+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2872+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2873+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2874+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2875+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
2876+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
2877+
2878+#define MTK_WED_EXT_INT_MASK 0x028
2879+
2880+#define MTK_WED_STATUS 0x060
2881+#define MTK_WED_STATUS_TX GENMASK(15, 8)
2882+
2883+#define MTK_WED_TX_BM_CTRL 0x080
2884+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
2885+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
2886+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
2887+
2888+#define MTK_WED_TX_BM_BASE 0x084
2889+
2890+#define MTK_WED_TX_BM_TKID 0x088
2891+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
2892+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
2893+
2894+#define MTK_WED_TX_BM_BUF_LEN 0x08c
2895+
2896+#define MTK_WED_TX_BM_INTF 0x09c
2897+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
2898+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
2899+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
2900+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
2901+
2902+#define MTK_WED_TX_BM_DYN_THR 0x0a0
2903+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
2904+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
2905+
2906+#define MTK_WED_INT_STATUS 0x200
2907+#define MTK_WED_INT_MASK 0x204
2908+
2909+#define MTK_WED_GLO_CFG 0x208
2910+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
2911+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
2912+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
2913+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
2914+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2915+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
2916+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
2917+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2918+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2919+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2920+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2921+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2922+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2923+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
2924+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2925+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
2926+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
2927+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
2928+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
2929+
2930+#define MTK_WED_RESET_IDX 0x20c
2931+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
2932+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
2933+
2934+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
2935+
2936+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
2937+
2938+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2939+
2940+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
2941+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2942+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2943+
2944+#define MTK_WED_WPDMA_GLO_CFG 0x508
2945+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
2946+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
2947+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
2948+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2949+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2950+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2951+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
2952+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2953+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2954+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2955+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2956+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2957+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2958+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
2959+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2960+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
2961+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
2962+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
2963+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
2964+
2965+#define MTK_WED_WPDMA_RESET_IDX 0x50c
2966+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
2967+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
2968+
2969+#define MTK_WED_WPDMA_INT_CTRL 0x520
2970+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
2971+
2972+#define MTK_WED_WPDMA_INT_MASK 0x524
2973+
2974+#define MTK_WED_PCIE_CFG_BASE 0x560
2975+
2976+#define MTK_WED_PCIE_INT_TRIGGER 0x570
2977+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
2978+
2979+#define MTK_WED_WPDMA_CFG_BASE 0x580
2980+
2981+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2982+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2983+
2984+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2985+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2986+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2987+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2988+
2989+#define MTK_WED_WDMA_GLO_CFG 0xa04
2990+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2991+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2992+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2993+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2994+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2995+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
2996+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
2997+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
2998+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
2999+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
3000+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
3001+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
3002+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
3003+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
3004+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
3005+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
3006+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
3007+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
3008+
3009+#define MTK_WED_WDMA_RESET_IDX 0xa08
3010+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
3011+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
3012+
3013+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
3014+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3015+
3016+#define MTK_WED_WDMA_INT_CTRL 0xa2c
3017+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
3018+
3019+#define MTK_WED_WDMA_OFFSET0 0xaa4
3020+#define MTK_WED_WDMA_OFFSET1 0xaa8
3021+
3022+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
3023+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
3024+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
3025+
3026+#define MTK_WED_RING_OFS_BASE 0x00
3027+#define MTK_WED_RING_OFS_COUNT 0x04
3028+#define MTK_WED_RING_OFS_CPU_IDX 0x08
3029+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
3030+
3031+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
3032+
3033+#define MTK_WDMA_GLO_CFG 0x204
3034+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
3035+
3036+#define MTK_WDMA_RESET_IDX 0x208
3037+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
3038+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
3039+
3040+#define MTK_WDMA_INT_MASK 0x228
3041+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
3042+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
3043+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
3044+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
3045+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
3046+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
3047+
3048+#define MTK_WDMA_INT_GRP1 0x250
3049+#define MTK_WDMA_INT_GRP2 0x254
3050+
3051+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3052+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3053+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3054+
3055+/* DMA channel mapping */
3056+#define HIFSYS_DMA_AG_MAP 0x008
3057+
3058+#endif
3059diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3060index 9f64504ac..35998b1a7 100644
3061--- a/include/linux/netdevice.h
3062+++ b/include/linux/netdevice.h
3063@@ -835,6 +835,7 @@ enum net_device_path_type {
3064 DEV_PATH_BRIDGE,
3065 DEV_PATH_PPPOE,
3066 DEV_PATH_DSA,
3067+ DEV_PATH_MTK_WDMA,
3068 };
3069
3070 struct net_device_path {
3071@@ -860,6 +861,12 @@ struct net_device_path {
3072 int port;
3073 u16 proto;
3074 } dsa;
3075+ struct {
3076+ u8 wdma_idx;
3077+ u8 queue;
3078+ u16 wcid;
3079+ u8 bss;
3080+ } mtk_wdma;
3081 };
3082 };
3083
3084diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3085new file mode 100644
3086index 000000000..7e00cca06
3087--- /dev/null
3088+++ b/include/linux/soc/mediatek/mtk_wed.h
3089@@ -0,0 +1,131 @@
3090+#ifndef __MTK_WED_H
3091+#define __MTK_WED_H
3092+
3093+#include <linux/kernel.h>
3094+#include <linux/rcupdate.h>
3095+#include <linux/regmap.h>
3096+#include <linux/pci.h>
3097+
3098+#define MTK_WED_TX_QUEUES 2
3099+
3100+struct mtk_wed_hw;
3101+struct mtk_wdma_desc;
3102+
3103+struct mtk_wed_ring {
3104+ struct mtk_wdma_desc *desc;
3105+ dma_addr_t desc_phys;
3106+ int size;
3107+
3108+ u32 reg_base;
3109+ void __iomem *wpdma;
3110+};
3111+
3112+struct mtk_wed_device {
3113+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3114+ const struct mtk_wed_ops *ops;
3115+ struct device *dev;
3116+ struct mtk_wed_hw *hw;
3117+ bool init_done, running;
3118+ int wdma_idx;
3119+ int irq;
3120+
3121+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3122+ struct mtk_wed_ring txfree_ring;
3123+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3124+
3125+ struct {
3126+ int size;
3127+ void **pages;
3128+ struct mtk_wdma_desc *desc;
3129+ dma_addr_t desc_phys;
3130+ } buf_ring;
3131+
3132+ /* filled by driver: */
3133+ struct {
3134+ struct pci_dev *pci_dev;
3135+
3136+ u32 wpdma_phys;
3137+
3138+ u16 token_start;
3139+ unsigned int nbuf;
3140+
3141+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3142+ int (*offload_enable)(struct mtk_wed_device *wed);
3143+ void (*offload_disable)(struct mtk_wed_device *wed);
3144+ } wlan;
3145+#endif
3146+};
3147+
3148+struct mtk_wed_ops {
3149+ int (*attach)(struct mtk_wed_device *dev);
3150+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
3151+ void __iomem *regs);
3152+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3153+ void __iomem *regs);
3154+ void (*detach)(struct mtk_wed_device *dev);
3155+
3156+ void (*stop)(struct mtk_wed_device *dev);
3157+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
3158+ void (*reset_dma)(struct mtk_wed_device *dev);
3159+
3160+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
3161+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
3162+
3163+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3164+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3165+};
3166+
3167+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3168+
3169+static inline int
3170+mtk_wed_device_attach(struct mtk_wed_device *dev)
3171+{
3172+ int ret = -ENODEV;
3173+
3174+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3175+ rcu_read_lock();
3176+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
3177+ if (dev->ops)
3178+ ret = dev->ops->attach(dev);
3179+ else
3180+ rcu_read_unlock();
3181+
3182+ if (ret)
3183+ dev->ops = NULL;
3184+#endif
3185+
3186+ return ret;
3187+}
3188+
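+/* Typical WLAN driver usage, as a sketch; the driver-side names below are
+ * illustrative only:
+ *
+ *	wed->wlan.pci_dev = pdev;
+ *	wed->wlan.wpdma_phys = wpdma_base_phys;
+ *	wed->wlan.token_start = 0;
+ *	wed->wlan.nbuf = 1024;
+ *	wed->wlan.init_buf = mywlan_init_txd;
+ *	wed->wlan.offload_enable = mywlan_offload_enable;
+ *	wed->wlan.offload_disable = mywlan_offload_disable;
+ *	if (!mtk_wed_device_attach(wed)) {
+ *		mtk_wed_device_tx_ring_setup(wed, 0, tx_ring_regs);
+ *		mtk_wed_device_txfree_ring_setup(wed, txfree_regs);
+ *		mtk_wed_device_start(wed, irq_mask);
+ *	}
+ */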
3189+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3190+#define mtk_wed_device_active(_dev) !!(_dev)->ops
3191+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3192+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
3193+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
3194+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3195+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3196+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
3197+#define mtk_wed_device_reg_read(_dev, _reg) \
3198+ (_dev)->ops->reg_read(_dev, _reg)
3199+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
3200+ (_dev)->ops->reg_write(_dev, _reg, _val)
3201+#define mtk_wed_device_irq_get(_dev, _mask) \
3202+ (_dev)->ops->irq_get(_dev, _mask)
3203+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
3204+ (_dev)->ops->irq_set_mask(_dev, _mask)
3205+#else
3206+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3207+{
3208+ return false;
3209+}
3210+#define mtk_wed_device_detach(_dev) do {} while (0)
3211+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
3212+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3213+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
3214+#define mtk_wed_device_reg_read(_dev, _reg) 0
3215+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3216+#define mtk_wed_device_irq_get(_dev, _mask) 0
3217+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3218+#endif
3219+
3220+#endif
3221diff --git a/net/core/dev.c b/net/core/dev.c
3222index 4f0edb218..031ac7c6f 100644
3223--- a/net/core/dev.c
3224+++ b/net/core/dev.c
3225@@ -675,6 +675,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3226 if (WARN_ON_ONCE(last_dev == ctx.dev))
3227 return -1;
3228 }
3229+
3230+ if (!ctx.dev)
3231+ return ret;
3232+
3233 path = dev_fwd_path(stack);
3234 if (!path)
3235 return -1;
3236--
32372.18.0
3238