1From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 2 Jun 2022 15:32:07 +0800
4Subject: [PATCH 4/8] 9993-add-wed
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7622.dtsi | 32 +-
9 drivers/net/ethernet/mediatek/Kconfig | 4 +
10 drivers/net/ethernet/mediatek/Makefile | 5 +
11 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 136 ++-
12 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14 +-
13 drivers/net/ethernet/mediatek/mtk_ppe.c | 373 +++++++-
14 drivers/net/ethernet/mediatek/mtk_ppe.h | 89 +-
15 .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 4 +-
16 .../net/ethernet/mediatek/mtk_ppe_offload.c | 167 +++-
17 drivers/net/ethernet/mediatek/mtk_wed.c | 876 ++++++++++++++++++
18 drivers/net/ethernet/mediatek/mtk_wed.h | 135 +++
19 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 175 ++++
20 drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8 +
21 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251 +++++
22 include/linux/netdevice.h | 7 +
23 include/linux/soc/mediatek/mtk_wed.h | 131 +++
24 net/core/dev.c | 4 +
25 17 files changed, 2283 insertions(+), 128 deletions(-)
26 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig
27 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile
28 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
29 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
30 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c
31 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
32 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
33 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
34 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
35 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
36 create mode 100644 include/linux/soc/mediatek/mtk_wed.h
37
38diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
39index 369e01389..d0fbc367e 100644
40--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
41+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
42@@ -338,7 +338,7 @@
43 };
44
45 cci_control2: slave-if@5000 {
46- compatible = "arm,cci-400-ctrl-if";
47+ compatible = "arm,cci-400-ctrl-if", "syscon";
48 interface-type = "ace";
49 reg = <0x5000 0x1000>;
50 };
51@@ -920,6 +920,11 @@
52 };
53 };
54
55+ hifsys: syscon@1af00000 {
56+ compatible = "mediatek,mt7622-hifsys", "syscon";
57+ reg = <0 0x1af00000 0 0x70>;
58+ };
59+
60 ethsys: syscon@1b000000 {
61 compatible = "mediatek,mt7622-ethsys",
62 "syscon";
63@@ -938,6 +943,26 @@
64 #dma-cells = <1>;
65 };
66
67+ pcie_mirror: pcie-mirror@10000400 {
68+ compatible = "mediatek,mt7622-pcie-mirror",
69+ "syscon";
70+ reg = <0 0x10000400 0 0x10>;
71+ };
72+
73+ wed0: wed@1020a000 {
74+ compatible = "mediatek,mt7622-wed",
75+ "syscon";
76+ reg = <0 0x1020a000 0 0x1000>;
77+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
78+ };
79+
80+ wed1: wed@1020b000 {
81+ compatible = "mediatek,mt7622-wed",
82+ "syscon";
83+ reg = <0 0x1020b000 0 0x1000>;
84+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
85+ };
86+
87 eth: ethernet@1b100000 {
88 compatible = "mediatek,mt7622-eth",
89 "mediatek,mt2701-eth",
90@@ -964,6 +989,11 @@
91 power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
92 mediatek,ethsys = <&ethsys>;
93 mediatek,sgmiisys = <&sgmiisys>;
94+ mediatek,cci-control = <&cci_control2>;
95+ mediatek,wed = <&wed0>, <&wed1>;
96+ mediatek,pcie-mirror = <&pcie_mirror>;
97+ mediatek,hifsys = <&hifsys>;
98+ dma-coherent;
99 #address-cells = <1>;
100 #size-cells = <0>;
101 status = "disabled";
102diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
103old mode 100755
104new mode 100644
105index 42e6b38d2..8ab6615a3
106--- a/drivers/net/ethernet/mediatek/Kconfig
107+++ b/drivers/net/ethernet/mediatek/Kconfig
108@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
109
110 if NET_VENDOR_MEDIATEK
111
112+config NET_MEDIATEK_SOC_WED
113+ depends on ARCH_MEDIATEK || COMPILE_TEST
114+ def_bool NET_MEDIATEK_SOC != n
115+
116 config NET_MEDIATEK_SOC
117 tristate "MediaTek SoC Gigabit Ethernet support"
118 select PHYLINK
119diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
120old mode 100755
121new mode 100644
122index 0a6af99f1..3528f1b3c
123--- a/drivers/net/ethernet/mediatek/Makefile
124+++ b/drivers/net/ethernet/mediatek/Makefile
125@@ -6,4 +6,9 @@
126 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
127 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
128 mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
129+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
130+ifdef CONFIG_DEBUG_FS
131+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
132+endif
133+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
134 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
135diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
136old mode 100755
137new mode 100644
138index 819d8a0be..2121335a1
139--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
140+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
141@@ -9,6 +9,7 @@
142 #include <linux/of_device.h>
143 #include <linux/of_mdio.h>
144 #include <linux/of_net.h>
145+#include <linux/of_address.h>
146 #include <linux/mfd/syscon.h>
147 #include <linux/regmap.h>
148 #include <linux/clk.h>
149@@ -19,13 +20,15 @@
150 #include <linux/interrupt.h>
151 #include <linux/pinctrl/devinfo.h>
152 #include <linux/phylink.h>
153 #include <linux/gpio/consumer.h>
154+#include <linux/bitfield.h>
155 #include <net/dsa.h>
156
157 #include "mtk_eth_soc.h"
158 #include "mtk_eth_dbg.h"
159 #include "mtk_eth_reset.h"
160 #include "mtk_hnat/hnat.h"
161+#include "mtk_wed.h"
162
163 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
164 #include "mtk_hnat/nf_hnat_mtk.h"
165@@ -850,7 +853,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
166 int i;
167
168 if (!eth->soc->has_sram) {
169- eth->scratch_ring = dma_alloc_coherent(eth->dev,
170+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
171 					       cnt * soc->txrx.txd_size,
172 					       &eth->phy_scratch_ring,
173 GFP_ATOMIC);
174@@ -866,10 +869,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
175 if (unlikely(!eth->scratch_head))
176 return -ENOMEM;
177
178- dma_addr = dma_map_single(eth->dev,
179+ dma_addr = dma_map_single(eth->dma_dev,
180 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
181 DMA_FROM_DEVICE);
182- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
183+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
184 return -ENOMEM;
185
186 phy_ring_tail = eth->phy_scratch_ring +
187@@ -933,26 +936,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
188 {
189 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
190 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
191- dma_unmap_single(eth->dev,
192+ dma_unmap_single(eth->dma_dev,
193 dma_unmap_addr(tx_buf, dma_addr0),
194 dma_unmap_len(tx_buf, dma_len0),
195 DMA_TO_DEVICE);
196 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
197- dma_unmap_page(eth->dev,
198+ dma_unmap_page(eth->dma_dev,
199 dma_unmap_addr(tx_buf, dma_addr0),
200 dma_unmap_len(tx_buf, dma_len0),
201 DMA_TO_DEVICE);
202 }
203 } else {
204 if (dma_unmap_len(tx_buf, dma_len0)) {
205- dma_unmap_page(eth->dev,
206+ dma_unmap_page(eth->dma_dev,
207 dma_unmap_addr(tx_buf, dma_addr0),
208 dma_unmap_len(tx_buf, dma_len0),
209 DMA_TO_DEVICE);
210 }
211
212 if (dma_unmap_len(tx_buf, dma_len1)) {
213- dma_unmap_page(eth->dev,
214+ dma_unmap_page(eth->dma_dev,
215 dma_unmap_addr(tx_buf, dma_addr1),
216 dma_unmap_len(tx_buf, dma_len1),
217 DMA_TO_DEVICE);
218@@ -1017,9 +1020,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
219 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
220 memset(itx_buf, 0, sizeof(*itx_buf));
221
222-	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
223+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
224 DMA_TO_DEVICE);
225- if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
226+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
227 		return -ENOMEM;
228
229 WRITE_ONCE(itxd->txd1, mapped_addr);
230@@ -1114,10 +1117,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
231 		txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
232 txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
233 !(frag_size - txd_info.size);
234- txd_info.addr = skb_frag_dma_map(eth->dev, frag,
235+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
236 offset, txd_info.size,
237 DMA_TO_DEVICE);
238- if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
239+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
240 goto err_dma;
241
242 		mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
243@@ -1384,6 +1387,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
244 struct net_device *netdev;
245 unsigned int pktlen;
246 		dma_addr_t dma_addr = 0;
247+		u32 hash, reason;
248 		int mac = 0;
249
250 if (eth->hwlro)
251@@ -1427,22 +1431,22 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
252 			netdev->stats.rx_dropped++;
253 goto release_desc;
254 }
255- dma_addr = dma_map_single(eth->dev,
256+ dma_addr = dma_map_single(eth->dma_dev,
257 new_data + NET_SKB_PAD +
258 eth->ip_align,
259 ring->buf_size,
260 DMA_FROM_DEVICE);
261- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
262+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
263 skb_free_frag(new_data);
264 netdev->stats.rx_dropped++;
265 goto release_desc;
266 }
267
268 		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
269 ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
270
271- dma_unmap_single(eth->dev,
272+ dma_unmap_single(eth->dma_dev,
273 (u64)(trxd.rxd1 | addr64),
274 				 ring->buf_size, DMA_FROM_DEVICE);
275
276 /* receive data */
277@@ -1463,6 +1467,17 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
278 skb_checksum_none_assert(skb);
279 skb->protocol = eth_type_trans(skb, netdev);
280
281+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
282+ if (hash != MTK_RXD4_FOE_ENTRY) {
283+ hash = jhash_1word(hash, 0);
284+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
285+ }
286+
287+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
288+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
289+ mtk_ppe_check_skb(eth->ppe, skb,
290+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
291+
292 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
293 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
294 if (trxd.rxd3 & RX_DMA_VTAG_V2)
295@@ -1748,7 +1763,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
296 goto no_tx_mem;
297
298 if (!eth->soc->has_sram)
299- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
300+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
301 					       &ring->phys, GFP_KERNEL);
302 	else {
303 ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
304@@ -1780,6 +1795,6 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
305 	 * descriptors in ring->dma_pdma.
306 */
307 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
308- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
309+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
310 						    &ring->phys_pdma, GFP_KERNEL);
311 		if (!ring->dma_pdma)
312@@ -1839,6 +1854,6 @@ static void mtk_tx_clean(struct mtk_eth *eth)
313 	}
314
315 if (!eth->soc->has_sram && ring->dma) {
316- dma_free_coherent(eth->dev,
317+ dma_free_coherent(eth->dma_dev,
318 				  MTK_DMA_SIZE * soc->txrx.txd_size,
319 ring->dma, ring->phys);
320@@ -1847,6 +1862,6 @@ static void mtk_tx_clean(struct mtk_eth *eth)
321 	}
322
323 if (ring->dma_pdma) {
324- dma_free_coherent(eth->dev,
325+ dma_free_coherent(eth->dma_dev,
326 				  MTK_DMA_SIZE * soc->txrx.txd_size,
327 ring->dma_pdma, ring->phys_pdma);
328@@ -1892,7 +1907,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
329
330 if ((!eth->soc->has_sram) || (eth->soc->has_sram
331 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
332- ring->dma = dma_alloc_coherent(eth->dev,
333+ ring->dma = dma_alloc_coherent(eth->dma_dev,
334 					       rx_dma_size * eth->soc->txrx.rxd_size,
335 &ring->phys, GFP_KERNEL);
336 	else {
337@@ -1907,13 +1922,13 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
338 		return -ENOMEM;
339
340 for (i = 0; i < rx_dma_size; i++) {
341 		struct mtk_rx_dma_v2 *rxd;
342
343-		dma_addr_t dma_addr = dma_map_single(eth->dev,
344+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
345 ring->data[i] + NET_SKB_PAD + eth->ip_align,
346 ring->buf_size,
347 DMA_FROM_DEVICE);
348- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
349+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
350 return -ENOMEM;
351
352 rxd = ring->dma + i * eth->soc->txrx.rxd_size;
353@@ -1968,7 +1983,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
354 MTK_8GB_ADDRESSING)) ?
355 ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
356
357-		dma_unmap_single(eth->dev,
358+ dma_unmap_single(eth->dma_dev,
359 				 (u64)(rxd->rxd1 | addr64),
360 				 ring->buf_size,
361 DMA_FROM_DEVICE);
362@@ -1982,7 +1997,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
363 return;
364
365 if (ring->dma) {
366- dma_free_coherent(eth->dev,
367+ dma_free_coherent(eth->dma_dev,
368 				  ring->dma_size * eth->soc->txrx.rxd_size,
369 				  ring->dma,
370 ring->phys);
371@@ -2462,6 +2477,6 @@ static void mtk_dma_free(struct mtk_eth *eth)
372 		if (eth->netdev[i])
373 netdev_reset_queue(eth->netdev[i]);
374 if ( !eth->soc->has_sram && eth->scratch_ring) {
375- dma_free_coherent(eth->dev,
376+ dma_free_coherent(eth->dma_dev,
377 				  MTK_DMA_SIZE * soc->txrx.txd_size,
378 eth->scratch_ring, eth->phy_scratch_ring);
379@@ -2661,7 +2676,7 @@ static int mtk_open(struct net_device *dev)
380 if (err)
381 return err;
382
383- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
384+ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
385 gdm_config = MTK_GDMA_TO_PPE;
386
387 mtk_gdm_config(eth, gdm_config);
388@@ -2778,7 +2793,7 @@ static int mtk_stop(struct net_device *dev)
389 mtk_dma_free(eth);
390
391 if (eth->soc->offload_version)
392- mtk_ppe_stop(&eth->ppe);
393+ mtk_ppe_stop(eth->ppe);
394
395 return 0;
396 }
397@@ -2855,6 +2870,8 @@ static int mtk_napi_init(struct mtk_eth *eth)
398
399 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
400 {
401+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
402+ ETHSYS_DMA_AG_MAP_PPE;
403 int i, ret = 0;
404
405 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
406@@ -2872,6 +2889,10 @@ static int mtk_hw_init(struct mtk_eth *eth, u32 type)
407 goto err_disable_pm;
408 }
409
410+ if (eth->ethsys)
411+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
412+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
413+
414 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
415 ret = device_reset(eth->dev);
416 if (ret) {
417@@ -3501,6 +3522,35 @@ free_netdev:
418 return err;
419 }
420
421+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
422+{
423+ struct net_device *dev, *tmp;
424+ LIST_HEAD(dev_list);
425+ int i;
426+
427+ rtnl_lock();
428+
429+ for (i = 0; i < MTK_MAC_COUNT; i++) {
430+ dev = eth->netdev[i];
431+
432+ if (!dev || !(dev->flags & IFF_UP))
433+ continue;
434+
435+ list_add_tail(&dev->close_list, &dev_list);
436+ }
437+
438+ dev_close_many(&dev_list, false);
439+
440+ eth->dma_dev = dma_dev;
441+
442+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
443+ list_del_init(&dev->close_list);
444+ dev_open(dev, NULL);
445+ }
446+
447+ rtnl_unlock();
448+}
449+
450 static int mtk_probe(struct platform_device *pdev)
451 {
452 struct device_node *mac_np;
453@@ -3514,6 +3564,7 @@ static int mtk_probe(struct platform_device *pdev)
454 eth->soc = of_device_get_match_data(&pdev->dev);
455
456 eth->dev = &pdev->dev;
457+ eth->dma_dev = &pdev->dev;
458 eth->base = devm_platform_ioremap_resource(pdev, 0);
459 if (IS_ERR(eth->base))
460 return PTR_ERR(eth->base);
461@@ -3567,6 +3618,16 @@ static int mtk_probe(struct platform_device *pdev)
462 }
463 }
464
465+ if (of_dma_is_coherent(pdev->dev.of_node)) {
466+ struct regmap *cci;
467+
468+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
469+ "mediatek,cci-control");
470+ /* enable CPU/bus coherency */
471+ if (!IS_ERR(cci))
472+ regmap_write(cci, 0, 3);
473+ }
474+
475 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
476 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
477 GFP_KERNEL);
478@@ -3589,6 +3650,22 @@ static int mtk_probe(struct platform_device *pdev)
479 }
480 }
481
482+ for (i = 0;; i++) {
483+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
484+ "mediatek,wed", i);
485+ static const u32 wdma_regs[] = {
486+ MTK_WDMA0_BASE,
487+ MTK_WDMA1_BASE
488+ };
489+ void __iomem *wdma;
490+
491+ if (!np || i >= ARRAY_SIZE(wdma_regs))
492+ break;
493+
494+ wdma = eth->base + wdma_regs[i];
495+ mtk_wed_add_hw(np, eth, wdma, i);
496+ }
497+
498 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
499 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
500 eth->irq[i] = eth->irq[0];
501@@ -3692,10 +3769,11 @@ static int mtk_probe(struct platform_device *pdev)
502 }
503
504 if (eth->soc->offload_version) {
505- err = mtk_ppe_init(&eth->ppe, eth->dev,
506- eth->base + MTK_ETH_PPE_BASE, 2);
507- if (err)
508+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
509+ if (!eth->ppe) {
510+ err = -ENOMEM;
511 goto err_free_dev;
512+ }
513
514 err = mtk_eth_offload_init(eth);
515 if (err)
516diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
517old mode 100755
518new mode 100644
519index 349f98503..b52378bd6
520--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
521+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
522@@ -517,6 +517,9 @@
523 #define RX_DMA_SPORT_MASK 0x7
524 #endif
525
526+#define MTK_WDMA0_BASE 0x2800
527+#define MTK_WDMA1_BASE 0x2c00
528+
529 /* QDMA descriptor txd4 */
530 #define TX_DMA_CHKSUM (0x7 << 29)
531 #define TX_DMA_TSO BIT(28)
532@@ -704,6 +707,12 @@
533 #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
534
535
536+/* ethernet dma channel agent map */
537+#define ETHSYS_DMA_AG_MAP 0x408
538+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
539+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
540+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
541+
542 /* SGMII subsystem config registers */
543 /* Register to auto-negotiation restart */
544 #define SGMSYS_PCS_CONTROL_1 0x0
545@@ -1209,6 +1218,7 @@ struct mtk_reset_event {
546 /* struct mtk_eth - This is the main datasructure for holding the state
547 * of the driver
548 * @dev: The device pointer
549+ * @dma_dev:		The device pointer used for dma mapping/alloc
550 * @base: The mapped register i/o base
551 * @page_lock: Make sure that register operations are atomic
552 * @tx_irq__lock: Make sure that IRQ register operations are atomic
553@@ -1243,6 +1253,7 @@ struct mtk_reset_event {
554
555 struct mtk_eth {
556 struct device *dev;
557+ struct device *dma_dev;
558 void __iomem *base;
559 spinlock_t page_lock;
560 spinlock_t tx_irq_lock;
561@@ -1283,7 +1294,7 @@ struct mtk_eth {
562 spinlock_t syscfg0_lock;
563 struct timer_list mtk_dma_monitor_timer;
564
565- struct mtk_ppe ppe;
566+ struct mtk_ppe *ppe;
567 struct rhashtable flow_table;
568 };
569
570@@ -1336,5 +1347,6 @@ void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
571 int mtk_eth_offload_init(struct mtk_eth *eth);
572 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
573 void *type_data);
574+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
575
576 #endif /* MTK_ETH_H */
577diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
578old mode 100644
579new mode 100755
580index 66298e223..3d75c22be
581--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
582+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
583@@ -6,9 +6,22 @@
584 #include <linux/iopoll.h>
585 #include <linux/etherdevice.h>
586 #include <linux/platform_device.h>
587+#include <linux/if_ether.h>
588+#include <linux/if_vlan.h>
589+#include <net/dsa.h>
590+#include "mtk_eth_soc.h"
591 #include "mtk_ppe.h"
592 #include "mtk_ppe_regs.h"
593
594+static DEFINE_SPINLOCK(ppe_lock);
595+
596+static const struct rhashtable_params mtk_flow_l2_ht_params = {
597+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
598+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
599+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
600+ .automatic_shrinking = true,
601+};
602+
603 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
604 {
605 writel(val, ppe->base + reg);
606@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
607 return ppe_m32(ppe, reg, val, 0);
608 }
609
610+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
611+{
612+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
613+}
614+
615 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
616 {
617 int ret;
618@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
619 u32 hash;
620
621 switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
622- case MTK_PPE_PKT_TYPE_BRIDGE:
623- hv1 = e->bridge.src_mac_lo;
624- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
625- hv2 = e->bridge.src_mac_hi >> 16;
626- hv2 ^= e->bridge.dest_mac_lo;
627- hv3 = e->bridge.dest_mac_hi;
628- break;
629 case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
630 case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
631 hv1 = e->ipv4.orig.ports;
632@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
633 {
634 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
635
636+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
637+ return &entry->bridge.l2;
638+
639 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
640 return &entry->ipv6.l2;
641
642@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
643 {
644 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
645
646+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
647+ return &entry->bridge.ib2;
648+
649 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
650 return &entry->ipv6.ib2;
651
652@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
653 if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
654 entry->ipv6.ports = ports_pad;
655
656- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
657+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
658+ ether_addr_copy(entry->bridge.src_mac, src_mac);
659+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
660+ entry->bridge.ib2 = val;
661+ l2 = &entry->bridge.l2;
662+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
663 entry->ipv6.ib2 = val;
664 l2 = &entry->ipv6.l2;
665 } else {
666@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
667 return 0;
668 }
669
670+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
671+ int bss, int wcid)
672+{
673+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
674+ u32 *ib2 = mtk_foe_entry_ib2(entry);
675+
676+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
677+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
678+ if (wdma_idx)
679+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
680+
681+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
682+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
683+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
684+
685+ return 0;
686+}
687+
688 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
689 {
690 return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
691 FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
692 }
693
694-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
695- u16 timestamp)
696+static bool
697+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
698+{
699+ int type, len;
700+
701+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
702+ return false;
703+
704+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
705+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
706+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
707+ else
708+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
709+
710+ return !memcmp(&entry->data.data, &data->data, len - 4);
711+}
712+
713+static void
714+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
715+{
716+ struct hlist_head *head;
717+ struct hlist_node *tmp;
718+
719+ if (entry->type == MTK_FLOW_TYPE_L2) {
720+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
721+ mtk_flow_l2_ht_params);
722+
723+ head = &entry->l2_flows;
724+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
725+ __mtk_foe_entry_clear(ppe, entry);
726+ return;
727+ }
728+
729+ hlist_del_init(&entry->list);
730+ if (entry->hash != 0xffff) {
731+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
732+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
733+ MTK_FOE_STATE_INVALID);
734+ dma_wmb();
735+ }
736+ entry->hash = 0xffff;
737+
738+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
739+ return;
740+
741+ hlist_del_init(&entry->l2_data.list);
742+ kfree(entry);
743+}
744+
745+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
746+{
747+ u16 timestamp;
748+ u16 now;
749+
750+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
751+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
752+
753+ if (timestamp > now)
754+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
755+ else
756+ return now - timestamp;
757+}
758+
759+static void
760+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
761 {
762+ struct mtk_flow_entry *cur;
763 struct mtk_foe_entry *hwe;
764- u32 hash;
765+ struct hlist_node *tmp;
766+ int idle;
767+
768+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
769+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
770+ int cur_idle;
771+ u32 ib1;
772+
773+ hwe = &ppe->foe_table[cur->hash];
774+ ib1 = READ_ONCE(hwe->ib1);
775+
776+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
777+ cur->hash = 0xffff;
778+ __mtk_foe_entry_clear(ppe, cur);
779+ continue;
780+ }
781+
782+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
783+ if (cur_idle >= idle)
784+ continue;
785+
786+ idle = cur_idle;
787+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
788+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
789+ }
790+}
791+
792+static void
793+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
794+{
795+ struct mtk_foe_entry *hwe;
796+ struct mtk_foe_entry foe;
797+
798+ spin_lock_bh(&ppe_lock);
799+
800+ if (entry->type == MTK_FLOW_TYPE_L2) {
801+ mtk_flow_entry_update_l2(ppe, entry);
802+ goto out;
803+ }
804+
805+ if (entry->hash == 0xffff)
806+ goto out;
807+
808+ hwe = &ppe->foe_table[entry->hash];
809+ memcpy(&foe, hwe, sizeof(foe));
810+ if (!mtk_flow_entry_match(entry, &foe)) {
811+ entry->hash = 0xffff;
812+ goto out;
813+ }
814+
815+ entry->data.ib1 = foe.ib1;
816+
817+out:
818+ spin_unlock_bh(&ppe_lock);
819+}
820+
821+static void
822+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
823+ u16 hash)
824+{
825+ struct mtk_foe_entry *hwe;
826+ u16 timestamp;
827
828+ timestamp = mtk_eth_timestamp(ppe->eth);
829 timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
830 entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
831 entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
832
833- hash = mtk_ppe_hash_entry(entry);
834 hwe = &ppe->foe_table[hash];
835- if (!mtk_foe_entry_usable(hwe)) {
836- hwe++;
837- hash++;
838-
839- if (!mtk_foe_entry_usable(hwe))
840- return -ENOSPC;
841- }
842-
843 memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
844 wmb();
845 hwe->ib1 = entry->ib1;
846@@ -362,32 +519,201 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
847 	dma_wmb();
848
849 mtk_ppe_cache_clear(ppe);
850+}
851
852- return hash;
853+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
854+{
855+ spin_lock_bh(&ppe_lock);
856+ __mtk_foe_entry_clear(ppe, entry);
857+ spin_unlock_bh(&ppe_lock);
858+}
859+
860+static int
861+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
862+{
863+ entry->type = MTK_FLOW_TYPE_L2;
864+
865+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
866+ mtk_flow_l2_ht_params);
867+}
868+
869+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
870+{
871+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
872+ u32 hash;
873+
874+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
875+ return mtk_foe_entry_commit_l2(ppe, entry);
876+
877+ hash = mtk_ppe_hash_entry(&entry->data);
878+ entry->hash = 0xffff;
879+ spin_lock_bh(&ppe_lock);
880+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]);
881+ spin_unlock_bh(&ppe_lock);
882+
883+ return 0;
884+}
885+
886+static void
887+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
888+ u16 hash)
889+{
890+ struct mtk_flow_entry *flow_info;
891+ struct mtk_foe_entry foe, *hwe;
892+ struct mtk_foe_mac_info *l2;
893+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
894+ int type;
895+
896+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
897+ GFP_ATOMIC);
898+ if (!flow_info)
899+ return;
900+
901+ flow_info->l2_data.base_flow = entry;
902+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
903+ flow_info->hash = hash;
904+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]);
905+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
906+
907+ hwe = &ppe->foe_table[hash];
908+ memcpy(&foe, hwe, sizeof(foe));
909+ foe.ib1 &= ib1_mask;
910+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
911+
912+ l2 = mtk_foe_entry_l2(&foe);
913+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
914+
915+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
916+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
917+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
918+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
919+ l2->etype = ETH_P_IPV6;
920+
921+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
922+
923+ __mtk_foe_entry_commit(ppe, &foe, hash);
924 }
925
926-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
927+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
928+{
929+ struct hlist_head *head = &ppe->foe_flow[hash / 4];
930+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
931+ struct mtk_flow_entry *entry;
932+ struct mtk_foe_bridge key = {};
933+	struct hlist_node *n;
934+	struct ethhdr *eh;
935+ bool found = false;
936+ u8 *tag;
937+
938+ spin_lock_bh(&ppe_lock);
939+
940+	if (hash >= MTK_PPE_ENTRIES)
941+ goto out;
942+
943+	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
944+ goto out;
945+
946+	hlist_for_each_entry_safe(entry, n, head, list) {
947+		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
948+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
949+ MTK_FOE_STATE_BIND))
950+ continue;
951+
952+ entry->hash = 0xffff;
953+ __mtk_foe_entry_clear(ppe, entry);
954+ continue;
955+ }
956+
957+ if (found || !mtk_flow_entry_match(entry, hwe)) {
958+ if (entry->hash != 0xffff)
959+ entry->hash = 0xffff;
960+ continue;
961+ }
962+
963+ entry->hash = hash;
964+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
965+ found = true;
966+ }
967+
968+ if (found)
969+ goto out;
970+
971+ if (!skb)
972+ goto out;
973+
974+ eh = eth_hdr(skb);
975+ ether_addr_copy(key.dest_mac, eh->h_dest);
976+ ether_addr_copy(key.src_mac, eh->h_source);
977+ tag = skb->data - 2;
978+ key.vlan = 0;
979+ switch (skb->protocol) {
980+#if IS_ENABLED(CONFIG_NET_DSA)
981+ case htons(ETH_P_XDSA):
982+ if (!netdev_uses_dsa(skb->dev) ||
983+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
984+ goto out;
985+
986+ tag += 4;
987+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
988+ break;
989+
990+ fallthrough;
991+#endif
992+ case htons(ETH_P_8021Q):
993+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
994+ break;
995+ default:
996+ break;
997+ }
998+
999+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
1000+ if (!entry)
1001+ goto out;
1002+
1003+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
1004+
1005+out:
1006+ spin_unlock_bh(&ppe_lock);
1007+}
1008+
1009+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
1010+{
1011+ mtk_flow_entry_update(ppe, entry);
1012+
1013+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
1014+}
1015+
1016+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
1017 int version)
1018 {
1019+ struct device *dev = eth->dev;
1020 struct mtk_foe_entry *foe;
1021+ struct mtk_ppe *ppe;
1022+
1023+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
1024+ if (!ppe)
1025+ return NULL;
1026+
1027+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
1028
1029 /* need to allocate a separate device, since it PPE DMA access is
1030 * not coherent.
1031 */
1032 ppe->base = base;
1033+ ppe->eth = eth;
1034 ppe->dev = dev;
1035 ppe->version = version;
1036
1037 foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
1038 &ppe->foe_phys, GFP_KERNEL);
1039 if (!foe)
1040- return -ENOMEM;
1041+ return NULL;
1042
1043 ppe->foe_table = foe;
1044
1045 mtk_ppe_debugfs_init(ppe);
1046
1047- return 0;
1048+ return ppe;
1049 }
1050
1051 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
1052@@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
1053 static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
1054 int i, k;
1055
1056- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
1057+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
1058
1059 if (!IS_ENABLED(CONFIG_SOC_MT7621))
1060 return;
1061@@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
1062 MTK_PPE_FLOW_CFG_IP4_NAT |
1063 MTK_PPE_FLOW_CFG_IP4_NAPT |
1064 MTK_PPE_FLOW_CFG_IP4_DSLITE |
1065- MTK_PPE_FLOW_CFG_L2_BRIDGE |
1066 MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
1067 ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
1068
1069diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
1070index 242fb8f2a..1f5cf1c9a 100644
1071--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
1072+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
1073@@ -6,6 +6,7 @@
1074
1075 #include <linux/kernel.h>
1076 #include <linux/bitfield.h>
1077+#include <linux/rhashtable.h>
1078
1079 #define MTK_ETH_PPE_BASE 0xc00
1080
1081@@ -48,9 +49,9 @@ enum {
1082 #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
1083 #define MTK_FOE_IB2_MULTICAST BIT(8)
1084
1085-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
1086-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
1087-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
1088+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
1089+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
1090+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
1091
1092 #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
1093
1094@@ -58,9 +59,9 @@ enum {
1095
1096 #define MTK_FOE_IB2_DSCP GENMASK(31, 24)
1097
1098-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
1099-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
1100-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
1101+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
1102+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
1103+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
1104
1105 enum {
1106 MTK_FOE_STATE_INVALID,
1107@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
1108 u16 src_mac_lo;
1109 };
1110
1111+/* software-only entry type */
1112 struct mtk_foe_bridge {
1113- u32 dest_mac_hi;
1114+ u8 dest_mac[ETH_ALEN];
1115+ u8 src_mac[ETH_ALEN];
1116+ u16 vlan;
1117
1118- u16 src_mac_lo;
1119- u16 dest_mac_lo;
1120-
1121- u32 src_mac_hi;
1122+ struct {} key_end;
1123
1124 u32 ib2;
1125
1126- u32 _rsv[5];
1127-
1128- u32 udf_tsid;
1129 struct mtk_foe_mac_info l2;
1130 };
1131
1132@@ -235,7 +233,37 @@ enum {
1133 MTK_PPE_CPU_REASON_INVALID = 0x1f,
1134 };
1135
1136+enum {
1137+ MTK_FLOW_TYPE_L4,
1138+ MTK_FLOW_TYPE_L2,
1139+ MTK_FLOW_TYPE_L2_SUBFLOW,
1140+};
1141+
1142+struct mtk_flow_entry {
1143+ union {
1144+ struct hlist_node list;
1145+ struct {
1146+ struct rhash_head l2_node;
1147+ struct hlist_head l2_flows;
1148+ };
1149+ };
1150+ u8 type;
1151+ s8 wed_index;
1152+ u16 hash;
1153+ union {
1154+ struct mtk_foe_entry data;
1155+ struct {
1156+ struct mtk_flow_entry *base_flow;
1157+ struct hlist_node list;
1158+ struct {} end;
1159+ } l2_data;
1160+ };
1161+ struct rhash_head node;
1162+ unsigned long cookie;
1163+};
1164+
1165 struct mtk_ppe {
1166+ struct mtk_eth *eth;
1167 struct device *dev;
1168 void __iomem *base;
1169 int version;
1170@@ -243,19 +271,35 @@ struct mtk_ppe {
1171 struct mtk_foe_entry *foe_table;
1172 dma_addr_t foe_phys;
1173
1174+ u16 foe_check_time[MTK_PPE_ENTRIES];
1175+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
1176+
1177+ struct rhashtable l2_flows;
1178+
1179 void *acct_table;
1180 };
1181
1182-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
1183- int version);
1184+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
1185 int mtk_ppe_start(struct mtk_ppe *ppe);
1186 int mtk_ppe_stop(struct mtk_ppe *ppe);
1187
1188+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
1189+
1190 static inline void
1191-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
1192+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
1193 {
1194- ppe->foe_table[hash].ib1 = 0;
1195- dma_wmb();
1196+ u16 now, diff;
1197+
1198+ if (!ppe)
1199+ return;
1200+
1201+ now = (u16)jiffies;
1202+ diff = now - ppe->foe_check_time[hash];
1203+ if (diff < HZ / 10)
1204+ return;
1205+
1206+ ppe->foe_check_time[hash] = now;
1207+ __mtk_ppe_check_skb(ppe, skb, hash);
1208 }
1209
1210 static inline int
1211@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
1212 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
1213 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
1214 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
1215-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
1216- u16 timestamp);
1217+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
1218+ int bss, int wcid);
1219+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1220+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1221+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1222 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
1223
1224 #endif
1225diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1226index d4b482340..a591ab1fd 100644
1227--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1228+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1229@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
1230 static const char * const type_str[] = {
1231 [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
1232 [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
1233- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
1234 [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
1235 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
1236 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
1237@@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
1238 struct dentry *root;
1239
1240 root = debugfs_create_dir("mtk_ppe", NULL);
1241+ if (!root)
1242+ return -ENOMEM;
1243+
1244 debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
1245 debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
1246
1247diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1248index 4294f0c74..d4a012608 100644
1249--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1250+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1251@@ -11,6 +11,7 @@
1252 #include <net/pkt_cls.h>
1253 #include <net/dsa.h>
1254 #include "mtk_eth_soc.h"
1255+#include "mtk_wed.h"
1256
1257 struct mtk_flow_data {
1258 struct ethhdr eth;
1259@@ -30,6 +31,8 @@ struct mtk_flow_data {
1260 __be16 src_port;
1261 __be16 dst_port;
1262
1263+ u16 vlan_in;
1264+
1265 struct {
1266 u16 id;
1267 __be16 proto;
1268@@ -41,12 +44,6 @@ struct mtk_flow_data {
1269 } pppoe;
1270 };
1271
1272-struct mtk_flow_entry {
1273- struct rhash_head node;
1274- unsigned long cookie;
1275- u16 hash;
1276-};
1277-
1278 static const struct rhashtable_params mtk_flow_ht_params = {
1279 .head_offset = offsetof(struct mtk_flow_entry, node),
1280 .key_offset = offsetof(struct mtk_flow_entry, cookie),
1281@@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
1282 .automatic_shrinking = true,
1283 };
1284
1285-static u32
1286-mtk_eth_timestamp(struct mtk_eth *eth)
1287-{
1288- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
1289-}
1290-
1291 static int
1292 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
1293 bool egress)
1294@@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
1295 memcpy(dest, src, act->mangle.mask ? 2 : 4);
1296 }
1297
1298+static int
1299+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
1300+{
1301+ struct net_device_path_ctx ctx = {
1302+ .dev = dev,
1303+ };
1304+ struct net_device_path path = {};
1305+
1306+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
1307+ return -1;
1308+
1309+ if (!dev->netdev_ops->ndo_fill_forward_path)
1310+ return -1;
1311+
1312+ memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
1313+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
1314+ return -1;
1315+
1316+ if (path.type != DEV_PATH_MTK_WDMA)
1317+ return -1;
1318+
1319+ info->wdma_idx = path.mtk_wdma.wdma_idx;
1320+ info->queue = path.mtk_wdma.queue;
1321+ info->bss = path.mtk_wdma.bss;
1322+ info->wcid = path.mtk_wdma.wcid;
1323+
1324+ return 0;
1325+}
1326+
1327
1328 static int
1329 mtk_flow_mangle_ports(const struct flow_action_entry *act,
1330@@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
1331
1332 static int
1333 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
1334- struct net_device *dev)
1335+ struct net_device *dev, const u8 *dest_mac,
1336+ int *wed_index)
1337 {
1338+ struct mtk_wdma_info info = {};
1339 int pse_port, dsa_port;
1340
1341+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
1342+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
1343+ info.wcid);
1344+ pse_port = 3;
1345+ *wed_index = info.wdma_idx;
1346+ goto out;
1347+ }
1348+
1349 dsa_port = mtk_flow_get_dsa_port(&dev);
1350 if (dsa_port >= 0)
1351 mtk_foe_entry_set_dsa(foe, dsa_port);
1352@@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
1353 else
1354 return -EOPNOTSUPP;
1355
1356+out:
1357 mtk_foe_entry_set_pse_port(foe, pse_port);
1358
1359 return 0;
1360@@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1361 struct net_device *odev = NULL;
1362 struct mtk_flow_entry *entry;
1363 int offload_type = 0;
1364+ int wed_index = -1;
1365 u16 addr_type = 0;
1366- u32 timestamp;
1367 u8 l4proto = 0;
1368 int err = 0;
1369- int hash;
1370 int i;
1371
1372 if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
1373@@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1374 return -EOPNOTSUPP;
1375 }
1376
1377+ switch (addr_type) {
1378+ case 0:
1379+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
1380+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1381+ struct flow_match_eth_addrs match;
1382+
1383+ flow_rule_match_eth_addrs(rule, &match);
1384+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
1385+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
1386+ } else {
1387+ return -EOPNOTSUPP;
1388+ }
1389+
1390+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1391+ struct flow_match_vlan match;
1392+
1393+ flow_rule_match_vlan(rule, &match);
1394+
1395+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
1396+ return -EOPNOTSUPP;
1397+
1398+ data.vlan_in = match.key->vlan_id;
1399+ }
1400+ break;
1401+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1402+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1403+ break;
1404+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1405+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1406+ break;
1407+ default:
1408+ return -EOPNOTSUPP;
1409+ }
1410+
1411 flow_action_for_each(i, act, &rule->action) {
1412 switch (act->id) {
1413 case FLOW_ACTION_MANGLE:
1414+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1415+ return -EOPNOTSUPP;
1416 if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
1417 mtk_flow_offload_mangle_eth(act, &data.eth);
1418 break;
1419@@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1420 }
1421 }
1422
1423- switch (addr_type) {
1424- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1425- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1426- break;
1427- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1428- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1429- break;
1430- default:
1431- return -EOPNOTSUPP;
1432- }
1433-
1434 if (!is_valid_ether_addr(data.eth.h_source) ||
1435 !is_valid_ether_addr(data.eth.h_dest))
1436 return -EINVAL;
1437@@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1438 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1439 struct flow_match_ports ports;
1440
1441+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1442+ return -EOPNOTSUPP;
1443+
1444 flow_rule_match_ports(rule, &ports);
1445 data.src_port = ports.key->src;
1446 data.dst_port = ports.key->dst;
1447- } else {
1448+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
1449 return -EOPNOTSUPP;
1450 }
1451
1452@@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1453 if (act->id != FLOW_ACTION_MANGLE)
1454 continue;
1455
1456+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1457+ return -EOPNOTSUPP;
1458+
1459 switch (act->mangle.htype) {
1460 case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1461 case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1462@@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1463 return err;
1464 }
1465
1466+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1467+ foe.bridge.vlan = data.vlan_in;
1468+
1469 if (data.vlan.num == 1) {
1470 if (data.vlan.proto != htons(ETH_P_8021Q))
1471 return -EOPNOTSUPP;
1472@@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1473 if (data.pppoe.num == 1)
1474 mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
1475
1476- err = mtk_flow_set_output_device(eth, &foe, odev);
1477+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
1478+ &wed_index);
1479 if (err)
1480 return err;
1481
1482+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
1483+ return err;
1484+
1485 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1486 if (!entry)
1487 return -ENOMEM;
1488
1489 entry->cookie = f->cookie;
1490- timestamp = mtk_eth_timestamp(eth);
1491- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
1492- if (hash < 0) {
1493- err = hash;
1494+ memcpy(&entry->data, &foe, sizeof(entry->data));
1495+ entry->wed_index = wed_index;
1496+
1497+ if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
1498 goto free;
1499- }
1500
1501- entry->hash = hash;
1502 err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
1503 mtk_flow_ht_params);
1504 if (err < 0)
1505- goto clear_flow;
1506+ goto clear;
1507
1508 return 0;
1509-clear_flow:
1510- mtk_foe_entry_clear(&eth->ppe, hash);
1511+
1512+clear:
1513+ mtk_foe_entry_clear(eth->ppe, entry);
1514 free:
1515 kfree(entry);
1516+ if (wed_index >= 0)
1517+ mtk_wed_flow_remove(wed_index);
1518 return err;
1519 }
1520
1521@@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
1522 if (!entry)
1523 return -ENOENT;
1524
1525- mtk_foe_entry_clear(&eth->ppe, entry->hash);
1526+ mtk_foe_entry_clear(eth->ppe, entry);
1527 rhashtable_remove_fast(&eth->flow_table, &entry->node,
1528 mtk_flow_ht_params);
1529+ if (entry->wed_index >= 0)
1530+ mtk_wed_flow_remove(entry->wed_index);
1531 kfree(entry);
1532
1533 return 0;
1534@@ -406,7 +477,6 @@ static int
1535 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1536 {
1537 struct mtk_flow_entry *entry;
1538- int timestamp;
1539 u32 idle;
1540
1541 entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
1542@@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1543 if (!entry)
1544 return -ENOENT;
1545
1546- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
1547- if (timestamp < 0)
1548- return -ETIMEDOUT;
1549-
1550- idle = mtk_eth_timestamp(eth) - timestamp;
1551+ idle = mtk_foe_entry_idle_time(eth->ppe, entry);
1552 f->stats.lastused = jiffies - idle * HZ;
1553
1554 return 0;
1555@@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1556 struct flow_block_cb *block_cb;
1557 flow_setup_cb_t *cb;
1558
1559- if (!eth->ppe.foe_table)
1560+ if (!eth->ppe || !eth->ppe->foe_table)
1561 return -EOPNOTSUPP;
1562
1563 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1564@@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1565 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
1566 void *type_data)
1567 {
1568- if (type == TC_SETUP_FT)
1569+ switch (type) {
1570+ case TC_SETUP_BLOCK:
1571+ case TC_SETUP_FT:
1572 return mtk_eth_setup_tc_block(dev, type_data);
1573-
1574- return -EOPNOTSUPP;
1575+ default:
1576+ return -EOPNOTSUPP;
1577+ }
1578 }
1579
1580 int mtk_eth_offload_init(struct mtk_eth *eth)
1581 {
1582- if (!eth->ppe.foe_table)
1583+ if (!eth->ppe || !eth->ppe->foe_table)
1584 return 0;
1585
1586 return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
1587diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
1588new file mode 100644
1589index 000000000..ea1cbdf1a
1590--- /dev/null
1591+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
1592@@ -0,0 +1,876 @@
1593+// SPDX-License-Identifier: GPL-2.0-only
1594+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1595+
1596+#include <linux/kernel.h>
1597+#include <linux/slab.h>
1598+#include <linux/module.h>
1599+#include <linux/bitfield.h>
1600+#include <linux/dma-mapping.h>
1601+#include <linux/skbuff.h>
1602+#include <linux/of_platform.h>
1603+#include <linux/of_address.h>
1604+#include <linux/mfd/syscon.h>
1605+#include <linux/debugfs.h>
1606+#include <linux/iopoll.h>
1607+#include <linux/soc/mediatek/mtk_wed.h>
1608+#include "mtk_eth_soc.h"
1609+#include "mtk_wed_regs.h"
1610+#include "mtk_wed.h"
1611+#include "mtk_ppe.h"
1612+
1613+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
1614+
1615+#define MTK_WED_PKT_SIZE 1900
1616+#define MTK_WED_BUF_SIZE 2048
1617+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1618+
1619+#define MTK_WED_TX_RING_SIZE 2048
1620+#define MTK_WED_WDMA_RING_SIZE 1024
1621+
1622+static struct mtk_wed_hw *hw_list[2];
1623+static DEFINE_MUTEX(hw_lock);
1624+
1625+static void
1626+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1627+{
1628+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
1629+}
1630+
1631+static void
1632+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1633+{
1634+ return wed_m32(dev, reg, 0, mask);
1635+}
1636+
1637+static void
1638+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
1639+{
1640+ return wed_m32(dev, reg, mask, 0);
1641+}
1642+
1643+static void
1644+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1645+{
1646+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
1647+}
1648+
1649+static void
1650+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1651+{
1652+ wdma_m32(dev, reg, 0, mask);
1653+}
1654+
1655+static u32
1656+mtk_wed_read_reset(struct mtk_wed_device *dev)
1657+{
1658+ return wed_r32(dev, MTK_WED_RESET);
1659+}
1660+
1661+static void
1662+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
1663+{
1664+ u32 status;
1665+
1666+ wed_w32(dev, MTK_WED_RESET, mask);
1667+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
1668+ !(status & mask), 0, 1000))
1669+ WARN_ON_ONCE(1);
1670+}
1671+
1672+static struct mtk_wed_hw *
1673+mtk_wed_assign(struct mtk_wed_device *dev)
1674+{
1675+ struct mtk_wed_hw *hw;
1676+
1677+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
1678+ if (!hw || hw->wed_dev)
1679+ return NULL;
1680+
1681+ hw->wed_dev = dev;
1682+ return hw;
1683+}
1684+
1685+static int
1686+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
1687+{
1688+ struct mtk_wdma_desc *desc;
1689+ dma_addr_t desc_phys;
1690+ void **page_list;
1691+ int token = dev->wlan.token_start;
1692+ int ring_size;
1693+ int n_pages;
1694+ int i, page_idx;
1695+
1696+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
1697+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
1698+
1699+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
1700+ if (!page_list)
1701+ return -ENOMEM;
1702+
1703+ dev->buf_ring.size = ring_size;
1704+ dev->buf_ring.pages = page_list;
1705+
1706+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
1707+ &desc_phys, GFP_KERNEL);
1708+ if (!desc)
1709+ return -ENOMEM;
1710+
1711+ dev->buf_ring.desc = desc;
1712+ dev->buf_ring.desc_phys = desc_phys;
1713+
1714+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
1715+ dma_addr_t page_phys, buf_phys;
1716+ struct page *page;
1717+ void *buf;
1718+ int s;
1719+
1720+ page = __dev_alloc_pages(GFP_KERNEL, 0);
1721+ if (!page)
1722+ return -ENOMEM;
1723+
1724+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
1725+ DMA_BIDIRECTIONAL);
1726+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
1727+ __free_page(page);
1728+ return -ENOMEM;
1729+ }
1730+
1731+ page_list[page_idx++] = page;
1732+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
1733+ DMA_BIDIRECTIONAL);
1734+
1735+ buf = page_to_virt(page);
1736+ buf_phys = page_phys;
1737+
1738+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
1739+ u32 txd_size;
1740+
1741+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
1742+
1743+ desc->buf0 = buf_phys;
1744+ desc->buf1 = buf_phys + txd_size;
1745+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
1746+ txd_size) |
1747+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
1748+ MTK_WED_BUF_SIZE - txd_size) |
1749+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
1750+ desc->info = 0;
1751+ desc++;
1752+
1753+ buf += MTK_WED_BUF_SIZE;
1754+ buf_phys += MTK_WED_BUF_SIZE;
1755+ }
1756+
1757+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
1758+ DMA_BIDIRECTIONAL);
1759+ }
1760+
1761+ return 0;
1762+}
1763+
1764+static void
1765+mtk_wed_free_buffer(struct mtk_wed_device *dev)
1766+{
1767+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
1768+ void **page_list = dev->buf_ring.pages;
1769+ int page_idx;
1770+ int i;
1771+
1772+ if (!page_list)
1773+ return;
1774+
1775+ if (!desc)
1776+ goto free_pagelist;
1777+
1778+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
1779+ void *page = page_list[page_idx++];
1780+
1781+ if (!page)
1782+ break;
1783+
1784+ dma_unmap_page(dev->hw->dev, desc[i].buf0,
1785+ PAGE_SIZE, DMA_BIDIRECTIONAL);
1786+ __free_page(page);
1787+ }
1788+
1789+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
1790+ desc, dev->buf_ring.desc_phys);
1791+
1792+free_pagelist:
1793+ kfree(page_list);
1794+}
1795+
1796+static void
1797+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
1798+{
1799+ if (!ring->desc)
1800+ return;
1801+
1802+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
1803+ ring->desc, ring->desc_phys);
1804+}
1805+
1806+static void
1807+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
1808+{
1809+ int i;
1810+
1811+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
1812+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
1813+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1814+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
1815+}
1816+
1817+static void
1818+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
1819+{
1820+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1821+
1822+ if (!dev->hw->num_flows)
1823+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1824+
1825+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
1826+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
1827+}
1828+
1829+static void
1830+mtk_wed_stop(struct mtk_wed_device *dev)
1831+{
1832+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
1833+ mtk_wed_set_ext_int(dev, false);
1834+
1835+ wed_clr(dev, MTK_WED_CTRL,
1836+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1837+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1838+ MTK_WED_CTRL_WED_TX_BM_EN |
1839+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1840+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
1841+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
1842+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
1843+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
1844+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1845+
1846+ wed_clr(dev, MTK_WED_GLO_CFG,
1847+ MTK_WED_GLO_CFG_TX_DMA_EN |
1848+ MTK_WED_GLO_CFG_RX_DMA_EN);
1849+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1850+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1851+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1852+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1853+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1854+}
1855+
1856+static void
1857+mtk_wed_detach(struct mtk_wed_device *dev)
1858+{
1859+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
1860+ struct mtk_wed_hw *hw = dev->hw;
1861+
1862+ mutex_lock(&hw_lock);
1863+
1864+ mtk_wed_stop(dev);
1865+
1866+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1867+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1868+
1869+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1870+
1871+ mtk_wed_free_buffer(dev);
1872+ mtk_wed_free_tx_rings(dev);
1873+
1874+ if (of_dma_is_coherent(wlan_node))
1875+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1876+ BIT(hw->index), BIT(hw->index));
1877+
1878+ if (!hw_list[!hw->index]->wed_dev &&
1879+ hw->eth->dma_dev != hw->eth->dev)
1880+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
1881+
1882+ memset(dev, 0, sizeof(*dev));
1883+ module_put(THIS_MODULE);
1884+
1885+ hw->wed_dev = NULL;
1886+ mutex_unlock(&hw_lock);
1887+}
1888+
1889+static void
1890+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1891+{
1892+ u32 mask, set;
1893+ u32 offset;
1894+
1895+ mtk_wed_stop(dev);
1896+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1897+
1898+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1899+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1900+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1901+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1902+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1903+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1904+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1905+
1906+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
1907+
1908+ offset = dev->hw->index ? 0x04000400 : 0;
1909+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1910+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1911+
1912+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
1913+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1914+}
1915+
1916+static void
1917+mtk_wed_hw_init(struct mtk_wed_device *dev)
1918+{
1919+ if (dev->init_done)
1920+ return;
1921+
1922+ dev->init_done = true;
1923+ mtk_wed_set_ext_int(dev, false);
1924+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1925+ MTK_WED_TX_BM_CTRL_PAUSE |
1926+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
1927+ dev->buf_ring.size / 128) |
1928+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
1929+ MTK_WED_TX_RING_SIZE / 256));
1930+
1931+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1932+
1933+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1934+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1935+ dev->wlan.token_start) |
1936+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1937+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1938+
1939+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1940+
1941+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1942+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
1943+ MTK_WED_TX_BM_DYN_THR_HI);
1944+
1945+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1946+
1947+ wed_set(dev, MTK_WED_CTRL,
1948+ MTK_WED_CTRL_WED_TX_BM_EN |
1949+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1950+
1951+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1952+}
1953+
1954+static void
1955+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
1956+{
1957+ int i;
1958+
1959+ for (i = 0; i < size; i++) {
1960+ desc[i].buf0 = 0;
1961+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
1962+ desc[i].buf1 = 0;
1963+ desc[i].info = 0;
1964+ }
1965+}
1966+
1967+static u32
1968+mtk_wed_check_busy(struct mtk_wed_device *dev)
1969+{
1970+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
1971+ return true;
1972+
1973+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
1974+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
1975+ return true;
1976+
1977+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
1978+ return true;
1979+
1980+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
1981+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1982+ return true;
1983+
1984+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
1985+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1986+ return true;
1987+
1988+ if (wed_r32(dev, MTK_WED_CTRL) &
1989+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
1990+ return true;
1991+
1992+ return false;
1993+}
1994+
1995+static int
1996+mtk_wed_poll_busy(struct mtk_wed_device *dev)
1997+{
1998+ int sleep = 15000;
1999+ int timeout = 100 * sleep;
2000+ u32 val;
2001+
2002+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
2003+ timeout, false, dev);
2004+}
2005+
2006+static void
2007+mtk_wed_reset_dma(struct mtk_wed_device *dev)
2008+{
2009+ bool busy = false;
2010+ u32 val;
2011+ int i;
2012+
2013+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
2014+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
2015+
2016+ if (!desc)
2017+ continue;
2018+
2019+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
2020+ }
2021+
2022+ if (mtk_wed_poll_busy(dev))
2023+ busy = mtk_wed_check_busy(dev);
2024+
2025+ if (busy) {
2026+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
2027+ } else {
2028+ wed_w32(dev, MTK_WED_RESET_IDX,
2029+ MTK_WED_RESET_IDX_TX |
2030+ MTK_WED_RESET_IDX_RX);
2031+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
2032+ }
2033+
2034+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
2035+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
2036+
2037+ if (busy) {
2038+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
2039+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
2040+ } else {
2041+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
2042+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
2043+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
2044+
2045+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2046+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2047+
2048+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
2049+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2050+ }
2051+
2052+ for (i = 0; i < 100; i++) {
2053+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
2054+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
2055+ break;
2056+ }
2057+
2058+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
2059+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
2060+
2061+ if (busy) {
2062+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
2063+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
2064+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
2065+ } else {
2066+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
2067+ MTK_WED_WPDMA_RESET_IDX_TX |
2068+ MTK_WED_WPDMA_RESET_IDX_RX);
2069+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
2070+ }
2071+
2072+}
2073+
2074+static int
2075+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
2076+ int size)
2077+{
2078+ ring->desc = dma_alloc_coherent(dev->hw->dev,
2079+ size * sizeof(*ring->desc),
2080+ &ring->desc_phys, GFP_KERNEL);
2081+ if (!ring->desc)
2082+ return -ENOMEM;
2083+
2084+ ring->size = size;
2085+ mtk_wed_ring_reset(ring->desc, size);
2086+
2087+ return 0;
2088+}
2089+
2090+static int
2091+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
2092+{
2093+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
2094+
2095+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
2096+ return -ENOMEM;
2097+
2098+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2099+ wdma->desc_phys);
2100+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2101+ size);
2102+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2103+
2104+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2105+ wdma->desc_phys);
2106+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2107+ size);
2108+
2109+ return 0;
2110+}
2111+
2112+static void
2113+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
2114+{
2115+ u32 wdma_mask;
2116+ u32 val;
2117+ int i;
2118+
2119+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
2120+ if (!dev->tx_wdma[i].desc)
2121+ mtk_wed_wdma_ring_setup(dev, i, 16);
2122+
2123+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
2124+
2125+ mtk_wed_hw_init(dev);
2126+
2127+ wed_set(dev, MTK_WED_CTRL,
2128+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
2129+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
2130+ MTK_WED_CTRL_WED_TX_BM_EN |
2131+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
2132+
2133+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
2134+
2135+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
2136+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
2137+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
2138+
2139+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
2140+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
2141+
2142+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
2143+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
2144+
2145+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
2146+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
2147+
2148+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
2149+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
2150+
2151+ wed_set(dev, MTK_WED_GLO_CFG,
2152+ MTK_WED_GLO_CFG_TX_DMA_EN |
2153+ MTK_WED_GLO_CFG_RX_DMA_EN);
2154+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
2155+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
2156+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
2157+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2158+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
2159+
2160+ mtk_wed_set_ext_int(dev, true);
2161+ val = dev->wlan.wpdma_phys |
2162+ MTK_PCIE_MIRROR_MAP_EN |
2163+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
2164+
2165+ if (dev->hw->index)
2166+ val |= BIT(1);
2167+ val |= BIT(0);
2168+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
2169+
2170+ dev->running = true;
2171+}
2172+
2173+static int
2174+mtk_wed_attach(struct mtk_wed_device *dev)
2175+ __releases(RCU)
2176+{
2177+ struct mtk_wed_hw *hw;
2178+ int ret = 0;
2179+
2180+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
2181+ "mtk_wed_attach without holding the RCU read lock");
2182+
2183+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
2184+ !try_module_get(THIS_MODULE))
2185+ ret = -ENODEV;
2186+
2187+ rcu_read_unlock();
2188+
2189+ if (ret)
2190+ return ret;
2191+
2192+ mutex_lock(&hw_lock);
2193+
2194+ hw = mtk_wed_assign(dev);
2195+ if (!hw) {
2196+ module_put(THIS_MODULE);
2197+ ret = -ENODEV;
2198+ goto out;
2199+ }
2200+
2201+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
2202+
2203+ dev->hw = hw;
2204+ dev->dev = hw->dev;
2205+ dev->irq = hw->irq;
2206+ dev->wdma_idx = hw->index;
2207+
2208+ if (hw->eth->dma_dev == hw->eth->dev &&
2209+ of_dma_is_coherent(hw->eth->dev->of_node))
2210+ mtk_eth_set_dma_device(hw->eth, hw->dev);
2211+
2212+ ret = mtk_wed_buffer_alloc(dev);
2213+ if (ret) {
2214+ mtk_wed_detach(dev);
2215+ goto out;
2216+ }
2217+
2218+ mtk_wed_hw_init_early(dev);
2219+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
2220+
2221+out:
2222+ mutex_unlock(&hw_lock);
2223+
2224+ return ret;
2225+}
2226+
2227+static int
2228+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2229+{
2230+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
2231+
2232+ /*
2233+ * Tx ring redirection:
2234+	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA
2235+	 * ring allocated by the WLAN driver gets configured into the WED
2236+	 * MTK_WED_RING_TX(n) registers.
2237+	 *
2238+	 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and
2239+	 * configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
2240+	 * It gets filled with packets picked up from the WED TX ring and from
2241+	 * WDMA RX.
2242+ */
2243+
2244+ BUG_ON(idx > ARRAY_SIZE(dev->tx_ring));
2245+
2246+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
2247+ return -ENOMEM;
2248+
2249+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
2250+ return -ENOMEM;
2251+
2252+ ring->reg_base = MTK_WED_RING_TX(idx);
2253+ ring->wpdma = regs;
2254+
2255+ /* WED -> WPDMA */
2256+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
2257+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
2258+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
2259+
2260+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
2261+ ring->desc_phys);
2262+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
2263+ MTK_WED_TX_RING_SIZE);
2264+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2265+
2266+ return 0;
2267+}
2268+
2269+static int
2270+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2271+{
2272+ struct mtk_wed_ring *ring = &dev->txfree_ring;
2273+ int i;
2274+
2275+ /*
2276+ * For txfree event handling, the same DMA ring is shared between WED
2277+	 * For txfree event handling, the same DMA ring is shared between WED
2278+	 * and WLAN. The WLAN driver accesses the ring index registers through
2279+	 * WED.
2280+ ring->reg_base = MTK_WED_RING_RX(1);
2281+ ring->wpdma = regs;
2282+
2283+ for (i = 0; i < 12; i += 4) {
2284+ u32 val = readl(regs + i);
2285+
2286+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
2287+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
2288+ }
2289+
2290+ return 0;
2291+}
2292+
2293+static u32
2294+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2295+{
2296+ u32 val;
2297+
2298+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2299+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2300+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2301+ if (!dev->hw->num_flows)
2302+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2303+ if (val && net_ratelimit())
2304+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
2305+
2306+ val = wed_r32(dev, MTK_WED_INT_STATUS);
2307+ val &= mask;
2308+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
2309+
2310+ return val;
2311+}
2312+
2313+static void
2314+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
2315+{
2316+ if (!dev->running)
2317+ return;
2318+
2319+ mtk_wed_set_ext_int(dev, !!mask);
2320+ wed_w32(dev, MTK_WED_INT_MASK, mask);
2321+}
2322+
2323+int mtk_wed_flow_add(int index)
2324+{
2325+ struct mtk_wed_hw *hw = hw_list[index];
2326+ int ret;
2327+
2328+ if (!hw || !hw->wed_dev)
2329+ return -ENODEV;
2330+
2331+ if (hw->num_flows) {
2332+ hw->num_flows++;
2333+ return 0;
2334+ }
2335+
2336+ mutex_lock(&hw_lock);
2337+ if (!hw->wed_dev) {
2338+ ret = -ENODEV;
2339+ goto out;
2340+ }
2341+
2342+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
2343+ if (!ret)
2344+ hw->num_flows++;
2345+ mtk_wed_set_ext_int(hw->wed_dev, true);
2346+
2347+out:
2348+ mutex_unlock(&hw_lock);
2349+
2350+ return ret;
2351+}
2352+
2353+void mtk_wed_flow_remove(int index)
2354+{
2355+ struct mtk_wed_hw *hw = hw_list[index];
2356+
2357+ if (!hw)
2358+ return;
2359+
2360+ if (--hw->num_flows)
2361+ return;
2362+
2363+ mutex_lock(&hw_lock);
2364+ if (!hw->wed_dev)
2365+ goto out;
2366+
2367+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
2368+ mtk_wed_set_ext_int(hw->wed_dev, true);
2369+
2370+out:
2371+ mutex_unlock(&hw_lock);
2372+}
2373+
2374+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2375+ void __iomem *wdma, int index)
2376+{
2377+ static const struct mtk_wed_ops wed_ops = {
2378+ .attach = mtk_wed_attach,
2379+ .tx_ring_setup = mtk_wed_tx_ring_setup,
2380+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2381+ .start = mtk_wed_start,
2382+ .stop = mtk_wed_stop,
2383+ .reset_dma = mtk_wed_reset_dma,
2384+ .reg_read = wed_r32,
2385+ .reg_write = wed_w32,
2386+ .irq_get = mtk_wed_irq_get,
2387+ .irq_set_mask = mtk_wed_irq_set_mask,
2388+ .detach = mtk_wed_detach,
2389+ };
2390+ struct device_node *eth_np = eth->dev->of_node;
2391+ struct platform_device *pdev;
2392+ struct mtk_wed_hw *hw;
2393+ struct regmap *regs;
2394+ int irq;
2395+
2396+ if (!np)
2397+ return;
2398+
2399+ pdev = of_find_device_by_node(np);
2400+ if (!pdev)
2401+ return;
2402+
2403+ get_device(&pdev->dev);
2404+ irq = platform_get_irq(pdev, 0);
2405+ if (irq < 0)
2406+ return;
2407+
2408+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
2409+ if (!regs)
2410+ return;
2411+
2412+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
2413+
2414+ mutex_lock(&hw_lock);
2415+
2416+ if (WARN_ON(hw_list[index]))
2417+ goto unlock;
2418+
2419+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
2420+ hw->node = np;
2421+ hw->regs = regs;
2422+ hw->eth = eth;
2423+ hw->dev = &pdev->dev;
2424+ hw->wdma = wdma;
2425+ hw->index = index;
2426+ hw->irq = irq;
2427+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2428+ "mediatek,pcie-mirror");
2429+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2430+ "mediatek,hifsys");
2431+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
2432+ kfree(hw);
2433+ goto unlock;
2434+ }
2435+
2436+ if (!index) {
2437+ regmap_write(hw->mirror, 0, 0);
2438+ regmap_write(hw->mirror, 4, 0);
2439+ }
2440+ mtk_wed_hw_add_debugfs(hw);
2441+
2442+ hw_list[index] = hw;
2443+
2444+unlock:
2445+ mutex_unlock(&hw_lock);
2446+}
2447+
2448+void mtk_wed_exit(void)
2449+{
2450+ int i;
2451+
2452+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
2453+
2454+ synchronize_rcu();
2455+
2456+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
2457+ struct mtk_wed_hw *hw;
2458+
2459+ hw = hw_list[i];
2460+ if (!hw)
2461+ continue;
2462+
2463+ hw_list[i] = NULL;
2464+ debugfs_remove(hw->debugfs_dir);
2465+ put_device(hw->dev);
2466+ kfree(hw);
2467+ }
2468+}
2469diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2470new file mode 100644
2471index 000000000..981ec613f
2472--- /dev/null
2473+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2474@@ -0,0 +1,135 @@
2475+// SPDX-License-Identifier: GPL-2.0-only
2476+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2477+
2478+#ifndef __MTK_WED_PRIV_H
2479+#define __MTK_WED_PRIV_H
2480+
2481+#include <linux/soc/mediatek/mtk_wed.h>
2482+#include <linux/debugfs.h>
2483+#include <linux/regmap.h>
2484+#include <linux/netdevice.h>
2485+
2486+struct mtk_eth;
2487+
2488+struct mtk_wed_hw {
2489+ struct device_node *node;
2490+ struct mtk_eth *eth;
2491+ struct regmap *regs;
2492+ struct regmap *hifsys;
2493+ struct device *dev;
2494+ void __iomem *wdma;
2495+ struct regmap *mirror;
2496+ struct dentry *debugfs_dir;
2497+ struct mtk_wed_device *wed_dev;
2498+ u32 debugfs_reg;
2499+ u32 num_flows;
2500+ char dirname[5];
2501+ int irq;
2502+ int index;
2503+};
2504+
2505+struct mtk_wdma_info {
2506+ u8 wdma_idx;
2507+ u8 queue;
2508+ u16 wcid;
2509+ u8 bss;
2510+};
2511+
2512+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2513+static inline void
2514+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2515+{
2516+ regmap_write(dev->hw->regs, reg, val);
2517+}
2518+
2519+static inline u32
2520+wed_r32(struct mtk_wed_device *dev, u32 reg)
2521+{
2522+ unsigned int val;
2523+
2524+ regmap_read(dev->hw->regs, reg, &val);
2525+
2526+ return val;
2527+}
2528+
2529+static inline void
2530+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2531+{
2532+ writel(val, dev->hw->wdma + reg);
2533+}
2534+
2535+static inline u32
2536+wdma_r32(struct mtk_wed_device *dev, u32 reg)
2537+{
2538+ return readl(dev->hw->wdma + reg);
2539+}
2540+
2541+static inline u32
2542+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
2543+{
2544+ if (!dev->tx_ring[ring].wpdma)
2545+ return 0;
2546+
2547+ return readl(dev->tx_ring[ring].wpdma + reg);
2548+}
2549+
2550+static inline void
2551+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
2552+{
2553+ if (!dev->tx_ring[ring].wpdma)
2554+ return;
2555+
2556+ writel(val, dev->tx_ring[ring].wpdma + reg);
2557+}
2558+
2559+static inline u32
2560+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
2561+{
2562+ if (!dev->txfree_ring.wpdma)
2563+ return 0;
2564+
2565+ return readl(dev->txfree_ring.wpdma + reg);
2566+}
2567+
2568+static inline void
2569+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2570+{
2571+ if (!dev->txfree_ring.wpdma)
2572+ return;
2573+
2574+ writel(val, dev->txfree_ring.wpdma + reg);
2575+}
2576+
2577+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2578+ void __iomem *wdma, int index);
2579+void mtk_wed_exit(void);
2580+int mtk_wed_flow_add(int index);
2581+void mtk_wed_flow_remove(int index);
2582+#else
2583+static inline void
2584+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2585+ void __iomem *wdma, int index)
2586+{
2587+}
2588+static inline void
2589+mtk_wed_exit(void)
2590+{
2591+}
2592+static inline int mtk_wed_flow_add(int index)
2593+{
2594+ return -EINVAL;
2595+}
2596+static inline void mtk_wed_flow_remove(int index)
2597+{
2598+}
2599+#endif
2600+
2601+#ifdef CONFIG_DEBUG_FS
2602+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
2603+#else
2604+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2605+{
2606+}
2607+#endif
2608+
2609+#endif
2610diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2611new file mode 100644
2612index 000000000..a81d3fd1a
2613--- /dev/null
2614+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2615@@ -0,0 +1,175 @@
2616+// SPDX-License-Identifier: GPL-2.0-only
2617+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2618+
2619+#include <linux/seq_file.h>
2620+#include "mtk_wed.h"
2621+#include "mtk_wed_regs.h"
2622+
2623+struct reg_dump {
2624+ const char *name;
2625+ u16 offset;
2626+ u8 type;
2627+ u8 base;
2628+};
2629+
2630+enum {
2631+ DUMP_TYPE_STRING,
2632+ DUMP_TYPE_WED,
2633+ DUMP_TYPE_WDMA,
2634+ DUMP_TYPE_WPDMA_TX,
2635+ DUMP_TYPE_WPDMA_TXFREE,
2636+};
2637+
2638+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2639+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2640+#define DUMP_RING(_prefix, _base, ...) \
2641+ { _prefix " BASE", _base, __VA_ARGS__ }, \
2642+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2643+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
2644+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2645+
2646+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2647+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2648+
2649+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2650+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
2651+
2652+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
2653+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
2654+
2655+static void
2656+print_reg_val(struct seq_file *s, const char *name, u32 val)
2657+{
2658+ seq_printf(s, "%-32s %08x\n", name, val);
2659+}
2660+
2661+static void
2662+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2663+ const struct reg_dump *regs, int n_regs)
2664+{
2665+ const struct reg_dump *cur;
2666+ u32 val;
2667+
2668+ for (cur = regs; cur < &regs[n_regs]; cur++) {
2669+ switch (cur->type) {
2670+ case DUMP_TYPE_STRING:
2671+ seq_printf(s, "%s======== %s:\n",
2672+ cur > regs ? "\n" : "",
2673+ cur->name);
2674+ continue;
2675+ case DUMP_TYPE_WED:
2676+ val = wed_r32(dev, cur->offset);
2677+ break;
2678+ case DUMP_TYPE_WDMA:
2679+ val = wdma_r32(dev, cur->offset);
2680+ break;
2681+ case DUMP_TYPE_WPDMA_TX:
2682+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2683+ break;
2684+ case DUMP_TYPE_WPDMA_TXFREE:
2685+ val = wpdma_txfree_r32(dev, cur->offset);
2686+ break;
2687+ }
2688+ print_reg_val(s, cur->name, val);
2689+ }
2690+}
2691+
2692+
2693+static int
2694+wed_txinfo_show(struct seq_file *s, void *data)
2695+{
2696+ static const struct reg_dump regs[] = {
2697+ DUMP_STR("WED TX"),
2698+ DUMP_WED(WED_TX_MIB(0)),
2699+ DUMP_WED_RING(WED_RING_TX(0)),
2700+
2701+ DUMP_WED(WED_TX_MIB(1)),
2702+ DUMP_WED_RING(WED_RING_TX(1)),
2703+
2704+ DUMP_STR("WPDMA TX"),
2705+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
2706+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
2707+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
2708+
2709+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
2710+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
2711+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
2712+
2713+ DUMP_STR("WPDMA TX"),
2714+ DUMP_WPDMA_TX_RING(0),
2715+ DUMP_WPDMA_TX_RING(1),
2716+
2717+ DUMP_STR("WED WDMA RX"),
2718+ DUMP_WED(WED_WDMA_RX_MIB(0)),
2719+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
2720+ DUMP_WED(WED_WDMA_RX_THRES(0)),
2721+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
2722+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
2723+
2724+ DUMP_WED(WED_WDMA_RX_MIB(1)),
2725+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
2726+ DUMP_WED(WED_WDMA_RX_THRES(1)),
2727+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
2728+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
2729+
2730+ DUMP_STR("WDMA RX"),
2731+ DUMP_WDMA(WDMA_GLO_CFG),
2732+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
2733+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
2734+ };
2735+ struct mtk_wed_hw *hw = s->private;
2736+ struct mtk_wed_device *dev = hw->wed_dev;
2737+
2738+ if (!dev)
2739+ return 0;
2740+
2741+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2742+
2743+ return 0;
2744+}
2745+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2746+
2747+
2748+static int
2749+mtk_wed_reg_set(void *data, u64 val)
2750+{
2751+ struct mtk_wed_hw *hw = data;
2752+
2753+ regmap_write(hw->regs, hw->debugfs_reg, val);
2754+
2755+ return 0;
2756+}
2757+
2758+static int
2759+mtk_wed_reg_get(void *data, u64 *val)
2760+{
2761+ struct mtk_wed_hw *hw = data;
2762+ unsigned int regval;
2763+ int ret;
2764+
2765+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
2766+ if (ret)
2767+ return ret;
2768+
2769+ *val = regval;
2770+
2771+ return 0;
2772+}
2773+
2774+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2775+ "0x%08llx\n");
2776+
2777+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2778+{
2779+ struct dentry *dir;
2780+
2781+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
2782+ dir = debugfs_create_dir(hw->dirname, NULL);
2783+ if (!dir)
2784+ return;
2785+
2786+ hw->debugfs_dir = dir;
2787+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2788+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2789+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2790+}
2791diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2792new file mode 100644
2793index 000000000..a5d9d8a5b
2794--- /dev/null
2795+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2796@@ -0,0 +1,8 @@
2797+// SPDX-License-Identifier: GPL-2.0-only
2798+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2799+
2800+#include <linux/kernel.h>
2801+#include <linux/soc/mediatek/mtk_wed.h>
2802+
2803+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2804+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
2805diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2806new file mode 100644
2807index 000000000..0a0465ea5
2808--- /dev/null
2809+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2810@@ -0,0 +1,251 @@
2811+// SPDX-License-Identifier: GPL-2.0-only
2812+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2813+
2814+#ifndef __MTK_WED_REGS_H
2815+#define __MTK_WED_REGS_H
2816+
2817+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2818+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2819+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
2820+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2821+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2822+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2823+
2824+struct mtk_wdma_desc {
2825+ __le32 buf0;
2826+ __le32 ctrl;
2827+ __le32 buf1;
2828+ __le32 info;
2829+} __packed __aligned(4);
2830+
2831+#define MTK_WED_RESET 0x008
2832+#define MTK_WED_RESET_TX_BM BIT(0)
2833+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
2834+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
2835+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
2836+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
2837+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
2838+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2839+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2840+#define MTK_WED_RESET_WED BIT(31)
2841+
2842+#define MTK_WED_CTRL 0x00c
2843+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
2844+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
2845+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
2846+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
2847+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
2848+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2849+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2850+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2851+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2852+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2853+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2854+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2855+
2856+#define MTK_WED_EXT_INT_STATUS 0x020
2857+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
2858+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
2859+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
2860+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
2861+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
2862+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2863+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2864+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2865+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2866+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2867+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
2868+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
2869+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
2870+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
2871+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
2872+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2873+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2874+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2875+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2876+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2877+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2878+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
2879+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
2880+
2881+#define MTK_WED_EXT_INT_MASK 0x028
2882+
2883+#define MTK_WED_STATUS 0x060
2884+#define MTK_WED_STATUS_TX GENMASK(15, 8)
2885+
2886+#define MTK_WED_TX_BM_CTRL 0x080
2887+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
2888+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
2889+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
2890+
2891+#define MTK_WED_TX_BM_BASE 0x084
2892+
2893+#define MTK_WED_TX_BM_TKID 0x088
2894+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
2895+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
2896+
2897+#define MTK_WED_TX_BM_BUF_LEN 0x08c
2898+
2899+#define MTK_WED_TX_BM_INTF 0x09c
2900+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
2901+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
2902+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
2903+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
2904+
2905+#define MTK_WED_TX_BM_DYN_THR 0x0a0
2906+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
2907+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
2908+
2909+#define MTK_WED_INT_STATUS 0x200
2910+#define MTK_WED_INT_MASK 0x204
2911+
2912+#define MTK_WED_GLO_CFG 0x208
2913+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
2914+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
2915+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
2916+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
2917+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2918+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
2919+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
2920+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2921+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2922+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2923+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2924+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2925+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2926+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
2927+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2928+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
2929+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
2930+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
2931+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
2932+
2933+#define MTK_WED_RESET_IDX 0x20c
2934+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
2935+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
2936+
2937+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
2938+
2939+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
2940+
2941+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2942+
2943+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
2944+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2945+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2946+
2947+#define MTK_WED_WPDMA_GLO_CFG 0x508
2948+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
2949+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
2950+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
2951+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2952+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2953+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2954+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
2955+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2956+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2957+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2958+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2959+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2960+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2961+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
2962+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2963+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
2964+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
2965+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
2966+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
2967+
2968+#define MTK_WED_WPDMA_RESET_IDX 0x50c
2969+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
2970+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
2971+
2972+#define MTK_WED_WPDMA_INT_CTRL 0x520
2973+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
2974+
2975+#define MTK_WED_WPDMA_INT_MASK 0x524
2976+
2977+#define MTK_WED_PCIE_CFG_BASE 0x560
2978+
2979+#define MTK_WED_PCIE_INT_TRIGGER 0x570
2980+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
2981+
2982+#define MTK_WED_WPDMA_CFG_BASE 0x580
2983+
2984+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2985+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2986+
2987+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2988+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2989+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2990+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2991+
2992+#define MTK_WED_WDMA_GLO_CFG 0xa04
2993+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2994+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2995+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2996+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2997+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2998+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
2999+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
3000+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
3001+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
3002+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
3003+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
3004+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
3005+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
3006+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
3007+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
3008+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
3009+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
3010+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
3011+
3012+#define MTK_WED_WDMA_RESET_IDX 0xa08
3013+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
3014+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
3015+
3016+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
3017+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3018+
3019+#define MTK_WED_WDMA_INT_CTRL 0xa2c
3020+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
3021+
3022+#define MTK_WED_WDMA_OFFSET0 0xaa4
3023+#define MTK_WED_WDMA_OFFSET1 0xaa8
3024+
3025+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
3026+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
3027+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
3028+
3029+#define MTK_WED_RING_OFS_BASE 0x00
3030+#define MTK_WED_RING_OFS_COUNT 0x04
3031+#define MTK_WED_RING_OFS_CPU_IDX 0x08
3032+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
3033+
3034+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
3035+
3036+#define MTK_WDMA_GLO_CFG 0x204
3037+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
3038+
3039+#define MTK_WDMA_RESET_IDX 0x208
3040+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
3041+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
3042+
3043+#define MTK_WDMA_INT_MASK 0x228
3044+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
3045+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
3046+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
3047+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
3048+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
3049+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
3050+
3051+#define MTK_WDMA_INT_GRP1 0x250
3052+#define MTK_WDMA_INT_GRP2 0x254
3053+
3054+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3055+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3056+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3057+
3058+/* DMA channel mapping */
3059+#define HIFSYS_DMA_AG_MAP 0x008
3060+
3061+#endif
3062diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3063index 9f64504ac..35998b1a7 100644
3064--- a/include/linux/netdevice.h
3065+++ b/include/linux/netdevice.h
3066@@ -835,6 +835,7 @@ enum net_device_path_type {
3067 DEV_PATH_BRIDGE,
3068 DEV_PATH_PPPOE,
3069 DEV_PATH_DSA,
3070+ DEV_PATH_MTK_WDMA,
3071 };
3072
3073 struct net_device_path {
3074@@ -860,6 +861,12 @@ struct net_device_path {
3075 int port;
3076 u16 proto;
3077 } dsa;
3078+ struct {
3079+ u8 wdma_idx;
3080+ u8 queue;
3081+ u16 wcid;
3082+ u8 bss;
3083+ } mtk_wdma;
3084 };
3085 };
3086
3087diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3088new file mode 100644
3089index 000000000..7e00cca06
3090--- /dev/null
3091+++ b/include/linux/soc/mediatek/mtk_wed.h
3092@@ -0,0 +1,131 @@
3093+#ifndef __MTK_WED_H
3094+#define __MTK_WED_H
3095+
3096+#include <linux/kernel.h>
3097+#include <linux/rcupdate.h>
3098+#include <linux/regmap.h>
3099+#include <linux/pci.h>
3100+
3101+#define MTK_WED_TX_QUEUES 2
3102+
3103+struct mtk_wed_hw;
3104+struct mtk_wdma_desc;
3105+
3106+struct mtk_wed_ring {
3107+ struct mtk_wdma_desc *desc;
3108+ dma_addr_t desc_phys;
3109+ int size;
3110+
3111+ u32 reg_base;
3112+ void __iomem *wpdma;
3113+};
3114+
3115+struct mtk_wed_device {
3116+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3117+ const struct mtk_wed_ops *ops;
3118+ struct device *dev;
3119+ struct mtk_wed_hw *hw;
3120+ bool init_done, running;
3121+ int wdma_idx;
3122+ int irq;
3123+
3124+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3125+ struct mtk_wed_ring txfree_ring;
3126+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3127+
3128+ struct {
3129+ int size;
3130+ void **pages;
3131+ struct mtk_wdma_desc *desc;
3132+ dma_addr_t desc_phys;
3133+ } buf_ring;
3134+
3135+ /* filled by driver: */
3136+ struct {
3137+ struct pci_dev *pci_dev;
3138+
3139+ u32 wpdma_phys;
3140+
3141+ u16 token_start;
3142+ unsigned int nbuf;
3143+
3144+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3145+ int (*offload_enable)(struct mtk_wed_device *wed);
3146+ void (*offload_disable)(struct mtk_wed_device *wed);
3147+ } wlan;
3148+#endif
3149+};
3150+
3151+struct mtk_wed_ops {
3152+ int (*attach)(struct mtk_wed_device *dev);
3153+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
3154+ void __iomem *regs);
3155+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3156+ void __iomem *regs);
3157+ void (*detach)(struct mtk_wed_device *dev);
3158+
3159+ void (*stop)(struct mtk_wed_device *dev);
3160+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
3161+ void (*reset_dma)(struct mtk_wed_device *dev);
3162+
3163+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
3164+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
3165+
3166+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3167+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3168+};
3169+
3170+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3171+
3172+static inline int
3173+mtk_wed_device_attach(struct mtk_wed_device *dev)
3174+{
3175+ int ret = -ENODEV;
3176+
3177+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3178+ rcu_read_lock();
3179+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
3180+ if (dev->ops)
3181+ ret = dev->ops->attach(dev);
3182+ else
3183+ rcu_read_unlock();
3184+
3185+ if (ret)
3186+ dev->ops = NULL;
3187+#endif
3188+
3189+ return ret;
3190+}
3191+
3192+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3193+#define mtk_wed_device_active(_dev) !!(_dev)->ops
3194+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3195+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
3196+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
3197+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3198+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3199+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
3200+#define mtk_wed_device_reg_read(_dev, _reg) \
3201+ (_dev)->ops->reg_read(_dev, _reg)
3202+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
3203+ (_dev)->ops->reg_write(_dev, _reg, _val)
3204+#define mtk_wed_device_irq_get(_dev, _mask) \
3205+ (_dev)->ops->irq_get(_dev, _mask)
3206+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
3207+ (_dev)->ops->irq_set_mask(_dev, _mask)
3208+#else
3209+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3210+{
3211+ return false;
3212+}
3213+#define mtk_wed_device_detach(_dev) do {} while (0)
3214+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
3215+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3216+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
3217+#define mtk_wed_device_reg_read(_dev, _reg) 0
3218+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3219+#define mtk_wed_device_irq_get(_dev, _mask) 0
3220+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3221+#endif
3222+
3223+#endif
3224diff --git a/net/core/dev.c b/net/core/dev.c
3225index 4f0edb218..031ac7c6f 100644
3226--- a/net/core/dev.c
3227+++ b/net/core/dev.c
3228@@ -675,6 +675,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3229 if (WARN_ON_ONCE(last_dev == ctx.dev))
3230 return -1;
3231 }
3232+
3233+ if (!ctx.dev)
3234+ return ret;
3235+
3236 path = dev_fwd_path(stack);
3237 if (!path)
3238 return -1;
3239--
32402.18.0
3241