1From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 2 Jun 2022 15:32:07 +0800
4Subject: [PATCH 4/8] 9993-add-wed
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 arch/arm64/boot/dts/mediatek/mt7622.dtsi | 32 +-
9 drivers/net/ethernet/mediatek/Kconfig | 4 +
10 drivers/net/ethernet/mediatek/Makefile | 5 +
11 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 136 ++-
12 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14 +-
13 drivers/net/ethernet/mediatek/mtk_ppe.c | 373 +++++++-
14 drivers/net/ethernet/mediatek/mtk_ppe.h | 89 +-
15 .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 4 +-
16 .../net/ethernet/mediatek/mtk_ppe_offload.c | 167 +++-
17 drivers/net/ethernet/mediatek/mtk_wed.c | 876 ++++++++++++++++++
18 drivers/net/ethernet/mediatek/mtk_wed.h | 135 +++
19 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 175 ++++
20 drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8 +
21 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251 +++++
22 include/linux/netdevice.h | 7 +
23 include/linux/soc/mediatek/mtk_wed.h | 131 +++
24 net/core/dev.c | 4 +
25 17 files changed, 2283 insertions(+), 128 deletions(-)
26 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig
27 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile
28 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
29 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
30 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c
31 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
32 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
33 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
34 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
35 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
36 create mode 100644 include/linux/soc/mediatek/mtk_wed.h
37
38diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
39index 369e01389..d0fbc367e 100644
40--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
41+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
42@@ -338,7 +338,7 @@
43 };
44
45 cci_control2: slave-if@5000 {
46- compatible = "arm,cci-400-ctrl-if";
47+ compatible = "arm,cci-400-ctrl-if", "syscon";
48 interface-type = "ace";
49 reg = <0x5000 0x1000>;
50 };
51@@ -920,6 +920,11 @@
52 };
53 };
54
55+ hifsys: syscon@1af00000 {
56+ compatible = "mediatek,mt7622-hifsys", "syscon";
57+ reg = <0 0x1af00000 0 0x70>;
58+ };
59+
60 ethsys: syscon@1b000000 {
61 compatible = "mediatek,mt7622-ethsys",
62 "syscon";
63@@ -938,6 +943,26 @@
64 #dma-cells = <1>;
65 };
66
67+ pcie_mirror: pcie-mirror@10000400 {
68+ compatible = "mediatek,mt7622-pcie-mirror",
69+ "syscon";
70+ reg = <0 0x10000400 0 0x10>;
71+ };
72+
73+ wed0: wed@1020a000 {
74+ compatible = "mediatek,mt7622-wed",
75+ "syscon";
76+ reg = <0 0x1020a000 0 0x1000>;
77+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
78+ };
79+
80+ wed1: wed@1020b000 {
81+ compatible = "mediatek,mt7622-wed",
82+ "syscon";
83+ reg = <0 0x1020b000 0 0x1000>;
84+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
85+ };
86+
87 eth: ethernet@1b100000 {
88 compatible = "mediatek,mt7622-eth",
89 "mediatek,mt2701-eth",
90@@ -964,6 +989,11 @@
91 power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
92 mediatek,ethsys = <&ethsys>;
93 mediatek,sgmiisys = <&sgmiisys>;
94+ mediatek,cci-control = <&cci_control2>;
95+ mediatek,wed = <&wed0>, <&wed1>;
96+ mediatek,pcie-mirror = <&pcie_mirror>;
97+ mediatek,hifsys = <&hifsys>;
98+ dma-coherent;
99 #address-cells = <1>;
100 #size-cells = <0>;
101 status = "disabled";
102diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
103old mode 100755
104new mode 100644
105index 42e6b38d2..8ab6615a3
106--- a/drivers/net/ethernet/mediatek/Kconfig
107+++ b/drivers/net/ethernet/mediatek/Kconfig
108@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
109
110 if NET_VENDOR_MEDIATEK
111
112+config NET_MEDIATEK_SOC_WED
113+ depends on ARCH_MEDIATEK || COMPILE_TEST
114+ def_bool NET_MEDIATEK_SOC != n
115+
116 config NET_MEDIATEK_SOC
117 tristate "MediaTek SoC Gigabit Ethernet support"
118 select PHYLINK
119diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
120old mode 100755
121new mode 100644
122index 0a6af99f1..3528f1b3c
123--- a/drivers/net/ethernet/mediatek/Makefile
124+++ b/drivers/net/ethernet/mediatek/Makefile
125@@ -6,4 +6,9 @@
126 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
127 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
128 mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
129+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
130+ifdef CONFIG_DEBUG_FS
131+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
132+endif
133+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
134 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
135diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
136old mode 100755
137new mode 100644
138index 819d8a0be..2121335a1
139--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
140+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
141@@ -9,6 +9,7 @@
142 #include <linux/of_device.h>
143 #include <linux/of_mdio.h>
144 #include <linux/of_net.h>
145+#include <linux/of_address.h>
146 #include <linux/mfd/syscon.h>
147 #include <linux/regmap.h>
148 #include <linux/clk.h>
149@@ -19,13 +20,15 @@
150 #include <linux/interrupt.h>
151 #include <linux/pinctrl/devinfo.h>
152 #include <linux/phylink.h>
153 #include <linux/gpio/consumer.h>
154+#include <linux/bitfield.h>
155 #include <net/dsa.h>
156
157 #include "mtk_eth_soc.h"
158 #include "mtk_eth_dbg.h"
159 #include "mtk_eth_reset.h"
160 #include "mtk_hnat/hnat.h"
161+#include "mtk_wed.h"
162
163 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
164 #include "mtk_hnat/nf_hnat_mtk.h"
165@@ -850,7 +853,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
166 int i;
167
168 if (!eth->soc->has_sram) {
169- eth->scratch_ring = dma_alloc_coherent(eth->dev,
170+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
171 cnt * sizeof(struct mtk_tx_dma),
172 &eth->phy_scratch_ring,
173 GFP_ATOMIC);
174@@ -866,10 +869,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
175 if (unlikely(!eth->scratch_head))
176 return -ENOMEM;
177
178- dma_addr = dma_map_single(eth->dev,
179+ dma_addr = dma_map_single(eth->dma_dev,
180 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
181 DMA_FROM_DEVICE);
182- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
183+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
184 return -ENOMEM;
185
186 phy_ring_tail = eth->phy_scratch_ring +
187@@ -933,26 +936,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
188 {
189 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
190 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
191- dma_unmap_single(eth->dev,
192+ dma_unmap_single(eth->dma_dev,
193 dma_unmap_addr(tx_buf, dma_addr0),
194 dma_unmap_len(tx_buf, dma_len0),
195 DMA_TO_DEVICE);
196 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
197- dma_unmap_page(eth->dev,
198+ dma_unmap_page(eth->dma_dev,
199 dma_unmap_addr(tx_buf, dma_addr0),
200 dma_unmap_len(tx_buf, dma_len0),
201 DMA_TO_DEVICE);
202 }
203 } else {
204 if (dma_unmap_len(tx_buf, dma_len0)) {
205- dma_unmap_page(eth->dev,
206+ dma_unmap_page(eth->dma_dev,
207 dma_unmap_addr(tx_buf, dma_addr0),
208 dma_unmap_len(tx_buf, dma_len0),
209 DMA_TO_DEVICE);
210 }
211
212 if (dma_unmap_len(tx_buf, dma_len1)) {
213- dma_unmap_page(eth->dev,
214+ dma_unmap_page(eth->dma_dev,
215 dma_unmap_addr(tx_buf, dma_addr1),
216 dma_unmap_len(tx_buf, dma_len1),
217 DMA_TO_DEVICE);
218@@ -1017,9 +1020,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
219 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
220 memset(itx_buf, 0, sizeof(*itx_buf));
221
222- mapped_addr = dma_map_single(eth->dev, skb->data,
223+ mapped_addr = dma_map_single(eth->dma_dev, skb->data,
224 skb_headlen(skb), DMA_TO_DEVICE);
225- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
226+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
227 return -ENOMEM;
228
229 WRITE_ONCE(itxd->txd1, mapped_addr);
230@@ -1114,10 +1117,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
231
232
233 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
234- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
235+ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
236 frag_map_size,
237 DMA_TO_DEVICE);
238- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
239+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
240 goto err_dma;
241
242 if (i == nr_frags - 1 &&
243@@ -1384,6 +1387,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
244 struct net_device *netdev;
245 unsigned int pktlen;
246 dma_addr_t dma_addr;
247+ u32 hash, reason;
248 int mac;
249
250 if (eth->hwlro)
251@@ -1427,18 +1431,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
252 netdev->stats.rx_dropped++;
253 goto release_desc;
254 }
255- dma_addr = dma_map_single(eth->dev,
256+ dma_addr = dma_map_single(eth->dma_dev,
257 new_data + NET_SKB_PAD +
258 eth->ip_align,
259 ring->buf_size,
260 DMA_FROM_DEVICE);
261- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
262+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
263 skb_free_frag(new_data);
264 netdev->stats.rx_dropped++;
265 goto release_desc;
266 }
267
268- dma_unmap_single(eth->dev, trxd.rxd1,
269+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
270 ring->buf_size, DMA_FROM_DEVICE);
271
272 /* receive data */
273@@ -1463,6 +1467,17 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
274 skb_checksum_none_assert(skb);
275 skb->protocol = eth_type_trans(skb, netdev);
276
277+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
278+ if (hash != MTK_RXD4_FOE_ENTRY) {
279+ hash = jhash_1word(hash, 0);
280+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
281+ }
282+
283+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
284+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
285+ mtk_ppe_check_skb(eth->ppe, skb,
286+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
287+
288 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
289 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
290 if (trxd.rxd3 & RX_DMA_VTAG_V2)
291@@ -1748,7 +1763,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
292 goto no_tx_mem;
293
294 if (!eth->soc->has_sram)
295- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
296+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
297 &ring->phys, GFP_ATOMIC);
298 else {
299 ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
300@@ -1780,7 +1795,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
301 * descriptors in ring->dma_pdma.
302 */
303 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
304- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
305+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
306 &ring->phys_pdma,
307 GFP_ATOMIC);
308 if (!ring->dma_pdma)
309@@ -1839,7 +1854,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
310 }
311
312 if (!eth->soc->has_sram && ring->dma) {
313- dma_free_coherent(eth->dev,
314+ dma_free_coherent(eth->dma_dev,
315 MTK_DMA_SIZE * sizeof(*ring->dma),
316 ring->dma,
317 ring->phys);
318@@ -1847,7 +1862,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
319 }
320
321 if (ring->dma_pdma) {
322- dma_free_coherent(eth->dev,
323+ dma_free_coherent(eth->dma_dev,
324 MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
325 ring->dma_pdma,
326 ring->phys_pdma);
327@@ -1892,7 +1907,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
328
329 if ((!eth->soc->has_sram) || (eth->soc->has_sram
330 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
331- ring->dma = dma_alloc_coherent(eth->dev,
332+ ring->dma = dma_alloc_coherent(eth->dma_dev,
333 rx_dma_size * sizeof(*ring->dma),
334 &ring->phys, GFP_ATOMIC);
335 else {
336@@ -1907,11 +1922,11 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
337 return -ENOMEM;
338
339 for (i = 0; i < rx_dma_size; i++) {
340- dma_addr_t dma_addr = dma_map_single(eth->dev,
341+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
342 ring->data[i] + NET_SKB_PAD + eth->ip_align,
343 ring->buf_size,
344 DMA_FROM_DEVICE);
345- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
346+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
347 return -ENOMEM;
348 ring->dma[i].rxd1 = (unsigned int)dma_addr;
349
350@@ -1968,7 +1983,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
351 continue;
352 if (!ring->dma[i].rxd1)
353 continue;
354- dma_unmap_single(eth->dev,
355+ dma_unmap_single(eth->dma_dev,
356 ring->dma[i].rxd1,
357 ring->buf_size,
358 DMA_FROM_DEVICE);
359@@ -1982,7 +1997,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
360 return;
361
362 if (ring->dma) {
363- dma_free_coherent(eth->dev,
364+ dma_free_coherent(eth->dma_dev,
365 ring->dma_size * sizeof(*ring->dma),
366 ring->dma,
367 ring->phys);
368@@ -2462,7 +2477,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
369 if (eth->netdev[i])
370 netdev_reset_queue(eth->netdev[i]);
371 if ( !eth->soc->has_sram && eth->scratch_ring) {
372- dma_free_coherent(eth->dev,
373+ dma_free_coherent(eth->dma_dev,
374 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
375 eth->scratch_ring,
376 eth->phy_scratch_ring);
377@@ -2661,7 +2676,7 @@ static int mtk_open(struct net_device *dev)
378 if (err)
379 return err;
380
381- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
382+ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
383 gdm_config = MTK_GDMA_TO_PPE;
384
385 mtk_gdm_config(eth, gdm_config);
386@@ -2778,7 +2793,7 @@ static int mtk_stop(struct net_device *dev)
387 mtk_dma_free(eth);
388
389 if (eth->soc->offload_version)
390- mtk_ppe_stop(&eth->ppe);
391+ mtk_ppe_stop(eth->ppe);
392
393 return 0;
394 }
395@@ -2855,6 +2870,8 @@ static int mtk_napi_init(struct mtk_eth *eth)
396
397 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
398 {
399+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
400+ ETHSYS_DMA_AG_MAP_PPE;
401 int i, ret = 0;
402
403 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
404@@ -2872,6 +2889,10 @@ static int mtk_hw_init(struct mtk_eth *eth, u32 type)
405 goto err_disable_pm;
406 }
407
408+ if (eth->ethsys)
409+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
410+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
411+
412 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
413 ret = device_reset(eth->dev);
414 if (ret) {
415@@ -3501,6 +3522,35 @@ free_netdev:
416 return err;
417 }
418
419+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
420+{
421+ struct net_device *dev, *tmp;
422+ LIST_HEAD(dev_list);
423+ int i;
424+
425+ rtnl_lock();
426+
427+ for (i = 0; i < MTK_MAC_COUNT; i++) {
428+ dev = eth->netdev[i];
429+
430+ if (!dev || !(dev->flags & IFF_UP))
431+ continue;
432+
433+ list_add_tail(&dev->close_list, &dev_list);
434+ }
435+
436+ dev_close_many(&dev_list, false);
437+
438+ eth->dma_dev = dma_dev;
439+
440+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
441+ list_del_init(&dev->close_list);
442+ dev_open(dev, NULL);
443+ }
444+
445+ rtnl_unlock();
446+}
447+
448 static int mtk_probe(struct platform_device *pdev)
449 {
450 struct device_node *mac_np;
451@@ -3514,6 +3564,7 @@ static int mtk_probe(struct platform_device *pdev)
452 eth->soc = of_device_get_match_data(&pdev->dev);
453
454 eth->dev = &pdev->dev;
455+ eth->dma_dev = &pdev->dev;
456 eth->base = devm_platform_ioremap_resource(pdev, 0);
457 if (IS_ERR(eth->base))
458 return PTR_ERR(eth->base);
459@@ -3567,6 +3618,16 @@ static int mtk_probe(struct platform_device *pdev)
460 }
461 }
462
463+ if (of_dma_is_coherent(pdev->dev.of_node)) {
464+ struct regmap *cci;
465+
466+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
467+ "mediatek,cci-control");
468+ /* enable CPU/bus coherency */
469+ if (!IS_ERR(cci))
470+ regmap_write(cci, 0, 3);
471+ }
472+
473 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
474 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
475 GFP_KERNEL);
476@@ -3589,6 +3650,22 @@ static int mtk_probe(struct platform_device *pdev)
477 }
478 }
479
480+ for (i = 0;; i++) {
481+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
482+ "mediatek,wed", i);
483+ static const u32 wdma_regs[] = {
484+ MTK_WDMA0_BASE,
485+ MTK_WDMA1_BASE
486+ };
487+ void __iomem *wdma;
488+
489+ if (!np || i >= ARRAY_SIZE(wdma_regs))
490+ break;
491+
492+ wdma = eth->base + wdma_regs[i];
493+ mtk_wed_add_hw(np, eth, wdma, i);
494+ }
495+
496 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
497 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
498 eth->irq[i] = eth->irq[0];
499@@ -3692,10 +3769,11 @@ static int mtk_probe(struct platform_device *pdev)
500 }
501
502 if (eth->soc->offload_version) {
503- err = mtk_ppe_init(&eth->ppe, eth->dev,
504- eth->base + MTK_ETH_PPE_BASE, 2);
505- if (err)
506+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
507+ if (!eth->ppe) {
508+ err = -ENOMEM;
509 goto err_free_dev;
510+ }
511
512 err = mtk_eth_offload_init(eth);
513 if (err)
514diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
515old mode 100755
516new mode 100644
517index 349f98503..b52378bd6
518--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
519+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
520@@ -517,6 +517,9 @@
521 #define RX_DMA_SPORT_MASK 0x7
522 #endif
523
524+#define MTK_WDMA0_BASE 0x2800
525+#define MTK_WDMA1_BASE 0x2c00
526+
527 /* QDMA descriptor txd4 */
528 #define TX_DMA_CHKSUM (0x7 << 29)
529 #define TX_DMA_TSO BIT(28)
530@@ -704,6 +707,12 @@
531 #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
532
533
534+/* ethernet dma channel agent map */
535+#define ETHSYS_DMA_AG_MAP 0x408
536+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
537+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
538+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
539+
540 /* SGMII subsystem config registers */
541 /* Register to auto-negotiation restart */
542 #define SGMSYS_PCS_CONTROL_1 0x0
543@@ -1209,6 +1218,7 @@ struct mtk_reset_event {
544 /* struct mtk_eth - This is the main datasructure for holding the state
545 * of the driver
546 * @dev: The device pointer
547+ * @dma_dev: The device pointer used for dma mapping/alloc
548 * @base: The mapped register i/o base
549 * @page_lock: Make sure that register operations are atomic
550 * @tx_irq_lock: Make sure that IRQ register operations are atomic
551@@ -1243,6 +1253,7 @@ struct mtk_reset_event {
552
553 struct mtk_eth {
554 struct device *dev;
555+ struct device *dma_dev;
556 void __iomem *base;
557 spinlock_t page_lock;
558 spinlock_t tx_irq_lock;
559@@ -1283,7 +1294,7 @@ struct mtk_eth {
560 spinlock_t syscfg0_lock;
561 struct timer_list mtk_dma_monitor_timer;
562
563- struct mtk_ppe ppe;
564+ struct mtk_ppe *ppe;
565 struct rhashtable flow_table;
566 };
567
568@@ -1336,5 +1347,6 @@ void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
569 int mtk_eth_offload_init(struct mtk_eth *eth);
570 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
571 void *type_data);
572+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
573
574 #endif /* MTK_ETH_H */
575diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
576old mode 100644
577new mode 100755
578index 66298e223..3d75c22be
579--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
580+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
581@@ -6,9 +6,22 @@
582 #include <linux/iopoll.h>
583 #include <linux/etherdevice.h>
584 #include <linux/platform_device.h>
585+#include <linux/if_ether.h>
586+#include <linux/if_vlan.h>
587+#include <net/dsa.h>
588+#include "mtk_eth_soc.h"
589 #include "mtk_ppe.h"
590 #include "mtk_ppe_regs.h"
591
592+static DEFINE_SPINLOCK(ppe_lock);
593+
594+static const struct rhashtable_params mtk_flow_l2_ht_params = {
595+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
596+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
597+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
598+ .automatic_shrinking = true,
599+};
600+
601 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
602 {
603 writel(val, ppe->base + reg);
604@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
605 return ppe_m32(ppe, reg, val, 0);
606 }
607
608+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
609+{
610+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
611+}
612+
613 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
614 {
615 int ret;
616@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
617 u32 hash;
618
619 switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
620- case MTK_PPE_PKT_TYPE_BRIDGE:
621- hv1 = e->bridge.src_mac_lo;
622- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
623- hv2 = e->bridge.src_mac_hi >> 16;
624- hv2 ^= e->bridge.dest_mac_lo;
625- hv3 = e->bridge.dest_mac_hi;
626- break;
627 case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
628 case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
629 hv1 = e->ipv4.orig.ports;
630@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
631 {
632 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
633
634+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
635+ return &entry->bridge.l2;
636+
637 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
638 return &entry->ipv6.l2;
639
640@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
641 {
642 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
643
644+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
645+ return &entry->bridge.ib2;
646+
647 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
648 return &entry->ipv6.ib2;
649
650@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
651 if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
652 entry->ipv6.ports = ports_pad;
653
654- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
655+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
656+ ether_addr_copy(entry->bridge.src_mac, src_mac);
657+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
658+ entry->bridge.ib2 = val;
659+ l2 = &entry->bridge.l2;
660+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
661 entry->ipv6.ib2 = val;
662 l2 = &entry->ipv6.l2;
663 } else {
664@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
665 return 0;
666 }
667
668+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
669+ int bss, int wcid)
670+{
671+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
672+ u32 *ib2 = mtk_foe_entry_ib2(entry);
673+
674+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
675+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
676+ if (wdma_idx)
677+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
678+
679+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
680+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
681+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
682+
683+ return 0;
684+}
685+
686 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
687 {
688 return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
689 FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
690 }
691
692-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
693- u16 timestamp)
694+static bool
695+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
696+{
697+ int type, len;
698+
699+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
700+ return false;
701+
702+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
703+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
704+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
705+ else
706+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
707+
708+ return !memcmp(&entry->data.data, &data->data, len - 4);
709+}
710+
711+static void
712+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
713+{
714+ struct hlist_head *head;
715+ struct hlist_node *tmp;
716+
717+ if (entry->type == MTK_FLOW_TYPE_L2) {
718+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
719+ mtk_flow_l2_ht_params);
720+
721+ head = &entry->l2_flows;
722+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
723+ __mtk_foe_entry_clear(ppe, entry);
724+ return;
725+ }
726+
727+ hlist_del_init(&entry->list);
728+ if (entry->hash != 0xffff) {
729+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
730+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
731+ MTK_FOE_STATE_INVALID);
732+ dma_wmb();
733+ }
734+ entry->hash = 0xffff;
735+
736+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
737+ return;
738+
739+ hlist_del_init(&entry->l2_data.list);
740+ kfree(entry);
741+}
742+
743+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
744+{
745+ u16 timestamp;
746+ u16 now;
747+
748+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
749+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
750+
751+ if (timestamp > now)
752+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
753+ else
754+ return now - timestamp;
755+}
756+
757+static void
758+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
759 {
760+ struct mtk_flow_entry *cur;
761 struct mtk_foe_entry *hwe;
762- u32 hash;
763+ struct hlist_node *tmp;
764+ int idle;
765+
766+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
767+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
768+ int cur_idle;
769+ u32 ib1;
770+
771+ hwe = &ppe->foe_table[cur->hash];
772+ ib1 = READ_ONCE(hwe->ib1);
773+
774+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
775+ cur->hash = 0xffff;
776+ __mtk_foe_entry_clear(ppe, cur);
777+ continue;
778+ }
779+
780+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
781+ if (cur_idle >= idle)
782+ continue;
783+
784+ idle = cur_idle;
785+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
786+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
787+ }
788+}
789+
790+static void
791+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
792+{
793+ struct mtk_foe_entry *hwe;
794+ struct mtk_foe_entry foe;
795+
796+ spin_lock_bh(&ppe_lock);
797+
798+ if (entry->type == MTK_FLOW_TYPE_L2) {
799+ mtk_flow_entry_update_l2(ppe, entry);
800+ goto out;
801+ }
802+
803+ if (entry->hash == 0xffff)
804+ goto out;
805+
806+ hwe = &ppe->foe_table[entry->hash];
807+ memcpy(&foe, hwe, sizeof(foe));
808+ if (!mtk_flow_entry_match(entry, &foe)) {
809+ entry->hash = 0xffff;
810+ goto out;
811+ }
812+
813+ entry->data.ib1 = foe.ib1;
814+
815+out:
816+ spin_unlock_bh(&ppe_lock);
817+}
818+
819+static void
820+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
821+ u16 hash)
822+{
823+ struct mtk_foe_entry *hwe;
824+ u16 timestamp;
825
826+ timestamp = mtk_eth_timestamp(ppe->eth);
827 timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
828 entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
829 entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
830
831- hash = mtk_ppe_hash_entry(entry);
832 hwe = &ppe->foe_table[hash];
833- if (!mtk_foe_entry_usable(hwe)) {
834- hwe++;
835- hash++;
836-
837- if (!mtk_foe_entry_usable(hwe))
838- return -ENOSPC;
839- }
840-
841 memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
842 wmb();
843 hwe->ib1 = entry->ib1;
844@@ -362,32 +519,197 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
845 dma_wmb();
846
847 mtk_ppe_cache_clear(ppe);
848+}
849
850- return hash;
851+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
852+{
853+ spin_lock_bh(&ppe_lock);
854+ __mtk_foe_entry_clear(ppe, entry);
855+ spin_unlock_bh(&ppe_lock);
856+}
857+
858+static int
859+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
860+{
861+ entry->type = MTK_FLOW_TYPE_L2;
862+
863+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
864+ mtk_flow_l2_ht_params);
865+}
866+
867+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
868+{
869+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
870+ u32 hash;
871+
872+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
873+ return mtk_foe_entry_commit_l2(ppe, entry);
874+
875+ hash = mtk_ppe_hash_entry(&entry->data);
876+ entry->hash = 0xffff;
877+ spin_lock_bh(&ppe_lock);
878+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]);
879+ spin_unlock_bh(&ppe_lock);
880+
881+ return 0;
882+}
883+
884+static void
885+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
886+ u16 hash)
887+{
888+ struct mtk_flow_entry *flow_info;
889+ struct mtk_foe_entry foe, *hwe;
890+ struct mtk_foe_mac_info *l2;
891+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
892+ int type;
893+
894+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
895+ GFP_ATOMIC);
896+ if (!flow_info)
897+ return;
898+
899+ flow_info->l2_data.base_flow = entry;
900+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
901+ flow_info->hash = hash;
902+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]);
903+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
904+
905+ hwe = &ppe->foe_table[hash];
906+ memcpy(&foe, hwe, sizeof(foe));
907+ foe.ib1 &= ib1_mask;
908+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
909+
910+ l2 = mtk_foe_entry_l2(&foe);
911+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
912+
913+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
914+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
915+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
916+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
917+ l2->etype = ETH_P_IPV6;
918+
919+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
920+
921+ __mtk_foe_entry_commit(ppe, &foe, hash);
922 }
923
924-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
925+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
926+{
927+ struct hlist_head *head = &ppe->foe_flow[hash / 4];
928+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
929+ struct mtk_flow_entry *entry;
930+ struct mtk_foe_bridge key = {};
931+ struct ethhdr *eh;
932+ bool found = false;
933+ u8 *tag;
934+
935+ spin_lock_bh(&ppe_lock);
936+
937+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
938+ goto out;
939+
940+ hlist_for_each_entry(entry, head, list) {
941+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
942+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
943+ MTK_FOE_STATE_BIND))
944+ continue;
945+
946+ entry->hash = 0xffff;
947+ __mtk_foe_entry_clear(ppe, entry);
948+ continue;
949+ }
950+
951+ if (found || !mtk_flow_entry_match(entry, hwe)) {
952+ if (entry->hash != 0xffff)
953+ entry->hash = 0xffff;
954+ continue;
955+ }
956+
957+ entry->hash = hash;
958+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
959+ found = true;
960+ }
961+
962+ if (found)
963+ goto out;
964+
965+ if (!skb)
966+ goto out;
967+
968+ eh = eth_hdr(skb);
969+ ether_addr_copy(key.dest_mac, eh->h_dest);
970+ ether_addr_copy(key.src_mac, eh->h_source);
971+ tag = skb->data - 2;
972+ key.vlan = 0;
973+ switch (skb->protocol) {
974+#if IS_ENABLED(CONFIG_NET_DSA)
975+ case htons(ETH_P_XDSA):
976+ if (!netdev_uses_dsa(skb->dev) ||
977+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
978+ goto out;
979+
980+ tag += 4;
981+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
982+ break;
983+
984+ fallthrough;
985+#endif
986+ case htons(ETH_P_8021Q):
987+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
988+ break;
989+ default:
990+ break;
991+ }
992+
993+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
994+ if (!entry)
995+ goto out;
996+
997+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
998+
999+out:
1000+ spin_unlock_bh(&ppe_lock);
1001+}
1002+
1003+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
1004+{
1005+ mtk_flow_entry_update(ppe, entry);
1006+
1007+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
1008+}
1009+
1010+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
1011 int version)
1012 {
1013+ struct device *dev = eth->dev;
1014 struct mtk_foe_entry *foe;
1015+ struct mtk_ppe *ppe;
1016+
1017+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
1018+ if (!ppe)
1019+ return NULL;
1020+
1021+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
1022
1023 /* need to allocate a separate device, since it PPE DMA access is
1024 * not coherent.
1025 */
1026 ppe->base = base;
1027+ ppe->eth = eth;
1028 ppe->dev = dev;
1029 ppe->version = version;
1030
1031 foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
1032 &ppe->foe_phys, GFP_KERNEL);
1033 if (!foe)
1034- return -ENOMEM;
1035+ return NULL;
1036
1037 ppe->foe_table = foe;
1038
1039 mtk_ppe_debugfs_init(ppe);
1040
1041- return 0;
1042+ return ppe;
1043 }
1044
1045 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
1046@@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
1047 static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
1048 int i, k;
1049
1050- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
1051+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
1052
1053 if (!IS_ENABLED(CONFIG_SOC_MT7621))
1054 return;
1055@@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
1056 MTK_PPE_FLOW_CFG_IP4_NAT |
1057 MTK_PPE_FLOW_CFG_IP4_NAPT |
1058 MTK_PPE_FLOW_CFG_IP4_DSLITE |
1059- MTK_PPE_FLOW_CFG_L2_BRIDGE |
1060 MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
1061 ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
1062
1063diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
1064index 242fb8f2a..1f5cf1c9a 100644
1065--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
1066+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
1067@@ -6,6 +6,7 @@
1068
1069 #include <linux/kernel.h>
1070 #include <linux/bitfield.h>
1071+#include <linux/rhashtable.h>
1072
1073 #define MTK_ETH_PPE_BASE 0xc00
1074
1075@@ -48,9 +49,9 @@ enum {
1076 #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
1077 #define MTK_FOE_IB2_MULTICAST BIT(8)
1078
1079-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
1080-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
1081-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
1082+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
1083+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
1084+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
1085
1086 #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
1087
1088@@ -58,9 +59,9 @@ enum {
1089
1090 #define MTK_FOE_IB2_DSCP GENMASK(31, 24)
1091
1092-#define MTK_FOE_VLAN2_WHNAT_BSS GENMASK(5, 0)
1093-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
1094-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
1095+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
1096+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
1097+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
1098
1099 enum {
1100 MTK_FOE_STATE_INVALID,
1101@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
1102 u16 src_mac_lo;
1103 };
1104
1105+/* software-only entry type */
1106 struct mtk_foe_bridge {
1107- u32 dest_mac_hi;
1108+ u8 dest_mac[ETH_ALEN];
1109+ u8 src_mac[ETH_ALEN];
1110+ u16 vlan;
1111
1112- u16 src_mac_lo;
1113- u16 dest_mac_lo;
1114-
1115- u32 src_mac_hi;
1116+ struct {} key_end;
1117
1118 u32 ib2;
1119
1120- u32 _rsv[5];
1121-
1122- u32 udf_tsid;
1123 struct mtk_foe_mac_info l2;
1124 };
1125
1126@@ -235,7 +233,37 @@ enum {
1127 MTK_PPE_CPU_REASON_INVALID = 0x1f,
1128 };
1129
1130+enum {
1131+ MTK_FLOW_TYPE_L4,
1132+ MTK_FLOW_TYPE_L2,
1133+ MTK_FLOW_TYPE_L2_SUBFLOW,
1134+};
1135+
1136+struct mtk_flow_entry {
1137+ union {
1138+ struct hlist_node list;
1139+ struct {
1140+ struct rhash_head l2_node;
1141+ struct hlist_head l2_flows;
1142+ };
1143+ };
1144+ u8 type;
1145+ s8 wed_index;
1146+ u16 hash;
1147+ union {
1148+ struct mtk_foe_entry data;
1149+ struct {
1150+ struct mtk_flow_entry *base_flow;
1151+ struct hlist_node list;
1152+ struct {} end;
1153+ } l2_data;
1154+ };
1155+ struct rhash_head node;
1156+ unsigned long cookie;
1157+};
1158+
1159 struct mtk_ppe {
1160+ struct mtk_eth *eth;
1161 struct device *dev;
1162 void __iomem *base;
1163 int version;
1164@@ -243,19 +271,35 @@ struct mtk_ppe {
1165 struct mtk_foe_entry *foe_table;
1166 dma_addr_t foe_phys;
1167
1168+ u16 foe_check_time[MTK_PPE_ENTRIES];
1169+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
1170+
1171+ struct rhashtable l2_flows;
1172+
1173 void *acct_table;
1174 };
1175
1176-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
1177- int version);
1178+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
1179 int mtk_ppe_start(struct mtk_ppe *ppe);
1180 int mtk_ppe_stop(struct mtk_ppe *ppe);
1181
1182+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
1183+
1184 static inline void
1185-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
1186+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
1187 {
1188- ppe->foe_table[hash].ib1 = 0;
1189- dma_wmb();
1190+ u16 now, diff;
1191+
1192+ if (!ppe)
1193+ return;
1194+
1195+ now = (u16)jiffies;
1196+ diff = now - ppe->foe_check_time[hash];
1197+ if (diff < HZ / 10)
1198+ return;
1199+
1200+ ppe->foe_check_time[hash] = now;
1201+ __mtk_ppe_check_skb(ppe, skb, hash);
1202 }
1203
1204 static inline int
1205@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
1206 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
1207 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
1208 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
1209-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
1210- u16 timestamp);
1211+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
1212+ int bss, int wcid);
1213+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1214+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1215+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
1216 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
1217
1218 #endif
1219diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1220index d4b482340..a591ab1fd 100644
1221--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1222+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
1223@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
1224 static const char * const type_str[] = {
1225 [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
1226 [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
1227- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
1228 [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
1229 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
1230 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
1231@@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
1232 struct dentry *root;
1233
1234 root = debugfs_create_dir("mtk_ppe", NULL);
1235+ if (!root)
1236+ return -ENOMEM;
1237+
1238 debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
1239 debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
1240
1241diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1242index 4294f0c74..d4a012608 100644
1243--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1244+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
1245@@ -11,6 +11,7 @@
1246 #include <net/pkt_cls.h>
1247 #include <net/dsa.h>
1248 #include "mtk_eth_soc.h"
1249+#include "mtk_wed.h"
1250
1251 struct mtk_flow_data {
1252 struct ethhdr eth;
1253@@ -30,6 +31,8 @@ struct mtk_flow_data {
1254 __be16 src_port;
1255 __be16 dst_port;
1256
1257+ u16 vlan_in;
1258+
1259 struct {
1260 u16 id;
1261 __be16 proto;
1262@@ -41,12 +44,6 @@ struct mtk_flow_data {
1263 } pppoe;
1264 };
1265
1266-struct mtk_flow_entry {
1267- struct rhash_head node;
1268- unsigned long cookie;
1269- u16 hash;
1270-};
1271-
1272 static const struct rhashtable_params mtk_flow_ht_params = {
1273 .head_offset = offsetof(struct mtk_flow_entry, node),
1274 .key_offset = offsetof(struct mtk_flow_entry, cookie),
1275@@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
1276 .automatic_shrinking = true,
1277 };
1278
1279-static u32
1280-mtk_eth_timestamp(struct mtk_eth *eth)
1281-{
1282- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
1283-}
1284-
1285 static int
1286 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
1287 bool egress)
1288@@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
1289 memcpy(dest, src, act->mangle.mask ? 2 : 4);
1290 }
1291
1292+static int
1293+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
1294+{
1295+ struct net_device_path_ctx ctx = {
1296+ .dev = dev,
1297+ };
1298+ struct net_device_path path = {};
1299+
1300+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
1301+ return -1;
1302+
1303+ if (!dev->netdev_ops->ndo_fill_forward_path)
1304+ return -1;
1305+
1306+ memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
1307+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
1308+ return -1;
1309+
1310+ if (path.type != DEV_PATH_MTK_WDMA)
1311+ return -1;
1312+
1313+ info->wdma_idx = path.mtk_wdma.wdma_idx;
1314+ info->queue = path.mtk_wdma.queue;
1315+ info->bss = path.mtk_wdma.bss;
1316+ info->wcid = path.mtk_wdma.wcid;
1317+
1318+ return 0;
1319+}
1320+
1321
1322 static int
1323 mtk_flow_mangle_ports(const struct flow_action_entry *act,
1324@@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
1325
1326 static int
1327 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
1328- struct net_device *dev)
1329+ struct net_device *dev, const u8 *dest_mac,
1330+ int *wed_index)
1331 {
1332+ struct mtk_wdma_info info = {};
1333 int pse_port, dsa_port;
1334
1335+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
1336+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
1337+ info.wcid);
1338+ pse_port = 3;
1339+ *wed_index = info.wdma_idx;
1340+ goto out;
1341+ }
1342+
1343 dsa_port = mtk_flow_get_dsa_port(&dev);
1344 if (dsa_port >= 0)
1345 mtk_foe_entry_set_dsa(foe, dsa_port);
1346@@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
1347 else
1348 return -EOPNOTSUPP;
1349
1350+out:
1351 mtk_foe_entry_set_pse_port(foe, pse_port);
1352
1353 return 0;
1354@@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1355 struct net_device *odev = NULL;
1356 struct mtk_flow_entry *entry;
1357 int offload_type = 0;
1358+ int wed_index = -1;
1359 u16 addr_type = 0;
1360- u32 timestamp;
1361 u8 l4proto = 0;
1362 int err = 0;
1363- int hash;
1364 int i;
1365
1366 if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
1367@@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1368 return -EOPNOTSUPP;
1369 }
1370
1371+ switch (addr_type) {
1372+ case 0:
1373+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
1374+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1375+ struct flow_match_eth_addrs match;
1376+
1377+ flow_rule_match_eth_addrs(rule, &match);
1378+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
1379+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
1380+ } else {
1381+ return -EOPNOTSUPP;
1382+ }
1383+
1384+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
1385+ struct flow_match_vlan match;
1386+
1387+ flow_rule_match_vlan(rule, &match);
1388+
1389+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
1390+ return -EOPNOTSUPP;
1391+
1392+ data.vlan_in = match.key->vlan_id;
1393+ }
1394+ break;
1395+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1396+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1397+ break;
1398+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1399+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1400+ break;
1401+ default:
1402+ return -EOPNOTSUPP;
1403+ }
1404+
1405 flow_action_for_each(i, act, &rule->action) {
1406 switch (act->id) {
1407 case FLOW_ACTION_MANGLE:
1408+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1409+ return -EOPNOTSUPP;
1410 if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
1411 mtk_flow_offload_mangle_eth(act, &data.eth);
1412 break;
1413@@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1414 }
1415 }
1416
1417- switch (addr_type) {
1418- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1419- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
1420- break;
1421- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1422- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
1423- break;
1424- default:
1425- return -EOPNOTSUPP;
1426- }
1427-
1428 if (!is_valid_ether_addr(data.eth.h_source) ||
1429 !is_valid_ether_addr(data.eth.h_dest))
1430 return -EINVAL;
1431@@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1432 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
1433 struct flow_match_ports ports;
1434
1435+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1436+ return -EOPNOTSUPP;
1437+
1438 flow_rule_match_ports(rule, &ports);
1439 data.src_port = ports.key->src;
1440 data.dst_port = ports.key->dst;
1441- } else {
1442+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
1443 return -EOPNOTSUPP;
1444 }
1445
1446@@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1447 if (act->id != FLOW_ACTION_MANGLE)
1448 continue;
1449
1450+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1451+ return -EOPNOTSUPP;
1452+
1453 switch (act->mangle.htype) {
1454 case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1455 case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1456@@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1457 return err;
1458 }
1459
1460+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
1461+ foe.bridge.vlan = data.vlan_in;
1462+
1463 if (data.vlan.num == 1) {
1464 if (data.vlan.proto != htons(ETH_P_8021Q))
1465 return -EOPNOTSUPP;
1466@@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
1467 if (data.pppoe.num == 1)
1468 mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
1469
1470- err = mtk_flow_set_output_device(eth, &foe, odev);
1471+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
1472+ &wed_index);
1473 if (err)
1474 return err;
1475
1476+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
1477+ return err;
1478+
1479 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1480 if (!entry)
1481 return -ENOMEM;
1482
1483 entry->cookie = f->cookie;
1484- timestamp = mtk_eth_timestamp(eth);
1485- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
1486- if (hash < 0) {
1487- err = hash;
1488+ memcpy(&entry->data, &foe, sizeof(entry->data));
1489+ entry->wed_index = wed_index;
1490+
1491+ if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
1492 goto free;
1493- }
1494
1495- entry->hash = hash;
1496 err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
1497 mtk_flow_ht_params);
1498 if (err < 0)
1499- goto clear_flow;
1500+ goto clear;
1501
1502 return 0;
1503-clear_flow:
1504- mtk_foe_entry_clear(&eth->ppe, hash);
1505+
1506+clear:
1507+ mtk_foe_entry_clear(eth->ppe, entry);
1508 free:
1509 kfree(entry);
1510+ if (wed_index >= 0)
1511+ mtk_wed_flow_remove(wed_index);
1512 return err;
1513 }
1514
1515@@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
1516 if (!entry)
1517 return -ENOENT;
1518
1519- mtk_foe_entry_clear(&eth->ppe, entry->hash);
1520+ mtk_foe_entry_clear(eth->ppe, entry);
1521 rhashtable_remove_fast(&eth->flow_table, &entry->node,
1522 mtk_flow_ht_params);
1523+ if (entry->wed_index >= 0)
1524+ mtk_wed_flow_remove(entry->wed_index);
1525 kfree(entry);
1526
1527 return 0;
1528@@ -406,7 +477,6 @@ static int
1529 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1530 {
1531 struct mtk_flow_entry *entry;
1532- int timestamp;
1533 u32 idle;
1534
1535 entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
1536@@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
1537 if (!entry)
1538 return -ENOENT;
1539
1540- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
1541- if (timestamp < 0)
1542- return -ETIMEDOUT;
1543-
1544- idle = mtk_eth_timestamp(eth) - timestamp;
1545+ idle = mtk_foe_entry_idle_time(eth->ppe, entry);
1546 f->stats.lastused = jiffies - idle * HZ;
1547
1548 return 0;
1549@@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1550 struct flow_block_cb *block_cb;
1551 flow_setup_cb_t *cb;
1552
1553- if (!eth->ppe.foe_table)
1554+ if (!eth->ppe || !eth->ppe->foe_table)
1555 return -EOPNOTSUPP;
1556
1557 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
1558@@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
1559 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
1560 void *type_data)
1561 {
1562- if (type == TC_SETUP_FT)
1563+ switch (type) {
1564+ case TC_SETUP_BLOCK:
1565+ case TC_SETUP_FT:
1566 return mtk_eth_setup_tc_block(dev, type_data);
1567-
1568- return -EOPNOTSUPP;
1569+ default:
1570+ return -EOPNOTSUPP;
1571+ }
1572 }
1573
1574 int mtk_eth_offload_init(struct mtk_eth *eth)
1575 {
1576- if (!eth->ppe.foe_table)
1577+ if (!eth->ppe || !eth->ppe->foe_table)
1578 return 0;
1579
1580 return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
1581diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
1582new file mode 100644
1583index 000000000..ea1cbdf1a
1584--- /dev/null
1585+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
1586@@ -0,0 +1,876 @@
1587+// SPDX-License-Identifier: GPL-2.0-only
1588+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1589+
1590+#include <linux/kernel.h>
1591+#include <linux/slab.h>
1592+#include <linux/module.h>
1593+#include <linux/bitfield.h>
1594+#include <linux/dma-mapping.h>
1595+#include <linux/skbuff.h>
1596+#include <linux/of_platform.h>
1597+#include <linux/of_address.h>
1598+#include <linux/mfd/syscon.h>
1599+#include <linux/debugfs.h>
1600+#include <linux/iopoll.h>
1601+#include <linux/soc/mediatek/mtk_wed.h>
1602+#include "mtk_eth_soc.h"
1603+#include "mtk_wed_regs.h"
1604+#include "mtk_wed.h"
1605+#include "mtk_ppe.h"
1606+
1607+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
1608+
1609+#define MTK_WED_PKT_SIZE 1900
1610+#define MTK_WED_BUF_SIZE 2048
1611+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1612+
1613+#define MTK_WED_TX_RING_SIZE 2048
1614+#define MTK_WED_WDMA_RING_SIZE 1024
1615+
1616+static struct mtk_wed_hw *hw_list[2];
1617+static DEFINE_MUTEX(hw_lock);
1618+
1619+static void
1620+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1621+{
1622+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
1623+}
1624+
1625+static void
1626+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1627+{
1628+ return wed_m32(dev, reg, 0, mask);
1629+}
1630+
1631+static void
1632+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
1633+{
1634+ return wed_m32(dev, reg, mask, 0);
1635+}
1636+
1637+static void
1638+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1639+{
1640+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
1641+}
1642+
1643+static void
1644+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1645+{
1646+ wdma_m32(dev, reg, 0, mask);
1647+}
1648+
1649+static u32
1650+mtk_wed_read_reset(struct mtk_wed_device *dev)
1651+{
1652+ return wed_r32(dev, MTK_WED_RESET);
1653+}
1654+
1655+static void
1656+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
1657+{
1658+ u32 status;
1659+
1660+ wed_w32(dev, MTK_WED_RESET, mask);
1661+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
1662+ !(status & mask), 0, 1000))
1663+ WARN_ON_ONCE(1);
1664+}
1665+
1666+static struct mtk_wed_hw *
1667+mtk_wed_assign(struct mtk_wed_device *dev)
1668+{
1669+ struct mtk_wed_hw *hw;
1670+
1671+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
1672+ if (!hw || hw->wed_dev)
1673+ return NULL;
1674+
1675+ hw->wed_dev = dev;
1676+ return hw;
1677+}
1678+
1679+static int
1680+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
1681+{
1682+ struct mtk_wdma_desc *desc;
1683+ dma_addr_t desc_phys;
1684+ void **page_list;
1685+ int token = dev->wlan.token_start;
1686+ int ring_size;
1687+ int n_pages;
1688+ int i, page_idx;
1689+
1690+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
1691+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
1692+
1693+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
1694+ if (!page_list)
1695+ return -ENOMEM;
1696+
1697+ dev->buf_ring.size = ring_size;
1698+ dev->buf_ring.pages = page_list;
1699+
1700+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
1701+ &desc_phys, GFP_KERNEL);
1702+ if (!desc)
1703+ return -ENOMEM;
1704+
1705+ dev->buf_ring.desc = desc;
1706+ dev->buf_ring.desc_phys = desc_phys;
1707+
1708+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
1709+ dma_addr_t page_phys, buf_phys;
1710+ struct page *page;
1711+ void *buf;
1712+ int s;
1713+
1714+ page = __dev_alloc_pages(GFP_KERNEL, 0);
1715+ if (!page)
1716+ return -ENOMEM;
1717+
1718+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
1719+ DMA_BIDIRECTIONAL);
1720+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
1721+ __free_page(page);
1722+ return -ENOMEM;
1723+ }
1724+
1725+ page_list[page_idx++] = page;
1726+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
1727+ DMA_BIDIRECTIONAL);
1728+
1729+ buf = page_to_virt(page);
1730+ buf_phys = page_phys;
1731+
1732+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
1733+ u32 txd_size;
1734+
1735+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
1736+
1737+ desc->buf0 = buf_phys;
1738+ desc->buf1 = buf_phys + txd_size;
1739+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
1740+ txd_size) |
1741+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
1742+ MTK_WED_BUF_SIZE - txd_size) |
1743+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
1744+ desc->info = 0;
1745+ desc++;
1746+
1747+ buf += MTK_WED_BUF_SIZE;
1748+ buf_phys += MTK_WED_BUF_SIZE;
1749+ }
1750+
1751+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
1752+ DMA_BIDIRECTIONAL);
1753+ }
1754+
1755+ return 0;
1756+}
1757+
1758+static void
1759+mtk_wed_free_buffer(struct mtk_wed_device *dev)
1760+{
1761+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
1762+ void **page_list = dev->buf_ring.pages;
1763+ int page_idx;
1764+ int i;
1765+
1766+ if (!page_list)
1767+ return;
1768+
1769+ if (!desc)
1770+ goto free_pagelist;
1771+
1772+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
1773+ void *page = page_list[page_idx++];
1774+
1775+ if (!page)
1776+ break;
1777+
1778+ dma_unmap_page(dev->hw->dev, desc[i].buf0,
1779+ PAGE_SIZE, DMA_BIDIRECTIONAL);
1780+ __free_page(page);
1781+ }
1782+
1783+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
1784+ desc, dev->buf_ring.desc_phys);
1785+
1786+free_pagelist:
1787+ kfree(page_list);
1788+}
1789+
1790+static void
1791+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
1792+{
1793+ if (!ring->desc)
1794+ return;
1795+
1796+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
1797+ ring->desc, ring->desc_phys);
1798+}
1799+
1800+static void
1801+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
1802+{
1803+ int i;
1804+
1805+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
1806+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
1807+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1808+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
1809+}
1810+
1811+static void
1812+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
1813+{
1814+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1815+
1816+ if (!dev->hw->num_flows)
1817+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1818+
1819+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
1820+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
1821+}
1822+
1823+static void
1824+mtk_wed_stop(struct mtk_wed_device *dev)
1825+{
1826+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
1827+ mtk_wed_set_ext_int(dev, false);
1828+
1829+ wed_clr(dev, MTK_WED_CTRL,
1830+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1831+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1832+ MTK_WED_CTRL_WED_TX_BM_EN |
1833+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1834+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
1835+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
1836+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
1837+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
1838+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1839+
1840+ wed_clr(dev, MTK_WED_GLO_CFG,
1841+ MTK_WED_GLO_CFG_TX_DMA_EN |
1842+ MTK_WED_GLO_CFG_RX_DMA_EN);
1843+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1844+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1845+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1846+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1847+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1848+}
1849+
1850+static void
1851+mtk_wed_detach(struct mtk_wed_device *dev)
1852+{
1853+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
1854+ struct mtk_wed_hw *hw = dev->hw;
1855+
1856+ mutex_lock(&hw_lock);
1857+
1858+ mtk_wed_stop(dev);
1859+
1860+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1861+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1862+
1863+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1864+
1865+ mtk_wed_free_buffer(dev);
1866+ mtk_wed_free_tx_rings(dev);
1867+
1868+ if (of_dma_is_coherent(wlan_node))
1869+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1870+ BIT(hw->index), BIT(hw->index));
1871+
1872+ if (!hw_list[!hw->index]->wed_dev &&
1873+ hw->eth->dma_dev != hw->eth->dev)
1874+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
1875+
1876+ memset(dev, 0, sizeof(*dev));
1877+ module_put(THIS_MODULE);
1878+
1879+ hw->wed_dev = NULL;
1880+ mutex_unlock(&hw_lock);
1881+}
1882+
1883+static void
1884+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1885+{
1886+ u32 mask, set;
1887+ u32 offset;
1888+
1889+ mtk_wed_stop(dev);
1890+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1891+
1892+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1893+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1894+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1895+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1896+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1897+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1898+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1899+
1900+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
1901+
1902+ offset = dev->hw->index ? 0x04000400 : 0;
1903+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1904+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1905+
1906+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
1907+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1908+}
1909+
1910+static void
1911+mtk_wed_hw_init(struct mtk_wed_device *dev)
1912+{
1913+ if (dev->init_done)
1914+ return;
1915+
1916+ dev->init_done = true;
1917+ mtk_wed_set_ext_int(dev, false);
1918+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1919+ MTK_WED_TX_BM_CTRL_PAUSE |
1920+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
1921+ dev->buf_ring.size / 128) |
1922+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
1923+ MTK_WED_TX_RING_SIZE / 256));
1924+
1925+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1926+
1927+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1928+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1929+ dev->wlan.token_start) |
1930+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1931+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1932+
1933+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1934+
1935+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1936+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
1937+ MTK_WED_TX_BM_DYN_THR_HI);
1938+
1939+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1940+
1941+ wed_set(dev, MTK_WED_CTRL,
1942+ MTK_WED_CTRL_WED_TX_BM_EN |
1943+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1944+
1945+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1946+}
1947+
1948+static void
1949+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
1950+{
1951+ int i;
1952+
1953+ for (i = 0; i < size; i++) {
1954+ desc[i].buf0 = 0;
1955+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
1956+ desc[i].buf1 = 0;
1957+ desc[i].info = 0;
1958+ }
1959+}
1960+
1961+static u32
1962+mtk_wed_check_busy(struct mtk_wed_device *dev)
1963+{
1964+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
1965+ return true;
1966+
1967+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
1968+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
1969+ return true;
1970+
1971+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
1972+ return true;
1973+
1974+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
1975+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1976+ return true;
1977+
1978+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
1979+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1980+ return true;
1981+
1982+ if (wed_r32(dev, MTK_WED_CTRL) &
1983+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
1984+ return true;
1985+
1986+ return false;
1987+}
1988+
1989+static int
1990+mtk_wed_poll_busy(struct mtk_wed_device *dev)
1991+{
1992+ int sleep = 15000;
1993+ int timeout = 100 * sleep;
1994+ u32 val;
1995+
1996+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1997+ timeout, false, dev);
1998+}
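
The read_poll_timeout() call above re-evaluates mtk_wed_check_busy() until the condition !val holds or the time budget expires: with sleep = 15000 and timeout = 100 * sleep, that is roughly one poll every 15 ms for up to 1.5 s. As a reading aid, an open-coded sketch of the same behaviour (the real macro in <linux/iopoll.h> also does ktime bookkeeping and one final re-check on timeout):

    /* Sketch only: what the read_poll_timeout() call boils down to. */
    static int mtk_wed_poll_busy_open_coded(struct mtk_wed_device *dev)
    {
    	int tries;

    	for (tries = 0; tries < 100; tries++) {	/* 100 * 15 ms = 1.5 s */
    		if (!mtk_wed_check_busy(dev))
    			return 0;
    		usleep_range(15000, 16000);
    	}

    	return -ETIMEDOUT;
    }
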
1999+
2000+static void
2001+mtk_wed_reset_dma(struct mtk_wed_device *dev)
2002+{
2003+ bool busy = false;
2004+ u32 val;
2005+ int i;
2006+
2007+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
2008+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
2009+
2010+ if (!desc)
2011+ continue;
2012+
2013+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
2014+ }
2015+
2016+ if (mtk_wed_poll_busy(dev))
2017+ busy = mtk_wed_check_busy(dev);
2018+
2019+ if (busy) {
2020+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
2021+ } else {
2022+ wed_w32(dev, MTK_WED_RESET_IDX,
2023+ MTK_WED_RESET_IDX_TX |
2024+ MTK_WED_RESET_IDX_RX);
2025+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
2026+ }
2027+
2028+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
2029+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
2030+
2031+ if (busy) {
2032+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
2033+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
2034+ } else {
2035+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
2036+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
2037+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
2038+
2039+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2040+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2041+
2042+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
2043+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2044+ }
2045+
2046+ for (i = 0; i < 100; i++) {
2047+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
2048+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
2049+ break;
2050+ }
2051+
2052+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
2053+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
2054+
2055+ if (busy) {
2056+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
2057+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
2058+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
2059+ } else {
2060+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
2061+ MTK_WED_WPDMA_RESET_IDX_TX |
2062+ MTK_WED_WPDMA_RESET_IDX_RX);
2063+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
2064+ }
2065+
2066+}
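
A reading aid for the function above; the characterization of the idle path is an assumption based on the register names, not a statement from the patch:

    /* Reset strategy in mtk_wed_reset_dma() (sketch):
     *   busy -> assert the per-block reset lines (MTK_WED_RESET_* bits,
     *           via mtk_wed_reset()) so the engine state machines restart;
     *   idle -> the lighter *_RESET_IDX writes only rewind the ring index
     *           pointers, leaving the engines running.
     */
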
2067+
2068+static int
2069+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
2070+ int size)
2071+{
2072+ ring->desc = dma_alloc_coherent(dev->hw->dev,
2073+ size * sizeof(*ring->desc),
2074+ &ring->desc_phys, GFP_KERNEL);
2075+ if (!ring->desc)
2076+ return -ENOMEM;
2077+
2078+ ring->size = size;
2079+ mtk_wed_ring_reset(ring->desc, size);
2080+
2081+ return 0;
2082+}
2083+
2084+static int
2085+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
2086+{
2087+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
2088+
2089+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
2090+ return -ENOMEM;
2091+
2092+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2093+ wdma->desc_phys);
2094+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2095+ size);
2096+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2097+
2098+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2099+ wdma->desc_phys);
2100+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2101+ size);
2102+
2103+ return 0;
2104+}
2105+
2106+static void
2107+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
2108+{
2109+ u32 wdma_mask;
2110+ u32 val;
2111+ int i;
2112+
2113+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
2114+ if (!dev->tx_wdma[i].desc)
2115+ mtk_wed_wdma_ring_setup(dev, i, 16);
2116+
2117+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
2118+
2119+ mtk_wed_hw_init(dev);
2120+
2121+ wed_set(dev, MTK_WED_CTRL,
2122+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
2123+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
2124+ MTK_WED_CTRL_WED_TX_BM_EN |
2125+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
2126+
2127+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
2128+
2129+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
2130+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
2131+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
2132+
2133+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
2134+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
2135+
2136+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
2137+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
2138+
2139+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
2140+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
2141+
2142+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
2143+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
2144+
2145+ wed_set(dev, MTK_WED_GLO_CFG,
2146+ MTK_WED_GLO_CFG_TX_DMA_EN |
2147+ MTK_WED_GLO_CFG_RX_DMA_EN);
2148+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
2149+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
2150+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
2151+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2152+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
2153+
2154+ mtk_wed_set_ext_int(dev, true);
2155+ val = dev->wlan.wpdma_phys |
2156+ MTK_PCIE_MIRROR_MAP_EN |
2157+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
2158+
2159+ if (dev->hw->index)
2160+ val |= BIT(1);
2161+ val |= BIT(0);
2162+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
2163+
2164+ dev->running = true;
2165+}
2166+
2167+static int
2168+mtk_wed_attach(struct mtk_wed_device *dev)
2169+ __releases(RCU)
2170+{
2171+ struct mtk_wed_hw *hw;
2172+ int ret = 0;
2173+
2174+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
2175+ "mtk_wed_attach without holding the RCU read lock");
2176+
2177+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
2178+ !try_module_get(THIS_MODULE))
2179+ ret = -ENODEV;
2180+
2181+ rcu_read_unlock();
2182+
2183+ if (ret)
2184+ return ret;
2185+
2186+ mutex_lock(&hw_lock);
2187+
2188+ hw = mtk_wed_assign(dev);
2189+ if (!hw) {
2190+ module_put(THIS_MODULE);
2191+ ret = -ENODEV;
2192+ goto out;
2193+ }
2194+
2195+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
2196+
2197+ dev->hw = hw;
2198+ dev->dev = hw->dev;
2199+ dev->irq = hw->irq;
2200+ dev->wdma_idx = hw->index;
2201+
2202+ if (hw->eth->dma_dev == hw->eth->dev &&
2203+ of_dma_is_coherent(hw->eth->dev->of_node))
2204+ mtk_eth_set_dma_device(hw->eth, hw->dev);
2205+
2206+ ret = mtk_wed_buffer_alloc(dev);
2207+ if (ret) {
2208+ mtk_wed_detach(dev);
2209+ goto out;
2210+ }
2211+
2212+ mtk_wed_hw_init_early(dev);
2213+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
2214+
2215+out:
2216+ mutex_unlock(&hw_lock);
2217+
2218+ return ret;
2219+}
2220+
2221+static int
2222+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2223+{
2224+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
2225+
2226+ /*
2227+ * Tx ring redirection:
2228+ * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
2229+	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA
2230+	 * ring allocated by the WLAN driver gets configured into the WED
2231+	 * MTK_WED_RING_TX(n) registers.
2232+	 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring and
2233+	 * configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
2234+	 * That ring is filled with packets picked up from the WED TX ring
2235+	 * and from WDMA RX.
2236+ */
2237+
2238+	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
2239+
2240+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
2241+ return -ENOMEM;
2242+
2243+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
2244+ return -ENOMEM;
2245+
2246+ ring->reg_base = MTK_WED_RING_TX(idx);
2247+ ring->wpdma = regs;
2248+
2249+ /* WED -> WPDMA */
2250+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
2251+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
2252+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
2253+
2254+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
2255+ ring->desc_phys);
2256+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
2257+ MTK_WED_TX_RING_SIZE);
2258+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2259+
2260+ return 0;
2261+}
2262+
2263+static int
2264+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2265+{
2266+ struct mtk_wed_ring *ring = &dev->txfree_ring;
2267+ int i;
2268+
2269+ /*
2270+ * For txfree event handling, the same DMA ring is shared between WED
2271+ * and WLAN. The WLAN driver accesses the ring index registers through
2272+	 * WED.
2273+ */
2274+ ring->reg_base = MTK_WED_RING_RX(1);
2275+ ring->wpdma = regs;
2276+
2277+ for (i = 0; i < 12; i += 4) {
2278+ u32 val = readl(regs + i);
2279+
2280+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
2281+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
2282+ }
2283+
2284+ return 0;
2285+}
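
The i = 0, 4, 8 loop above mirrors three 32-bit ring registers from the WLAN ring into both the WED and WPDMA copies; the offsets correspond to the MTK_WED_RING_OFS_* constants defined in mtk_wed_regs.h further down in this patch:

    /* Registers copied by the loop (offsets relative to the ring base):
     *   0x0 -> MTK_WED_RING_OFS_BASE     (descriptor base address)
     *   0x4 -> MTK_WED_RING_OFS_COUNT    (ring size)
     *   0x8 -> MTK_WED_RING_OFS_CPU_IDX  (CPU index)
     * MTK_WED_RING_OFS_DMA_IDX (0xc) is not copied.
     */
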
2286+
2287+static u32
2288+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2289+{
2290+ u32 val;
2291+
2292+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2293+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2294+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2295+ if (!dev->hw->num_flows)
2296+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2297+ if (val && net_ratelimit())
2298+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
2299+
2300+ val = wed_r32(dev, MTK_WED_INT_STATUS);
2301+ val &= mask;
2302+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
2303+
2304+ return val;
2305+}
2306+
2307+static void
2308+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
2309+{
2310+ if (!dev->running)
2311+ return;
2312+
2313+ mtk_wed_set_ext_int(dev, !!mask);
2314+ wed_w32(dev, MTK_WED_INT_MASK, mask);
2315+}
2316+
2317+int mtk_wed_flow_add(int index)
2318+{
2319+ struct mtk_wed_hw *hw = hw_list[index];
2320+ int ret;
2321+
2322+ if (!hw || !hw->wed_dev)
2323+ return -ENODEV;
2324+
2325+ if (hw->num_flows) {
2326+ hw->num_flows++;
2327+ return 0;
2328+ }
2329+
2330+ mutex_lock(&hw_lock);
2331+ if (!hw->wed_dev) {
2332+ ret = -ENODEV;
2333+ goto out;
2334+ }
2335+
2336+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
2337+ if (!ret)
2338+ hw->num_flows++;
2339+ mtk_wed_set_ext_int(hw->wed_dev, true);
2340+
2341+out:
2342+ mutex_unlock(&hw_lock);
2343+
2344+ return ret;
2345+}
2346+
2347+void mtk_wed_flow_remove(int index)
2348+{
2349+ struct mtk_wed_hw *hw = hw_list[index];
2350+
2351+ if (!hw)
2352+ return;
2353+
2354+ if (--hw->num_flows)
2355+ return;
2356+
2357+ mutex_lock(&hw_lock);
2358+ if (!hw->wed_dev)
2359+ goto out;
2360+
2361+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
2362+ mtk_wed_set_ext_int(hw->wed_dev, true);
2363+
2364+out:
2365+ mutex_unlock(&hw_lock);
2366+}
2367+
2368+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2369+ void __iomem *wdma, int index)
2370+{
2371+ static const struct mtk_wed_ops wed_ops = {
2372+ .attach = mtk_wed_attach,
2373+ .tx_ring_setup = mtk_wed_tx_ring_setup,
2374+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2375+ .start = mtk_wed_start,
2376+ .stop = mtk_wed_stop,
2377+ .reset_dma = mtk_wed_reset_dma,
2378+ .reg_read = wed_r32,
2379+ .reg_write = wed_w32,
2380+ .irq_get = mtk_wed_irq_get,
2381+ .irq_set_mask = mtk_wed_irq_set_mask,
2382+ .detach = mtk_wed_detach,
2383+ };
2384+ struct device_node *eth_np = eth->dev->of_node;
2385+ struct platform_device *pdev;
2386+ struct mtk_wed_hw *hw;
2387+ struct regmap *regs;
2388+ int irq;
2389+
2390+ if (!np)
2391+ return;
2392+
2393+ pdev = of_find_device_by_node(np);
2394+ if (!pdev)
2395+ return;
2396+
2397+ get_device(&pdev->dev);
2398+ irq = platform_get_irq(pdev, 0);
2399+ if (irq < 0)
2400+ return;
2401+
2402+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
2403+	if (IS_ERR(regs))
2404+ return;
2405+
2406+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
2407+
2408+ mutex_lock(&hw_lock);
2409+
2410+ if (WARN_ON(hw_list[index]))
2411+ goto unlock;
2412+
2413+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
2414+ hw->node = np;
2415+ hw->regs = regs;
2416+ hw->eth = eth;
2417+ hw->dev = &pdev->dev;
2418+ hw->wdma = wdma;
2419+ hw->index = index;
2420+ hw->irq = irq;
2421+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2422+ "mediatek,pcie-mirror");
2423+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2424+ "mediatek,hifsys");
2425+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
2426+ kfree(hw);
2427+ goto unlock;
2428+ }
2429+
2430+ if (!index) {
2431+ regmap_write(hw->mirror, 0, 0);
2432+ regmap_write(hw->mirror, 4, 0);
2433+ }
2434+ mtk_wed_hw_add_debugfs(hw);
2435+
2436+ hw_list[index] = hw;
2437+
2438+unlock:
2439+ mutex_unlock(&hw_lock);
2440+}
2441+
2442+void mtk_wed_exit(void)
2443+{
2444+ int i;
2445+
2446+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
2447+
2448+ synchronize_rcu();
2449+
2450+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
2451+ struct mtk_wed_hw *hw;
2452+
2453+ hw = hw_list[i];
2454+ if (!hw)
2455+ continue;
2456+
2457+ hw_list[i] = NULL;
2458+ debugfs_remove(hw->debugfs_dir);
2459+ put_device(hw->dev);
2460+ kfree(hw);
2461+ }
2462+}
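
mtk_wed_add_hw() and mtk_wed_exit() bracket the lifetime of mtk_soc_wed_ops with the usual RCU publish/retract pattern; the reader side is the mtk_wed_device_attach() inline in the public header below, and the read lock taken there is released inside mtk_wed_attach(), which is annotated __releases(RCU). A sketch of the ordering:

    /* RCU lifetime sketch for mtk_soc_wed_ops:
     *
     *   writer (mtk_wed_add_hw):        reader (mtk_wed_device_attach):
     *     rcu_assign_pointer(ops,
     *                        &wed_ops);   rcu_read_lock();
     *                                     ops = rcu_dereference(...);
     *   writer (mtk_wed_exit):            ops->attach(dev); // drops lock
     *     rcu_assign_pointer(ops, NULL);
     *     synchronize_rcu();  // waits out in-flight readers
     *     kfree(hw);          // now safe to free
     */
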
2463diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2464new file mode 100644
2465index 000000000..981ec613f
2466--- /dev/null
2467+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2468@@ -0,0 +1,135 @@
2469+// SPDX-License-Identifier: GPL-2.0-only
2470+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2471+
2472+#ifndef __MTK_WED_PRIV_H
2473+#define __MTK_WED_PRIV_H
2474+
2475+#include <linux/soc/mediatek/mtk_wed.h>
2476+#include <linux/debugfs.h>
2477+#include <linux/regmap.h>
2478+#include <linux/netdevice.h>
2479+
2480+struct mtk_eth;
2481+
2482+struct mtk_wed_hw {
2483+ struct device_node *node;
2484+ struct mtk_eth *eth;
2485+ struct regmap *regs;
2486+ struct regmap *hifsys;
2487+ struct device *dev;
2488+ void __iomem *wdma;
2489+ struct regmap *mirror;
2490+ struct dentry *debugfs_dir;
2491+ struct mtk_wed_device *wed_dev;
2492+ u32 debugfs_reg;
2493+ u32 num_flows;
2494+ char dirname[5];
2495+ int irq;
2496+ int index;
2497+};
2498+
2499+struct mtk_wdma_info {
2500+ u8 wdma_idx;
2501+ u8 queue;
2502+ u16 wcid;
2503+ u8 bss;
2504+};
2505+
2506+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2507+static inline void
2508+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2509+{
2510+ regmap_write(dev->hw->regs, reg, val);
2511+}
2512+
2513+static inline u32
2514+wed_r32(struct mtk_wed_device *dev, u32 reg)
2515+{
2516+ unsigned int val;
2517+
2518+ regmap_read(dev->hw->regs, reg, &val);
2519+
2520+ return val;
2521+}
2522+
2523+static inline void
2524+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2525+{
2526+ writel(val, dev->hw->wdma + reg);
2527+}
2528+
2529+static inline u32
2530+wdma_r32(struct mtk_wed_device *dev, u32 reg)
2531+{
2532+ return readl(dev->hw->wdma + reg);
2533+}
2534+
2535+static inline u32
2536+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
2537+{
2538+ if (!dev->tx_ring[ring].wpdma)
2539+ return 0;
2540+
2541+ return readl(dev->tx_ring[ring].wpdma + reg);
2542+}
2543+
2544+static inline void
2545+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
2546+{
2547+ if (!dev->tx_ring[ring].wpdma)
2548+ return;
2549+
2550+ writel(val, dev->tx_ring[ring].wpdma + reg);
2551+}
2552+
2553+static inline u32
2554+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
2555+{
2556+ if (!dev->txfree_ring.wpdma)
2557+ return 0;
2558+
2559+ return readl(dev->txfree_ring.wpdma + reg);
2560+}
2561+
2562+static inline void
2563+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2564+{
2565+ if (!dev->txfree_ring.wpdma)
2566+ return;
2567+
2568+ writel(val, dev->txfree_ring.wpdma + reg);
2569+}
2570+
2571+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2572+ void __iomem *wdma, int index);
2573+void mtk_wed_exit(void);
2574+int mtk_wed_flow_add(int index);
2575+void mtk_wed_flow_remove(int index);
2576+#else
2577+static inline void
2578+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2579+ void __iomem *wdma, int index)
2580+{
2581+}
2582+static inline void
2583+mtk_wed_exit(void)
2584+{
2585+}
2586+static inline int mtk_wed_flow_add(int index)
2587+{
2588+ return -EINVAL;
2589+}
2590+static inline void mtk_wed_flow_remove(int index)
2591+{
2592+}
2593+#endif
2594+
2595+#ifdef CONFIG_DEBUG_FS
2596+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
2597+#else
2598+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2599+{
2600+}
2601+#endif
2602+
2603+#endif
2604diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2605new file mode 100644
2606index 000000000..a81d3fd1a
2607--- /dev/null
2608+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2609@@ -0,0 +1,175 @@
2610+// SPDX-License-Identifier: GPL-2.0-only
2611+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2612+
2613+#include <linux/seq_file.h>
2614+#include "mtk_wed.h"
2615+#include "mtk_wed_regs.h"
2616+
2617+struct reg_dump {
2618+ const char *name;
2619+ u16 offset;
2620+ u8 type;
2621+ u8 base;
2622+};
2623+
2624+enum {
2625+ DUMP_TYPE_STRING,
2626+ DUMP_TYPE_WED,
2627+ DUMP_TYPE_WDMA,
2628+ DUMP_TYPE_WPDMA_TX,
2629+ DUMP_TYPE_WPDMA_TXFREE,
2630+};
2631+
2632+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2633+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2634+#define DUMP_RING(_prefix, _base, ...) \
2635+ { _prefix " BASE", _base, __VA_ARGS__ }, \
2636+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2637+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
2638+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2639+
2640+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2641+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2642+
2643+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2644+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
2645+
2646+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
2647+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
2648+
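
As a reading aid, one ring macro expanded by hand: DUMP_WED_RING(WED_RING_TX(0)) yields four reg_dump entries, one per ring register:

    /* { "WED_RING_TX(0) BASE", MTK_WED_RING_TX(0) + 0x0, DUMP_TYPE_WED },
     * { "WED_RING_TX(0) CNT",  MTK_WED_RING_TX(0) + 0x4, DUMP_TYPE_WED },
     * { "WED_RING_TX(0) CIDX", MTK_WED_RING_TX(0) + 0x8, DUMP_TYPE_WED },
     * { "WED_RING_TX(0) DIDX", MTK_WED_RING_TX(0) + 0xc, DUMP_TYPE_WED }
     */
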
2649+static void
2650+print_reg_val(struct seq_file *s, const char *name, u32 val)
2651+{
2652+ seq_printf(s, "%-32s %08x\n", name, val);
2653+}
2654+
2655+static void
2656+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2657+ const struct reg_dump *regs, int n_regs)
2658+{
2659+ const struct reg_dump *cur;
2660+ u32 val;
2661+
2662+ for (cur = regs; cur < &regs[n_regs]; cur++) {
2663+ switch (cur->type) {
2664+ case DUMP_TYPE_STRING:
2665+ seq_printf(s, "%s======== %s:\n",
2666+ cur > regs ? "\n" : "",
2667+ cur->name);
2668+ continue;
2669+ case DUMP_TYPE_WED:
2670+ val = wed_r32(dev, cur->offset);
2671+ break;
2672+ case DUMP_TYPE_WDMA:
2673+ val = wdma_r32(dev, cur->offset);
2674+ break;
2675+ case DUMP_TYPE_WPDMA_TX:
2676+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2677+ break;
2678+ case DUMP_TYPE_WPDMA_TXFREE:
2679+ val = wpdma_txfree_r32(dev, cur->offset);
2680+ break;
2681+ }
2682+ print_reg_val(s, cur->name, val);
2683+ }
2684+}
2685+
2686+
2687+static int
2688+wed_txinfo_show(struct seq_file *s, void *data)
2689+{
2690+ static const struct reg_dump regs[] = {
2691+ DUMP_STR("WED TX"),
2692+ DUMP_WED(WED_TX_MIB(0)),
2693+ DUMP_WED_RING(WED_RING_TX(0)),
2694+
2695+ DUMP_WED(WED_TX_MIB(1)),
2696+ DUMP_WED_RING(WED_RING_TX(1)),
2697+
2698+ DUMP_STR("WPDMA TX"),
2699+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
2700+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
2701+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
2702+
2703+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
2704+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
2705+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
2706+
2707+ DUMP_STR("WPDMA TX"),
2708+ DUMP_WPDMA_TX_RING(0),
2709+ DUMP_WPDMA_TX_RING(1),
2710+
2711+ DUMP_STR("WED WDMA RX"),
2712+ DUMP_WED(WED_WDMA_RX_MIB(0)),
2713+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
2714+ DUMP_WED(WED_WDMA_RX_THRES(0)),
2715+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
2716+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
2717+
2718+ DUMP_WED(WED_WDMA_RX_MIB(1)),
2719+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
2720+ DUMP_WED(WED_WDMA_RX_THRES(1)),
2721+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
2722+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
2723+
2724+ DUMP_STR("WDMA RX"),
2725+ DUMP_WDMA(WDMA_GLO_CFG),
2726+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
2727+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
2728+ };
2729+ struct mtk_wed_hw *hw = s->private;
2730+ struct mtk_wed_device *dev = hw->wed_dev;
2731+
2732+ if (!dev)
2733+ return 0;
2734+
2735+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2736+
2737+ return 0;
2738+}
2739+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2740+
2741+
2742+static int
2743+mtk_wed_reg_set(void *data, u64 val)
2744+{
2745+ struct mtk_wed_hw *hw = data;
2746+
2747+ regmap_write(hw->regs, hw->debugfs_reg, val);
2748+
2749+ return 0;
2750+}
2751+
2752+static int
2753+mtk_wed_reg_get(void *data, u64 *val)
2754+{
2755+ struct mtk_wed_hw *hw = data;
2756+ unsigned int regval;
2757+ int ret;
2758+
2759+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
2760+ if (ret)
2761+ return ret;
2762+
2763+ *val = regval;
2764+
2765+ return 0;
2766+}
2767+
2768+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2769+ "0x%08llx\n");
2770+
2771+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2772+{
2773+ struct dentry *dir;
2774+
2775+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
2776+ dir = debugfs_create_dir(hw->dirname, NULL);
2777+	if (IS_ERR(dir))
2778+ return;
2779+
2780+ hw->debugfs_dir = dir;
2781+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2782+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2783+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2784+}
2785diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2786new file mode 100644
2787index 000000000..a5d9d8a5b
2788--- /dev/null
2789+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2790@@ -0,0 +1,8 @@
2791+// SPDX-License-Identifier: GPL-2.0-only
2792+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2793+
2794+#include <linux/kernel.h>
2795+#include <linux/soc/mediatek/mtk_wed.h>
2796+
2797+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2798+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
2799diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2800new file mode 100644
2801index 000000000..0a0465ea5
2802--- /dev/null
2803+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2804@@ -0,0 +1,251 @@
2805+// SPDX-License-Identifier: GPL-2.0-only
2806+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2807+
2808+#ifndef __MTK_WED_REGS_H
2809+#define __MTK_WED_REGS_H
2810+
2811+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2812+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2813+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
2814+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2815+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2816+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2817+
2818+struct mtk_wdma_desc {
2819+ __le32 buf0;
2820+ __le32 ctrl;
2821+ __le32 buf1;
2822+ __le32 info;
2823+} __packed __aligned(4);
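
The ctrl word packs both buffer lengths plus the segment flags: bits 14:0 carry LEN1, bits 29:16 carry LEN0. A standalone sketch of the packing that mtk_wed_buffer_alloc() performs, with userspace stand-ins for the kernel's GENMASK()/FIELD_PREP(); the 2048-byte buffer and 128-byte TXD are assumed example values:

    #include <stdio.h>
    #include <stdint.h>

    /* Userspace equivalents of the kernel bitfield helpers. */
    #define GENMASK_U32(h, l) \
    	((0xffffffffu >> (31 - (h))) & ~((1u << (l)) - 1u))
    #define FIELD_PREP_U32(mask, val) \
    	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

    #define CTRL_LEN1      GENMASK_U32(14, 0)
    #define CTRL_LAST_SEG1 (1u << 15)
    #define CTRL_LEN0      GENMASK_U32(29, 16)

    int main(void)
    {
    	uint32_t txd_size = 128;	/* hypothetical TXD size */
    	uint32_t buf_size = 2048;	/* assumed MTK_WED_BUF_SIZE */
    	uint32_t ctrl;

    	ctrl = FIELD_PREP_U32(CTRL_LEN0, txd_size) |
    	       FIELD_PREP_U32(CTRL_LEN1, buf_size - txd_size) |
    	       CTRL_LAST_SEG1;
    	printf("ctrl = %08x\n", ctrl);	/* prints 00808780 */
    	return 0;
    }
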
2824+
2825+#define MTK_WED_RESET 0x008
2826+#define MTK_WED_RESET_TX_BM BIT(0)
2827+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
2828+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
2829+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
2830+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
2831+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
2832+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2833+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2834+#define MTK_WED_RESET_WED BIT(31)
2835+
2836+#define MTK_WED_CTRL 0x00c
2837+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
2838+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
2839+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
2840+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
2841+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
2842+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2843+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2844+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2845+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2846+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2847+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2848+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2849+
2850+#define MTK_WED_EXT_INT_STATUS 0x020
2851+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
2852+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
2853+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
2854+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
2855+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
2856+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2857+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2858+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2859+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2860+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2861+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
2862+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
2863+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
2864+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
2865+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
2866+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2867+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2868+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2869+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2870+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2871+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2872+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
2873+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
2874+
2875+#define MTK_WED_EXT_INT_MASK 0x028
2876+
2877+#define MTK_WED_STATUS 0x060
2878+#define MTK_WED_STATUS_TX GENMASK(15, 8)
2879+
2880+#define MTK_WED_TX_BM_CTRL 0x080
2881+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
2882+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
2883+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
2884+
2885+#define MTK_WED_TX_BM_BASE 0x084
2886+
2887+#define MTK_WED_TX_BM_TKID 0x088
2888+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
2889+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
2890+
2891+#define MTK_WED_TX_BM_BUF_LEN 0x08c
2892+
2893+#define MTK_WED_TX_BM_INTF 0x09c
2894+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
2895+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
2896+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
2897+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
2898+
2899+#define MTK_WED_TX_BM_DYN_THR 0x0a0
2900+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
2901+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
2902+
2903+#define MTK_WED_INT_STATUS 0x200
2904+#define MTK_WED_INT_MASK 0x204
2905+
2906+#define MTK_WED_GLO_CFG 0x208
2907+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
2908+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
2909+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
2910+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
2911+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2912+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
2913+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
2914+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2915+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2916+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2917+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2918+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2919+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2920+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
2921+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2922+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
2923+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
2924+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
2925+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
2926+
2927+#define MTK_WED_RESET_IDX 0x20c
2928+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
2929+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
2930+
2931+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
2932+
2933+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
2934+
2935+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2936+
2937+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
2938+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2939+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2940+
2941+#define MTK_WED_WPDMA_GLO_CFG 0x508
2942+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
2943+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
2944+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
2945+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2946+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2947+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2948+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
2949+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2950+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2951+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2952+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2953+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2954+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2955+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
2956+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2957+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
2958+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
2959+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
2960+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
2961+
2962+#define MTK_WED_WPDMA_RESET_IDX 0x50c
2963+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
2964+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
2965+
2966+#define MTK_WED_WPDMA_INT_CTRL 0x520
2967+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
2968+
2969+#define MTK_WED_WPDMA_INT_MASK 0x524
2970+
2971+#define MTK_WED_PCIE_CFG_BASE 0x560
2972+
2973+#define MTK_WED_PCIE_INT_TRIGGER 0x570
2974+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
2975+
2976+#define MTK_WED_WPDMA_CFG_BASE 0x580
2977+
2978+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2979+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2980+
2981+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2982+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2983+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2984+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2985+
2986+#define MTK_WED_WDMA_GLO_CFG 0xa04
2987+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2988+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2989+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2990+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2991+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2992+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
2993+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
2994+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
2995+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
2996+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
2997+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
2998+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
2999+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
3000+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
3001+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
3002+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
3003+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
3004+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
3005+
3006+#define MTK_WED_WDMA_RESET_IDX 0xa08
3007+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
3008+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
3009+
3010+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
3011+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3012+
3013+#define MTK_WED_WDMA_INT_CTRL 0xa2c
3014+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
3015+
3016+#define MTK_WED_WDMA_OFFSET0 0xaa4
3017+#define MTK_WED_WDMA_OFFSET1 0xaa8
3018+
3019+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
3020+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
3021+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
3022+
3023+#define MTK_WED_RING_OFS_BASE 0x00
3024+#define MTK_WED_RING_OFS_COUNT 0x04
3025+#define MTK_WED_RING_OFS_CPU_IDX 0x08
3026+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
3027+
3028+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
3029+
3030+#define MTK_WDMA_GLO_CFG 0x204
3031+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
3032+
3033+#define MTK_WDMA_RESET_IDX 0x208
3034+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
3035+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
3036+
3037+#define MTK_WDMA_INT_MASK 0x228
3038+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
3039+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
3040+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
3041+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
3042+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
3043+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
3044+
3045+#define MTK_WDMA_INT_GRP1 0x250
3046+#define MTK_WDMA_INT_GRP2 0x254
3047+
3048+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3049+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3050+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3051+
3052+/* DMA channel mapping */
3053+#define HIFSYS_DMA_AG_MAP 0x008
3054+
3055+#endif
3056diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3057index 9f64504ac..35998b1a7 100644
3058--- a/include/linux/netdevice.h
3059+++ b/include/linux/netdevice.h
3060@@ -835,6 +835,7 @@ enum net_device_path_type {
3061 DEV_PATH_BRIDGE,
3062 DEV_PATH_PPPOE,
3063 DEV_PATH_DSA,
3064+ DEV_PATH_MTK_WDMA,
3065 };
3066
3067 struct net_device_path {
3068@@ -860,6 +861,12 @@ struct net_device_path {
3069 int port;
3070 u16 proto;
3071 } dsa;
3072+ struct {
3073+ u8 wdma_idx;
3074+ u8 queue;
3075+ u16 wcid;
3076+ u8 bss;
3077+ } mtk_wdma;
3078 };
3079 };
3080
3081diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3082new file mode 100644
3083index 000000000..7e00cca06
3084--- /dev/null
3085+++ b/include/linux/soc/mediatek/mtk_wed.h
3086@@ -0,0 +1,131 @@
3087+#ifndef __MTK_WED_H
3088+#define __MTK_WED_H
3089+
3090+#include <linux/kernel.h>
3091+#include <linux/rcupdate.h>
3092+#include <linux/regmap.h>
3093+#include <linux/pci.h>
3094+
3095+#define MTK_WED_TX_QUEUES 2
3096+
3097+struct mtk_wed_hw;
3098+struct mtk_wdma_desc;
3099+
3100+struct mtk_wed_ring {
3101+ struct mtk_wdma_desc *desc;
3102+ dma_addr_t desc_phys;
3103+ int size;
3104+
3105+ u32 reg_base;
3106+ void __iomem *wpdma;
3107+};
3108+
3109+struct mtk_wed_device {
3110+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3111+ const struct mtk_wed_ops *ops;
3112+ struct device *dev;
3113+ struct mtk_wed_hw *hw;
3114+ bool init_done, running;
3115+ int wdma_idx;
3116+ int irq;
3117+
3118+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3119+ struct mtk_wed_ring txfree_ring;
3120+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3121+
3122+ struct {
3123+ int size;
3124+ void **pages;
3125+ struct mtk_wdma_desc *desc;
3126+ dma_addr_t desc_phys;
3127+ } buf_ring;
3128+
3129+ /* filled by driver: */
3130+ struct {
3131+ struct pci_dev *pci_dev;
3132+
3133+ u32 wpdma_phys;
3134+
3135+ u16 token_start;
3136+ unsigned int nbuf;
3137+
3138+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3139+ int (*offload_enable)(struct mtk_wed_device *wed);
3140+ void (*offload_disable)(struct mtk_wed_device *wed);
3141+ } wlan;
3142+#endif
3143+};
3144+
3145+struct mtk_wed_ops {
3146+ int (*attach)(struct mtk_wed_device *dev);
3147+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
3148+ void __iomem *regs);
3149+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3150+ void __iomem *regs);
3151+ void (*detach)(struct mtk_wed_device *dev);
3152+
3153+ void (*stop)(struct mtk_wed_device *dev);
3154+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
3155+ void (*reset_dma)(struct mtk_wed_device *dev);
3156+
3157+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
3158+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
3159+
3160+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3161+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3162+};
3163+
3164+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3165+
3166+static inline int
3167+mtk_wed_device_attach(struct mtk_wed_device *dev)
3168+{
3169+ int ret = -ENODEV;
3170+
3171+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3172+ rcu_read_lock();
3173+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
3174+ if (dev->ops)
3175+ ret = dev->ops->attach(dev);
3176+ else
3177+ rcu_read_unlock();
3178+
3179+ if (ret)
3180+ dev->ops = NULL;
3181+#endif
3182+
3183+ return ret;
3184+}
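
For context, this is the inline a WLAN driver calls to hand its rings over to WED. A hypothetical caller sketch follows; every mywifi_* name, MYWIFI_WPDMA_OFS, and the numeric values are illustrative assumptions, not part of this patch — only the mtk_wed_device fields and mtk_wed_device_attach() itself come from the header above.

    #include <linux/pci.h>
    #include <linux/soc/mediatek/mtk_wed.h>

    #define MYWIFI_WPDMA_OFS 0xd4000	/* assumed PDMA register offset */

    struct mywifi_dev {
    	struct mtk_wed_device wed;
    };

    static u32 mywifi_init_wed_buf(void *ptr, dma_addr_t phys, int token_id)
    {
    	/* write a TX descriptor for token_id at ptr/phys here */
    	return 128;	/* size of the TXD just written */
    }

    static int mywifi_offload_enable(struct mtk_wed_device *wed)
    {
    	return 0;	/* switch the WLAN hardware into offload mode */
    }

    static void mywifi_offload_disable(struct mtk_wed_device *wed)
    {
    }

    static int mywifi_wed_init(struct mywifi_dev *mw, struct pci_dev *pdev)
    {
    	struct mtk_wed_device *wed = &mw->wed;

    	wed->wlan.pci_dev = pdev;
    	wed->wlan.wpdma_phys = pci_resource_start(pdev, 0) +
    			       MYWIFI_WPDMA_OFS;
    	wed->wlan.nbuf = 1024;
    	wed->wlan.token_start = 0;
    	wed->wlan.init_buf = mywifi_init_wed_buf;
    	wed->wlan.offload_enable = mywifi_offload_enable;
    	wed->wlan.offload_disable = mywifi_offload_disable;

    	/* returns -ENODEV (driver keeps its software TX path) when no
    	 * WED ops have been registered by mtk_wed_add_hw()
    	 */
    	return mtk_wed_device_attach(wed);
    }
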
3185+
3186+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3187+#define mtk_wed_device_active(_dev) !!(_dev)->ops
3188+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3189+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
3190+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
3191+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3192+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3193+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
3194+#define mtk_wed_device_reg_read(_dev, _reg) \
3195+ (_dev)->ops->reg_read(_dev, _reg)
3196+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
3197+ (_dev)->ops->reg_write(_dev, _reg, _val)
3198+#define mtk_wed_device_irq_get(_dev, _mask) \
3199+ (_dev)->ops->irq_get(_dev, _mask)
3200+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
3201+ (_dev)->ops->irq_set_mask(_dev, _mask)
3202+#else
3203+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3204+{
3205+ return false;
3206+}
3207+#define mtk_wed_device_detach(_dev) do {} while (0)
3208+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
3209+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3210+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
3211+#define mtk_wed_device_reg_read(_dev, _reg) 0
3212+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3213+#define mtk_wed_device_irq_get(_dev, _mask) 0
3214+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3215+#endif
3216+
3217+#endif
3218diff --git a/net/core/dev.c b/net/core/dev.c
3219index 4f0edb218..031ac7c6f 100644
3220--- a/net/core/dev.c
3221+++ b/net/core/dev.c
3222@@ -675,6 +675,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3223 if (WARN_ON_ONCE(last_dev == ctx.dev))
3224 return -1;
3225 }
3226+
3227+ if (!ctx.dev)
3228+ return ret;
3229+
3230 path = dev_fwd_path(stack);
3231 if (!path)
3232 return -1;
3233--
32342.18.0
3235