From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 2 Jun 2022 15:32:07 +0800
Subject: [PATCH 4/8] 9993-add-wed

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 arch/arm64/boot/dts/mediatek/mt7622.dtsi | 32 +-
 drivers/net/ethernet/mediatek/Kconfig | 4 +
 drivers/net/ethernet/mediatek/Makefile | 5 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 136 ++-
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 14 +-
 drivers/net/ethernet/mediatek/mtk_ppe.c | 373 +++++++-
 drivers/net/ethernet/mediatek/mtk_ppe.h | 89 +-
 .../net/ethernet/mediatek/mtk_ppe_debugfs.c | 4 +-
 .../net/ethernet/mediatek/mtk_ppe_offload.c | 167 +++-
 drivers/net/ethernet/mediatek/mtk_wed.c | 876 ++++++++++++++++++
 drivers/net/ethernet/mediatek/mtk_wed.h | 135 +++
 .../net/ethernet/mediatek/mtk_wed_debugfs.c | 175 ++++
 drivers/net/ethernet/mediatek/mtk_wed_ops.c | 8 +
 drivers/net/ethernet/mediatek/mtk_wed_regs.h | 251 +++++
 include/linux/netdevice.h | 7 +
 include/linux/soc/mediatek/mtk_wed.h | 131 +++
 net/core/dev.c | 4 +
 17 files changed, 2283 insertions(+), 128 deletions(-)
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
 create mode 100644 include/linux/soc/mediatek/mtk_wed.h

diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 369e01389..d0fbc367e 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -338,7 +338,7 @@
 };

 cci_control2: slave-if@5000 {
- compatible = "arm,cci-400-ctrl-if";
+ compatible = "arm,cci-400-ctrl-if", "syscon";
 interface-type = "ace";
 reg = <0x5000 0x1000>;
 };
@@ -920,6 +920,11 @@
 };
 };

+ hifsys: syscon@1af00000 {
+ compatible = "mediatek,mt7622-hifsys", "syscon";
+ reg = <0 0x1af00000 0 0x70>;
+ };
+
 ethsys: syscon@1b000000 {
 compatible = "mediatek,mt7622-ethsys",
 "syscon";
@@ -938,6 +943,26 @@
 #dma-cells = <1>;
 };

+ pcie_mirror: pcie-mirror@10000400 {
+ compatible = "mediatek,mt7622-pcie-mirror",
+ "syscon";
+ reg = <0 0x10000400 0 0x10>;
+ };
+
+ wed0: wed@1020a000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020a000 0 0x1000>;
+ interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+ };
+
+ wed1: wed@1020b000 {
+ compatible = "mediatek,mt7622-wed",
+ "syscon";
+ reg = <0 0x1020b000 0 0x1000>;
+ interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
+ };
+
 eth: ethernet@1b100000 {
 compatible = "mediatek,mt7622-eth",
 "mediatek,mt2701-eth",
@@ -964,6 +989,11 @@
 power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
 mediatek,ethsys = <&ethsys>;
 mediatek,sgmiisys = <&sgmiisys>;
+ mediatek,cci-control = <&cci_control2>;
+ mediatek,wed = <&wed0>, <&wed1>;
+ mediatek,pcie-mirror = <&pcie_mirror>;
+ mediatek,hifsys = <&hifsys>;
+ dma-coherent;
 #address-cells = <1>;
 #size-cells = <0>;
 status = "disabled";
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
old mode 100755
new mode 100644
index 42e6b38d2..8ab6615a3
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK

 if NET_VENDOR_MEDIATEK

+config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+
 config NET_MEDIATEK_SOC
 tristate "MediaTek SoC Gigabit Ethernet support"
 select PHYLINK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
old mode 100755
new mode 100644
index 0a6af99f1..3528f1b3c
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -6,4 +6,9 @@
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
 mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
old mode 100755
new mode 100644
index 819d8a0be..2121335a1
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -19,12 +20,14 @@
 #include <linux/interrupt.h>
 #include <linux/pinctrl/devinfo.h>
 #include <linux/phylink.h>
+#include <linux/bitfield.h>
 #include <net/dsa.h>

 #include "mtk_eth_soc.h"
 #include "mtk_eth_dbg.h"
 #include "mtk_eth_reset.h"
 #include "mtk_hnat/hnat.h"
+#include "mtk_wed.h"

 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
 #include "mtk_hnat/nf_hnat_mtk.h"
@@ -850,7 +853,7 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 int i;

 if (!eth->soc->has_sram) {
- eth->scratch_ring = dma_alloc_coherent(eth->dev,
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
 cnt * sizeof(struct mtk_tx_dma),
 &eth->phy_scratch_ring,
 GFP_ATOMIC);
@@ -866,10 +869,10 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
 if (unlikely(!eth->scratch_head))
 return -ENOMEM;

- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 return -ENOMEM;

 phy_ring_tail = eth->phy_scratch_ring +
@@ -933,26 +936,26 @@ static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
 {
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr0),
 dma_unmap_len(tx_buf, dma_len0),
 DMA_TO_DEVICE);
 } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr0),
 dma_unmap_len(tx_buf, dma_len0),
 DMA_TO_DEVICE);
 }
 } else {
 if (dma_unmap_len(tx_buf, dma_len0)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr0),
 dma_unmap_len(tx_buf, dma_len0),
 DMA_TO_DEVICE);
 }

 if (dma_unmap_len(tx_buf, dma_len1)) {
- dma_unmap_page(eth->dev,
+ dma_unmap_page(eth->dma_dev,
 dma_unmap_addr(tx_buf, dma_addr1),
 dma_unmap_len(tx_buf, dma_len1),
 DMA_TO_DEVICE);
@@ -1017,9 +1020,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 itx_buf = mtk_desc_to_tx_buf(ring, itxd);
 memset(itx_buf, 0, sizeof(*itx_buf));

- mapped_addr = dma_map_single(eth->dev, skb->data,
+ mapped_addr = dma_map_single(eth->dma_dev, skb->data,
 skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
 return -ENOMEM;

 WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -1114,10 +1117,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,


 frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
+ mapped_addr = skb_frag_dma_map(eth->dma_dev, frag, offset,
 frag_map_size,
 DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, mapped_addr)))
 goto err_dma;

 if (i == nr_frags - 1 &&
@@ -1384,6 +1387,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 struct net_device *netdev;
 unsigned int pktlen;
 dma_addr_t dma_addr;
+ u32 hash, reason;
 int mac;

 if (eth->hwlro)
@@ -1427,18 +1431,18 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 netdev->stats.rx_dropped++;
 goto release_desc;
 }
- dma_addr = dma_map_single(eth->dev,
+ dma_addr = dma_map_single(eth->dma_dev,
 new_data + NET_SKB_PAD +
 eth->ip_align,
 ring->buf_size,
 DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
 skb_free_frag(new_data);
 netdev->stats.rx_dropped++;
 goto release_desc;
 }

- dma_unmap_single(eth->dev, trxd.rxd1,
+ dma_unmap_single(eth->dma_dev, trxd.rxd1,
 ring->buf_size, DMA_FROM_DEVICE);

 /* receive data */
@@ -1463,6 +1467,17 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
 skb_checksum_none_assert(skb);
 skb->protocol = eth_type_trans(skb, netdev);

+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY) {
+ hash = jhash_1word(hash, 0);
+ skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+ }
+
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe, skb,
+ trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
 if (trxd.rxd3 & RX_DMA_VTAG_V2)
@@ -1748,7 +1763,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 goto no_tx_mem;

 if (!eth->soc->has_sram)
- ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 &ring->phys, GFP_ATOMIC);
 else {
 ring->dma = eth->scratch_ring + MTK_DMA_SIZE;
@@ -1780,7 +1795,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
 * descriptors in ring->dma_pdma.
 */
 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
- ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 &ring->phys_pdma,
 GFP_ATOMIC);
 if (!ring->dma_pdma)
@@ -1839,7 +1854,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 }

 if (!eth->soc->has_sram && ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 MTK_DMA_SIZE * sizeof(*ring->dma),
 ring->dma,
 ring->phys);
@@ -1847,7 +1862,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
 }

 if (ring->dma_pdma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 MTK_DMA_SIZE * sizeof(*ring->dma_pdma),
 ring->dma_pdma,
 ring->phys_pdma);
@@ -1892,7 +1907,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)

 if ((!eth->soc->has_sram) || (eth->soc->has_sram
 && (rx_flag != MTK_RX_FLAGS_NORMAL)))
- ring->dma = dma_alloc_coherent(eth->dev,
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
 rx_dma_size * sizeof(*ring->dma),
 &ring->phys, GFP_ATOMIC);
 else {
@@ -1907,11 +1922,11 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 return -ENOMEM;

 for (i = 0; i < rx_dma_size; i++) {
- dma_addr_t dma_addr = dma_map_single(eth->dev,
+ dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
 ring->data[i] + NET_SKB_PAD + eth->ip_align,
 ring->buf_size,
 DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 return -ENOMEM;
 ring->dma[i].rxd1 = (unsigned int)dma_addr;

@@ -1968,7 +1983,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
 continue;
 if (!ring->dma[i].rxd1)
 continue;
- dma_unmap_single(eth->dev,
+ dma_unmap_single(eth->dma_dev,
 ring->dma[i].rxd1,
 ring->buf_size,
 DMA_FROM_DEVICE);
@@ -1982,7 +1997,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, int in_s
 return;

 if (ring->dma) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 ring->dma_size * sizeof(*ring->dma),
 ring->dma,
 ring->phys);
@@ -2462,7 +2477,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
 if (eth->netdev[i])
 netdev_reset_queue(eth->netdev[i]);
 if ( !eth->soc->has_sram && eth->scratch_ring) {
- dma_free_coherent(eth->dev,
+ dma_free_coherent(eth->dma_dev,
 MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
 eth->scratch_ring,
 eth->phy_scratch_ring);
@@ -2661,7 +2676,7 @@ static int mtk_open(struct net_device *dev)
 if (err)
 return err;

- if (eth->soc->offload_version && mtk_ppe_start(&eth->ppe) == 0)
+ if (eth->soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
 gdm_config = MTK_GDMA_TO_PPE;

 mtk_gdm_config(eth, gdm_config);
@@ -2778,7 +2793,7 @@ static int mtk_stop(struct net_device *dev)
 mtk_dma_free(eth);

 if (eth->soc->offload_version)
- mtk_ppe_stop(&eth->ppe);
+ mtk_ppe_stop(eth->ppe);

 return 0;
 }
@@ -2855,6 +2870,8 @@ static int mtk_napi_init(struct mtk_eth *eth)

 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
 {
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
 int i, ret = 0;

 pr_info("[%s] reset_lock:%d, force:%d\n", __func__,
@@ -2872,6 +2889,10 @@ static int mtk_hw_init(struct mtk_eth *eth, u32 type)
 goto err_disable_pm;
 }

+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 ret = device_reset(eth->dev);
 if (ret) {
@@ -3501,6 +3522,35 @@ free_netdev:
 return err;
 }

+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 struct device_node *mac_np;
@@ -3514,6 +3564,7 @@ static int mtk_probe(struct platform_device *pdev)
 eth->soc = of_device_get_match_data(&pdev->dev);

 eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
 eth->base = devm_platform_ioremap_resource(pdev, 0);
 if (IS_ERR(eth->base))
 return PTR_ERR(eth->base);
@@ -3567,6 +3618,16 @@ static int mtk_probe(struct platform_device *pdev)
 }
 }

+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
+
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,cci-control");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii),
 GFP_KERNEL);
@@ -3589,6 +3650,22 @@ static int mtk_probe(struct platform_device *pdev)
 }
 }

+ for (i = 0;; i++) {
+ struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ static const u32 wdma_regs[] = {
+ MTK_WDMA0_BASE,
+ MTK_WDMA1_BASE
+ };
+ void __iomem *wdma;
+
+ if (!np || i >= ARRAY_SIZE(wdma_regs))
+ break;
+
+ wdma = eth->base + wdma_regs[i];
+ mtk_wed_add_hw(np, eth, wdma, i);
+ }
+
 for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
 eth->irq[i] = eth->irq[0];
@@ -3692,10 +3769,11 @@ static int mtk_probe(struct platform_device *pdev)
 }

 if (eth->soc->offload_version) {
- err = mtk_ppe_init(&eth->ppe, eth->dev,
- eth->base + MTK_ETH_PPE_BASE, 2);
- if (err)
+ eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
+ if (!eth->ppe) {
+ err = -ENOMEM;
 goto err_free_dev;
+ }

 err = mtk_eth_offload_init(eth);
 if (err)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
old mode 100755
new mode 100644
index 349f98503..b52378bd6
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -517,6 +517,9 @@
 #define RX_DMA_SPORT_MASK 0x7
 #endif

+#define MTK_WDMA0_BASE 0x2800
+#define MTK_WDMA1_BASE 0x2c00
+
 /* QDMA descriptor txd4 */
 #define TX_DMA_CHKSUM (0x7 << 29)
 #define TX_DMA_TSO BIT(28)
@@ -704,6 +707,12 @@
 #define ETHSYS_FE_RST_CHK_IDLE_EN 0x28


+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
+
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
 #define SGMSYS_PCS_CONTROL_1 0x0
@@ -1209,6 +1218,7 @@ struct mtk_reset_event {
 /* struct mtk_eth - This is the main datasructure for holding the state
 * of the driver
 * @dev: The device pointer
+ * @dma_dev: The device pointer used for dma mapping/alloc
 * @base: The mapped register i/o base
 * @page_lock: Make sure that register operations are atomic
 * @tx_irq__lock: Make sure that IRQ register operations are atomic
@@ -1243,6 +1253,7 @@ struct mtk_reset_event {

 struct mtk_eth {
 struct device *dev;
+ struct device *dma_dev;
 void __iomem *base;
 spinlock_t page_lock;
 spinlock_t tx_irq_lock;
@@ -1283,7 +1294,7 @@ struct mtk_eth {
 spinlock_t syscfg0_lock;
 struct timer_list mtk_dma_monitor_timer;

- struct mtk_ppe ppe;
+ struct mtk_ppe *ppe;
 struct rhashtable flow_table;
 };

@@ -1336,5 +1347,6 @@ void ethsys_reset(struct mtk_eth *eth, u32 reset_bits);
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);

 #endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
old mode 100644
new mode 100755
index 66298e223..3d75c22be
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
 #include <linux/iopoll.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"

+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
 {
 writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
 return ppe_m32(ppe, reg, val, 0);
 }

+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
 {
 int ret;
@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
 u32 hash;

 switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
- case MTK_PPE_PKT_TYPE_BRIDGE:
- hv1 = e->bridge.src_mac_lo;
- hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
- hv2 = e->bridge.src_mac_hi >> 16;
- hv2 ^= e->bridge.dest_mac_lo;
- hv3 = e->bridge.dest_mac_hi;
- break;
 case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
 case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
 hv1 = e->ipv4.orig.ports;
@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
 {
 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
+
 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 return &entry->ipv6.l2;

@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
 {
 int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);

+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
+
 if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 return &entry->ipv6.ib2;

@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
 if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
 entry->ipv6.ports = ports_pad;

- if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
 entry->ipv6.ib2 = val;
 l2 = &entry->ipv6.l2;
 } else {
@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
 return 0;
 }

+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+ return 0;
+}
+
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
 FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }

-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+ ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+ dma_wmb();
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u16 timestamp;
+ u16 now;
+
+ now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+ timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+ if (timestamp > now)
+ return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
+ struct mtk_flow_entry *cur;
 struct mtk_foe_entry *hwe;
- u32 hash;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
+
+ hwe = &ppe->foe_table[cur->hash];
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry *hwe;
+ struct mtk_foe_entry foe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
+
+ if (entry->hash == 0xffff)
+ goto out;
+
+ hwe = &ppe->foe_table[entry->hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ if (!mtk_flow_entry_match(entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_foe_entry *hwe;
+ u16 timestamp;

+ timestamp = mtk_eth_timestamp(ppe->eth);
 timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
 entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
 entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);

- hash = mtk_ppe_hash_entry(entry);
 hwe = &ppe->foe_table[hash];
- if (!mtk_foe_entry_usable(hwe)) {
- hwe++;
- hash++;
-
- if (!mtk_foe_entry_usable(hwe))
- return -ENOSPC;
- }
-
 memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
 wmb();
 hwe->ib1 = entry->ib1;
@@ -362,32 +519,197 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 dma_wmb();

 mtk_ppe_cache_clear(ppe);
+}

- return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(&entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+ GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = &ppe->foe_table[hash];
+ memcpy(&foe, hwe, sizeof(foe));
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(&foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
 }

-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ struct hlist_head *head = &ppe->foe_flow[hash / 4];
+ struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry(entry, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ if (!skb)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ tag += 4;
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
 int version)
 {
+ struct device *dev = eth->dev;
 struct mtk_foe_entry *foe;
+ struct mtk_ppe *ppe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);

 /* need to allocate a separate device, since it PPE DMA access is
 * not coherent.
 */
 ppe->base = base;
+ ppe->eth = eth;
 ppe->dev = dev;
 ppe->version = version;

 foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
 &ppe->foe_phys, GFP_KERNEL);
 if (!foe)
- return -ENOMEM;
+ return NULL;

 ppe->foe_table = foe;

 mtk_ppe_debugfs_init(ppe);

- return 0;
+ return ppe;
 }

 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
 static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
 int i, k;

- memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+ memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));

 if (!IS_ENABLED(CONFIG_SOC_MT7621))
 return;
@@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 MTK_PPE_FLOW_CFG_IP4_NAT |
 MTK_PPE_FLOW_CFG_IP4_NAPT |
 MTK_PPE_FLOW_CFG_IP4_DSLITE |
- MTK_PPE_FLOW_CFG_L2_BRIDGE |
 MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
 ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);

diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2a..1f5cf1c9a 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,6 +6,7 @@

 #include <linux/kernel.h>
 #include <linux/bitfield.h>
+#include <linux/rhashtable.h>

 #define MTK_ETH_PPE_BASE 0xc00

@@ -48,9 +49,9 @@ enum {
 #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
 #define MTK_FOE_IB2_MULTICAST BIT(8)

-#define MTK_FOE_IB2_WHNAT_QID2 GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)

 #define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)

@@ -58,9 +59,9 @@ enum {

 #define MTK_FOE_IB2_DSCP GENMASK(31, 24)

-#define MTK_FOE_VLAN2_WHNAT_BSS GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING GENMASK(15, 14)
+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)

 enum {
 MTK_FOE_STATE_INVALID,
@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
 u16 src_mac_lo;
 };

+/* software-only entry type */
 struct mtk_foe_bridge {
- u32 dest_mac_hi;
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 vlan;

- u16 src_mac_lo;
- u16 dest_mac_lo;
-
- u32 src_mac_hi;
+ struct {} key_end;

 u32 ib2;

- u32 _rsv[5];
-
- u32 udf_tsid;
 struct mtk_foe_mac_info l2;
 };

@@ -235,7 +233,37 @@ enum {
 MTK_PPE_CPU_REASON_INVALID = 0x1f,
 };

+enum {
+ MTK_FLOW_TYPE_L4,
+ MTK_FLOW_TYPE_L2,
+ MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+ union {
+ struct hlist_node list;
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+ };
+ };
+ u8 type;
+ s8 wed_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+ struct {
+ struct mtk_flow_entry *base_flow;
+ struct hlist_node list;
+ struct {} end;
+ } l2_data;
+ };
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
 struct mtk_ppe {
+ struct mtk_eth *eth;
 struct device *dev;
 void __iomem *base;
 int version;
@@ -243,19 +271,35 @@ struct mtk_ppe {
 struct mtk_foe_entry *foe_table;
 dma_addr_t foe_phys;

+ u16 foe_check_time[MTK_PPE_ENTRIES];
+ struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
+ struct rhashtable l2_flows;
+
 void *acct_table;
 };

-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
- int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
 int mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);

+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
 static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
- ppe->foe_table[hash].ib1 = 0;
- dma_wmb();
+ u16 now, diff;
+
+ if (!ppe)
+ return;
+
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ __mtk_ppe_check_skb(ppe, skb, hash);
 }

 static inline int
@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
- u16 timestamp);
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+ int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);

 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b482340..a591ab1fd 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
 static const char * const type_str[] = {
 [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
 [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
- [MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
 [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
 [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
@@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
 struct dentry *root;

 root = debugfs_create_dir("mtk_ppe", NULL);
+ if (!root)
+ return -ENOMEM;
+
 debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
 debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);

diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 4294f0c74..d4a012608 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -11,6 +11,7 @@
 #include <net/pkt_cls.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
+#include "mtk_wed.h"

 struct mtk_flow_data {
 struct ethhdr eth;
@@ -30,6 +31,8 @@ struct mtk_flow_data {
 __be16 src_port;
 __be16 dst_port;

+ u16 vlan_in;
+
 struct {
 u16 id;
 __be16 proto;
@@ -41,12 +44,6 @@ struct mtk_flow_data {
 } pppoe;
 };

-struct mtk_flow_entry {
- struct rhash_head node;
- unsigned long cookie;
- u16 hash;
-};
-
 static const struct rhashtable_params mtk_flow_ht_params = {
 .head_offset = offsetof(struct mtk_flow_entry, node),
 .key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
 .automatic_shrinking = true,
 };

-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
- return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
 static int
 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
 bool egress)
@@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
 memcpy(dest, src, act->mangle.mask ? 2 : 4);
 }

+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+ struct net_device_path_ctx ctx = {
+ .dev = dev,
+ };
+ struct net_device_path path = {};
+
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
+ if (!dev->netdev_ops->ndo_fill_forward_path)
+ return -1;
+
+ memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
+ if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+ return -1;
+
+ if (path.type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->wdma_idx = path.mtk_wdma.wdma_idx;
+ info->queue = path.mtk_wdma.queue;
+ info->bss = path.mtk_wdma.bss;
+ info->wcid = path.mtk_wdma.wcid;
+
+ return 0;
+}
+

 static int
 mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)

 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev)
+ struct net_device *dev, const u8 *dest_mac,
+ int *wed_index)
 {
+ struct mtk_wdma_info info = {};
 int pse_port, dsa_port;

+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+ info.wcid);
+ pse_port = 3;
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
+
 dsa_port = mtk_flow_get_dsa_port(&dev);
 if (dsa_port >= 0)
 mtk_foe_entry_set_dsa(foe, dsa_port);
@@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 else
 return -EOPNOTSUPP;

+out:
 mtk_foe_entry_set_pse_port(foe, pse_port);

 return 0;
@@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 struct net_device *odev = NULL;
 struct mtk_flow_entry *entry;
 int offload_type = 0;
+ int wed_index = -1;
 u16 addr_type = 0;
- u32 timestamp;
 u8 l4proto = 0;
 int err = 0;
- int hash;
 int i;

 if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 return -EOPNOTSUPP;
 }

+ switch (addr_type) {
+ case 0:
+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan_in = match.key->vlan_id;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
 flow_action_for_each(i, act, &rule->action) {
 switch (act->id) {
 case FLOW_ACTION_MANGLE:
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
 if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
 mtk_flow_offload_mangle_eth(act, &data.eth);
 break;
@@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 }
 }

- switch (addr_type) {
- case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
- break;
- case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
- offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
 if (!is_valid_ether_addr(data.eth.h_source) ||
 !is_valid_ether_addr(data.eth.h_dest))
 return -EINVAL;
@@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
 struct flow_match_ports ports;

+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
 flow_rule_match_ports(rule, &ports);
 data.src_port = ports.key->src;
 data.dst_port = ports.key->dst;
- } else {
+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
 return -EOPNOTSUPP;
 }

@@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (act->id != FLOW_ACTION_MANGLE)
 continue;

+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
 switch (act->mangle.htype) {
 case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
 case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 return err;
 }

+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ foe.bridge.vlan = data.vlan_in;
+
 if (data.vlan.num == 1) {
 if (data.vlan.proto != htons(ETH_P_8021Q))
 return -EOPNOTSUPP;
@@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (data.pppoe.num == 1)
 mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);

- err = mtk_flow_set_output_device(eth, &foe, odev);
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
 if (err)
 return err;

+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 if (!entry)
 return -ENOMEM;

 entry->cookie = f->cookie;
- timestamp = mtk_eth_timestamp(eth);
- hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
- if (hash < 0) {
- err = hash;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+
+ if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
 goto free;
- }

- entry->hash = hash;
 err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
 mtk_flow_ht_params);
 if (err < 0)
- goto clear_flow;
+ goto clear;

 return 0;
-clear_flow:
- mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+ mtk_foe_entry_clear(eth->ppe, entry);
 free:
 kfree(entry);
+ if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);
 return err;
 }

@@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (!entry)
 return -ENOENT;

- mtk_foe_entry_clear(&eth->ppe, entry->hash);
+ mtk_foe_entry_clear(eth->ppe, entry);
 rhashtable_remove_fast(&eth->flow_table, &entry->node,
 mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+ mtk_wed_flow_remove(entry->wed_index);
 kfree(entry);

 return 0;
@@ -406,7 +477,6 @@ static int
 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 {
 struct mtk_flow_entry *entry;
- int timestamp;
 u32 idle;

 entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 if (!entry)
 return -ENOENT;

- timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
- if (timestamp < 0)
- return -ETIMEDOUT;
-
- idle = mtk_eth_timestamp(eth) - timestamp;
+ idle = mtk_foe_entry_idle_time(eth->ppe, entry);
 f->stats.lastused = jiffies - idle * HZ;

 return 0;
@@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 struct flow_block_cb *block_cb;
 flow_setup_cb_t *cb;

- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
 return -EOPNOTSUPP;

 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 void *type_data)
 {
- if (type == TC_SETUP_FT)
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
 return mtk_eth_setup_tc_block(dev, type_data);
-
- return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
 }

 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
- if (!eth->ppe.foe_table)
+ if (!eth->ppe || !eth->ppe->foe_table)
 return 0;

 return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
1580diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
1581new file mode 100644
1582index 000000000..ea1cbdf1a
1583--- /dev/null
1584+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
1585@@ -0,0 +1,876 @@
1586+// SPDX-License-Identifier: GPL-2.0-only
1587+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1588+
1589+#include <linux/kernel.h>
1590+#include <linux/slab.h>
1591+#include <linux/module.h>
1592+#include <linux/bitfield.h>
1593+#include <linux/dma-mapping.h>
1594+#include <linux/skbuff.h>
1595+#include <linux/of_platform.h>
1596+#include <linux/of_address.h>
1597+#include <linux/mfd/syscon.h>
1598+#include <linux/debugfs.h>
1599+#include <linux/iopoll.h>
1600+#include <linux/soc/mediatek/mtk_wed.h>
1601+#include "mtk_eth_soc.h"
1602+#include "mtk_wed_regs.h"
1603+#include "mtk_wed.h"
1604+#include "mtk_ppe.h"
1605+
1606+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
1607+
1608+#define MTK_WED_PKT_SIZE 1900
1609+#define MTK_WED_BUF_SIZE 2048
1610+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1611+
1612+#define MTK_WED_TX_RING_SIZE 2048
1613+#define MTK_WED_WDMA_RING_SIZE 1024
1614+
1615+static struct mtk_wed_hw *hw_list[2];
1616+static DEFINE_MUTEX(hw_lock);
1617+
1618+static void
1619+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1620+{
1621+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
1622+}
1623+
1624+static void
1625+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1626+{
1627+ return wed_m32(dev, reg, 0, mask);
1628+}
1629+
1630+static void
1631+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
1632+{
1633+ return wed_m32(dev, reg, mask, 0);
1634+}
1635+
1636+static void
1637+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1638+{
1639+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
1640+}
1641+
1642+static void
1643+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1644+{
1645+ wdma_m32(dev, reg, 0, mask);
1646+}
1647+
1648+static u32
1649+mtk_wed_read_reset(struct mtk_wed_device *dev)
1650+{
1651+ return wed_r32(dev, MTK_WED_RESET);
1652+}
1653+
1654+static void
1655+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
1656+{
1657+ u32 status;
1658+
1659+ wed_w32(dev, MTK_WED_RESET, mask);
1660+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
1661+ !(status & mask), 0, 1000))
1662+ WARN_ON_ONCE(1);
1663+}
1664+
1665+static struct mtk_wed_hw *
1666+mtk_wed_assign(struct mtk_wed_device *dev)
1667+{
1668+ struct mtk_wed_hw *hw;
1669+
1670+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
1671+ if (!hw || hw->wed_dev)
1672+ return NULL;
1673+
1674+ hw->wed_dev = dev;
1675+ return hw;
1676+}
1677+
1678+static int
1679+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
1680+{
1681+ struct mtk_wdma_desc *desc;
1682+ dma_addr_t desc_phys;
1683+ void **page_list;
1684+ int token = dev->wlan.token_start;
1685+ int ring_size;
1686+ int n_pages;
1687+ int i, page_idx;
1688+
1689+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
1690+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
1691+
1692+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
1693+ if (!page_list)
1694+ return -ENOMEM;
1695+
1696+ dev->buf_ring.size = ring_size;
1697+ dev->buf_ring.pages = page_list;
1698+
1699+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
1700+ &desc_phys, GFP_KERNEL);
1701+ if (!desc)
1702+ return -ENOMEM;
1703+
1704+ dev->buf_ring.desc = desc;
1705+ dev->buf_ring.desc_phys = desc_phys;
1706+
1707+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
1708+ dma_addr_t page_phys, buf_phys;
1709+ struct page *page;
1710+ void *buf;
1711+ int s;
1712+
1713+ page = __dev_alloc_pages(GFP_KERNEL, 0);
1714+ if (!page)
1715+ return -ENOMEM;
1716+
1717+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
1718+ DMA_BIDIRECTIONAL);
1719+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
1720+ __free_page(page);
1721+ return -ENOMEM;
1722+ }
1723+
1724+ page_list[page_idx++] = page;
1725+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
1726+ DMA_BIDIRECTIONAL);
1727+
1728+ buf = page_to_virt(page);
1729+ buf_phys = page_phys;
1730+
1731+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
1732+ u32 txd_size;
1733+
1734+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
1735+
1736+ desc->buf0 = buf_phys;
1737+ desc->buf1 = buf_phys + txd_size;
1738+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
1739+ txd_size) |
1740+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
1741+ MTK_WED_BUF_SIZE - txd_size) |
1742+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
1743+ desc->info = 0;
1744+ desc++;
1745+
1746+ buf += MTK_WED_BUF_SIZE;
1747+ buf_phys += MTK_WED_BUF_SIZE;
1748+ }
1749+
1750+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
1751+ DMA_BIDIRECTIONAL);
1752+ }
1753+
1754+ return 0;
1755+}
1756+
1757+static void
1758+mtk_wed_free_buffer(struct mtk_wed_device *dev)
1759+{
1760+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
1761+ void **page_list = dev->buf_ring.pages;
1762+ int page_idx;
1763+ int i;
1764+
1765+ if (!page_list)
1766+ return;
1767+
1768+ if (!desc)
1769+ goto free_pagelist;
1770+
1771+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
1772+ void *page = page_list[page_idx++];
1773+
1774+ if (!page)
1775+ break;
1776+
1777+ dma_unmap_page(dev->hw->dev, desc[i].buf0,
1778+ PAGE_SIZE, DMA_BIDIRECTIONAL);
1779+ __free_page(page);
1780+ }
1781+
1782+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
1783+ desc, dev->buf_ring.desc_phys);
1784+
1785+free_pagelist:
1786+ kfree(page_list);
1787+}
1788+
1789+static void
1790+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
1791+{
1792+ if (!ring->desc)
1793+ return;
1794+
1795+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
1796+ ring->desc, ring->desc_phys);
1797+}
1798+
1799+static void
1800+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
1801+{
1802+ int i;
1803+
1804+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
1805+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
1806+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1807+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
1808+}
1809+
1810+static void
1811+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
1812+{
1813+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1814+
1815+ if (!dev->hw->num_flows)
1816+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1817+
1818+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
1819+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
1820+}
1821+
1822+static void
1823+mtk_wed_stop(struct mtk_wed_device *dev)
1824+{
1825+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
1826+ mtk_wed_set_ext_int(dev, false);
1827+
1828+ wed_clr(dev, MTK_WED_CTRL,
1829+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1830+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1831+ MTK_WED_CTRL_WED_TX_BM_EN |
1832+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1833+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
1834+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
1835+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
1836+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
1837+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1838+
1839+ wed_clr(dev, MTK_WED_GLO_CFG,
1840+ MTK_WED_GLO_CFG_TX_DMA_EN |
1841+ MTK_WED_GLO_CFG_RX_DMA_EN);
1842+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1843+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1844+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1845+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1846+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1847+}
1848+
1849+static void
1850+mtk_wed_detach(struct mtk_wed_device *dev)
1851+{
1852+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
1853+ struct mtk_wed_hw *hw = dev->hw;
1854+
1855+ mutex_lock(&hw_lock);
1856+
1857+ mtk_wed_stop(dev);
1858+
1859+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1860+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1861+
1862+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1863+
1864+ mtk_wed_free_buffer(dev);
1865+ mtk_wed_free_tx_rings(dev);
1866+
1867+ if (of_dma_is_coherent(wlan_node))
1868+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1869+ BIT(hw->index), BIT(hw->index));
1870+
1871+ if (!hw_list[!hw->index]->wed_dev &&
1872+ hw->eth->dma_dev != hw->eth->dev)
1873+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
1874+
1875+ memset(dev, 0, sizeof(*dev));
1876+ module_put(THIS_MODULE);
1877+
1878+ hw->wed_dev = NULL;
1879+ mutex_unlock(&hw_lock);
1880+}
1881+
1882+static void
1883+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1884+{
1885+ u32 mask, set;
1886+ u32 offset;
1887+
1888+ mtk_wed_stop(dev);
1889+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1890+
1891+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1892+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1893+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1894+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1895+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1896+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1897+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1898+
1899+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
1900+
1901+ offset = dev->hw->index ? 0x04000400 : 0;
1902+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1903+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1904+
1905+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
1906+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1907+}
1908+
1909+static void
1910+mtk_wed_hw_init(struct mtk_wed_device *dev)
1911+{
1912+ if (dev->init_done)
1913+ return;
1914+
1915+ dev->init_done = true;
1916+ mtk_wed_set_ext_int(dev, false);
1917+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1918+ MTK_WED_TX_BM_CTRL_PAUSE |
1919+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
1920+ dev->buf_ring.size / 128) |
1921+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
1922+ MTK_WED_TX_RING_SIZE / 256));
1923+
1924+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1925+
1926+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1927+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1928+ dev->wlan.token_start) |
1929+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1930+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1931+
1932+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1933+
1934+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1935+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
1936+ MTK_WED_TX_BM_DYN_THR_HI);
1937+
1938+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1939+
1940+ wed_set(dev, MTK_WED_CTRL,
1941+ MTK_WED_CTRL_WED_TX_BM_EN |
1942+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1943+
1944+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1945+}
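
[Editor's note] Worked numbers for the two group fields programmed above, taking a 16384-entry buffer ring and MTK_WED_TX_RING_SIZE == 2048 purely as illustrative values (both are set elsewhere in this patch):

/*
 * MTK_WED_TX_BM_CTRL_VLD_GRP_NUM = 16384 / 128 = 128
 * MTK_WED_TX_BM_CTRL_RSV_GRP_NUM =  2048 / 256 =   8
 *
 * which suggests the buffer manager accounts for buffers in groups of
 * 128, with a reserve sized against the WED TX ring.
 */
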
1946+
1947+static void
1948+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
1949+{
1950+ int i;
1951+
1952+ for (i = 0; i < size; i++) {
1953+ desc[i].buf0 = 0;
1954+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
1955+ desc[i].buf1 = 0;
1956+ desc[i].info = 0;
1957+ }
1958+}
1959+
1960+static u32
1961+mtk_wed_check_busy(struct mtk_wed_device *dev)
1962+{
1963+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
1964+ return true;
1965+
1966+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
1967+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
1968+ return true;
1969+
1970+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
1971+ return true;
1972+
1973+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
1974+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1975+ return true;
1976+
1977+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
1978+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1979+ return true;
1980+
1981+ if (wed_r32(dev, MTK_WED_CTRL) &
1982+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
1983+ return true;
1984+
1985+ return false;
1986+}
1987+
1988+static int
1989+mtk_wed_poll_busy(struct mtk_wed_device *dev)
1990+{
1991+ int sleep = 15000;
1992+ int timeout = 100 * sleep;
1993+ u32 val;
1994+
1995+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1996+ timeout, false, dev);
1997+}
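
[Editor's note] A rough open-coded equivalent of the read_poll_timeout() call above, assuming only that mtk_wed_check_busy() behaves as defined earlier; read_poll_timeout() additionally turns the sleep into a usleep_range() internally, so this is an approximation, not the real expansion:

static int mtk_wed_poll_busy_sketch(struct mtk_wed_device *dev)
{
	int i;

	/* poll every 15 ms, give up after 100 polls (~1.5 s) */
	for (i = 0; i < 100; i++) {
		if (!mtk_wed_check_busy(dev))
			return 0;
		usleep_range(15000, 16000);
	}

	return -ETIMEDOUT;
}
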
1998+
1999+static void
2000+mtk_wed_reset_dma(struct mtk_wed_device *dev)
2001+{
2002+ bool busy = false;
2003+ u32 val;
2004+ int i;
2005+
2006+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
2007+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
2008+
2009+ if (!desc)
2010+ continue;
2011+
2012+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
2013+ }
2014+
2015+ if (mtk_wed_poll_busy(dev))
2016+ busy = mtk_wed_check_busy(dev);
2017+
2018+ if (busy) {
2019+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
2020+ } else {
2021+ wed_w32(dev, MTK_WED_RESET_IDX,
2022+ MTK_WED_RESET_IDX_TX |
2023+ MTK_WED_RESET_IDX_RX);
2024+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
2025+ }
2026+
2027+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
2028+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
2029+
2030+ if (busy) {
2031+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
2032+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
2033+ } else {
2034+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
2035+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
2036+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
2037+
2038+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2039+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2040+
2041+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
2042+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2043+ }
2044+
2045+ for (i = 0; i < 100; i++) {
2046+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
2047+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
2048+ break;
2049+ }
2050+
2051+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
2052+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
2053+
2054+ if (busy) {
2055+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
2056+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
2057+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
2058+ } else {
2059+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
2060+ MTK_WED_WPDMA_RESET_IDX_TX |
2061+ MTK_WED_WPDMA_RESET_IDX_RX);
2062+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
2063+ }
2064+
2065+}
2066+
2067+static int
2068+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
2069+ int size)
2070+{
2071+ ring->desc = dma_alloc_coherent(dev->hw->dev,
2072+ size * sizeof(*ring->desc),
2073+ &ring->desc_phys, GFP_KERNEL);
2074+ if (!ring->desc)
2075+ return -ENOMEM;
2076+
2077+ ring->size = size;
2078+ mtk_wed_ring_reset(ring->desc, size);
2079+
2080+ return 0;
2081+}
2082+
2083+static int
2084+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
2085+{
2086+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
2087+
2088+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
2089+ return -ENOMEM;
2090+
2091+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2092+ wdma->desc_phys);
2093+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2094+ size);
2095+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2096+
2097+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2098+ wdma->desc_phys);
2099+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2100+ size);
2101+
2102+ return 0;
2103+}
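
[Editor's note] Every ring in this patch (WED, WDMA, WPDMA) is programmed through the same four-register block, as above; the offsets are the MTK_WED_RING_OFS_* values defined in mtk_wed_regs.h further down:

/*
 * ring base + 0x00  MTK_WED_RING_OFS_BASE     ring DMA address
 * ring base + 0x04  MTK_WED_RING_OFS_COUNT    number of descriptors
 * ring base + 0x08  MTK_WED_RING_OFS_CPU_IDX  software-maintained index
 * ring base + 0x0c  MTK_WED_RING_OFS_DMA_IDX  hardware-maintained index
 */
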
2104+
2105+static void
2106+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
2107+{
2108+ u32 wdma_mask;
2109+ u32 val;
2110+ int i;
2111+
2112+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
2113+ if (!dev->tx_wdma[i].desc)
2114+ mtk_wed_wdma_ring_setup(dev, i, 16);
2115+
2116+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
2117+
2118+ mtk_wed_hw_init(dev);
2119+
2120+ wed_set(dev, MTK_WED_CTRL,
2121+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
2122+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
2123+ MTK_WED_CTRL_WED_TX_BM_EN |
2124+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
2125+
2126+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
2127+
2128+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
2129+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
2130+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
2131+
2132+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
2133+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
2134+
2135+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
2136+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
2137+
2138+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
2139+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
2140+
2141+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
2142+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
2143+
2144+ wed_set(dev, MTK_WED_GLO_CFG,
2145+ MTK_WED_GLO_CFG_TX_DMA_EN |
2146+ MTK_WED_GLO_CFG_RX_DMA_EN);
2147+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
2148+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
2149+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
2150+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2151+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
2152+
2153+ mtk_wed_set_ext_int(dev, true);
2154+ val = dev->wlan.wpdma_phys |
2155+ MTK_PCIE_MIRROR_MAP_EN |
2156+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
2157+
2158+ if (dev->hw->index)
2159+ val |= BIT(1);
2160+ val |= BIT(0);
2161+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
2162+
2163+ dev->running = true;
2164+}
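
[Editor's sketch, not part of the patch] How the WLAN driver would kick this off through the public ops defined in include/linux/soc/mediatek/mtk_wed.h below, passing the interrupt bits it would otherwise program into its own registers; names are hypothetical:

static void wlan_start_wed_sketch(struct mtk_wed_device *wed, u32 irq_mask)
{
	if (!mtk_wed_device_active(wed))
		return;	/* not attached: stay on the normal PDMA path */

	mtk_wed_device_start(wed, irq_mask);
}
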
2165+
2166+static int
2167+mtk_wed_attach(struct mtk_wed_device *dev)
2168+ __releases(RCU)
2169+{
2170+ struct mtk_wed_hw *hw;
2171+ int ret = 0;
2172+
2173+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
2174+ "mtk_wed_attach without holding the RCU read lock");
2175+
2176+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
2177+ !try_module_get(THIS_MODULE))
2178+ ret = -ENODEV;
2179+
2180+ rcu_read_unlock();
2181+
2182+ if (ret)
2183+ return ret;
2184+
2185+ mutex_lock(&hw_lock);
2186+
2187+ hw = mtk_wed_assign(dev);
2188+ if (!hw) {
2189+ module_put(THIS_MODULE);
2190+ ret = -ENODEV;
2191+ goto out;
2192+ }
2193+
2194+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
2195+
2196+ dev->hw = hw;
2197+ dev->dev = hw->dev;
2198+ dev->irq = hw->irq;
2199+ dev->wdma_idx = hw->index;
2200+
2201+ if (hw->eth->dma_dev == hw->eth->dev &&
2202+ of_dma_is_coherent(hw->eth->dev->of_node))
2203+ mtk_eth_set_dma_device(hw->eth, hw->dev);
2204+
2205+ ret = mtk_wed_buffer_alloc(dev);
2206+ if (ret) {
2207+ mtk_wed_detach(dev);
2208+ goto out;
2209+ }
2210+
2211+ mtk_wed_hw_init_early(dev);
2212+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
2213+
2214+out:
2215+ mutex_unlock(&hw_lock);
2216+
2217+ return ret;
2218+}
2219+
2220+static int
2221+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2222+{
2223+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
2224+
2225+ /*
2226+ * Tx ring redirection:
2227+	 * Instead of configuring the WLAN PDMA TX ring directly, the
2228+	 * DMA ring allocated by the WLAN driver gets configured into the
2229+	 * WED MTK_WED_RING_TX(n) registers.
2230+	 *
2231+	 * The WED driver posts its own DMA ring as the WLAN PDMA TX ring
2232+	 * and configures it into the MTK_WED_WPDMA_RING_TX(n) registers.
2233+	 * That ring gets filled with packets picked up from the WED TX
2234+	 * ring and from WDMA RX.
2235+ */
2236+
2237+	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
2238+
2239+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
2240+ return -ENOMEM;
2241+
2242+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
2243+ return -ENOMEM;
2244+
2245+ ring->reg_base = MTK_WED_RING_TX(idx);
2246+ ring->wpdma = regs;
2247+
2248+ /* WED -> WPDMA */
2249+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
2250+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
2251+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
2252+
2253+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
2254+ ring->desc_phys);
2255+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
2256+ MTK_WED_TX_RING_SIZE);
2257+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2258+
2259+ return 0;
2260+}
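
[Editor's sketch, not part of the patch] The WLAN-driver side of the redirection described in the comment above; ring_regs would be the driver's own PDMA TX ring register block, and the macro comes from include/linux/soc/mediatek/mtk_wed.h below:

static int wlan_init_tx_ring_sketch(struct mtk_wed_device *wed, int idx,
				    void __iomem *ring_regs)
{
	if (!mtk_wed_device_active(wed))
		return 0;	/* no WED: driver programs the ring itself */

	return mtk_wed_device_tx_ring_setup(wed, idx, ring_regs);
}
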
2261+
2262+static int
2263+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2264+{
2265+ struct mtk_wed_ring *ring = &dev->txfree_ring;
2266+ int i;
2267+
2268+ /*
2269+ * For txfree event handling, the same DMA ring is shared between WED
2270+ * and WLAN. The WLAN driver accesses the ring index registers through
2271+	 * WED.
2272+ */
2273+ ring->reg_base = MTK_WED_RING_RX(1);
2274+ ring->wpdma = regs;
2275+
2276+ for (i = 0; i < 12; i += 4) {
2277+ u32 val = readl(regs + i);
2278+
2279+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
2280+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
2281+ }
2282+
2283+ return 0;
2284+}
2285+
2286+static u32
2287+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2288+{
2289+ u32 val;
2290+
2291+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2292+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2293+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2294+ if (!dev->hw->num_flows)
2295+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2296+ if (val && net_ratelimit())
2297+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
2298+
2299+ val = wed_r32(dev, MTK_WED_INT_STATUS);
2300+ val &= mask;
2301+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
2302+
2303+ return val;
2304+}
2305+
2306+static void
2307+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
2308+{
2309+ if (!dev->running)
2310+ return;
2311+
2312+ mtk_wed_set_ext_int(dev, !!mask);
2313+ wed_w32(dev, MTK_WED_INT_MASK, mask);
2314+}
2315+
2316+int mtk_wed_flow_add(int index)
2317+{
2318+ struct mtk_wed_hw *hw = hw_list[index];
2319+ int ret;
2320+
2321+ if (!hw || !hw->wed_dev)
2322+ return -ENODEV;
2323+
2324+ if (hw->num_flows) {
2325+ hw->num_flows++;
2326+ return 0;
2327+ }
2328+
2329+ mutex_lock(&hw_lock);
2330+ if (!hw->wed_dev) {
2331+ ret = -ENODEV;
2332+ goto out;
2333+ }
2334+
2335+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
2336+ if (!ret)
2337+ hw->num_flows++;
2338+ mtk_wed_set_ext_int(hw->wed_dev, true);
2339+
2340+out:
2341+ mutex_unlock(&hw_lock);
2342+
2343+ return ret;
2344+}
2345+
2346+void mtk_wed_flow_remove(int index)
2347+{
2348+ struct mtk_wed_hw *hw = hw_list[index];
2349+
2350+ if (!hw)
2351+ return;
2352+
2353+ if (--hw->num_flows)
2354+ return;
2355+
2356+ mutex_lock(&hw_lock);
2357+ if (!hw->wed_dev)
2358+ goto out;
2359+
2360+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
2361+ mtk_wed_set_ext_int(hw->wed_dev, true);
2362+
2363+out:
2364+ mutex_unlock(&hw_lock);
2365+}
2366+
2367+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2368+ void __iomem *wdma, int index)
2369+{
2370+ static const struct mtk_wed_ops wed_ops = {
2371+ .attach = mtk_wed_attach,
2372+ .tx_ring_setup = mtk_wed_tx_ring_setup,
2373+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2374+ .start = mtk_wed_start,
2375+ .stop = mtk_wed_stop,
2376+ .reset_dma = mtk_wed_reset_dma,
2377+ .reg_read = wed_r32,
2378+ .reg_write = wed_w32,
2379+ .irq_get = mtk_wed_irq_get,
2380+ .irq_set_mask = mtk_wed_irq_set_mask,
2381+ .detach = mtk_wed_detach,
2382+ };
2383+ struct device_node *eth_np = eth->dev->of_node;
2384+ struct platform_device *pdev;
2385+ struct mtk_wed_hw *hw;
2386+ struct regmap *regs;
2387+ int irq;
2388+
2389+ if (!np)
2390+ return;
2391+
2392+ pdev = of_find_device_by_node(np);
2393+ if (!pdev)
2394+ return;
2395+
2396+ get_device(&pdev->dev);
2397+ irq = platform_get_irq(pdev, 0);
2398+ if (irq < 0)
2399+ return;
2400+
2401+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
2402+	if (IS_ERR(regs))
2403+ return;
2404+
2405+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
2406+
2407+ mutex_lock(&hw_lock);
2408+
2409+ if (WARN_ON(hw_list[index]))
2410+ goto unlock;
2411+
2412+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
2413+ hw->node = np;
2414+ hw->regs = regs;
2415+ hw->eth = eth;
2416+ hw->dev = &pdev->dev;
2417+ hw->wdma = wdma;
2418+ hw->index = index;
2419+ hw->irq = irq;
2420+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2421+ "mediatek,pcie-mirror");
2422+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2423+ "mediatek,hifsys");
2424+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
2425+ kfree(hw);
2426+ goto unlock;
2427+ }
2428+
2429+ if (!index) {
2430+ regmap_write(hw->mirror, 0, 0);
2431+ regmap_write(hw->mirror, 4, 0);
2432+ }
2433+ mtk_wed_hw_add_debugfs(hw);
2434+
2435+ hw_list[index] = hw;
2436+
2437+unlock:
2438+ mutex_unlock(&hw_lock);
2439+}
2440+
2441+void mtk_wed_exit(void)
2442+{
2443+ int i;
2444+
2445+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
2446+
2447+ synchronize_rcu();
2448+
2449+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
2450+ struct mtk_wed_hw *hw;
2451+
2452+ hw = hw_list[i];
2453+ if (!hw)
2454+ continue;
2455+
2456+ hw_list[i] = NULL;
2457+ debugfs_remove(hw->debugfs_dir);
2458+ put_device(hw->dev);
2459+ kfree(hw);
2460+ }
2461+}
2462diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2463new file mode 100644
2464index 000000000..981ec613f
2465--- /dev/null
2466+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2467@@ -0,0 +1,135 @@
2468+// SPDX-License-Identifier: GPL-2.0-only
2469+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2470+
2471+#ifndef __MTK_WED_PRIV_H
2472+#define __MTK_WED_PRIV_H
2473+
2474+#include <linux/soc/mediatek/mtk_wed.h>
2475+#include <linux/debugfs.h>
2476+#include <linux/regmap.h>
2477+#include <linux/netdevice.h>
2478+
2479+struct mtk_eth;
2480+
2481+struct mtk_wed_hw {
2482+ struct device_node *node;
2483+ struct mtk_eth *eth;
2484+ struct regmap *regs;
2485+ struct regmap *hifsys;
2486+ struct device *dev;
2487+ void __iomem *wdma;
2488+ struct regmap *mirror;
2489+ struct dentry *debugfs_dir;
2490+ struct mtk_wed_device *wed_dev;
2491+ u32 debugfs_reg;
2492+ u32 num_flows;
2493+ char dirname[5];
2494+ int irq;
2495+ int index;
2496+};
2497+
2498+struct mtk_wdma_info {
2499+ u8 wdma_idx;
2500+ u8 queue;
2501+ u16 wcid;
2502+ u8 bss;
2503+};
2504+
2505+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2506+static inline void
2507+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2508+{
2509+ regmap_write(dev->hw->regs, reg, val);
2510+}
2511+
2512+static inline u32
2513+wed_r32(struct mtk_wed_device *dev, u32 reg)
2514+{
2515+ unsigned int val;
2516+
2517+ regmap_read(dev->hw->regs, reg, &val);
2518+
2519+ return val;
2520+}
2521+
2522+static inline void
2523+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2524+{
2525+ writel(val, dev->hw->wdma + reg);
2526+}
2527+
2528+static inline u32
2529+wdma_r32(struct mtk_wed_device *dev, u32 reg)
2530+{
2531+ return readl(dev->hw->wdma + reg);
2532+}
2533+
2534+static inline u32
2535+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
2536+{
2537+ if (!dev->tx_ring[ring].wpdma)
2538+ return 0;
2539+
2540+ return readl(dev->tx_ring[ring].wpdma + reg);
2541+}
2542+
2543+static inline void
2544+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
2545+{
2546+ if (!dev->tx_ring[ring].wpdma)
2547+ return;
2548+
2549+ writel(val, dev->tx_ring[ring].wpdma + reg);
2550+}
2551+
2552+static inline u32
2553+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
2554+{
2555+ if (!dev->txfree_ring.wpdma)
2556+ return 0;
2557+
2558+ return readl(dev->txfree_ring.wpdma + reg);
2559+}
2560+
2561+static inline void
2562+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2563+{
2564+ if (!dev->txfree_ring.wpdma)
2565+ return;
2566+
2567+ writel(val, dev->txfree_ring.wpdma + reg);
2568+}
2569+
2570+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2571+ void __iomem *wdma, int index);
2572+void mtk_wed_exit(void);
2573+int mtk_wed_flow_add(int index);
2574+void mtk_wed_flow_remove(int index);
2575+#else
2576+static inline void
2577+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2578+ void __iomem *wdma, int index)
2579+{
2580+}
2581+static inline void
2582+mtk_wed_exit(void)
2583+{
2584+}
2585+static inline int mtk_wed_flow_add(int index)
2586+{
2587+ return -EINVAL;
2588+}
2589+static inline void mtk_wed_flow_remove(int index)
2590+{
2591+}
2592+#endif
2593+
2594+#ifdef CONFIG_DEBUG_FS
2595+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
2596+#else
2597+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2598+{
2599+}
2600+#endif
2601+
2602+#endif
2603diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2604new file mode 100644
2605index 000000000..a81d3fd1a
2606--- /dev/null
2607+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2608@@ -0,0 +1,175 @@
2609+// SPDX-License-Identifier: GPL-2.0-only
2610+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2611+
2612+#include <linux/seq_file.h>
2613+#include "mtk_wed.h"
2614+#include "mtk_wed_regs.h"
2615+
2616+struct reg_dump {
2617+ const char *name;
2618+ u16 offset;
2619+ u8 type;
2620+ u8 base;
2621+};
2622+
2623+enum {
2624+ DUMP_TYPE_STRING,
2625+ DUMP_TYPE_WED,
2626+ DUMP_TYPE_WDMA,
2627+ DUMP_TYPE_WPDMA_TX,
2628+ DUMP_TYPE_WPDMA_TXFREE,
2629+};
2630+
2631+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2632+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2633+#define DUMP_RING(_prefix, _base, ...) \
2634+ { _prefix " BASE", _base, __VA_ARGS__ }, \
2635+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2636+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
2637+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2638+
2639+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2640+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2641+
2642+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2643+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
2644+
2645+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
2646+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
2647+
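
[Editor's note] One level of macro expansion, to make the register table in wed_txinfo_show() below easier to read; this is what DUMP_WED_RING(WED_RING_TX(0)) resolves to:

/*
 * { "WED_RING_TX(0) BASE", MTK_WED_RING_TX(0),       DUMP_TYPE_WED },
 * { "WED_RING_TX(0) CNT",  MTK_WED_RING_TX(0) + 0x4, DUMP_TYPE_WED },
 * { "WED_RING_TX(0) CIDX", MTK_WED_RING_TX(0) + 0x8, DUMP_TYPE_WED },
 * { "WED_RING_TX(0) DIDX", MTK_WED_RING_TX(0) + 0xc, DUMP_TYPE_WED },
 */
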
2648+static void
2649+print_reg_val(struct seq_file *s, const char *name, u32 val)
2650+{
2651+ seq_printf(s, "%-32s %08x\n", name, val);
2652+}
2653+
2654+static void
2655+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2656+ const struct reg_dump *regs, int n_regs)
2657+{
2658+ const struct reg_dump *cur;
2659+ u32 val;
2660+
2661+ for (cur = regs; cur < &regs[n_regs]; cur++) {
2662+ switch (cur->type) {
2663+ case DUMP_TYPE_STRING:
2664+ seq_printf(s, "%s======== %s:\n",
2665+ cur > regs ? "\n" : "",
2666+ cur->name);
2667+ continue;
2668+ case DUMP_TYPE_WED:
2669+ val = wed_r32(dev, cur->offset);
2670+ break;
2671+ case DUMP_TYPE_WDMA:
2672+ val = wdma_r32(dev, cur->offset);
2673+ break;
2674+ case DUMP_TYPE_WPDMA_TX:
2675+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2676+ break;
2677+ case DUMP_TYPE_WPDMA_TXFREE:
2678+ val = wpdma_txfree_r32(dev, cur->offset);
2679+ break;
2680+ }
2681+ print_reg_val(s, cur->name, val);
2682+ }
2683+}
2684+
2685+
2686+static int
2687+wed_txinfo_show(struct seq_file *s, void *data)
2688+{
2689+ static const struct reg_dump regs[] = {
2690+ DUMP_STR("WED TX"),
2691+ DUMP_WED(WED_TX_MIB(0)),
2692+ DUMP_WED_RING(WED_RING_TX(0)),
2693+
2694+ DUMP_WED(WED_TX_MIB(1)),
2695+ DUMP_WED_RING(WED_RING_TX(1)),
2696+
2697+ DUMP_STR("WPDMA TX"),
2698+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
2699+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
2700+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
2701+
2702+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
2703+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
2704+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
2705+
2706+ DUMP_STR("WPDMA TX"),
2707+ DUMP_WPDMA_TX_RING(0),
2708+ DUMP_WPDMA_TX_RING(1),
2709+
2710+ DUMP_STR("WED WDMA RX"),
2711+ DUMP_WED(WED_WDMA_RX_MIB(0)),
2712+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
2713+ DUMP_WED(WED_WDMA_RX_THRES(0)),
2714+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
2715+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
2716+
2717+ DUMP_WED(WED_WDMA_RX_MIB(1)),
2718+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
2719+ DUMP_WED(WED_WDMA_RX_THRES(1)),
2720+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
2721+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
2722+
2723+ DUMP_STR("WDMA RX"),
2724+ DUMP_WDMA(WDMA_GLO_CFG),
2725+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
2726+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
2727+ };
2728+ struct mtk_wed_hw *hw = s->private;
2729+ struct mtk_wed_device *dev = hw->wed_dev;
2730+
2731+ if (!dev)
2732+ return 0;
2733+
2734+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2735+
2736+ return 0;
2737+}
2738+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2739+
2740+
2741+static int
2742+mtk_wed_reg_set(void *data, u64 val)
2743+{
2744+ struct mtk_wed_hw *hw = data;
2745+
2746+ regmap_write(hw->regs, hw->debugfs_reg, val);
2747+
2748+ return 0;
2749+}
2750+
2751+static int
2752+mtk_wed_reg_get(void *data, u64 *val)
2753+{
2754+ struct mtk_wed_hw *hw = data;
2755+ unsigned int regval;
2756+ int ret;
2757+
2758+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
2759+ if (ret)
2760+ return ret;
2761+
2762+ *val = regval;
2763+
2764+ return 0;
2765+}
2766+
2767+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2768+ "0x%08llx\n");
2769+
2770+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2771+{
2772+ struct dentry *dir;
2773+
2774+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
2775+ dir = debugfs_create_dir(hw->dirname, NULL);
2776+ if (!dir)
2777+ return;
2778+
2779+ hw->debugfs_dir = dir;
2780+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2781+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2782+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2783+}
2784diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2785new file mode 100644
2786index 000000000..a5d9d8a5b
2787--- /dev/null
2788+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2789@@ -0,0 +1,8 @@
2790+// SPDX-License-Identifier: GPL-2.0-only
2791+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2792+
2793+#include <linux/kernel.h>
2794+#include <linux/soc/mediatek/mtk_wed.h>
2795+
2796+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2797+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
2798diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2799new file mode 100644
2800index 000000000..0a0465ea5
2801--- /dev/null
2802+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2803@@ -0,0 +1,251 @@
2804+// SPDX-License-Identifier: GPL-2.0-only
2805+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2806+
2807+#ifndef __MTK_WED_REGS_H
2808+#define __MTK_WED_REGS_H
2809+
2810+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2811+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2812+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
2813+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2814+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2815+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2816+
2817+struct mtk_wdma_desc {
2818+ __le32 buf0;
2819+ __le32 ctrl;
2820+ __le32 buf1;
2821+ __le32 info;
2822+} __packed __aligned(4);
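
[Editor's worked example] Packing of the ctrl word for a buffer whose TXD occupies 128 bytes, assuming the 2048-byte WED buffer size used elsewhere in this patch (leaving 1920 payload bytes):

/*
 * FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, 128)  = 128 << 16 = 0x00800000
 * FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, 1920) =             0x00000780
 * MTK_WDMA_DESC_CTRL_LAST_SEG1              =             0x00008000
 *                                      ctrl =             0x00808780
 */
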
2823+
2824+#define MTK_WED_RESET 0x008
2825+#define MTK_WED_RESET_TX_BM BIT(0)
2826+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
2827+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
2828+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
2829+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
2830+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
2831+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2832+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2833+#define MTK_WED_RESET_WED BIT(31)
2834+
2835+#define MTK_WED_CTRL 0x00c
2836+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
2837+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
2838+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
2839+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
2840+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
2841+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2842+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2843+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2844+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2845+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2846+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2847+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2848+
2849+#define MTK_WED_EXT_INT_STATUS 0x020
2850+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
2851+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
2852+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
2853+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
2854+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
2855+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2856+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2857+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2858+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2859+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2860+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
2861+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
2862+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
2863+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
2864+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
2865+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2866+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2867+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2868+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2869+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2870+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2871+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
2872+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
2873+
2874+#define MTK_WED_EXT_INT_MASK 0x028
2875+
2876+#define MTK_WED_STATUS 0x060
2877+#define MTK_WED_STATUS_TX GENMASK(15, 8)
2878+
2879+#define MTK_WED_TX_BM_CTRL 0x080
2880+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
2881+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
2882+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
2883+
2884+#define MTK_WED_TX_BM_BASE 0x084
2885+
2886+#define MTK_WED_TX_BM_TKID 0x088
2887+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
2888+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
2889+
2890+#define MTK_WED_TX_BM_BUF_LEN 0x08c
2891+
2892+#define MTK_WED_TX_BM_INTF 0x09c
2893+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
2894+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
2895+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
2896+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
2897+
2898+#define MTK_WED_TX_BM_DYN_THR 0x0a0
2899+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
2900+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
2901+
2902+#define MTK_WED_INT_STATUS 0x200
2903+#define MTK_WED_INT_MASK 0x204
2904+
2905+#define MTK_WED_GLO_CFG 0x208
2906+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
2907+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
2908+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
2909+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
2910+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2911+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
2912+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
2913+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2914+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2915+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2916+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2917+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2918+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2919+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
2920+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2921+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
2922+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
2923+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
2924+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
2925+
2926+#define MTK_WED_RESET_IDX 0x20c
2927+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
2928+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
2929+
2930+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
2931+
2932+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
2933+
2934+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2935+
2936+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
2937+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2938+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2939+
2940+#define MTK_WED_WPDMA_GLO_CFG 0x508
2941+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
2942+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
2943+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
2944+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2945+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2946+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2947+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
2948+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2949+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2950+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2951+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2952+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2953+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2954+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
2955+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2956+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
2957+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
2958+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
2959+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
2960+
2961+#define MTK_WED_WPDMA_RESET_IDX 0x50c
2962+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
2963+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
2964+
2965+#define MTK_WED_WPDMA_INT_CTRL 0x520
2966+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
2967+
2968+#define MTK_WED_WPDMA_INT_MASK 0x524
2969+
2970+#define MTK_WED_PCIE_CFG_BASE 0x560
2971+
2972+#define MTK_WED_PCIE_INT_TRIGGER 0x570
2973+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
2974+
2975+#define MTK_WED_WPDMA_CFG_BASE 0x580
2976+
2977+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2978+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2979+
2980+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2981+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2982+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2983+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2984+
2985+#define MTK_WED_WDMA_GLO_CFG 0xa04
2986+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2987+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2988+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2989+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2990+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2991+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
2992+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
2993+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
2994+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
2995+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
2996+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
2997+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
2998+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
2999+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
3000+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
3001+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
3002+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
3003+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
3004+
3005+#define MTK_WED_WDMA_RESET_IDX 0xa08
3006+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
3007+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
3008+
3009+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
3010+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3011+
3012+#define MTK_WED_WDMA_INT_CTRL 0xa2c
3013+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
3014+
3015+#define MTK_WED_WDMA_OFFSET0 0xaa4
3016+#define MTK_WED_WDMA_OFFSET1 0xaa8
3017+
3018+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
3019+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
3020+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
3021+
3022+#define MTK_WED_RING_OFS_BASE 0x00
3023+#define MTK_WED_RING_OFS_COUNT 0x04
3024+#define MTK_WED_RING_OFS_CPU_IDX 0x08
3025+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
3026+
3027+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
3028+
3029+#define MTK_WDMA_GLO_CFG 0x204
3030+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
3031+
3032+#define MTK_WDMA_RESET_IDX 0x208
3033+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
3034+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
3035+
3036+#define MTK_WDMA_INT_MASK 0x228
3037+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
3038+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
3039+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
3040+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
3041+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
3042+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
3043+
3044+#define MTK_WDMA_INT_GRP1 0x250
3045+#define MTK_WDMA_INT_GRP2 0x254
3046+
3047+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3048+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3049+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3050+
3051+/* DMA channel mapping */
3052+#define HIFSYS_DMA_AG_MAP 0x008
3053+
3054+#endif
3055diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3056index 9f64504ac..35998b1a7 100644
3057--- a/include/linux/netdevice.h
3058+++ b/include/linux/netdevice.h
3059@@ -835,6 +835,7 @@ enum net_device_path_type {
3060 DEV_PATH_BRIDGE,
3061 DEV_PATH_PPPOE,
3062 DEV_PATH_DSA,
3063+ DEV_PATH_MTK_WDMA,
3064 };
3065
3066 struct net_device_path {
3067@@ -860,6 +861,12 @@ struct net_device_path {
3068 int port;
3069 u16 proto;
3070 } dsa;
3071+ struct {
3072+ u8 wdma_idx;
3073+ u8 queue;
3074+ u16 wcid;
3075+ u8 bss;
3076+ } mtk_wdma;
3077 };
3078 };
3079
3080diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3081new file mode 100644
3082index 000000000..7e00cca06
3083--- /dev/null
3084+++ b/include/linux/soc/mediatek/mtk_wed.h
3085@@ -0,0 +1,131 @@
3086+#ifndef __MTK_WED_H
3087+#define __MTK_WED_H
3088+
3089+#include <linux/kernel.h>
3090+#include <linux/rcupdate.h>
3091+#include <linux/regmap.h>
3092+#include <linux/pci.h>
3093+
3094+#define MTK_WED_TX_QUEUES 2
3095+
3096+struct mtk_wed_hw;
3097+struct mtk_wdma_desc;
3098+
3099+struct mtk_wed_ring {
3100+ struct mtk_wdma_desc *desc;
3101+ dma_addr_t desc_phys;
3102+ int size;
3103+
3104+ u32 reg_base;
3105+ void __iomem *wpdma;
3106+};
3107+
3108+struct mtk_wed_device {
3109+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3110+ const struct mtk_wed_ops *ops;
3111+ struct device *dev;
3112+ struct mtk_wed_hw *hw;
3113+ bool init_done, running;
3114+ int wdma_idx;
3115+ int irq;
3116+
3117+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3118+ struct mtk_wed_ring txfree_ring;
3119+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3120+
3121+ struct {
3122+ int size;
3123+ void **pages;
3124+ struct mtk_wdma_desc *desc;
3125+ dma_addr_t desc_phys;
3126+ } buf_ring;
3127+
3128+ /* filled by driver: */
3129+ struct {
3130+ struct pci_dev *pci_dev;
3131+
3132+ u32 wpdma_phys;
3133+
3134+ u16 token_start;
3135+ unsigned int nbuf;
3136+
3137+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3138+ int (*offload_enable)(struct mtk_wed_device *wed);
3139+ void (*offload_disable)(struct mtk_wed_device *wed);
3140+ } wlan;
3141+#endif
3142+};
3143+
3144+struct mtk_wed_ops {
3145+ int (*attach)(struct mtk_wed_device *dev);
3146+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
3147+ void __iomem *regs);
3148+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3149+ void __iomem *regs);
3150+ void (*detach)(struct mtk_wed_device *dev);
3151+
3152+ void (*stop)(struct mtk_wed_device *dev);
3153+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
3154+ void (*reset_dma)(struct mtk_wed_device *dev);
3155+
3156+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
3157+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
3158+
3159+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3160+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3161+};
3162+
3163+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3164+
3165+static inline int
3166+mtk_wed_device_attach(struct mtk_wed_device *dev)
3167+{
3168+ int ret = -ENODEV;
3169+
3170+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3171+ rcu_read_lock();
3172+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
3173+ if (dev->ops)
3174+ ret = dev->ops->attach(dev);
3175+ else
3176+ rcu_read_unlock();
3177+
3178+ if (ret)
3179+ dev->ops = NULL;
3180+#endif
3181+
3182+ return ret;
3183+}
3184+
3185+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3186+#define mtk_wed_device_active(_dev) !!(_dev)->ops
3187+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3188+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
3189+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
3190+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3191+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3192+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
3193+#define mtk_wed_device_reg_read(_dev, _reg) \
3194+ (_dev)->ops->reg_read(_dev, _reg)
3195+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
3196+ (_dev)->ops->reg_write(_dev, _reg, _val)
3197+#define mtk_wed_device_irq_get(_dev, _mask) \
3198+ (_dev)->ops->irq_get(_dev, _mask)
3199+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
3200+ (_dev)->ops->irq_set_mask(_dev, _mask)
3201+#else
3202+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3203+{
3204+ return false;
3205+}
3206+#define mtk_wed_device_detach(_dev) do {} while (0)
3207+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
3208+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3209+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
3210+#define mtk_wed_device_reg_read(_dev, _reg) 0
3211+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3212+#define mtk_wed_device_irq_get(_dev, _mask) 0
3213+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3214+#endif
3215+
3216+#endif
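
[Editor's sketch, not part of the patch] The WLAN-driver side of the attach API above. All wlan_-prefixed names are hypothetical and the values illustrative; only the mtk_wed_device fields and mtk_wed_device_attach() come from this header. Note that mtk_wed_device_attach() takes the RCU read lock itself and the attach op releases it:

static u32 wlan_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
static int wlan_wed_offload_enable(struct mtk_wed_device *wed);
static void wlan_wed_offload_disable(struct mtk_wed_device *wed);

static int wlan_wed_attach_sketch(struct mtk_wed_device *wed,
				  struct pci_dev *pdev, u32 wpdma_phys)
{
	wed->wlan.pci_dev	  = pdev;
	wed->wlan.wpdma_phys	  = wpdma_phys;	/* WLAN PDMA register base */
	wed->wlan.token_start	  = 0;		/* first TX token WED may use */
	wed->wlan.nbuf		  = 16384;	/* illustrative pool size */
	wed->wlan.init_buf	  = wlan_wed_init_buf;
	wed->wlan.offload_enable  = wlan_wed_offload_enable;
	wed->wlan.offload_disable = wlan_wed_offload_disable;

	return mtk_wed_device_attach(wed);	/* -ENODEV without WED hw */
}
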
3217diff --git a/net/core/dev.c b/net/core/dev.c
3218index 4f0edb218..031ac7c6f 100644
3219--- a/net/core/dev.c
3220+++ b/net/core/dev.c
3221@@ -675,6 +675,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3222 if (WARN_ON_ONCE(last_dev == ctx.dev))
3223 return -1;
3224 }
3225+
3226+ if (!ctx.dev)
3227+ return ret;
3228+
3229 path = dev_fwd_path(stack);
3230 if (!path)
3231 return -1;
3232--
32332.18.0
3234