From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 2 Jun 2022 15:32:07 +0800
Subject: [PATCH 4/8] 9993-add-wed

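Add Wireless Ethernet Dispatch (WED) support for MT7622: new wed0/wed1,
hifsys and pcie-mirror nodes in the MT7622 device tree, a dedicated DMA
device pointer in mtk_eth_soc so the DMA path can be re-parented at
runtime, PPE support for bridged L2 flows and WDMA (wifi) destinations,
and the new mtk_wed core, ops, debugfs and register files.
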
Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 arch/arm64/boot/dts/mediatek/mt7622.dtsi      |  32 +-
 drivers/net/ethernet/mediatek/Kconfig         |   4 +
 drivers/net/ethernet/mediatek/Makefile        |   5 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.c   | 136 ++-
 drivers/net/ethernet/mediatek/mtk_eth_soc.h   |  14 +-
 drivers/net/ethernet/mediatek/mtk_ppe.c       | 373 +++++++-
 drivers/net/ethernet/mediatek/mtk_ppe.h       |  89 +-
 .../net/ethernet/mediatek/mtk_ppe_debugfs.c   |   4 +-
 .../net/ethernet/mediatek/mtk_ppe_offload.c   | 167 +++-
 drivers/net/ethernet/mediatek/mtk_wed.c       | 876 ++++++++++++++++++
 drivers/net/ethernet/mediatek/mtk_wed.h       | 135 +++
 .../net/ethernet/mediatek/mtk_wed_debugfs.c   | 175 ++++
 drivers/net/ethernet/mediatek/mtk_wed_ops.c   |   8 +
 drivers/net/ethernet/mediatek/mtk_wed_regs.h  | 251 +++++
 include/linux/netdevice.h                     |   7 +
 include/linux/soc/mediatek/mtk_wed.h          | 131 +++
 net/core/dev.c                                |   4 +
 17 files changed, 2283 insertions(+), 128 deletions(-)
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
 create mode 100644 include/linux/soc/mediatek/mtk_wed.h

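Note (placed between the diffstat and the first hunk, a region that
"git am" ignores): the mtk_eth_soc changes below split eth->dev into
dev + dma_dev and add mtk_eth_set_dma_device(), which closes the open
netdevs, swaps the device used for DMA mapping, and reopens them. A
minimal sketch of a caller, assuming a WED hw struct with a "dev"
member -- the real call site lives in the WED attach path, outside
this excerpt:

	/* re-parent all ethernet DMA mappings to the WED device */
	mtk_eth_set_dma_device(eth, hw->dev);
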
diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 369e01389..d0fbc367e 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -338,7 +338,7 @@
 		};
 
 		cci_control2: slave-if@5000 {
-			compatible = "arm,cci-400-ctrl-if";
+			compatible = "arm,cci-400-ctrl-if", "syscon";
 			interface-type = "ace";
 			reg = <0x5000 0x1000>;
 		};
@@ -920,6 +920,11 @@
 		};
 	};
 
+	hifsys: syscon@1af00000 {
+		compatible = "mediatek,mt7622-hifsys", "syscon";
+		reg = <0 0x1af00000 0 0x70>;
+	};
+
 	ethsys: syscon@1b000000 {
 		compatible = "mediatek,mt7622-ethsys",
 			     "syscon";
@@ -938,6 +943,26 @@
 		#dma-cells = <1>;
 	};
 
+	pcie_mirror: pcie-mirror@10000400 {
+		compatible = "mediatek,mt7622-pcie-mirror",
+			     "syscon";
+		reg = <0 0x10000400 0 0x10>;
+	};
+
+	wed0: wed@1020a000 {
+		compatible = "mediatek,mt7622-wed",
+			     "syscon";
+		reg = <0 0x1020a000 0 0x1000>;
+		interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	wed1: wed@1020b000 {
+		compatible = "mediatek,mt7622-wed",
+			     "syscon";
+		reg = <0 0x1020b000 0 0x1000>;
+		interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
+	};
+
 	eth: ethernet@1b100000 {
 		compatible = "mediatek,mt7622-eth",
 			     "mediatek,mt2701-eth",
@@ -964,6 +989,11 @@
 		power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
 		mediatek,ethsys = <&ethsys>;
 		mediatek,sgmiisys = <&sgmiisys>;
+		mediatek,cci-control = <&cci_control2>;
+		mediatek,wed = <&wed0>, <&wed1>;
+		mediatek,pcie-mirror = <&pcie_mirror>;
+		mediatek,hifsys = <&hifsys>;
+		dma-coherent;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
old mode 100755
new mode 100644
index 42e6b38d2..8ab6615a3
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
 
 if NET_VENDOR_MEDIATEK
 
+config NET_MEDIATEK_SOC_WED
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	def_bool NET_MEDIATEK_SOC != n
+
 config NET_MEDIATEK_SOC
 	tristate "MediaTek SoC Gigabit Ethernet support"
 	select PHYLINK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
old mode 100755
new mode 100644
index 0a6af99f1..3528f1b3c
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -6,4 +6,9 @@
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
 	     mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
old mode 100755
new mode 100644
index 819d8a0be..2121335a1
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -9,6 +9,7 @@
 #include <linux/of_device.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
+#include <linux/of_address.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/clk.h>
@@ -20,12 +21,14 @@
 #include <linux/pinctrl/devinfo.h>
 #include <linux/phylink.h>
 #include <linux/gpio/consumer.h>
+#include <linux/bitfield.h>
 #include <net/dsa.h>
 
 #include "mtk_eth_soc.h"
 #include "mtk_eth_dbg.h"
 #include "mtk_eth_reset.h"
 #include "mtk_hnat/hnat.h"
+#include "mtk_wed.h"
 
 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
 #include "mtk_hnat/nf_hnat_mtk.h"
@@ -1116,7 +1119,7 @@ static int mtk_init_fq_dma(struct mtk_et
 	int i;
 
 	if (!eth->soc->has_sram) {
-		eth->scratch_ring = dma_alloc_coherent(eth->dev,
+		eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
 						       cnt * soc->txrx.txd_size,
 						       &eth->phy_scratch_ring,
 						       GFP_KERNEL);
@@ -1134,10 +1137,10 @@ static int mtk_init_fq_dma(struct mtk_et
 	if (unlikely(!eth->scratch_head))
 		return -ENOMEM;
 
-	dma_addr = dma_map_single(eth->dev,
+	dma_addr = dma_map_single(eth->dma_dev,
 				  eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
 				  DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 		return -ENOMEM;
 
 	phy_ring_tail = eth->phy_scratch_ring +
@@ -1201,26 +1204,26 @@ static void mtk_tx_unmap(struct mtk_eth
 {
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
 		if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-			dma_unmap_single(eth->dev,
+			dma_unmap_single(eth->dma_dev,
 					 dma_unmap_addr(tx_buf, dma_addr0),
 					 dma_unmap_len(tx_buf, dma_len0),
 					 DMA_TO_DEVICE);
 		} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 	} else {
 		if (dma_unmap_len(tx_buf, dma_len0)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr0),
 				       dma_unmap_len(tx_buf, dma_len0),
 				       DMA_TO_DEVICE);
 		}
 
 		if (dma_unmap_len(tx_buf, dma_len1)) {
-			dma_unmap_page(eth->dev,
+			dma_unmap_page(eth->dma_dev,
 				       dma_unmap_addr(tx_buf, dma_addr1),
 				       dma_unmap_len(tx_buf, dma_len1),
 				       DMA_TO_DEVICE);
@@ -1454,9 +1457,9 @@ static int mtk_tx_map(struct sk_buff *sk
 	itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
 	memset(itx_buf, 0, sizeof(*itx_buf));
 
-	txd_info.addr = dma_map_single(eth->dev, skb->data, txd_info.size,
+	txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
 				       DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+	if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
 		return -ENOMEM;
 
 	mtk_tx_set_dma_desc(skb, dev, itxd, &txd_info);
@@ -1497,10 +1500,10 @@ static int mtk_tx_map(struct sk_buff *sk
 		txd_info.qid = skb->mark & MTK_QDMA_TX_MASK;
 		txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
 				!(frag_size - txd_info.size);
-		txd_info.addr = skb_frag_dma_map(eth->dev, frag,
+		txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
 						 offset, txd_info.size,
 						 DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, txd_info.addr)))
+		if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
 			goto err_dma;
 
 		mtk_tx_set_dma_desc(skb, dev, txd, &txd_info);
@@ -1737,6 +1740,7 @@ static int mtk_poll_rx(struct napi_struc
 	struct net_device *netdev = NULL;
 	unsigned int pktlen;
 	dma_addr_t dma_addr = 0;
+	u32 hash, reason;
 	int mac = 0;
 
 	if (eth->hwlro)
@@ -1787,12 +1791,12 @@ static int mtk_poll_rx(struct napi_struc
 			netdev->stats.rx_dropped++;
 			goto release_desc;
 		}
-		dma_addr = dma_map_single(eth->dev,
+		dma_addr = dma_map_single(eth->dma_dev,
 					  new_data + NET_SKB_PAD +
 					  eth->ip_align,
 					  ring->buf_size,
 					  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) {
 			skb_free_frag(new_data);
 			netdev->stats.rx_dropped++;
 			goto release_desc;
@@ -1801,7 +1805,7 @@ static int mtk_poll_rx(struct napi_struc
 		addr64 = (MTK_HAS_CAPS(eth->soc->caps, MTK_8GB_ADDRESSING)) ?
 			  ((u64)(trxd.rxd2 & 0xf)) << 32 : 0;
 
-		dma_unmap_single(eth->dev,
+		dma_unmap_single(eth->dma_dev,
 				 (u64)(trxd.rxd1 | addr64),
 				 ring->buf_size, DMA_FROM_DEVICE);
 
@@ -1827,6 +1831,17 @@ static int mtk_poll_rx(struct napi_struc
 		skb_checksum_none_assert(skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 
+		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+		if (hash != MTK_RXD4_FOE_ENTRY) {
+			hash = jhash_1word(hash, 0);
+			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+		}
+
+		reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+			mtk_ppe_check_skb(eth->ppe, skb,
+					  trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
 			    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
@@ -2120,7 +2135,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 		goto no_tx_mem;
 
 	if (!eth->soc->has_sram)
-		ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 					       &ring->phys, GFP_KERNEL);
 	else {
 		ring->dma = eth->scratch_ring + MTK_DMA_SIZE * sz;
@@ -2154,7 +2169,7 @@ static int mtk_tx_alloc(struct mtk_eth *
 	 * descriptors in ring->dma_pdma.
 	 */
 	if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
-		ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz,
+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
 						    &ring->phys_pdma, GFP_KERNEL);
 		if (!ring->dma_pdma)
 			goto no_tx_mem;
@@ -2215,14 +2230,14 @@ static void mtk_tx_clean(struct mtk_eth
 	}
 
 	if (!eth->soc->has_sram && ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
 
 	if (ring->dma_pdma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  ring->dma_pdma, ring->phys_pdma);
 		ring->dma_pdma = NULL;
@@ -2267,7 +2282,7 @@ static int mtk_rx_alloc(struct mtk_eth *
 
 	if ((!eth->soc->has_sram) || (eth->soc->has_sram
 	    && (rx_flag != MTK_RX_FLAGS_NORMAL)))
-		ring->dma = dma_alloc_coherent(eth->dev,
+		ring->dma = dma_alloc_coherent(eth->dma_dev,
 					       rx_dma_size * eth->soc->txrx.rxd_size,
 					       &ring->phys, GFP_KERNEL);
 	else {
@@ -2284,11 +2299,11 @@ static int mtk_rx_alloc(struct mtk_eth *
 	for (i = 0; i < rx_dma_size; i++) {
 		struct mtk_rx_dma_v2 *rxd;
 
-		dma_addr_t dma_addr = dma_map_single(eth->dev,
+		dma_addr_t dma_addr = dma_map_single(eth->dma_dev,
 				ring->data[i] + NET_SKB_PAD + eth->ip_align,
 				ring->buf_size,
 				DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+		if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
 			return -ENOMEM;
 
 		rxd = ring->dma + i * eth->soc->txrx.rxd_size;
@@ -2360,7 +2375,7 @@ static void mtk_rx_clean(struct mtk_eth
 				  MTK_8GB_ADDRESSING)) ?
 			  ((u64)(rxd->rxd2 & 0xf)) << 32 : 0;
 
-		dma_unmap_single(eth->dev,
+		dma_unmap_single(eth->dma_dev,
 				 (u64)(rxd->rxd1 | addr64),
 				 ring->buf_size,
 				 DMA_FROM_DEVICE);
@@ -2374,7 +2389,7 @@ static void mtk_rx_clean(struct mtk_eth
 		return;
 
 	if (ring->dma) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  ring->dma_size * eth->soc->txrx.rxd_size,
 				  ring->dma,
 				  ring->phys);
@@ -2861,7 +2876,7 @@ static void mtk_dma_free(struct mtk_eth
 		if (eth->netdev[i])
 			netdev_reset_queue(eth->netdev[i]);
 	if ( !eth->soc->has_sram && eth->scratch_ring) {
-		dma_free_coherent(eth->dev,
+		dma_free_coherent(eth->dma_dev,
 				  MTK_DMA_SIZE * soc->txrx.txd_size,
 				  eth->scratch_ring, eth->phy_scratch_ring);
 		eth->scratch_ring = NULL;
@@ -3243,7 +3258,7 @@ static int mtk_stop(struct net_device *d
 	mtk_dma_free(eth);
 
 	if (eth->soc->offload_version)
-		mtk_ppe_stop(&eth->ppe);
+		mtk_ppe_stop(eth->ppe);
 
 	return 0;
 }
@@ -3320,6 +3335,8 @@ static int mtk_napi_init(struct mtk_eth
 
 static int mtk_hw_init(struct mtk_eth *eth, u32 type)
 {
+	u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+		       ETHSYS_DMA_AG_MAP_PPE;
 	int i, ret = 0;
 	u32 val;
 
@@ -3338,6 +3355,10 @@ static int mtk_hw_init(struct mtk_eth *e
 		goto err_disable_pm;
 	}
 
+	if (eth->ethsys)
+		regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+				   of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
 		ret = device_reset(eth->dev);
 		if (ret) {
@@ -4091,6 +4112,35 @@ free_netdev:
 	return err;
 }
 
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+	struct net_device *dev, *tmp;
+	LIST_HEAD(dev_list);
+	int i;
+
+	rtnl_lock();
+
+	for (i = 0; i < MTK_MAC_COUNT; i++) {
+		dev = eth->netdev[i];
+
+		if (!dev || !(dev->flags & IFF_UP))
+			continue;
+
+		list_add_tail(&dev->close_list, &dev_list);
+	}
+
+	dev_close_many(&dev_list, false);
+
+	eth->dma_dev = dma_dev;
+
+	list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+		list_del_init(&dev->close_list);
+		dev_open(dev, NULL);
+	}
+
+	rtnl_unlock();
+}
+
 static int mtk_probe(struct platform_device *pdev)
 {
 	struct device_node *mac_np;
@@ -4104,6 +4154,7 @@ static int mtk_probe(struct platform_dev
 	eth->soc = of_device_get_match_data(&pdev->dev);
 
 	eth->dev = &pdev->dev;
+	eth->dma_dev = &pdev->dev;
 	eth->base = devm_platform_ioremap_resource(pdev, 0);
 	if (IS_ERR(eth->base))
 		return PTR_ERR(eth->base);
@@ -4176,6 +4227,16 @@ static int mtk_probe(struct platform_dev
 		}
 	}
 
+	if (of_dma_is_coherent(pdev->dev.of_node)) {
+		struct regmap *cci;
+
+		cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+						      "mediatek,cci-control");
+		/* enable CPU/bus coherency */
+		if (!IS_ERR(cci))
+			regmap_write(cci, 0, 3);
+	}
+
 	if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
 		eth->xgmii = devm_kzalloc(eth->dev, sizeof(*eth->xgmii),
 					  GFP_KERNEL);
@@ -4217,6 +4278,22 @@ static int mtk_probe(struct platform_dev
 		}
 	}
 
+	for (i = 0;; i++) {
+		struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+							  "mediatek,wed", i);
+		static const u32 wdma_regs[] = {
+			MTK_WDMA0_BASE,
+			MTK_WDMA1_BASE
+		};
+		void __iomem *wdma;
+
+		if (!np || i >= ARRAY_SIZE(wdma_regs))
+			break;
+
+		wdma = eth->base + wdma_regs[i];
+		mtk_wed_add_hw(np, eth, wdma, i);
+	}
+
 	for (i = 0; i < MTK_MAX_IRQ_NUM; i++) {
 		if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
 			eth->irq[i] = eth->irq[0];
@@ -4320,10 +4397,11 @@ static int mtk_probe(struct platform_dev
 	}
 
 	if (eth->soc->offload_version) {
-		err = mtk_ppe_init(&eth->ppe, eth->dev,
-				   eth->base + MTK_ETH_PPE_BASE, 2);
-		if (err)
+		eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
+		if (!eth->ppe) {
+			err = -ENOMEM;
 			goto err_free_dev;
+		}
 
 		err = mtk_eth_offload_init(eth);
 		if (err)
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
old mode 100755
new mode 100644
index 349f98503..b52378bd6
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -549,6 +549,9 @@
 #define RX_DMA_SPORT_MASK	0x7
 #define RX_DMA_SPORT_MASK_V2	0xf
 
+#define MTK_WDMA0_BASE		0x2800
+#define MTK_WDMA1_BASE		0x2c00
+
 /* QDMA descriptor txd4 */
 #define TX_DMA_CHKSUM		(0x7 << 29)
 #define TX_DMA_TSO		BIT(28)
@@ -773,6 +776,12 @@
 #define ETHSYS_FE_RST_CHK_IDLE_EN	0x28
 
 
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP	0x408
+#define ETHSYS_DMA_AG_MAP_PDMA	BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA	BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE	BIT(2)
+
 /* SGMII subsystem config registers */
 /* Register to auto-negotiation restart */
 #define SGMSYS_PCS_CONTROL_1	0x0
@@ -1520,6 +1529,7 @@ struct mtk_phylink_priv {
 /* struct mtk_eth -	This is the main datasructure for holding the state
  *			of the driver
  * @dev:		The device pointer
+ * @dma_dev:		The device pointer used for dma mapping/alloc
  * @base:		The mapped register i/o base
  * @page_lock:		Make sure that register operations are atomic
  * @tx_irq__lock:	Make sure that IRQ register operations are atomic
@@ -1554,6 +1564,7 @@ struct mtk_phylink_priv {
 
 struct mtk_eth {
 	struct device			*dev;
+	struct device			*dma_dev;
 	void __iomem			*base;
 	void __iomem			*sram_base;
 	spinlock_t			page_lock;
@@ -1596,7 +1607,7 @@ struct mtk_eth {
 	spinlock_t			syscfg0_lock;
 	struct timer_list		mtk_dma_monitor_timer;
 
-	struct mtk_ppe			ppe;
+	struct mtk_ppe			*ppe;
 	struct rhashtable		flow_table;
 };
 
@@ -1655,6 +1666,7 @@ void ethsys_reset(struct mtk_eth *eth, u
 int mtk_eth_offload_init(struct mtk_eth *eth);
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		     void *type_data);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
 
 int mtk_mac2xgmii_id(struct mtk_eth *eth, int mac_id);
 int mtk_usxgmii_init(struct mtk_xgmii *ss, struct device_node *r);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
old mode 100644
new mode 100755
index 66298e223..3d75c22be
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
 #include <linux/iopoll.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"
 
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
+	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+	.key_len = offsetof(struct mtk_foe_bridge, key_end),
+	.automatic_shrinking = true,
+};
+
 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
 {
 	writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
 	return ppe_m32(ppe, reg, val, 0);
 }
 
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
 {
 	int ret;
@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
 	u32 hash;
 
 	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
-	case MTK_PPE_PKT_TYPE_BRIDGE:
-		hv1 = e->bridge.src_mac_lo;
-		hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
-		hv2 = e->bridge.src_mac_hi >> 16;
-		hv2 ^= e->bridge.dest_mac_lo;
-		hv3 = e->bridge.dest_mac_hi;
-		break;
 	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
 	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
 		hv1 = e->ipv4.orig.ports;
@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
 {
 	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return &entry->bridge.l2;
+
 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 		return &entry->ipv6.l2;
 
@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
 {
 	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return &entry->bridge.ib2;
+
 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 		return &entry->ipv6.ib2;
 
@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
 	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
 		entry->ipv6.ports = ports_pad;
 
-	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+		ether_addr_copy(entry->bridge.src_mac, src_mac);
+		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+		entry->bridge.ib2 = val;
+		l2 = &entry->bridge.l2;
+	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
 		entry->ipv6.ib2 = val;
 		l2 = &entry->ipv6.l2;
 	} else {
@@ -329,32 +351,167 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
 	return 0;
 }
 
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+			   int bss, int wcid)
+{
+	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+	u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+	*ib2 &= ~MTK_FOE_IB2_PORT_MG;
+	*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+	if (wdma_idx)
+		*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+	l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+	return 0;
+}
+
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
 	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }
 
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-			 u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+	int type, len;
+
+	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+		return false;
+
+	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+	else
+		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+	return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct hlist_head *head;
+	struct hlist_node *tmp;
+
+	if (entry->type == MTK_FLOW_TYPE_L2) {
+		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+				       mtk_flow_l2_ht_params);
+
+		head = &entry->l2_flows;
+		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+			__mtk_foe_entry_clear(ppe, entry);
+		return;
+	}
+
+	hlist_del_init(&entry->list);
+	if (entry->hash != 0xffff) {
+		ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+		ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+							      MTK_FOE_STATE_INVALID);
+		dma_wmb();
+	}
+	entry->hash = 0xffff;
+
+	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+		return;
+
+	hlist_del_init(&entry->l2_data.list);
+	kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+	u16 timestamp;
+	u16 now;
+
+	now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+	timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+	if (timestamp > now)
+		return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+	else
+		return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
+	struct mtk_flow_entry *cur;
 	struct mtk_foe_entry *hwe;
-	u32 hash;
+	struct hlist_node *tmp;
+	int idle;
+
+	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+		int cur_idle;
+		u32 ib1;
+
+		hwe = &ppe->foe_table[cur->hash];
+		ib1 = READ_ONCE(hwe->ib1);
+
+		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+			cur->hash = 0xffff;
+			__mtk_foe_entry_clear(ppe, cur);
+			continue;
+		}
+
+		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+		if (cur_idle >= idle)
+			continue;
+
+		idle = cur_idle;
+		entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+		entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+	}
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct mtk_foe_entry *hwe;
+	struct mtk_foe_entry foe;
+
+	spin_lock_bh(&ppe_lock);
+
+	if (entry->type == MTK_FLOW_TYPE_L2) {
+		mtk_flow_entry_update_l2(ppe, entry);
+		goto out;
+	}
+
+	if (entry->hash == 0xffff)
+		goto out;
+
+	hwe = &ppe->foe_table[entry->hash];
+	memcpy(&foe, hwe, sizeof(foe));
+	if (!mtk_flow_entry_match(entry, &foe)) {
+		entry->hash = 0xffff;
+		goto out;
+	}
+
+	entry->data.ib1 = foe.ib1;
+
+out:
+	spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+		       u16 hash)
+{
+	struct mtk_foe_entry *hwe;
+	u16 timestamp;
 
+	timestamp = mtk_eth_timestamp(ppe->eth);
 	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
 	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
 	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
 
-	hash = mtk_ppe_hash_entry(entry);
 	hwe = &ppe->foe_table[hash];
-	if (!mtk_foe_entry_usable(hwe)) {
-		hwe++;
-		hash++;
-
-		if (!mtk_foe_entry_usable(hwe))
-			return -ENOSPC;
-	}
-
 	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
 	wmb();
 	hwe->ib1 = entry->ib1;
@@ -362,32 +519,201 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 	dma_wmb();
 
 	mtk_ppe_cache_clear(ppe);
+}
 
-	return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	spin_lock_bh(&ppe_lock);
+	__mtk_foe_entry_clear(ppe, entry);
+	spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	entry->type = MTK_FLOW_TYPE_L2;
+
+	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+				      mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+	u32 hash;
+
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return mtk_foe_entry_commit_l2(ppe, entry);
+
+	hash = mtk_ppe_hash_entry(&entry->data);
+	entry->hash = 0xffff;
+	spin_lock_bh(&ppe_lock);
+	hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]);
+	spin_unlock_bh(&ppe_lock);
+
+	return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+			     u16 hash)
+{
+	struct mtk_flow_entry *flow_info;
+	struct mtk_foe_entry foe, *hwe;
+	struct mtk_foe_mac_info *l2;
+	u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+	int type;
+
+	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+			    GFP_ATOMIC);
+	if (!flow_info)
+		return;
+
+	flow_info->l2_data.base_flow = entry;
+	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+	flow_info->hash = hash;
+	hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]);
+	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+	hwe = &ppe->foe_table[hash];
+	memcpy(&foe, hwe, sizeof(foe));
+	foe.ib1 &= ib1_mask;
+	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+	l2 = mtk_foe_entry_l2(&foe);
+	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+		l2->etype = ETH_P_IPV6;
+
+	*mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+	__mtk_foe_entry_commit(ppe, &foe, hash);
 }
 
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+	struct hlist_head *head = &ppe->foe_flow[hash / 4];
+	struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+	struct mtk_flow_entry *entry;
+	struct mtk_foe_bridge key = {};
+	struct hlist_node *n;
+	struct ethhdr *eh;
+	bool found = false;
+	u8 *tag;
+
+	spin_lock_bh(&ppe_lock);
+
+	if (hash >= MTK_PPE_ENTRIES)
+		goto out;
+
+	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+		goto out;
+
+	hlist_for_each_entry_safe(entry, n, head, list) {
+		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+				     MTK_FOE_STATE_BIND))
+				continue;
+
+			entry->hash = 0xffff;
+			__mtk_foe_entry_clear(ppe, entry);
+			continue;
+		}
+
+		if (found || !mtk_flow_entry_match(entry, hwe)) {
+			if (entry->hash != 0xffff)
+				entry->hash = 0xffff;
+			continue;
+		}
+
+		entry->hash = hash;
+		__mtk_foe_entry_commit(ppe, &entry->data, hash);
+		found = true;
+	}
+
+	if (found)
+		goto out;
+
+	if (!skb)
+		goto out;
+
+	eh = eth_hdr(skb);
+	ether_addr_copy(key.dest_mac, eh->h_dest);
+	ether_addr_copy(key.src_mac, eh->h_source);
+	tag = skb->data - 2;
+	key.vlan = 0;
+	switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+	case htons(ETH_P_XDSA):
+		if (!netdev_uses_dsa(skb->dev) ||
+		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+			goto out;
+
+		tag += 4;
+		if (get_unaligned_be16(tag) != ETH_P_8021Q)
+			break;
+
+		fallthrough;
+#endif
+	case htons(ETH_P_8021Q):
+		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+		break;
+	default:
+		break;
+	}
+
+	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+	if (!entry)
+		goto out;
+
+	mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+	spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	mtk_flow_entry_update(ppe, entry);
+
+	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
 		 int version)
 {
+	struct device *dev = eth->dev;
 	struct mtk_foe_entry *foe;
+	struct mtk_ppe *ppe;
+
+	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+	if (!ppe)
+		return NULL;
+
+	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
 
 	/* need to allocate a separate device, since it PPE DMA access is
 	 * not coherent.
 	 */
 	ppe->base = base;
+	ppe->eth = eth;
 	ppe->dev = dev;
 	ppe->version = version;
 
 	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
 				  &ppe->foe_phys, GFP_KERNEL);
 	if (!foe)
-		return -ENOMEM;
+		return NULL;
 
 	ppe->foe_table = foe;
 
 	mtk_ppe_debugfs_init(ppe);
 
-	return 0;
+	return ppe;
 }
 
 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
 	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
 	int i, k;
 
-	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(ppe->foe_table));
 
 	if (!IS_ENABLED(CONFIG_SOC_MT7621))
 		return;
@@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 	      MTK_PPE_FLOW_CFG_IP4_NAT |
 	      MTK_PPE_FLOW_CFG_IP4_NAPT |
 	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
-	      MTK_PPE_FLOW_CFG_L2_BRIDGE |
 	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
 	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2a..1f5cf1c9a 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,6 +6,7 @@
 
 #include <linux/kernel.h>
 #include <linux/bitfield.h>
+#include <linux/rhashtable.h>
 
 #define MTK_ETH_PPE_BASE		0xc00
 
@@ -48,9 +49,9 @@ enum {
 #define MTK_FOE_IB2_DEST_PORT		GENMASK(7, 5)
 #define MTK_FOE_IB2_MULTICAST		BIT(8)
 
-#define MTK_FOE_IB2_WHNAT_QID2		GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX	BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT		BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2		GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX		BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO		BIT(17)
 
 #define MTK_FOE_IB2_PORT_MG		GENMASK(17, 12)
 
@@ -58,9 +59,9 @@
 
 #define MTK_FOE_IB2_DSCP		GENMASK(31, 24)
 
-#define MTK_FOE_VLAN2_WHNAT_BSS		GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID	GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING	GENMASK(15, 14)
+#define MTK_FOE_VLAN2_WINFO_BSS		GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID	GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING	GENMASK(15, 14)
 
 enum {
 	MTK_FOE_STATE_INVALID,
@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
 	u16 src_mac_lo;
 };
 
+/* software-only entry type */
 struct mtk_foe_bridge {
-	u32 dest_mac_hi;
+	u8 dest_mac[ETH_ALEN];
+	u8 src_mac[ETH_ALEN];
+	u16 vlan;
 
-	u16 src_mac_lo;
-	u16 dest_mac_lo;
-
-	u32 src_mac_hi;
+	struct {} key_end;
 
 	u32 ib2;
 
-	u32 _rsv[5];
-
-	u32 udf_tsid;
 	struct mtk_foe_mac_info l2;
 };
 
@@ -235,7 +233,37 @@ enum {
 	MTK_PPE_CPU_REASON_INVALID		= 0x1f,
 };
 
+enum {
+	MTK_FLOW_TYPE_L4,
+	MTK_FLOW_TYPE_L2,
+	MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+	union {
+		struct hlist_node list;
+		struct {
+			struct rhash_head l2_node;
+			struct hlist_head l2_flows;
+		};
+	};
+	u8 type;
+	s8 wed_index;
+	u16 hash;
+	union {
+		struct mtk_foe_entry data;
+		struct {
+			struct mtk_flow_entry *base_flow;
+			struct hlist_node list;
+			struct {} end;
+		} l2_data;
+	};
+	struct rhash_head node;
+	unsigned long cookie;
+};
+
 struct mtk_ppe {
+	struct mtk_eth *eth;
 	struct device *dev;
 	void __iomem *base;
 	int version;
@@ -243,19 +271,35 @@ struct mtk_ppe {
 	struct mtk_foe_entry *foe_table;
 	dma_addr_t foe_phys;
 
+	u16 foe_check_time[MTK_PPE_ENTRIES];
+	struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
+	struct rhashtable l2_flows;
+
 	void *acct_table;
 };
 
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
-		 int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
 int mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
 static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
-	ppe->foe_table[hash].ib1 = 0;
-	dma_wmb();
+	u16 now, diff;
+
+	if (!ppe)
+		return;
+
+	now = (u16)jiffies;
+	diff = now - ppe->foe_check_time[hash];
+	if (diff < HZ / 10)
+		return;
+
+	ppe->foe_check_time[hash] = now;
+	__mtk_ppe_check_skb(ppe, skb, hash);
 }
 
 static inline int
@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-			 u16 timestamp);
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+			   int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
 
 #endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b482340..a591ab1fd 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
 	static const char * const type_str[] = {
 		[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
 		[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
-		[MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
 		[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
 		[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
 		[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
@@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
 	struct dentry *root;
 
 	root = debugfs_create_dir("mtk_ppe", NULL);
+	if (!root)
+		return -ENOMEM;
+
 	debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
 	debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 4294f0c74..d4a012608 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -11,6 +11,7 @@
 #include <net/pkt_cls.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
+#include "mtk_wed.h"
 
 struct mtk_flow_data {
 	struct ethhdr eth;
@@ -30,6 +31,8 @@ struct mtk_flow_data {
 	__be16 src_port;
 	__be16 dst_port;
 
+	u16 vlan_in;
+
 	struct {
 		u16 id;
 		__be16 proto;
@@ -41,12 +44,6 @@ struct mtk_flow_data {
 	} pppoe;
 };
 
-struct mtk_flow_entry {
-	struct rhash_head node;
-	unsigned long cookie;
-	u16 hash;
-};
-
 static const struct rhashtable_params mtk_flow_ht_params = {
 	.head_offset = offsetof(struct mtk_flow_entry, node),
 	.key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
 	.automatic_shrinking = true,
 };
 
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
-	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
 static int
 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
 		       bool egress)
@@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
 	memcpy(dest, src, act->mangle.mask ? 2 : 4);
 }
 
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+	struct net_device_path_ctx ctx = {
+		.dev = dev,
+	};
+	struct net_device_path path = {};
+
+	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+		return -1;
+
+	if (!dev->netdev_ops->ndo_fill_forward_path)
+		return -1;
+
+	memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
+	if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+		return -1;
+
+	if (path.type != DEV_PATH_MTK_WDMA)
+		return -1;
+
+	info->wdma_idx = path.mtk_wdma.wdma_idx;
+	info->queue = path.mtk_wdma.queue;
+	info->bss = path.mtk_wdma.bss;
+	info->wcid = path.mtk_wdma.wcid;
+
+	return 0;
+}
+
 
 static int
 mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
 
 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
-			   struct net_device *dev)
+			   struct net_device *dev, const u8 *dest_mac,
+			   int *wed_index)
 {
+	struct mtk_wdma_info info = {};
 	int pse_port, dsa_port;
 
+	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+		mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+				       info.wcid);
+		pse_port = PSE_PPE0_PORT;
+		*wed_index = info.wdma_idx;
+		goto out;
+	}
+
 	dsa_port = mtk_flow_get_dsa_port(&dev);
 	if (dsa_port >= 0)
 		mtk_foe_entry_set_dsa(foe, dsa_port);
@@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 	else
 		return -EOPNOTSUPP;
 
+out:
 	mtk_foe_entry_set_pse_port(foe, pse_port);
 
 	return 0;
@@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	struct net_device *odev = NULL;
 	struct mtk_flow_entry *entry;
 	int offload_type = 0;
+	int wed_index = -1;
 	u16 addr_type = 0;
-	u32 timestamp;
 	u8 l4proto = 0;
 	int err = 0;
-	int hash;
 	int i;
 
 	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		return -EOPNOTSUPP;
 	}
 
+	switch (addr_type) {
+	case 0:
+		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+			struct flow_match_eth_addrs match;
+
+			flow_rule_match_eth_addrs(rule, &match);
+			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+		} else {
+			return -EOPNOTSUPP;
+		}
+
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+			struct flow_match_vlan match;
+
+			flow_rule_match_vlan(rule, &match);
+
+			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+				return -EOPNOTSUPP;
+
+			data.vlan_in = match.key->vlan_id;
+		}
+		break;
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+		break;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	flow_action_for_each(i, act, &rule->action) {
 		switch (act->id) {
 		case FLOW_ACTION_MANGLE:
+			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+				return -EOPNOTSUPP;
 			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
 				mtk_flow_offload_mangle_eth(act, &data.eth);
 			break;
@@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		}
 	}
 
-	switch (addr_type) {
-	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
-		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
-		break;
-	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
-		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
 	if (!is_valid_ether_addr(data.eth.h_source) ||
 	    !is_valid_ether_addr(data.eth.h_dest))
 		return -EINVAL;
@@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
 		struct flow_match_ports ports;
 
+		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
 		flow_rule_match_ports(rule, &ports);
 		data.src_port = ports.key->src;
 		data.dst_port = ports.key->dst;
-	} else {
+	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
 		return -EOPNOTSUPP;
 	}
 
@@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		if (act->id != FLOW_ACTION_MANGLE)
 			continue;
 
+		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
 		switch (act->mangle.htype) {
 		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
 		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		return err;
 	}
 
+	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+		foe.bridge.vlan = data.vlan_in;
+
 	if (data.vlan.num == 1) {
 		if (data.vlan.proto != htons(ETH_P_8021Q))
 			return -EOPNOTSUPP;
@@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (data.pppoe.num == 1)
 		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
 
-	err = mtk_flow_set_output_device(eth, &foe, odev);
+	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+					 &wed_index);
 	if (err)
 		return err;
 
+	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+		return err;
+
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cookie = f->cookie;
-	timestamp = mtk_eth_timestamp(eth);
-	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
-	if (hash < 0) {
-		err = hash;
+	memcpy(&entry->data, &foe, sizeof(entry->data));
+	entry->wed_index = wed_index;
+
+	if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
 		goto free;
-	}
 
-	entry->hash = hash;
 	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
 				     mtk_flow_ht_params);
 	if (err < 0)
-		goto clear_flow;
+		goto clear;
 
 	return 0;
-clear_flow:
-	mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+	mtk_foe_entry_clear(eth->ppe, entry);
 free:
 	kfree(entry);
+	if (wed_index >= 0)
+		mtk_wed_flow_remove(wed_index);
 	return err;
 }
 
@@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (!entry)
 		return -ENOENT;
 
-	mtk_foe_entry_clear(&eth->ppe, entry->hash);
+	mtk_foe_entry_clear(eth->ppe, entry);
 	rhashtable_remove_fast(&eth->flow_table, &entry->node,
 			       mtk_flow_ht_params);
+	if (entry->wed_index >= 0)
+		mtk_wed_flow_remove(entry->wed_index);
 	kfree(entry);
 
 	return 0;
@@ -406,7 +477,6 @@ static int
 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 {
 	struct mtk_flow_entry *entry;
-	int timestamp;
 	u32 idle;
 
 	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (!entry)
 		return -ENOENT;
 
-	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
-	if (timestamp < 0)
-		return -ETIMEDOUT;
-
-	idle = mtk_eth_timestamp(eth) - timestamp;
+	idle = mtk_foe_entry_idle_time(eth->ppe, entry);
 	f->stats.lastused = jiffies - idle * HZ;
 
 	return 0;
@@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 	struct flow_block_cb *block_cb;
 	flow_setup_cb_t *cb;
 
-	if (!eth->ppe.foe_table)
+	if (!eth->ppe || !eth->ppe->foe_table)
 		return -EOPNOTSUPP;
 
 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		     void *type_data)
 {
-	if (type == TC_SETUP_FT)
+	switch (type) {
+	case TC_SETUP_BLOCK:
+	case TC_SETUP_FT:
 		return mtk_eth_setup_tc_block(dev, type_data);
-
-	return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
+	}
 }
 
 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
-	if (!eth->ppe.foe_table)
+	if (!eth->ppe || !eth->ppe->foe_table)
 		return 0;
 
 	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
new file mode 100644
index 000000000..ea1cbdf1a
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/debugfs.h>
+#include <linux/iopoll.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed.h"
+#include "mtk_ppe.h"
+
+#define MTK_PCIE_BASE(n)		(0x1a143000 + (n) * 0x2000)
+
+#define MTK_WED_PKT_SIZE		1900
+#define MTK_WED_BUF_SIZE		2048
+#define MTK_WED_BUF_PER_PAGE		(PAGE_SIZE / 2048)
+
+#define MTK_WED_TX_RING_SIZE		2048
+#define MTK_WED_WDMA_RING_SIZE		1024
+
+static struct mtk_wed_hw *hw_list[2];
+static DEFINE_MUTEX(hw_lock);
+
+static void
+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+	regmap_update_bits(dev->hw->regs, reg, mask | val, val);
+}
+
+static void
+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	return wed_m32(dev, reg, 0, mask);
+}
+
+static void
+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	return wed_m32(dev, reg, mask, 0);
+}
+
+static void
+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+	wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
+}
+
+static void
+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	wdma_m32(dev, reg, 0, mask);
+}
+
+static u32
+mtk_wed_read_reset(struct mtk_wed_device *dev)
+{
+	return wed_r32(dev, MTK_WED_RESET);
+}
+
+static void
+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
+{
+	u32 status;
+
+	wed_w32(dev, MTK_WED_RESET, mask);
+	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
+			       !(status & mask), 0, 1000))
+		WARN_ON_ONCE(1);
+}
+
+static struct mtk_wed_hw *
+mtk_wed_assign(struct mtk_wed_device *dev)
+{
+	struct mtk_wed_hw *hw;
+
+	hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+	if (!hw || hw->wed_dev)
+		return NULL;
+
+	hw->wed_dev = dev;
+	return hw;
+}
+
1676+static int
1677+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
1678+{
1679+ struct mtk_wdma_desc *desc;
1680+ dma_addr_t desc_phys;
1681+ void **page_list;
1682+ int token = dev->wlan.token_start;
1683+ int ring_size;
1684+ int n_pages;
1685+ int i, page_idx;
1686+
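+	/* Round the buffer count down to a whole number of pages; each
+	 * page is carved into MTK_WED_BUF_PER_PAGE buffers of
+	 * MTK_WED_BUF_SIZE bytes.
+	 */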
1687+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
1688+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
1689+
1690+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
1691+ if (!page_list)
1692+ return -ENOMEM;
1693+
1694+ dev->buf_ring.size = ring_size;
1695+ dev->buf_ring.pages = page_list;
1696+
1697+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
1698+ &desc_phys, GFP_KERNEL);
1699+ if (!desc)
1700+ return -ENOMEM;
1701+
1702+ dev->buf_ring.desc = desc;
1703+ dev->buf_ring.desc_phys = desc_phys;
1704+
1705+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
1706+ dma_addr_t page_phys, buf_phys;
1707+ struct page *page;
1708+ void *buf;
1709+ int s;
1710+
1711+ page = __dev_alloc_pages(GFP_KERNEL, 0);
1712+ if (!page)
1713+ return -ENOMEM;
1714+
1715+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
1716+ DMA_BIDIRECTIONAL);
1717+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
1718+ __free_page(page);
1719+ return -ENOMEM;
1720+ }
1721+
1722+ page_list[page_idx++] = page;
1723+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
1724+ DMA_BIDIRECTIONAL);
1725+
1726+ buf = page_to_virt(page);
1727+ buf_phys = page_phys;
1728+
1729+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
1730+ u32 txd_size;
1731+
1732+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
1733+
1734+ desc->buf0 = buf_phys;
1735+ desc->buf1 = buf_phys + txd_size;
1736+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
1737+ txd_size) |
1738+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
1739+ MTK_WED_BUF_SIZE - txd_size) |
1740+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
1741+ desc->info = 0;
1742+ desc++;
1743+
1744+ buf += MTK_WED_BUF_SIZE;
1745+ buf_phys += MTK_WED_BUF_SIZE;
1746+ }
1747+
1748+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
1749+ DMA_BIDIRECTIONAL);
1750+ }
1751+
1752+ return 0;
1753+}
1754+
1755+static void
1756+mtk_wed_free_buffer(struct mtk_wed_device *dev)
1757+{
1758+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
1759+ void **page_list = dev->buf_ring.pages;
1760+ int page_idx;
1761+ int i;
1762+
1763+ if (!page_list)
1764+ return;
1765+
1766+ if (!desc)
1767+ goto free_pagelist;
1768+
1769+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
1770+ void *page = page_list[page_idx++];
1771+
1772+ if (!page)
1773+ break;
1774+
1775+ dma_unmap_page(dev->hw->dev, desc[i].buf0,
1776+ PAGE_SIZE, DMA_BIDIRECTIONAL);
1777+ __free_page(page);
1778+ }
1779+
1780+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
1781+ desc, dev->buf_ring.desc_phys);
1782+
1783+free_pagelist:
1784+ kfree(page_list);
1785+}
1786+
1787+static void
1788+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
1789+{
1790+ if (!ring->desc)
1791+ return;
1792+
1793+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
1794+ ring->desc, ring->desc_phys);
1795+}
1796+
1797+static void
1798+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
1799+{
1800+ int i;
1801+
1802+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
1803+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
1804+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1805+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
1806+}
1807+
1808+static void
1809+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
1810+{
1811+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1812+
1813+ if (!dev->hw->num_flows)
1814+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1815+
1816+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
1817+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
1818+}
1819+
1820+static void
1821+mtk_wed_stop(struct mtk_wed_device *dev)
1822+{
1823+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
1824+ mtk_wed_set_ext_int(dev, false);
1825+
1826+ wed_clr(dev, MTK_WED_CTRL,
1827+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1828+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1829+ MTK_WED_CTRL_WED_TX_BM_EN |
1830+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1831+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
1832+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
1833+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
1834+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
1835+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1836+
1837+ wed_clr(dev, MTK_WED_GLO_CFG,
1838+ MTK_WED_GLO_CFG_TX_DMA_EN |
1839+ MTK_WED_GLO_CFG_RX_DMA_EN);
1840+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1841+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1842+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1843+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1844+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1845+}
1846+
1847+static void
1848+mtk_wed_detach(struct mtk_wed_device *dev)
1849+{
1850+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
1851+ struct mtk_wed_hw *hw = dev->hw;
1852+
1853+ mutex_lock(&hw_lock);
1854+
1855+ mtk_wed_stop(dev);
1856+
1857+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1858+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1859+
1860+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1861+
1862+ mtk_wed_free_buffer(dev);
1863+ mtk_wed_free_tx_rings(dev);
1864+
1865+ if (of_dma_is_coherent(wlan_node))
1866+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1867+ BIT(hw->index), BIT(hw->index));
1868+
1869+ if (!hw_list[!hw->index]->wed_dev &&
1870+ hw->eth->dma_dev != hw->eth->dev)
1871+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
1872+
1873+ memset(dev, 0, sizeof(*dev));
1874+ module_put(THIS_MODULE);
1875+
1876+ hw->wed_dev = NULL;
1877+ mutex_unlock(&hw_lock);
1878+}
1879+
1880+static void
1881+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1882+{
1883+ u32 mask, set;
1884+ u32 offset;
1885+
1886+ mtk_wed_stop(dev);
1887+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1888+
1889+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1890+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1891+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1892+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1893+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1894+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1895+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1896+
1897+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
1898+
1899+ offset = dev->hw->index ? 0x04000400 : 0;
1900+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1901+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1902+
1903+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
1904+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1905+}
1906+
1907+static void
1908+mtk_wed_hw_init(struct mtk_wed_device *dev)
1909+{
1910+ if (dev->init_done)
1911+ return;
1912+
1913+ dev->init_done = true;
1914+ mtk_wed_set_ext_int(dev, false);
1915+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1916+ MTK_WED_TX_BM_CTRL_PAUSE |
1917+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
1918+ dev->buf_ring.size / 128) |
1919+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
1920+ MTK_WED_TX_RING_SIZE / 256));
1921+
1922+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1923+
1924+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1925+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1926+ dev->wlan.token_start) |
1927+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1928+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1929+
1930+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1931+
1932+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1933+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
1934+ MTK_WED_TX_BM_DYN_THR_HI);
1935+
1936+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1937+
1938+ wed_set(dev, MTK_WED_CTRL,
1939+ MTK_WED_CTRL_WED_TX_BM_EN |
1940+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1941+
1942+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1943+}
1944+
1945+static void
1946+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
1947+{
1948+ int i;
1949+
1950+ for (i = 0; i < size; i++) {
1951+ desc[i].buf0 = 0;
1952+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
1953+ desc[i].buf1 = 0;
1954+ desc[i].info = 0;
1955+ }
1956+}
1957+
1958+static bool
1959+mtk_wed_check_busy(struct mtk_wed_device *dev)
1960+{
1961+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
1962+ return true;
1963+
1964+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
1965+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
1966+ return true;
1967+
1968+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
1969+ return true;
1970+
1971+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
1972+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1973+ return true;
1974+
1975+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
1976+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1977+ return true;
1978+
1979+ if (wed_r32(dev, MTK_WED_CTRL) &
1980+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
1981+ return true;
1982+
1983+ return false;
1984+}
1985+
1986+static int
1987+mtk_wed_poll_busy(struct mtk_wed_device *dev)
1988+{
1989+ int sleep = 15000;
1990+ int timeout = 100 * sleep;
1991+ u32 val;
1992+
1993+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1994+ timeout, false, dev);
1995+}
1996+
1997+static void
1998+mtk_wed_reset_dma(struct mtk_wed_device *dev)
1999+{
2000+ bool busy = false;
2001+ u32 val;
2002+ int i;
2003+
2004+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
2005+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
2006+
2007+ if (!desc)
2008+ continue;
2009+
2010+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
2011+ }
2012+
2013+ if (mtk_wed_poll_busy(dev))
2014+ busy = mtk_wed_check_busy(dev);
2015+
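+	/* A hard block reset is needed if the DMA engines did not go
+	 * idle; otherwise resetting the ring indices is sufficient.
+	 */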
2016+ if (busy) {
2017+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
2018+ } else {
2019+ wed_w32(dev, MTK_WED_RESET_IDX,
2020+ MTK_WED_RESET_IDX_TX |
2021+ MTK_WED_RESET_IDX_RX);
2022+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
2023+ }
2024+
2025+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
2026+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
2027+
2028+ if (busy) {
2029+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
2030+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
2031+ } else {
2032+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
2033+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
2034+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
2035+
2036+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2037+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2038+
2039+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
2040+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
2041+ }
2042+
2043+ for (i = 0; i < 100; i++) {
2044+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
2045+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
2046+ break;
2047+ }
2048+
2049+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
2050+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
2051+
2052+ if (busy) {
2053+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
2054+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
2055+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
2056+ } else {
2057+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
2058+ MTK_WED_WPDMA_RESET_IDX_TX |
2059+ MTK_WED_WPDMA_RESET_IDX_RX);
2060+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
2061+ }
2062+
2064+}
2065+static int
2066+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
2067+ int size)
2068+{
2069+ ring->desc = dma_alloc_coherent(dev->hw->dev,
2070+ size * sizeof(*ring->desc),
2071+ &ring->desc_phys, GFP_KERNEL);
2072+ if (!ring->desc)
2073+ return -ENOMEM;
2074+
2075+ ring->size = size;
2076+ mtk_wed_ring_reset(ring->desc, size);
2077+
2078+ return 0;
2079+}
2080+
2081+static int
2082+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
2083+{
2084+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
2085+
2086+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
2087+ return -ENOMEM;
2088+
2089+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2090+ wdma->desc_phys);
2091+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2092+ size);
2093+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2094+
2095+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
2096+ wdma->desc_phys);
2097+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
2098+ size);
2099+
2100+ return 0;
2101+}
2102+
2103+static void
2104+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
2105+{
2106+ u32 wdma_mask;
2107+ u32 val;
2108+ int i;
2109+
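+	/* WDMA rings the WLAN driver did not claim through
+	 * mtk_wed_tx_ring_setup() get a minimal 16-entry configuration
+	 * so that no ring is left unprogrammed.
+	 */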
2110+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
2111+ if (!dev->tx_wdma[i].desc)
2112+ mtk_wed_wdma_ring_setup(dev, i, 16);
2113+
2114+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
2115+
2116+ mtk_wed_hw_init(dev);
2117+
2118+ wed_set(dev, MTK_WED_CTRL,
2119+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
2120+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
2121+ MTK_WED_CTRL_WED_TX_BM_EN |
2122+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
2123+
2124+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
2125+
2126+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
2127+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
2128+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
2129+
2130+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
2131+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
2132+
2133+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
2134+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
2135+
2136+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
2137+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
2138+
2139+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
2140+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
2141+
2142+ wed_set(dev, MTK_WED_GLO_CFG,
2143+ MTK_WED_GLO_CFG_TX_DMA_EN |
2144+ MTK_WED_GLO_CFG_RX_DMA_EN);
2145+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
2146+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
2147+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
2148+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
2149+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
2150+
2151+ mtk_wed_set_ext_int(dev, true);
2152+ val = dev->wlan.wpdma_phys |
2153+ MTK_PCIE_MIRROR_MAP_EN |
2154+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
2155+
2156+ if (dev->hw->index)
2157+ val |= BIT(1);
2158+ val |= BIT(0);
2159+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
2160+
2161+ dev->running = true;
2162+}
2163+
2164+static int
2165+mtk_wed_attach(struct mtk_wed_device *dev)
2166+ __releases(RCU)
2167+{
2168+ struct mtk_wed_hw *hw;
2169+ int ret = 0;
2170+
2171+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
2172+ "mtk_wed_attach without holding the RCU read lock");
2173+
2174+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
2175+ !try_module_get(THIS_MODULE))
2176+ ret = -ENODEV;
2177+
2178+ rcu_read_unlock();
2179+
2180+ if (ret)
2181+ return ret;
2182+
2183+ mutex_lock(&hw_lock);
2184+
2185+ hw = mtk_wed_assign(dev);
2186+ if (!hw) {
2187+ module_put(THIS_MODULE);
2188+ ret = -ENODEV;
2189+ goto out;
2190+ }
2191+
2192+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
2193+
2194+ dev->hw = hw;
2195+ dev->dev = hw->dev;
2196+ dev->irq = hw->irq;
2197+ dev->wdma_idx = hw->index;
2198+
2199+ if (hw->eth->dma_dev == hw->eth->dev &&
2200+ of_dma_is_coherent(hw->eth->dev->of_node))
2201+ mtk_eth_set_dma_device(hw->eth, hw->dev);
2202+
2203+ ret = mtk_wed_buffer_alloc(dev);
2204+ if (ret) {
2205+ mtk_wed_detach(dev);
2206+ goto out;
2207+ }
2208+
2209+ mtk_wed_hw_init_early(dev);
2210+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
2211+
2212+out:
2213+ mutex_unlock(&hw_lock);
2214+
2215+ return ret;
2216+}
2217+
2218+static int
2219+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
2220+{
2221+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
2222+
2223+	/*
2224+	 * Tx ring redirection:
2225+	 * Instead of configuring the WLAN PDMA TX ring directly, the
2226+	 * DMA ring allocated by the WLAN driver is configured into the
2227+	 * WED MTK_WED_RING_TX(n) registers.
2228+	 *
2229+	 * The WED driver then posts its own DMA ring as the WLAN PDMA TX
2230+	 * ring and configures it into the MTK_WED_WPDMA_RING_TX(n)
2231+	 * registers. It is filled with packets picked up from the WED TX
2232+	 * ring and from WDMA RX.
2233+	 */
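+	/*
+	 * Resulting data path (schematic):
+	 *
+	 *   WED TX ring (fed by the WLAN driver) --\
+	 *                                           >--> WPDMA (WLAN PDMA TX)
+	 *   WDMA RX ring (fed by the ethernet SoC) -/
+	 */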
2234+
2235+	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
2236+
2237+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
2238+ return -ENOMEM;
2239+
2240+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
2241+ return -ENOMEM;
2242+
2243+ ring->reg_base = MTK_WED_RING_TX(idx);
2244+ ring->wpdma = regs;
2245+
2246+ /* WED -> WPDMA */
2247+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
2248+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
2249+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
2250+
2251+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
2252+ ring->desc_phys);
2253+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
2254+ MTK_WED_TX_RING_SIZE);
2255+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
2256+
2257+ return 0;
2258+}
2259+
2260+static int
2261+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
2262+{
2263+ struct mtk_wed_ring *ring = &dev->txfree_ring;
2264+ int i;
2265+
2266+	/*
2267+	 * For txfree event handling, the same DMA ring is shared between
2268+	 * WED and WLAN. The WLAN driver accesses the ring index registers
2269+	 * through WED.
2270+	 */
2271+ ring->reg_base = MTK_WED_RING_RX(1);
2272+ ring->wpdma = regs;
2273+
2274+ for (i = 0; i < 12; i += 4) {
2275+ u32 val = readl(regs + i);
2276+
2277+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
2278+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
2279+ }
2280+
2281+ return 0;
2282+}
2283+
2284+static u32
2285+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
2286+{
2287+ u32 val;
2288+
2289+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
2290+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
2291+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
2292+ if (!dev->hw->num_flows)
2293+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
2294+ if (val && net_ratelimit())
2295+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
2296+
2297+ val = wed_r32(dev, MTK_WED_INT_STATUS);
2298+ val &= mask;
2299+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
2300+
2301+ return val;
2302+}
2303+
2304+static void
2305+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
2306+{
2307+ if (!dev->running)
2308+ return;
2309+
2310+ mtk_wed_set_ext_int(dev, !!mask);
2311+ wed_w32(dev, MTK_WED_INT_MASK, mask);
2312+}
2313+
2314+int mtk_wed_flow_add(int index)
2315+{
2316+ struct mtk_wed_hw *hw = hw_list[index];
2317+ int ret;
2318+
2319+ if (!hw || !hw->wed_dev)
2320+ return -ENODEV;
2321+
2322+ if (hw->num_flows) {
2323+ hw->num_flows++;
2324+ return 0;
2325+ }
2326+
2327+ mutex_lock(&hw_lock);
2328+ if (!hw->wed_dev) {
2329+ ret = -ENODEV;
2330+ goto out;
2331+ }
2332+
2333+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
2334+ if (!ret)
2335+ hw->num_flows++;
2336+ mtk_wed_set_ext_int(hw->wed_dev, true);
2337+
2338+out:
2339+ mutex_unlock(&hw_lock);
2340+
2341+ return ret;
2342+}
2343+
2344+void mtk_wed_flow_remove(int index)
2345+{
2346+ struct mtk_wed_hw *hw = hw_list[index];
2347+
2348+ if (!hw)
2349+ return;
2350+
2351+ if (--hw->num_flows)
2352+ return;
2353+
2354+ mutex_lock(&hw_lock);
2355+ if (!hw->wed_dev)
2356+ goto out;
2357+
2358+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
2359+ mtk_wed_set_ext_int(hw->wed_dev, true);
2360+
2361+out:
2362+ mutex_unlock(&hw_lock);
2363+}
2364+
2365+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2366+ void __iomem *wdma, int index)
2367+{
2368+ static const struct mtk_wed_ops wed_ops = {
2369+ .attach = mtk_wed_attach,
2370+ .tx_ring_setup = mtk_wed_tx_ring_setup,
2371+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2372+ .start = mtk_wed_start,
2373+ .stop = mtk_wed_stop,
2374+ .reset_dma = mtk_wed_reset_dma,
2375+ .reg_read = wed_r32,
2376+ .reg_write = wed_w32,
2377+ .irq_get = mtk_wed_irq_get,
2378+ .irq_set_mask = mtk_wed_irq_set_mask,
2379+ .detach = mtk_wed_detach,
2380+ };
2381+ struct device_node *eth_np = eth->dev->of_node;
2382+ struct platform_device *pdev;
2383+ struct mtk_wed_hw *hw;
2384+ struct regmap *regs;
2385+ int irq;
2386+
2387+ if (!np)
2388+ return;
2389+
2390+ pdev = of_find_device_by_node(np);
2391+ if (!pdev)
2392+ return;
2393+
2394+ get_device(&pdev->dev);
2395+ irq = platform_get_irq(pdev, 0);
2396+ if (irq < 0)
2397+ return;
2398+
2399+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
2400+ if (!regs)
2401+ return;
2402+
2403+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
2404+
2405+ mutex_lock(&hw_lock);
2406+
2407+ if (WARN_ON(hw_list[index]))
2408+ goto unlock;
2409+
2410+	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		goto unlock;
2411+ hw->node = np;
2412+ hw->regs = regs;
2413+ hw->eth = eth;
2414+ hw->dev = &pdev->dev;
2415+ hw->wdma = wdma;
2416+ hw->index = index;
2417+ hw->irq = irq;
2418+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2419+ "mediatek,pcie-mirror");
2420+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2421+ "mediatek,hifsys");
2422+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
2423+ kfree(hw);
2424+ goto unlock;
2425+ }
2426+
2427+ if (!index) {
2428+ regmap_write(hw->mirror, 0, 0);
2429+ regmap_write(hw->mirror, 4, 0);
2430+ }
2431+ mtk_wed_hw_add_debugfs(hw);
2432+
2433+ hw_list[index] = hw;
2434+
2435+unlock:
2436+ mutex_unlock(&hw_lock);
2437+}
2438+
2439+void mtk_wed_exit(void)
2440+{
2441+ int i;
2442+
2443+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
2444+
2445+ synchronize_rcu();
2446+
2447+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
2448+ struct mtk_wed_hw *hw;
2449+
2450+ hw = hw_list[i];
2451+ if (!hw)
2452+ continue;
2453+
2454+ hw_list[i] = NULL;
2455+ debugfs_remove(hw->debugfs_dir);
2456+ put_device(hw->dev);
2457+ kfree(hw);
2458+ }
2459+}
2460diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2461new file mode 100644
2462index 000000000..981ec613f
2463--- /dev/null
2464+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2465@@ -0,0 +1,135 @@
2466+// SPDX-License-Identifier: GPL-2.0-only
2467+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2468+
2469+#ifndef __MTK_WED_PRIV_H
2470+#define __MTK_WED_PRIV_H
2471+
2472+#include <linux/soc/mediatek/mtk_wed.h>
2473+#include <linux/debugfs.h>
2474+#include <linux/regmap.h>
2475+#include <linux/netdevice.h>
2476+
2477+struct mtk_eth;
2478+
2479+struct mtk_wed_hw {
2480+ struct device_node *node;
2481+ struct mtk_eth *eth;
2482+ struct regmap *regs;
2483+ struct regmap *hifsys;
2484+ struct device *dev;
2485+ void __iomem *wdma;
2486+ struct regmap *mirror;
2487+ struct dentry *debugfs_dir;
2488+ struct mtk_wed_device *wed_dev;
2489+ u32 debugfs_reg;
2490+ u32 num_flows;
2491+ char dirname[5];
2492+ int irq;
2493+ int index;
2494+};
2495+
2496+struct mtk_wdma_info {
2497+ u8 wdma_idx;
2498+ u8 queue;
2499+ u16 wcid;
2500+ u8 bss;
2501+};
2502+
2503+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2504+static inline void
2505+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2506+{
2507+ regmap_write(dev->hw->regs, reg, val);
2508+}
2509+
2510+static inline u32
2511+wed_r32(struct mtk_wed_device *dev, u32 reg)
2512+{
2513+ unsigned int val;
2514+
2515+ regmap_read(dev->hw->regs, reg, &val);
2516+
2517+ return val;
2518+}
2519+
2520+static inline void
2521+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2522+{
2523+ writel(val, dev->hw->wdma + reg);
2524+}
2525+
2526+static inline u32
2527+wdma_r32(struct mtk_wed_device *dev, u32 reg)
2528+{
2529+ return readl(dev->hw->wdma + reg);
2530+}
2531+
2532+static inline u32
2533+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
2534+{
2535+ if (!dev->tx_ring[ring].wpdma)
2536+ return 0;
2537+
2538+ return readl(dev->tx_ring[ring].wpdma + reg);
2539+}
2540+
2541+static inline void
2542+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
2543+{
2544+ if (!dev->tx_ring[ring].wpdma)
2545+ return;
2546+
2547+ writel(val, dev->tx_ring[ring].wpdma + reg);
2548+}
2549+
2550+static inline u32
2551+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
2552+{
2553+ if (!dev->txfree_ring.wpdma)
2554+ return 0;
2555+
2556+ return readl(dev->txfree_ring.wpdma + reg);
2557+}
2558+
2559+static inline void
2560+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2561+{
2562+ if (!dev->txfree_ring.wpdma)
2563+ return;
2564+
2565+ writel(val, dev->txfree_ring.wpdma + reg);
2566+}
2567+
2568+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2569+ void __iomem *wdma, int index);
2570+void mtk_wed_exit(void);
2571+int mtk_wed_flow_add(int index);
2572+void mtk_wed_flow_remove(int index);
2573+#else
2574+static inline void
2575+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2576+ void __iomem *wdma, int index)
2577+{
2578+}
2579+static inline void
2580+mtk_wed_exit(void)
2581+{
2582+}
2583+static inline int mtk_wed_flow_add(int index)
2584+{
2585+ return -EINVAL;
2586+}
2587+static inline void mtk_wed_flow_remove(int index)
2588+{
2589+}
2590+#endif
2591+
2592+#ifdef CONFIG_DEBUG_FS
2593+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
2594+#else
2595+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2596+{
2597+}
2598+#endif
2599+
2600+#endif
2601diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2602new file mode 100644
2603index 000000000..a81d3fd1a
2604--- /dev/null
2605+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2606@@ -0,0 +1,175 @@
2607+// SPDX-License-Identifier: GPL-2.0-only
2608+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2609+
2610+#include <linux/seq_file.h>
2611+#include "mtk_wed.h"
2612+#include "mtk_wed_regs.h"
2613+
2614+struct reg_dump {
2615+ const char *name;
2616+ u16 offset;
2617+ u8 type;
2618+ u8 base;
2619+};
2620+
2621+enum {
2622+ DUMP_TYPE_STRING,
2623+ DUMP_TYPE_WED,
2624+ DUMP_TYPE_WDMA,
2625+ DUMP_TYPE_WPDMA_TX,
2626+ DUMP_TYPE_WPDMA_TXFREE,
2627+};
2628+
2629+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2630+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2631+#define DUMP_RING(_prefix, _base, ...) \
2632+ { _prefix " BASE", _base, __VA_ARGS__ }, \
2633+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2634+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
2635+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2636+
2637+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2638+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2639+
2640+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2641+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
2642+
2643+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
2644+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
2645+
2646+static void
2647+print_reg_val(struct seq_file *s, const char *name, u32 val)
2648+{
2649+ seq_printf(s, "%-32s %08x\n", name, val);
2650+}
2651+
2652+static void
2653+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2654+ const struct reg_dump *regs, int n_regs)
2655+{
2656+ const struct reg_dump *cur;
2657+ u32 val;
2658+
2659+ for (cur = regs; cur < &regs[n_regs]; cur++) {
2660+ switch (cur->type) {
2661+ case DUMP_TYPE_STRING:
2662+ seq_printf(s, "%s======== %s:\n",
2663+ cur > regs ? "\n" : "",
2664+ cur->name);
2665+ continue;
2666+ case DUMP_TYPE_WED:
2667+ val = wed_r32(dev, cur->offset);
2668+ break;
2669+ case DUMP_TYPE_WDMA:
2670+ val = wdma_r32(dev, cur->offset);
2671+ break;
2672+ case DUMP_TYPE_WPDMA_TX:
2673+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2674+ break;
2675+ case DUMP_TYPE_WPDMA_TXFREE:
2676+ val = wpdma_txfree_r32(dev, cur->offset);
2677+ break;
2678+ }
2679+ print_reg_val(s, cur->name, val);
2680+ }
2681+}
2682+
2684+static int
2685+wed_txinfo_show(struct seq_file *s, void *data)
2686+{
2687+ static const struct reg_dump regs[] = {
2688+ DUMP_STR("WED TX"),
2689+ DUMP_WED(WED_TX_MIB(0)),
2690+ DUMP_WED_RING(WED_RING_TX(0)),
2691+
2692+ DUMP_WED(WED_TX_MIB(1)),
2693+ DUMP_WED_RING(WED_RING_TX(1)),
2694+
2695+ DUMP_STR("WPDMA TX"),
2696+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
2697+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
2698+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
2699+
2700+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
2701+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
2702+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
2703+
2704+ DUMP_STR("WPDMA TX"),
2705+ DUMP_WPDMA_TX_RING(0),
2706+ DUMP_WPDMA_TX_RING(1),
2707+
2708+ DUMP_STR("WED WDMA RX"),
2709+ DUMP_WED(WED_WDMA_RX_MIB(0)),
2710+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
2711+ DUMP_WED(WED_WDMA_RX_THRES(0)),
2712+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
2713+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
2714+
2715+ DUMP_WED(WED_WDMA_RX_MIB(1)),
2716+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
2717+ DUMP_WED(WED_WDMA_RX_THRES(1)),
2718+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
2719+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
2720+
2721+ DUMP_STR("WDMA RX"),
2722+ DUMP_WDMA(WDMA_GLO_CFG),
2723+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
2724+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
2725+ };
2726+ struct mtk_wed_hw *hw = s->private;
2727+ struct mtk_wed_device *dev = hw->wed_dev;
2728+
2729+ if (!dev)
2730+ return 0;
2731+
2732+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2733+
2734+ return 0;
2735+}
2736+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2737+
2739+static int
2740+mtk_wed_reg_set(void *data, u64 val)
2741+{
2742+ struct mtk_wed_hw *hw = data;
2743+
2744+ regmap_write(hw->regs, hw->debugfs_reg, val);
2745+
2746+ return 0;
2747+}
2748+
2749+static int
2750+mtk_wed_reg_get(void *data, u64 *val)
2751+{
2752+ struct mtk_wed_hw *hw = data;
2753+ unsigned int regval;
2754+ int ret;
2755+
2756+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
2757+ if (ret)
2758+ return ret;
2759+
2760+ *val = regval;
2761+
2762+ return 0;
2763+}
2764+
2765+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2766+ "0x%08llx\n");
2767+
2768+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2769+{
2770+ struct dentry *dir;
2771+
2772+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
2773+ dir = debugfs_create_dir(hw->dirname, NULL);
2774+	if (IS_ERR(dir))
2775+ return;
2776+
2777+ hw->debugfs_dir = dir;
2778+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2779+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2780+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2781+}
2782diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2783new file mode 100644
2784index 000000000..a5d9d8a5b
2785--- /dev/null
2786+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2787@@ -0,0 +1,8 @@
2788+// SPDX-License-Identifier: GPL-2.0-only
2789+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2790+
2791+#include <linux/kernel.h>
2792+#include <linux/soc/mediatek/mtk_wed.h>
2793+
2794+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2795+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
2796diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2797new file mode 100644
2798index 000000000..0a0465ea5
2799--- /dev/null
2800+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2801@@ -0,0 +1,251 @@
2802+// SPDX-License-Identifier: GPL-2.0-only
2803+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2804+
2805+#ifndef __MTK_WED_REGS_H
2806+#define __MTK_WED_REGS_H
2807+
2808+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2809+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2810+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
2811+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2812+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2813+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2814+
2815+struct mtk_wdma_desc {
2816+ __le32 buf0;
2817+ __le32 ctrl;
2818+ __le32 buf1;
2819+ __le32 info;
2820+} __packed __aligned(4);
2821+
2822+#define MTK_WED_RESET 0x008
2823+#define MTK_WED_RESET_TX_BM BIT(0)
2824+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
2825+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
2826+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
2827+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
2828+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
2829+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2830+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2831+#define MTK_WED_RESET_WED BIT(31)
2832+
2833+#define MTK_WED_CTRL 0x00c
2834+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
2835+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
2836+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
2837+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
2838+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
2839+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2840+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2841+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2842+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2843+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2844+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2845+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2846+
2847+#define MTK_WED_EXT_INT_STATUS 0x020
2848+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
2849+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
2850+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
2851+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
2852+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
2853+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2854+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2855+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2856+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2857+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2858+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
2859+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
2860+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
2861+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
2862+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
2863+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2864+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2865+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2866+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2867+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2868+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2869+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
2870+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
2871+
2872+#define MTK_WED_EXT_INT_MASK 0x028
2873+
2874+#define MTK_WED_STATUS 0x060
2875+#define MTK_WED_STATUS_TX GENMASK(15, 8)
2876+
2877+#define MTK_WED_TX_BM_CTRL 0x080
2878+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
2879+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
2880+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
2881+
2882+#define MTK_WED_TX_BM_BASE 0x084
2883+
2884+#define MTK_WED_TX_BM_TKID 0x088
2885+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
2886+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
2887+
2888+#define MTK_WED_TX_BM_BUF_LEN 0x08c
2889+
2890+#define MTK_WED_TX_BM_INTF 0x09c
2891+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
2892+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
2893+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
2894+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
2895+
2896+#define MTK_WED_TX_BM_DYN_THR 0x0a0
2897+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
2898+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
2899+
2900+#define MTK_WED_INT_STATUS 0x200
2901+#define MTK_WED_INT_MASK 0x204
2902+
2903+#define MTK_WED_GLO_CFG 0x208
2904+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
2905+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
2906+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
2907+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
2908+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2909+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
2910+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
2911+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2912+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2913+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2914+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2915+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2916+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2917+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
2918+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2919+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
2920+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
2921+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
2922+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
2923+
2924+#define MTK_WED_RESET_IDX 0x20c
2925+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
2926+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
2927+
2928+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
2929+
2930+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
2931+
2932+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2933+
2934+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
2935+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2936+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2937+
2938+#define MTK_WED_WPDMA_GLO_CFG 0x508
2939+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
2940+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
2941+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
2942+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2943+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2944+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2945+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
2946+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2947+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2948+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2949+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2950+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2951+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2952+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
2953+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2954+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
2955+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
2956+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
2957+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
2958+
2959+#define MTK_WED_WPDMA_RESET_IDX 0x50c
2960+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
2961+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
2962+
2963+#define MTK_WED_WPDMA_INT_CTRL 0x520
2964+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
2965+
2966+#define MTK_WED_WPDMA_INT_MASK 0x524
2967+
2968+#define MTK_WED_PCIE_CFG_BASE 0x560
2969+
2970+#define MTK_WED_PCIE_INT_TRIGGER 0x570
2971+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
2972+
2973+#define MTK_WED_WPDMA_CFG_BASE 0x580
2974+
2975+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2976+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2977+
2978+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2979+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2980+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2981+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2982+
2983+#define MTK_WED_WDMA_GLO_CFG 0xa04
2984+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2985+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2986+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2987+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2988+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2989+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
2990+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
2991+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
2992+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
2993+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
2994+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
2995+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
2996+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
2997+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
2998+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
2999+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
3000+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
3001+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
3002+
3003+#define MTK_WED_WDMA_RESET_IDX 0xa08
3004+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
3005+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
3006+
3007+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
3008+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
3009+
3010+#define MTK_WED_WDMA_INT_CTRL 0xa2c
3011+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
3012+
3013+#define MTK_WED_WDMA_OFFSET0 0xaa4
3014+#define MTK_WED_WDMA_OFFSET1 0xaa8
3015+
3016+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
3017+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
3018+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
3019+
3020+#define MTK_WED_RING_OFS_BASE 0x00
3021+#define MTK_WED_RING_OFS_COUNT 0x04
3022+#define MTK_WED_RING_OFS_CPU_IDX 0x08
3023+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
3024+
3025+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
3026+
3027+#define MTK_WDMA_GLO_CFG 0x204
3028+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
3029+
3030+#define MTK_WDMA_RESET_IDX 0x208
3031+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
3032+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
3033+
3034+#define MTK_WDMA_INT_MASK 0x228
3035+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
3036+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
3037+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
3038+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
3039+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
3040+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
3041+
3042+#define MTK_WDMA_INT_GRP1 0x250
3043+#define MTK_WDMA_INT_GRP2 0x254
3044+
3045+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
3046+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
3047+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
3048+
3049+/* DMA channel mapping */
3050+#define HIFSYS_DMA_AG_MAP 0x008
3051+
3052+#endif
3053diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
3054index 9f64504ac..35998b1a7 100644
3055--- a/include/linux/netdevice.h
3056+++ b/include/linux/netdevice.h
3057@@ -835,6 +835,7 @@ enum net_device_path_type {
3058 DEV_PATH_BRIDGE,
3059 DEV_PATH_PPPOE,
3060 DEV_PATH_DSA,
3061+ DEV_PATH_MTK_WDMA,
3062 };
3063
3064 struct net_device_path {
3065@@ -860,6 +861,12 @@ struct net_device_path {
3066 int port;
3067 u16 proto;
3068 } dsa;
3069+ struct {
3070+ u8 wdma_idx;
3071+ u8 queue;
3072+ u16 wcid;
3073+ u8 bss;
3074+ } mtk_wdma;
3075 };
3076 };
3077
3078diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
3079new file mode 100644
3080index 000000000..7e00cca06
3081--- /dev/null
3082+++ b/include/linux/soc/mediatek/mtk_wed.h
3083@@ -0,0 +1,131 @@
3084+#ifndef __MTK_WED_H
3085+#define __MTK_WED_H
3086+
3087+#include <linux/kernel.h>
3088+#include <linux/rcupdate.h>
3089+#include <linux/regmap.h>
3090+#include <linux/pci.h>
3091+
3092+#define MTK_WED_TX_QUEUES 2
3093+
3094+struct mtk_wed_hw;
3095+struct mtk_wdma_desc;
3096+
3097+struct mtk_wed_ring {
3098+ struct mtk_wdma_desc *desc;
3099+ dma_addr_t desc_phys;
3100+ int size;
3101+
3102+ u32 reg_base;
3103+ void __iomem *wpdma;
3104+};
3105+
3106+struct mtk_wed_device {
3107+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3108+ const struct mtk_wed_ops *ops;
3109+ struct device *dev;
3110+ struct mtk_wed_hw *hw;
3111+ bool init_done, running;
3112+ int wdma_idx;
3113+ int irq;
3114+
3115+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
3116+ struct mtk_wed_ring txfree_ring;
3117+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
3118+
3119+ struct {
3120+ int size;
3121+ void **pages;
3122+ struct mtk_wdma_desc *desc;
3123+ dma_addr_t desc_phys;
3124+ } buf_ring;
3125+
3126+ /* filled by driver: */
3127+ struct {
3128+ struct pci_dev *pci_dev;
3129+
3130+ u32 wpdma_phys;
3131+
3132+ u16 token_start;
3133+ unsigned int nbuf;
3134+
3135+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
3136+ int (*offload_enable)(struct mtk_wed_device *wed);
3137+ void (*offload_disable)(struct mtk_wed_device *wed);
3138+ } wlan;
3139+#endif
3140+};
3141+
3142+struct mtk_wed_ops {
3143+ int (*attach)(struct mtk_wed_device *dev);
3144+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
3145+ void __iomem *regs);
3146+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
3147+ void __iomem *regs);
3148+ void (*detach)(struct mtk_wed_device *dev);
3149+
3150+ void (*stop)(struct mtk_wed_device *dev);
3151+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
3152+ void (*reset_dma)(struct mtk_wed_device *dev);
3153+
3154+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
3155+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
3156+
3157+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
3158+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
3159+};
3160+
3161+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
3162+
3163+static inline int
3164+mtk_wed_device_attach(struct mtk_wed_device *dev)
3165+{
3166+ int ret = -ENODEV;
3167+
3168+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3169+ rcu_read_lock();
3170+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
3171+ if (dev->ops)
3172+ ret = dev->ops->attach(dev);
3173+ else
3174+ rcu_read_unlock();
3175+
3176+ if (ret)
3177+ dev->ops = NULL;
3178+#endif
3179+
3180+ return ret;
3181+}
3182+
3183+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
3184+#define mtk_wed_device_active(_dev) !!(_dev)->ops
3185+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
3186+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
3187+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
3188+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
3189+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
3190+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
3191+#define mtk_wed_device_reg_read(_dev, _reg) \
3192+ (_dev)->ops->reg_read(_dev, _reg)
3193+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
3194+ (_dev)->ops->reg_write(_dev, _reg, _val)
3195+#define mtk_wed_device_irq_get(_dev, _mask) \
3196+ (_dev)->ops->irq_get(_dev, _mask)
3197+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
3198+ (_dev)->ops->irq_set_mask(_dev, _mask)
3199+#else
3200+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
3201+{
3202+ return false;
3203+}
3204+#define mtk_wed_device_detach(_dev) do {} while (0)
3205+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
3206+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
3206+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
3208+#define mtk_wed_device_reg_read(_dev, _reg) 0
3209+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
3210+#define mtk_wed_device_irq_get(_dev, _mask) 0
3211+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
3212+#endif
3213+
3214+#endif
3215diff --git a/net/core/dev.c b/net/core/dev.c
3216index 4f0edb218..031ac7c6f 100644
3217--- a/net/core/dev.c
3218+++ b/net/core/dev.c
3219@@ -675,6 +675,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
3220 if (WARN_ON_ONCE(last_dev == ctx.dev))
3221 return -1;
3222 }
3223+
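+	/* An ndo_fill_forward_path() handler may clear ctx.dev when the
+	 * path terminates in offload hardware (e.g. MediaTek WDMA) rather
+	 * than in another net_device; stop walking in that case.
+	 */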
3224+ if (!ctx.dev)
3225+ return ret;
3226+
3227 path = dev_fwd_path(stack);
3228 if (!path)
3229 return -1;
3230--
32312.18.0
3232