From 342fdc50b761309e75974554cdcf790a2d09e134 Mon Sep 17 00:00:00 2001
From: Sujuan Chen <sujuan.chen@mediatek.com>
Date: Thu, 2 Jun 2022 15:32:07 +0800
Subject: [PATCH 4/8] 9993-add-wed

Add Wireless Ethernet Dispatch (WED) support for MT7622: a WED core
driver (mtk_wed) that lets PPE-offloaded flows be handed to the WiFi
WDMA rings directly, a PPE rework for software-managed L2 (bridge)
flows with wraparound-safe idle accounting, plus the matching device
tree nodes, Kconfig/Makefile wiring and flow-offload glue.

Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
---
 arch/arm64/boot/dts/mediatek/mt7622.dtsi      |  32 +-
 drivers/net/ethernet/mediatek/Kconfig         |   4 +
 drivers/net/ethernet/mediatek/Makefile        |   5 +
 drivers/net/ethernet/mediatek/mtk_eth_soc.c   | 136 ++-
 drivers/net/ethernet/mediatek/mtk_eth_soc.h   |  14 +-
 drivers/net/ethernet/mediatek/mtk_ppe.c       | 373 +++++++-
 drivers/net/ethernet/mediatek/mtk_ppe.h       |  89 +-
 .../net/ethernet/mediatek/mtk_ppe_debugfs.c   |   4 +-
 .../net/ethernet/mediatek/mtk_ppe_offload.c   | 167 +++-
 drivers/net/ethernet/mediatek/mtk_wed.c       | 876 ++++++++++++++++++
 drivers/net/ethernet/mediatek/mtk_wed.h       | 135 +++
 .../net/ethernet/mediatek/mtk_wed_debugfs.c   | 175 ++++
 drivers/net/ethernet/mediatek/mtk_wed_ops.c   |   8 +
 drivers/net/ethernet/mediatek/mtk_wed_regs.h  | 251 +++++
 include/linux/netdevice.h                     |   7 +
 include/linux/soc/mediatek/mtk_wed.h          | 131 +++
 net/core/dev.c                                |   4 +
 17 files changed, 2283 insertions(+), 128 deletions(-)
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Kconfig
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/Makefile
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.c
 mode change 100755 => 100644 drivers/net/ethernet/mediatek/mtk_eth_soc.h
 mode change 100644 => 100755 drivers/net/ethernet/mediatek/mtk_ppe.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed.h
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_ops.c
 create mode 100644 drivers/net/ethernet/mediatek/mtk_wed_regs.h
 create mode 100644 include/linux/soc/mediatek/mtk_wed.h

diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
index 369e01389..d0fbc367e 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
@@ -338,7 +338,7 @@
 		};
 
 		cci_control2: slave-if@5000 {
-			compatible = "arm,cci-400-ctrl-if";
+			compatible = "arm,cci-400-ctrl-if", "syscon";
 			interface-type = "ace";
 			reg = <0x5000 0x1000>;
 		};
@@ -920,6 +920,11 @@
 		};
 	};
 
+	hifsys: syscon@1af00000 {
+		compatible = "mediatek,mt7622-hifsys", "syscon";
+		reg = <0 0x1af00000 0 0x70>;
+	};
+
 	ethsys: syscon@1b000000 {
 		compatible = "mediatek,mt7622-ethsys",
 			     "syscon";
@@ -938,6 +943,26 @@
 		#dma-cells = <1>;
 	};
 
+	pcie_mirror: pcie-mirror@10000400 {
+		compatible = "mediatek,mt7622-pcie-mirror",
+			     "syscon";
+		reg = <0 0x10000400 0 0x10>;
+	};
+
+	wed0: wed@1020a000 {
+		compatible = "mediatek,mt7622-wed",
+			     "syscon";
+		reg = <0 0x1020a000 0 0x1000>;
+		interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_LOW>;
+	};
+
+	wed1: wed@1020b000 {
+		compatible = "mediatek,mt7622-wed",
+			     "syscon";
+		reg = <0 0x1020b000 0 0x1000>;
+		interrupts = <GIC_SPI 215 IRQ_TYPE_LEVEL_LOW>;
+	};
+
 	eth: ethernet@1b100000 {
 		compatible = "mediatek,mt7622-eth",
 			     "mediatek,mt2701-eth",
@@ -964,6 +989,11 @@
 		power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>;
 		mediatek,ethsys = <&ethsys>;
 		mediatek,sgmiisys = <&sgmiisys>;
+		mediatek,cci-control = <&cci_control2>;
+		mediatek,wed = <&wed0>, <&wed1>;
+		mediatek,pcie-mirror = <&pcie_mirror>;
+		mediatek,hifsys = <&hifsys>;
+		dma-coherent;
 		#address-cells = <1>;
 		#size-cells = <0>;
 		status = "disabled";
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
old mode 100755
new mode 100644
index 42e6b38d2..8ab6615a3
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -7,6 +7,10 @@ config NET_VENDOR_MEDIATEK
 
 if NET_VENDOR_MEDIATEK
 
+config NET_MEDIATEK_SOC_WED
+	depends on ARCH_MEDIATEK || COMPILE_TEST
+	def_bool NET_MEDIATEK_SOC != n
+
 config NET_MEDIATEK_SOC
 	tristate "MediaTek SoC Gigabit Ethernet support"
 	select PHYLINK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
old mode 100755
new mode 100644
index 0a6af99f1..3528f1b3c
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -6,4 +6,9 @@
 obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
 mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
 	     mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
 obj-$(CONFIG_NET_MEDIATEK_HNAT) += mtk_hnat/
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
old mode 100755
new mode 100644
index 819d8a0be..2121335a1
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -20,12 +21,14 @@
 #include <linux/pinctrl/devinfo.h>
 #include <linux/phylink.h>
 #include <linux/gpio/consumer.h>
+#include <linux/bitfield.h>
 #include <net/dsa.h>
 
 #include "mtk_eth_soc.h"
 #include "mtk_eth_dbg.h"
 #include "mtk_eth_reset.h"
 #include "mtk_hnat/hnat.h"
+#include "mtk_wed.h"
 
 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
 #include "mtk_hnat/nf_hnat_mtk.h"
@@ -1737,6 +1740,7 @@ static int mtk_poll_rx(struct napi_struc
 	struct net_device *netdev = NULL;
 	unsigned int pktlen;
 	dma_addr_t dma_addr = 0;
+	u32 hash, reason;
 	int mac = 0;
 
 	if (eth->hwlro)
@@ -1827,6 +1831,17 @@ static int mtk_poll_rx(struct napi_struc
 		skb_checksum_none_assert(skb);
 		skb->protocol = eth_type_trans(skb, netdev);
 
+		hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+		if (hash != MTK_RXD4_FOE_ENTRY) {
+			hash = jhash_1word(hash, 0);
+			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
+		}
+
+		reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+		if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+			mtk_ppe_check_skb(eth->ppe, skb,
+					  trxd.rxd4 & MTK_RXD4_FOE_ENTRY);
+
 		if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
 			if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ||
 			    MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V3)) {
@@ -3243,7 +3258,7 @@ static int mtk_stop(struct net_device *d
 	mtk_dma_free(eth);
 
 	if (eth->soc->offload_version)
-		mtk_ppe_stop(&eth->ppe);
+		mtk_ppe_stop(eth->ppe);
 
 	return 0;
 }
@@ -4217,6 +4278,22 @@ static int mtk_probe(struct platform_dev
 		}
 	}
 
+	for (i = 0;; i++) {
+		struct device_node *np = of_parse_phandle(pdev->dev.of_node,
+							  "mediatek,wed", i);
+		static const u32 wdma_regs[] = {
+			MTK_WDMA0_BASE,
+			MTK_WDMA1_BASE
+		};
+		void __iomem *wdma;
+
+		if (!np || i >= ARRAY_SIZE(wdma_regs))
+			break;
+
+		wdma = eth->base + wdma_regs[i];
+		mtk_wed_add_hw(np, eth, wdma, i);
+	}
+
 	for (i = 0; i < MTK_PDMA_IRQ_NUM; i++)
 		eth->irq_pdma[i] = platform_get_irq(pdev, i);
 
@@ -4320,10 +4397,11 @@ static int mtk_probe(struct platform_dev
 	}
 
 	if (eth->soc->offload_version) {
-		err = mtk_ppe_init(&eth->ppe, eth->dev,
-				   eth->base + MTK_ETH_PPE_BASE, 2);
-		if (err)
+		eth->ppe = mtk_ppe_init(eth, eth->base + MTK_ETH_PPE_BASE, 2);
+		if (!eth->ppe) {
+			err = -ENOMEM;
 			goto err_free_dev;
+		}
 
 		err = mtk_eth_offload_init(eth);
 		if (err)
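
The RX hunk above pulls two values out of rxd4 with plain mask arithmetic: the FOE entry id in the low bits (an all-ones value means "no entry") and the PPE CPU reason in a mid-word field, both extracted via FIELD_GET(). Below is a self-contained userspace sketch of the same pattern; the mask values mirror this driver generation but should be treated as illustrative rather than authoritative:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for MTK_RXD4_FOE_ENTRY / MTK_RXD4_PPE_CPU_REASON. */
#define RXD4_FOE_ENTRY	0x3fffu		/* GENMASK(13, 0) */
#define RXD4_CPU_REASON	(0x1fu << 18)	/* GENMASK(22, 18) */

/* Minimal FIELD_GET(): mask the value, shift down by the mask's LSB. */
static uint32_t field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t rxd4 = (7u << 18) | 0x0123;	/* reason 7, FOE entry 0x123 */

	if ((rxd4 & RXD4_FOE_ENTRY) != RXD4_FOE_ENTRY)	/* all-ones = invalid */
		printf("foe=0x%x reason=%u\n", rxd4 & RXD4_FOE_ENTRY,
		       field_get(RXD4_CPU_REASON, rxd4));
	return 0;
}
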
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
old mode 100755
new mode 100644
index 349f98503..b52378bd6
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -549,6 +549,9 @@
 #define RX_DMA_SPORT_MASK	0x7
 #define RX_DMA_SPORT_MASK_V2	0xf
 
+#define MTK_WDMA0_BASE		0x2800
+#define MTK_WDMA1_BASE		0x2c00
+
 /* QDMA descriptor txd4 */
 #define TX_DMA_CHKSUM		(0x7 << 29)
 #define TX_DMA_TSO		BIT(28)
@@ -1596,7 +1607,7 @@ struct mtk_eth {
 	spinlock_t			syscfg0_lock;
 	struct timer_list		mtk_dma_monitor_timer;
 
-	struct mtk_ppe			ppe;
+	struct mtk_ppe			*ppe;
 	struct rhashtable		flow_table;
 };
 
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
old mode 100644
new mode 100755
index 66298e223..3d75c22be
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -6,9 +6,22 @@
 #include <linux/iopoll.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
 #include "mtk_ppe.h"
 #include "mtk_ppe_regs.h"
 
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+	.head_offset = offsetof(struct mtk_flow_entry, l2_node),
+	.key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+	.key_len = offsetof(struct mtk_foe_bridge, key_end),
+	.automatic_shrinking = true,
+};
+
 static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
 {
 	writel(val, ppe->base + reg);
@@ -41,6 +54,11 @@ static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
 	return ppe_m32(ppe, reg, val, 0);
 }
 
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
 static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
 {
 	int ret;
@@ -76,13 +94,6 @@ static u32 mtk_ppe_hash_entry(struct mtk_foe_entry *e)
 	u32 hash;
 
 	switch (FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, e->ib1)) {
-	case MTK_PPE_PKT_TYPE_BRIDGE:
-		hv1 = e->bridge.src_mac_lo;
-		hv1 ^= ((e->bridge.src_mac_hi & 0xffff) << 16);
-		hv2 = e->bridge.src_mac_hi >> 16;
-		hv2 ^= e->bridge.dest_mac_lo;
-		hv3 = e->bridge.dest_mac_hi;
-		break;
 	case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
 	case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
 		hv1 = e->ipv4.orig.ports;
@@ -122,6 +133,9 @@ mtk_foe_entry_l2(struct mtk_foe_entry *entry)
 {
 	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return &entry->bridge.l2;
+
 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 		return &entry->ipv6.l2;
 
@@ -133,6 +147,9 @@ mtk_foe_entry_ib2(struct mtk_foe_entry *entry)
 {
 	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
 
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return &entry->bridge.ib2;
+
 	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
 		return &entry->ipv6.ib2;
 
@@ -167,7 +184,12 @@ int mtk_foe_entry_prepare(struct mtk_foe_entry *entry, int type, int l4proto,
 	if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
 		entry->ipv6.ports = ports_pad;
 
-	if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+		ether_addr_copy(entry->bridge.src_mac, src_mac);
+		ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+		entry->bridge.ib2 = val;
+		l2 = &entry->bridge.l2;
+	} else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
 		entry->ipv6.ib2 = val;
 		l2 = &entry->ipv6.l2;
 	} else {
@@ -329,32 +351,168 @@ int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid)
 	return 0;
 }
 
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+			   int bss, int wcid)
+{
+	struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(entry);
+	u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+	*ib2 &= ~MTK_FOE_IB2_PORT_MG;
+	*ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+	if (wdma_idx)
+		*ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+
+	l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+		    FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+
+	return 0;
+}
+
 static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
 {
 	return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
 	       FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
 }
 
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-			 u16 timestamp)
+static bool
+mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
+{
+	int type, len;
+
+	if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+		return false;
+
+	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+	if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+		len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+	else
+		len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+	return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct hlist_head *head;
+	struct hlist_node *tmp;
+
+	if (entry->type == MTK_FLOW_TYPE_L2) {
+		rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+				       mtk_flow_l2_ht_params);
+
+		head = &entry->l2_flows;
+		hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+			__mtk_foe_entry_clear(ppe, entry);
+		return;
+	}
+
+	hlist_del_init(&entry->list);
+	if (entry->hash != 0xffff) {
+		ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
+		ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
+							      MTK_FOE_STATE_INVALID);
+		dma_wmb();
+		mtk_ppe_cache_clear(ppe);
+	}
+	entry->hash = 0xffff;
+
+	if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+		return;
+
+	hlist_del_init(&entry->l2_data.list);
+	kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+	u16 timestamp;
+	u16 now;
+
+	now = mtk_eth_timestamp(ppe->eth) & MTK_FOE_IB1_BIND_TIMESTAMP;
+	timestamp = ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+
+	if (timestamp > now)
+		return MTK_FOE_IB1_BIND_TIMESTAMP + 1 - timestamp + now;
+	else
+		return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
 {
+	struct mtk_flow_entry *cur;
 	struct mtk_foe_entry *hwe;
-	u32 hash;
+	struct hlist_node *tmp;
+	int idle;
+
+	idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+	hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+		int cur_idle;
+		u32 ib1;
+
+		hwe = &ppe->foe_table[cur->hash];
+		ib1 = READ_ONCE(hwe->ib1);
+
+		if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+			cur->hash = 0xffff;
+			__mtk_foe_entry_clear(ppe, cur);
+			continue;
+		}
+
+		cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+		if (cur_idle >= idle)
+			continue;
+
+		idle = cur_idle;
+		entry->data.ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+		entry->data.ib1 |= hwe->ib1 & MTK_FOE_IB1_BIND_TIMESTAMP;
+	}
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	struct mtk_foe_entry *hwe;
+	struct mtk_foe_entry foe;
+
+	spin_lock_bh(&ppe_lock);
+
+	if (entry->type == MTK_FLOW_TYPE_L2) {
+		mtk_flow_entry_update_l2(ppe, entry);
+		goto out;
+	}
+
+	if (entry->hash == 0xffff)
+		goto out;
+
+	hwe = &ppe->foe_table[entry->hash];
+	memcpy(&foe, hwe, sizeof(foe));
+	if (!mtk_flow_entry_match(entry, &foe)) {
+		entry->hash = 0xffff;
+		goto out;
+	}
+
+	entry->data.ib1 = foe.ib1;
+
+out:
+	spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+		       u16 hash)
+{
+	struct mtk_foe_entry *hwe;
+	u16 timestamp;
 
+	timestamp = mtk_eth_timestamp(ppe->eth);
 	timestamp &= MTK_FOE_IB1_BIND_TIMESTAMP;
 	entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
 	entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, timestamp);
 
-	hash = mtk_ppe_hash_entry(entry);
 	hwe = &ppe->foe_table[hash];
-	if (!mtk_foe_entry_usable(hwe)) {
-		hwe++;
-		hash++;
-
-		if (!mtk_foe_entry_usable(hwe))
-			return -ENOSPC;
-	}
-
 	memcpy(&hwe->data, &entry->data, sizeof(hwe->data));
 	wmb();
 	hwe->ib1 = entry->ib1;
@@ -362,32 +519,201 @@ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
 	dma_wmb();
 
 	mtk_ppe_cache_clear(ppe);
+}
 
-	return hash;
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	spin_lock_bh(&ppe_lock);
+	__mtk_foe_entry_clear(ppe, entry);
+	spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	entry->type = MTK_FLOW_TYPE_L2;
+
+	return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node,
+				      mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	int type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->data.ib1);
+	u32 hash;
+
+	if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+		return mtk_foe_entry_commit_l2(ppe, entry);
+
+	hash = mtk_ppe_hash_entry(&entry->data);
+	entry->hash = 0xffff;
+	spin_lock_bh(&ppe_lock);
+	hlist_add_head(&entry->list, &ppe->foe_flow[hash / 4]);
+	spin_unlock_bh(&ppe_lock);
+
+	return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+			     u16 hash)
+{
+	struct mtk_flow_entry *flow_info;
+	struct mtk_foe_entry foe, *hwe;
+	struct mtk_foe_mac_info *l2;
+	u32 ib1_mask = MTK_FOE_IB1_PACKET_TYPE | MTK_FOE_IB1_UDP;
+	int type;
+
+	flow_info = kzalloc(offsetof(struct mtk_flow_entry, l2_data.end),
+			    GFP_ATOMIC);
+	if (!flow_info)
+		return;
+
+	flow_info->l2_data.base_flow = entry;
+	flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+	flow_info->hash = hash;
+	hlist_add_head(&flow_info->list, &ppe->foe_flow[hash / 4]);
+	hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+	hwe = &ppe->foe_table[hash];
+	memcpy(&foe, hwe, sizeof(foe));
+	foe.ib1 &= ib1_mask;
+	foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+	l2 = mtk_foe_entry_l2(&foe);
+	memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+	type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, foe.ib1);
+	if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+		memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+	else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+		l2->etype = ETH_P_IPV6;
+
+	*mtk_foe_entry_ib2(&foe) = entry->data.bridge.ib2;
+
+	__mtk_foe_entry_commit(ppe, &foe, hash);
 }
 
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+	struct hlist_head *head = &ppe->foe_flow[hash / 4];
+	struct mtk_foe_entry *hwe = &ppe->foe_table[hash];
+	struct mtk_flow_entry *entry;
+	struct mtk_foe_bridge key = {};
+	struct hlist_node *n;
+	struct ethhdr *eh;
+	bool found = false;
+	u8 *tag;
+
+	spin_lock_bh(&ppe_lock);
+
+	if (hash >= MTK_PPE_ENTRIES)
+		goto out;
+
+	if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+		goto out;
+
+	hlist_for_each_entry_safe(entry, n, head, list) {
+		if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+			if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+				     MTK_FOE_STATE_BIND))
+				continue;
+
+			entry->hash = 0xffff;
+			__mtk_foe_entry_clear(ppe, entry);
+			continue;
+		}
+
+		if (found || !mtk_flow_entry_match(entry, hwe)) {
+			if (entry->hash != 0xffff)
+				entry->hash = 0xffff;
+			continue;
+		}
+
+		entry->hash = hash;
+		__mtk_foe_entry_commit(ppe, &entry->data, hash);
+		found = true;
+	}
+
+	if (found)
+		goto out;
+
+	if (!skb)
+		goto out;
+
+	eh = eth_hdr(skb);
+	ether_addr_copy(key.dest_mac, eh->h_dest);
+	ether_addr_copy(key.src_mac, eh->h_source);
+	tag = skb->data - 2;
+	key.vlan = 0;
+	switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+	case htons(ETH_P_XDSA):
+		if (!netdev_uses_dsa(skb->dev) ||
+		    skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+			goto out;
+
+		tag += 4;
+		if (get_unaligned_be16(tag) != ETH_P_8021Q)
+			break;
+
+		fallthrough;
+#endif
+	case htons(ETH_P_8021Q):
+		key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+		break;
+	default:
+		break;
+	}
+
+	entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+	if (!entry)
+		goto out;
+
+	mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+	spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+	mtk_flow_entry_update(ppe, entry);
+
+	return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
 		 int version)
 {
+	struct device *dev = eth->dev;
 	struct mtk_foe_entry *foe;
+	struct mtk_ppe *ppe;
+
+	ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+	if (!ppe)
+		return NULL;
+
+	rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
 
 	/* need to allocate a separate device, since it PPE DMA access is
 	 * not coherent.
 	 */
 	ppe->base = base;
+	ppe->eth = eth;
 	ppe->dev = dev;
 	ppe->version = version;
 
 	foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
 				  &ppe->foe_phys, GFP_KERNEL);
 	if (!foe)
-		return -ENOMEM;
+		return NULL;
 
 	ppe->foe_table = foe;
 
 	mtk_ppe_debugfs_init(ppe);
 
-	return 0;
+	return ppe;
 }
 
 static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
@@ -395,7 +717,7 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
 	static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
 	int i, k;
 
-	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
+	memset(ppe->foe_table, 0, MTK_PPE_ENTRIES * sizeof(*ppe->foe_table));
 
 	if (!IS_ENABLED(CONFIG_SOC_MT7621))
 		return;
@@ -443,7 +765,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
 	      MTK_PPE_FLOW_CFG_IP4_NAT |
 	      MTK_PPE_FLOW_CFG_IP4_NAPT |
 	      MTK_PPE_FLOW_CFG_IP4_DSLITE |
-	      MTK_PPE_FLOW_CFG_L2_BRIDGE |
 	      MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
 	ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
 
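__mtk_foe_entry_idle_time() above has to cope with the hardware bind timestamp being a narrow free-running counter that wraps: when the stored stamp is numerically larger than the current counter value, the counter has wrapped exactly once since the entry was bound. A self-contained sketch of the same arithmetic, using an assumed 15-bit mask in place of MTK_FOE_IB1_BIND_TIMESTAMP (the real mask width is hardware-defined):

#include <stdio.h>

/* TS_MASK stands in for MTK_FOE_IB1_BIND_TIMESTAMP; 15 bits is only an
 * assumption for this demo. */
#define TS_MASK 0x7fff

/* Wraparound-safe idle time, mirroring __mtk_foe_entry_idle_time(). */
static unsigned int idle_time(unsigned int now, unsigned int stamp)
{
	now &= TS_MASK;
	stamp &= TS_MASK;

	if (stamp > now)	/* counter wrapped since the entry was bound */
		return TS_MASK + 1 - stamp + now;
	return now - stamp;
}

int main(void)
{
	printf("%u\n", idle_time(100, 50));          /* 50 */
	printf("%u\n", idle_time(10, TS_MASK - 5));  /* 16: wrapped case */
	return 0;
}
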
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 242fb8f2a..1f5cf1c9a 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -6,6 +6,7 @@
 
 #include <linux/kernel.h>
 #include <linux/bitfield.h>
+#include <linux/rhashtable.h>
 
 #define MTK_ETH_PPE_BASE		0xc00
 
@@ -48,9 +49,9 @@ enum {
 #define MTK_FOE_IB2_DEST_PORT		GENMASK(7, 5)
 #define MTK_FOE_IB2_MULTICAST		BIT(8)
 
-#define MTK_FOE_IB2_WHNAT_QID2		GENMASK(13, 12)
-#define MTK_FOE_IB2_WHNAT_DEVIDX	BIT(16)
-#define MTK_FOE_IB2_WHNAT_NAT		BIT(17)
+#define MTK_FOE_IB2_WDMA_QID2		GENMASK(13, 12)
+#define MTK_FOE_IB2_WDMA_DEVIDX		BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO		BIT(17)
 
 #define MTK_FOE_IB2_PORT_MG		GENMASK(17, 12)
 
@@ -58,9 +59,9 @@ enum {
 
 #define MTK_FOE_IB2_DSCP		GENMASK(31, 24)
 
-#define MTK_FOE_VLAN2_WHNAT_BSS		GEMMASK(5, 0)
-#define MTK_FOE_VLAN2_WHNAT_WCID	GENMASK(13, 6)
-#define MTK_FOE_VLAN2_WHNAT_RING	GENMASK(15, 14)
+#define MTK_FOE_VLAN2_WINFO_BSS		GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID	GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING	GENMASK(15, 14)
 
 enum {
 	MTK_FOE_STATE_INVALID,
@@ -84,19 +85,16 @@ struct mtk_foe_mac_info {
 	u16 src_mac_lo;
 };
 
+/* software-only entry type */
 struct mtk_foe_bridge {
-	u32 dest_mac_hi;
+	u8 dest_mac[ETH_ALEN];
+	u8 src_mac[ETH_ALEN];
+	u16 vlan;
 
-	u16 src_mac_lo;
-	u16 dest_mac_lo;
-
-	u32 src_mac_hi;
+	struct {} key_end;
 
 	u32 ib2;
 
-	u32 _rsv[5];
-
-	u32 udf_tsid;
 	struct mtk_foe_mac_info l2;
 };
 
@@ -235,7 +233,37 @@ enum {
 	MTK_PPE_CPU_REASON_INVALID		= 0x1f,
 };
 
+enum {
+	MTK_FLOW_TYPE_L4,
+	MTK_FLOW_TYPE_L2,
+	MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+	union {
+		struct hlist_node list;
+		struct {
+			struct rhash_head l2_node;
+			struct hlist_head l2_flows;
+		};
+	};
+	u8 type;
+	s8 wed_index;
+	u16 hash;
+	union {
+		struct mtk_foe_entry data;
+		struct {
+			struct mtk_flow_entry *base_flow;
+			struct hlist_node list;
+			struct {} end;
+		} l2_data;
+	};
+	struct rhash_head node;
+	unsigned long cookie;
+};
+
 struct mtk_ppe {
+	struct mtk_eth *eth;
 	struct device *dev;
 	void __iomem *base;
 	int version;
@@ -243,19 +271,35 @@ struct mtk_ppe {
 	struct mtk_foe_entry *foe_table;
 	dma_addr_t foe_phys;
 
+	u16 foe_check_time[MTK_PPE_ENTRIES];
+	struct hlist_head foe_flow[MTK_PPE_ENTRIES / 2];
+
+	struct rhashtable l2_flows;
+
 	void *acct_table;
 };
 
-int mtk_ppe_init(struct mtk_ppe *ppe, struct device *dev, void __iomem *base,
-		 int version);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
 int mtk_ppe_start(struct mtk_ppe *ppe);
 int mtk_ppe_stop(struct mtk_ppe *ppe);
 
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
 static inline void
-mtk_foe_entry_clear(struct mtk_ppe *ppe, u16 hash)
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
 {
-	ppe->foe_table[hash].ib1 = 0;
-	dma_wmb();
+	u16 now, diff;
+
+	if (!ppe)
+		return;
+
+	now = (u16)jiffies;
+	diff = now - ppe->foe_check_time[hash];
+	if (diff < HZ / 10)
+		return;
+
+	ppe->foe_check_time[hash] = now;
+	__mtk_ppe_check_skb(ppe, skb, hash);
 }
 
 static inline int
@@ -281,8 +325,11 @@ int mtk_foe_entry_set_ipv6_tuple(struct mtk_foe_entry *entry,
 int mtk_foe_entry_set_dsa(struct mtk_foe_entry *entry, int port);
 int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
 int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
-int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
-			 u16 timestamp);
+int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
+			   int bss, int wcid);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
 int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
 
 #endif
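
struct mtk_flow_entry above overlays three roles in one type: an L4 flow (hlist membership plus the full FOE data), an L2 base flow (rhashtable node plus a list of subflows), and a truncated L2 subflow that mtk_foe_entry_commit_subflow() allocates only up to l2_data.end via offsetof(). A compact model of that size trick (GNU C zero-size struct as an end marker; the stand-in field sizes are arbitrary, not the driver's real layout):

#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>

/* Simplified model of struct mtk_flow_entry: subflow entries only need
 * the members up to l2_data.end, so they get a shorter allocation. */
struct flow_entry {
	unsigned char type;
	unsigned short hash;
	union {
		unsigned char data[64];		/* stand-in for mtk_foe_entry */
		struct {
			struct flow_entry *base_flow;
			struct {} end;		/* allocation end marker */
		} l2_data;
	};
};

int main(void)
{
	size_t full = sizeof(struct flow_entry);
	size_t sub  = offsetof(struct flow_entry, l2_data.end);
	struct flow_entry *e = calloc(1, sub);	/* truncated subflow alloc */

	printf("full=%zu subflow=%zu\n", full, sub);
	free(e);
	return 0;
}
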
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
index d4b482340..a591ab1fd 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -32,7 +32,6 @@ static const char *mtk_foe_pkt_type_str(int type)
 	static const char * const type_str[] = {
 		[MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
 		[MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
-		[MTK_PPE_PKT_TYPE_BRIDGE] = "L2",
 		[MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
 		[MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
 		[MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
@@ -207,6 +206,9 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
 	struct dentry *root;
 
 	root = debugfs_create_dir("mtk_ppe", NULL);
+	if (!root)
+		return -ENOMEM;
+
 	debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
 	debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
 
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index 4294f0c74..d4a012608 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -11,6 +11,7 @@
 #include <net/pkt_cls.h>
 #include <net/dsa.h>
 #include "mtk_eth_soc.h"
+#include "mtk_wed.h"
 
 struct mtk_flow_data {
 	struct ethhdr eth;
@@ -30,6 +31,8 @@ struct mtk_flow_data {
 	__be16 src_port;
 	__be16 dst_port;
 
+	u16 vlan_in;
+
 	struct {
 		u16 id;
 		__be16 proto;
@@ -41,12 +44,6 @@ struct mtk_flow_data {
 	} pppoe;
 };
 
-struct mtk_flow_entry {
-	struct rhash_head node;
-	unsigned long cookie;
-	u16 hash;
-};
-
 static const struct rhashtable_params mtk_flow_ht_params = {
 	.head_offset = offsetof(struct mtk_flow_entry, node),
 	.key_offset = offsetof(struct mtk_flow_entry, cookie),
@@ -54,12 +51,6 @@ static const struct rhashtable_params mtk_flow_ht_params = {
 	.automatic_shrinking = true,
 };
 
-static u32
-mtk_eth_timestamp(struct mtk_eth *eth)
-{
-	return mtk_r32(eth, 0x0010) & MTK_FOE_IB1_BIND_TIMESTAMP;
-}
-
 static int
 mtk_flow_set_ipv4_addr(struct mtk_foe_entry *foe, struct mtk_flow_data *data,
 		       bool egress)
@@ -94,6 +85,35 @@ mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
 	memcpy(dest, src, act->mangle.mask ? 2 : 4);
 }
 
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+	struct net_device_path_ctx ctx = {
+		.dev = dev,
+	};
+	struct net_device_path path = {};
+
+	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+		return -1;
+
+	if (!dev->netdev_ops->ndo_fill_forward_path)
+		return -1;
+
+	memcpy(ctx.daddr, addr, sizeof(ctx.daddr));
+	if (dev->netdev_ops->ndo_fill_forward_path(&ctx, &path))
+		return -1;
+
+	if (path.type != DEV_PATH_MTK_WDMA)
+		return -1;
+
+	info->wdma_idx = path.mtk_wdma.wdma_idx;
+	info->queue = path.mtk_wdma.queue;
+	info->bss = path.mtk_wdma.bss;
+	info->wcid = path.mtk_wdma.wcid;
+
+	return 0;
+}
+
 
 static int
 mtk_flow_mangle_ports(const struct flow_action_entry *act,
@@ -163,10 +183,20 @@ mtk_flow_get_dsa_port(struct net_device **dev)
 
 static int
 mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
-			   struct net_device *dev)
+			   struct net_device *dev, const u8 *dest_mac,
+			   int *wed_index)
 {
+	struct mtk_wdma_info info = {};
 	int pse_port, dsa_port;
 
+	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+		mtk_foe_entry_set_wdma(foe, info.wdma_idx, info.queue, info.bss,
+				       info.wcid);
+		pse_port = PSE_PPE0_PORT;
+		*wed_index = info.wdma_idx;
+		goto out;
+	}
+
 	dsa_port = mtk_flow_get_dsa_port(&dev);
 	if (dsa_port >= 0)
 		mtk_foe_entry_set_dsa(foe, dsa_port);
@@ -178,6 +208,7 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
 	else
 		return -EOPNOTSUPP;
 
+out:
 	mtk_foe_entry_set_pse_port(foe, pse_port);
 
 	return 0;
@@ -193,11 +224,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	struct net_device *odev = NULL;
 	struct mtk_flow_entry *entry;
 	int offload_type = 0;
+	int wed_index = -1;
 	u16 addr_type = 0;
-	u32 timestamp;
 	u8 l4proto = 0;
 	int err = 0;
-	int hash;
 	int i;
 
 	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
@@ -229,9 +259,45 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		return -EOPNOTSUPP;
 	}
 
+	switch (addr_type) {
+	case 0:
+		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+			struct flow_match_eth_addrs match;
+
+			flow_rule_match_eth_addrs(rule, &match);
+			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+		} else {
+			return -EOPNOTSUPP;
+		}
+
+		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+			struct flow_match_vlan match;
+
+			flow_rule_match_vlan(rule, &match);
+
+			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+				return -EOPNOTSUPP;
+
+			data.vlan_in = match.key->vlan_id;
+		}
+		break;
+	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+		break;
+	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
 	flow_action_for_each(i, act, &rule->action) {
 		switch (act->id) {
 		case FLOW_ACTION_MANGLE:
+			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+				return -EOPNOTSUPP;
 			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
 				mtk_flow_offload_mangle_eth(act, &data.eth);
 			break;
@@ -263,17 +329,6 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		}
 	}
 
-	switch (addr_type) {
-	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
-		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
-		break;
-	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
-		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
-		break;
-	default:
-		return -EOPNOTSUPP;
-	}
-
 	if (!is_valid_ether_addr(data.eth.h_source) ||
 	    !is_valid_ether_addr(data.eth.h_dest))
 		return -EINVAL;
@@ -287,10 +342,13 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
 		struct flow_match_ports ports;
 
+		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
 		flow_rule_match_ports(rule, &ports);
 		data.src_port = ports.key->src;
 		data.dst_port = ports.key->dst;
-	} else {
+	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
 		return -EOPNOTSUPP;
 	}
 
@@ -320,6 +378,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 		if (act->id != FLOW_ACTION_MANGLE)
 			continue;
 
+		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+			return -EOPNOTSUPP;
+
 		switch (act->mangle.htype) {
 		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
 		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
@@ -345,6 +406,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 			return err;
 	}
 
+	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+		foe.bridge.vlan = data.vlan_in;
+
 	if (data.vlan.num == 1) {
 		if (data.vlan.proto != htons(ETH_P_8021Q))
 			return -EOPNOTSUPP;
@@ -354,33 +418,38 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (data.pppoe.num == 1)
 		mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
 
-	err = mtk_flow_set_output_device(eth, &foe, odev);
+	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+					 &wed_index);
 	if (err)
 		return err;
 
+	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+		return err;
+
 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
 
 	entry->cookie = f->cookie;
-	timestamp = mtk_eth_timestamp(eth);
-	hash = mtk_foe_entry_commit(&eth->ppe, &foe, timestamp);
-	if (hash < 0) {
-		err = hash;
+	memcpy(&entry->data, &foe, sizeof(entry->data));
+	entry->wed_index = wed_index;
+
+	if (mtk_foe_entry_commit(eth->ppe, entry) < 0)
 		goto free;
-	}
 
-	entry->hash = hash;
 	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
 				     mtk_flow_ht_params);
 	if (err < 0)
-		goto clear_flow;
+		goto clear;
 
 	return 0;
-clear_flow:
-	mtk_foe_entry_clear(&eth->ppe, hash);
+
+clear:
+	mtk_foe_entry_clear(eth->ppe, entry);
 free:
 	kfree(entry);
+	if (wed_index >= 0)
+		mtk_wed_flow_remove(wed_index);
 	return err;
 }
 
@@ -394,9 +463,11 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (!entry)
 		return -ENOENT;
 
-	mtk_foe_entry_clear(&eth->ppe, entry->hash);
+	mtk_foe_entry_clear(eth->ppe, entry);
 	rhashtable_remove_fast(&eth->flow_table, &entry->node,
 			       mtk_flow_ht_params);
+	if (entry->wed_index >= 0)
+		mtk_wed_flow_remove(entry->wed_index);
 	kfree(entry);
 
 	return 0;
@@ -406,7 +477,6 @@ static int
 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 {
 	struct mtk_flow_entry *entry;
-	int timestamp;
 	u32 idle;
 
 	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
@@ -414,11 +484,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
 	if (!entry)
 		return -ENOENT;
 
-	timestamp = mtk_foe_entry_timestamp(&eth->ppe, entry->hash);
-	if (timestamp < 0)
-		return -ETIMEDOUT;
-
-	idle = mtk_eth_timestamp(eth) - timestamp;
+	idle = mtk_foe_entry_idle_time(eth->ppe, entry);
 	f->stats.lastused = jiffies - idle * HZ;
 
 	return 0;
@@ -470,7 +536,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 	struct flow_block_cb *block_cb;
 	flow_setup_cb_t *cb;
 
-	if (!eth->ppe.foe_table)
+	if (!eth->ppe || !eth->ppe->foe_table)
 		return -EOPNOTSUPP;
 
 	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -511,15 +577,18 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
 int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
 		     void *type_data)
 {
-	if (type == TC_SETUP_FT)
+	switch (type) {
+	case TC_SETUP_BLOCK:
+	case TC_SETUP_FT:
 		return mtk_eth_setup_tc_block(dev, type_data);
-
-	return -EOPNOTSUPP;
+	default:
+		return -EOPNOTSUPP;
+	}
 }
 
 int mtk_eth_offload_init(struct mtk_eth *eth)
 {
-	if (!eth->ppe.foe_table)
+	if (!eth->ppe || !eth->ppe->foe_table)
 		return 0;
 
 	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
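
mtk_flow_get_wdma_info() above discovers the WLAN target by asking the egress netdev to describe its forwarding path; a device eligible for WED offload is expected to report DEV_PATH_MTK_WDMA together with its WDMA index, ring, BSS and WCID. A hedged sketch of what the WLAN-side ndo_fill_forward_path counterpart might look like (field names follow this patch's netdevice.h additions; the constant values are placeholders, not taken from any real driver):

/* Hypothetical WLAN driver hook: report this netdev as reachable
 * through WED/WDMA so the PPE can bind flows to it. */
static int example_fill_forward_path(struct net_device_path_ctx *ctx,
				     struct net_device_path *path)
{
	path->type = DEV_PATH_MTK_WDMA;
	path->dev = ctx->dev;
	path->mtk_wdma.wdma_idx = 0;	/* which WED/WDMA instance */
	path->mtk_wdma.queue = 0;	/* target WDMA ring */
	path->mtk_wdma.bss = 0;		/* BSS index for the WED token */
	path->mtk_wdma.wcid = 1;	/* destination station id */

	return 0;
}
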
1265diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
1266new file mode 100644
1267index 000000000..ea1cbdf1a
1268--- /dev/null
1269+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
1270@@ -0,0 +1,876 @@
1271+// SPDX-License-Identifier: GPL-2.0-only
1272+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
1273+
1274+#include <linux/kernel.h>
1275+#include <linux/slab.h>
1276+#include <linux/module.h>
1277+#include <linux/bitfield.h>
1278+#include <linux/dma-mapping.h>
1279+#include <linux/skbuff.h>
1280+#include <linux/of_platform.h>
1281+#include <linux/of_address.h>
1282+#include <linux/mfd/syscon.h>
1283+#include <linux/debugfs.h>
1284+#include <linux/iopoll.h>
1285+#include <linux/soc/mediatek/mtk_wed.h>
1286+#include "mtk_eth_soc.h"
1287+#include "mtk_wed_regs.h"
1288+#include "mtk_wed.h"
1289+#include "mtk_ppe.h"
1290+
1291+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
1292+
1293+#define MTK_WED_PKT_SIZE 1900
1294+#define MTK_WED_BUF_SIZE 2048
1295+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
1296+
1297+#define MTK_WED_TX_RING_SIZE 2048
1298+#define MTK_WED_WDMA_RING_SIZE 1024
1299+
1300+static struct mtk_wed_hw *hw_list[2];
1301+static DEFINE_MUTEX(hw_lock);
1302+
1303+static void
1304+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1305+{
1306+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
1307+}
1308+
1309+static void
1310+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1311+{
1312+ return wed_m32(dev, reg, 0, mask);
1313+}
1314+
1315+static void
1316+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
1317+{
1318+ return wed_m32(dev, reg, mask, 0);
1319+}
1320+
1321+static void
1322+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
1323+{
1324+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
1325+}
1326+
1327+static void
1328+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
1329+{
1330+ wdma_m32(dev, reg, 0, mask);
1331+}
1332+
1333+static u32
1334+mtk_wed_read_reset(struct mtk_wed_device *dev)
1335+{
1336+ return wed_r32(dev, MTK_WED_RESET);
1337+}
1338+
1339+static void
1340+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
1341+{
1342+ u32 status;
1343+
1344+ wed_w32(dev, MTK_WED_RESET, mask);
1345+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
1346+ !(status & mask), 0, 1000))
1347+ WARN_ON_ONCE(1);
1348+}
1349+
1350+static struct mtk_wed_hw *
1351+mtk_wed_assign(struct mtk_wed_device *dev)
1352+{
1353+ struct mtk_wed_hw *hw;
1354+
1355+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
1356+ if (!hw || hw->wed_dev)
1357+ return NULL;
1358+
1359+ hw->wed_dev = dev;
1360+ return hw;
1361+}
1362+
1363+static int
1364+mtk_wed_buffer_alloc(struct mtk_wed_device *dev)
1365+{
1366+ struct mtk_wdma_desc *desc;
1367+ dma_addr_t desc_phys;
1368+ void **page_list;
1369+ int token = dev->wlan.token_start;
1370+ int ring_size;
1371+ int n_pages;
1372+ int i, page_idx;
1373+
1374+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
1375+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
1376+
1377+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
1378+ if (!page_list)
1379+ return -ENOMEM;
1380+
1381+ dev->buf_ring.size = ring_size;
1382+ dev->buf_ring.pages = page_list;
1383+
1384+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
1385+ &desc_phys, GFP_KERNEL);
1386+ if (!desc)
1387+ return -ENOMEM;
1388+
1389+ dev->buf_ring.desc = desc;
1390+ dev->buf_ring.desc_phys = desc_phys;
1391+
1392+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
1393+ dma_addr_t page_phys, buf_phys;
1394+ struct page *page;
1395+ void *buf;
1396+ int s;
1397+
1398+ page = __dev_alloc_pages(GFP_KERNEL, 0);
1399+ if (!page)
1400+ return -ENOMEM;
1401+
1402+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
1403+ DMA_BIDIRECTIONAL);
1404+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
1405+ __free_page(page);
1406+ return -ENOMEM;
1407+ }
1408+
1409+ page_list[page_idx++] = page;
1410+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
1411+ DMA_BIDIRECTIONAL);
1412+
1413+ buf = page_to_virt(page);
1414+ buf_phys = page_phys;
1415+
1416+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
1417+ u32 txd_size;
1418+
1419+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
1420+
1421+ desc->buf0 = buf_phys;
1422+ desc->buf1 = buf_phys + txd_size;
1423+ desc->ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
1424+ txd_size) |
1425+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
1426+ MTK_WED_BUF_SIZE - txd_size) |
1427+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
1428+ desc->info = 0;
1429+ desc++;
1430+
1431+ buf += MTK_WED_BUF_SIZE;
1432+ buf_phys += MTK_WED_BUF_SIZE;
1433+ }
1434+
1435+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
1436+ DMA_BIDIRECTIONAL);
1437+ }
1438+
1439+ return 0;
1440+}
1441+
1442+static void
1443+mtk_wed_free_buffer(struct mtk_wed_device *dev)
1444+{
1445+ struct mtk_wdma_desc *desc = dev->buf_ring.desc;
1446+ void **page_list = dev->buf_ring.pages;
1447+ int page_idx;
1448+ int i;
1449+
1450+ if (!page_list)
1451+ return;
1452+
1453+ if (!desc)
1454+ goto free_pagelist;
1455+
1456+ for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
1457+ void *page = page_list[page_idx++];
1458+
1459+ if (!page)
1460+ break;
1461+
1462+ dma_unmap_page(dev->hw->dev, desc[i].buf0,
1463+ PAGE_SIZE, DMA_BIDIRECTIONAL);
1464+ __free_page(page);
1465+ }
1466+
1467+ dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc),
1468+ desc, dev->buf_ring.desc_phys);
1469+
1470+free_pagelist:
1471+ kfree(page_list);
1472+}
1473+
1474+static void
1475+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
1476+{
1477+ if (!ring->desc)
1478+ return;
1479+
1480+ dma_free_coherent(dev->hw->dev, ring->size * sizeof(*ring->desc),
1481+ ring->desc, ring->desc_phys);
1482+}
1483+
1484+static void
1485+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
1486+{
1487+ int i;
1488+
1489+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
1490+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
1491+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1492+ mtk_wed_free_ring(dev, &dev->tx_wdma[i]);
1493+}
1494+
1495+static void
1496+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
1497+{
1498+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1499+
1500+ if (!dev->hw->num_flows)
1501+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1502+
1503+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
1504+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
1505+}
1506+
1507+static void
1508+mtk_wed_stop(struct mtk_wed_device *dev)
1509+{
1510+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
1511+ mtk_wed_set_ext_int(dev, false);
1512+
1513+ wed_clr(dev, MTK_WED_CTRL,
1514+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1515+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1516+ MTK_WED_CTRL_WED_TX_BM_EN |
1517+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1518+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
1519+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
1520+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
1521+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
1522+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
1523+
1524+ wed_clr(dev, MTK_WED_GLO_CFG,
1525+ MTK_WED_GLO_CFG_TX_DMA_EN |
1526+ MTK_WED_GLO_CFG_RX_DMA_EN);
1527+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
1528+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1529+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1530+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1531+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1532+}
1533+
1534+static void
1535+mtk_wed_detach(struct mtk_wed_device *dev)
1536+{
1537+ struct device_node *wlan_node = dev->wlan.pci_dev->dev.of_node;
1538+ struct mtk_wed_hw *hw = dev->hw;
1539+
1540+ mutex_lock(&hw_lock);
1541+
1542+ mtk_wed_stop(dev);
1543+
1544+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1545+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1546+
1547+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1548+
1549+ mtk_wed_free_buffer(dev);
1550+ mtk_wed_free_tx_rings(dev);
1551+
1552+ if (of_dma_is_coherent(wlan_node))
1553+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
1554+ BIT(hw->index), BIT(hw->index));
1555+
1556+ if (!hw_list[!hw->index]->wed_dev &&
1557+ hw->eth->dma_dev != hw->eth->dev)
1558+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
1559+
1560+ memset(dev, 0, sizeof(*dev));
1561+ module_put(THIS_MODULE);
1562+
1563+ hw->wed_dev = NULL;
1564+ mutex_unlock(&hw_lock);
1565+}
1566+
1567+static void
1568+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
1569+{
1570+ u32 mask, set;
1571+ u32 offset;
1572+
1573+ mtk_wed_stop(dev);
1574+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
1575+
1576+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
1577+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
1578+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
1579+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
1580+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
1581+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
1582+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
1583+
1584+ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO_PRERES);
1585+
1586+ offset = dev->hw->index ? 0x04000400 : 0;
1587+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
1588+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
1589+
1590+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, MTK_PCIE_BASE(dev->hw->index));
1591+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
1592+}
1593+
1594+static void
1595+mtk_wed_hw_init(struct mtk_wed_device *dev)
1596+{
1597+ if (dev->init_done)
1598+ return;
1599+
1600+ dev->init_done = true;
1601+ mtk_wed_set_ext_int(dev, false);
1602+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
1603+ MTK_WED_TX_BM_CTRL_PAUSE |
1604+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
1605+ dev->buf_ring.size / 128) |
1606+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
1607+ MTK_WED_TX_RING_SIZE / 256));
1608+
1609+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys);
1610+
1611+ wed_w32(dev, MTK_WED_TX_BM_TKID,
1612+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
1613+ dev->wlan.token_start) |
1614+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
1615+ dev->wlan.token_start + dev->wlan.nbuf - 1));
1616+
1617+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
1618+
1619+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
1620+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
1621+ MTK_WED_TX_BM_DYN_THR_HI);
1622+
1623+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1624+
1625+ wed_set(dev, MTK_WED_CTRL,
1626+ MTK_WED_CTRL_WED_TX_BM_EN |
1627+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1628+
1629+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
1630+}
1631+
1632+static void
1633+mtk_wed_ring_reset(struct mtk_wdma_desc *desc, int size)
1634+{
1635+ int i;
1636+
1637+ for (i = 0; i < size; i++) {
1638+ desc[i].buf0 = 0;
1639+ desc[i].ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
1640+ desc[i].buf1 = 0;
1641+ desc[i].info = 0;
1642+ }
1643+}
1644+
1645+static u32
1646+mtk_wed_check_busy(struct mtk_wed_device *dev)
1647+{
1648+ if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY)
1649+ return true;
1650+
1651+ if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) &
1652+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY)
1653+ return true;
1654+
1655+ if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY)
1656+ return true;
1657+
1658+ if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) &
1659+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1660+ return true;
1661+
1662+ if (wdma_r32(dev, MTK_WDMA_GLO_CFG) &
1663+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY)
1664+ return true;
1665+
1666+ if (wed_r32(dev, MTK_WED_CTRL) &
1667+ (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY))
1668+ return true;
1669+
1670+ return false;
1671+}
1672+
1673+static int
1674+mtk_wed_poll_busy(struct mtk_wed_device *dev)
1675+{
1676+ int sleep = 15000;
1677+ int timeout = 100 * sleep;
1678+ u32 val;
1679+
1680+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
1681+ timeout, false, dev);
1682+}
1683+
1684+static void
1685+mtk_wed_reset_dma(struct mtk_wed_device *dev)
1686+{
1687+ bool busy = false;
1688+ u32 val;
1689+ int i;
1690+
1691+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
1692+ struct mtk_wdma_desc *desc = dev->tx_ring[i].desc;
1693+
1694+ if (!desc)
1695+ continue;
1696+
1697+ mtk_wed_ring_reset(desc, MTK_WED_TX_RING_SIZE);
1698+ }
1699+
1700+ if (mtk_wed_poll_busy(dev))
1701+ busy = mtk_wed_check_busy(dev);
1702+
1703+ if (busy) {
1704+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
1705+ } else {
1706+ wed_w32(dev, MTK_WED_RESET_IDX,
1707+ MTK_WED_RESET_IDX_TX |
1708+ MTK_WED_RESET_IDX_RX);
1709+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
1710+ }
1711+
1712+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
1713+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
1714+
1715+ if (busy) {
1716+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
1717+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
1718+ } else {
1719+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
1720+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
1721+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
1722+
1723+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1724+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
1725+
1726+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
1727+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
1728+ }
1729+
1730+ for (i = 0; i < 100; i++) {
1731+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
1732+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
1733+ break;
1734+ }
1735+
1736+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
1737+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
1738+
1739+ if (busy) {
1740+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
1741+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
1742+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
1743+ } else {
1744+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
1745+ MTK_WED_WPDMA_RESET_IDX_TX |
1746+ MTK_WED_WPDMA_RESET_IDX_RX);
1747+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
1748+ }
1749+
1750+}
1751+
1752+static int
1753+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
1754+ int size)
1755+{
1756+ ring->desc = dma_alloc_coherent(dev->hw->dev,
1757+ size * sizeof(*ring->desc),
1758+ &ring->desc_phys, GFP_KERNEL);
1759+ if (!ring->desc)
1760+ return -ENOMEM;
1761+
1762+ ring->size = size;
1763+ mtk_wed_ring_reset(ring->desc, size);
1764+
1765+ return 0;
1766+}
1767+
1768+static int
1769+mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size)
1770+{
1771+ struct mtk_wed_ring *wdma = &dev->tx_wdma[idx];
1772+
1773+ if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE))
1774+ return -ENOMEM;
1775+
1776+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1777+ wdma->desc_phys);
1778+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
1779+ size);
1780+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
1781+
1782+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
1783+ wdma->desc_phys);
1784+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
1785+ size);
1786+
1787+ return 0;
1788+}
1789+
1790+static void
1791+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
1792+{
1793+ u32 wdma_mask;
1794+ u32 val;
1795+ int i;
1796+
1797+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
1798+ if (!dev->tx_wdma[i].desc)
1799+ mtk_wed_wdma_ring_setup(dev, i, 16);
1800+
1801+ wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
1802+
1803+ mtk_wed_hw_init(dev);
1804+
1805+ wed_set(dev, MTK_WED_CTRL,
1806+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
1807+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
1808+ MTK_WED_CTRL_WED_TX_BM_EN |
1809+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
1810+
1811+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS);
1812+
1813+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
1814+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
1815+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
1816+
1817+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
1818+ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
1819+
1820+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
1821+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
1822+
1823+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
1824+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
1825+
1826+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
1827+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
1828+
1829+ wed_set(dev, MTK_WED_GLO_CFG,
1830+ MTK_WED_GLO_CFG_TX_DMA_EN |
1831+ MTK_WED_GLO_CFG_RX_DMA_EN);
1832+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
1833+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
1834+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
1835+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
1836+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
1837+
1838+ mtk_wed_set_ext_int(dev, true);
1839+ val = dev->wlan.wpdma_phys |
1840+ MTK_PCIE_MIRROR_MAP_EN |
1841+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index);
1842+
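+	/* BIT(0)/BIT(1) are MTK_PCIE_MIRROR_MAP_EN and
+	 * MTK_PCIE_MIRROR_MAP_WED_ID: enable PCIe mirroring and route it
+	 * to this WED instance.
+	 */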
1843+ if (dev->hw->index)
1844+ val |= BIT(1);
1845+ val |= BIT(0);
1846+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
1847+
1848+ dev->running = true;
1849+}
1850+
1851+static int
1852+mtk_wed_attach(struct mtk_wed_device *dev)
1853+ __releases(RCU)
1854+{
1855+ struct mtk_wed_hw *hw;
1856+ int ret = 0;
1857+
1858+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
1859+ "mtk_wed_attach without holding the RCU read lock");
1860+
1861+ if (pci_domain_nr(dev->wlan.pci_dev->bus) > 1 ||
1862+ !try_module_get(THIS_MODULE))
1863+ ret = -ENODEV;
1864+
1865+ rcu_read_unlock();
1866+
1867+ if (ret)
1868+ return ret;
1869+
1870+ mutex_lock(&hw_lock);
1871+
1872+ hw = mtk_wed_assign(dev);
1873+ if (!hw) {
1874+ module_put(THIS_MODULE);
1875+ ret = -ENODEV;
1876+ goto out;
1877+ }
1878+
1879+ dev_info(&dev->wlan.pci_dev->dev, "attaching wed device %d\n", hw->index);
1880+
1881+ dev->hw = hw;
1882+ dev->dev = hw->dev;
1883+ dev->irq = hw->irq;
1884+ dev->wdma_idx = hw->index;
1885+
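+	/* When the ethernet node is DMA-coherent (and still using itself as
+	 * DMA device), switch its DMA mapping to the WED device so both
+	 * ends share one coherent DMA domain.
+	 */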
1886+ if (hw->eth->dma_dev == hw->eth->dev &&
1887+ of_dma_is_coherent(hw->eth->dev->of_node))
1888+ mtk_eth_set_dma_device(hw->eth, hw->dev);
1889+
1890+ ret = mtk_wed_buffer_alloc(dev);
1891+ if (ret) {
1892+ mtk_wed_detach(dev);
1893+ goto out;
1894+ }
1895+
1896+ mtk_wed_hw_init_early(dev);
1897+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0);
1898+
1899+out:
1900+ mutex_unlock(&hw_lock);
1901+
1902+ return ret;
1903+}
1904+
1905+static int
1906+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
1907+{
1908+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
1909+
1910+ /*
1911+	 * Instead of configuring the WLAN PDMA TX ring directly, the DMA
1912+	 * ring allocated by the WLAN driver gets configured into the WED
1913+	 * MTK_WED_RING_TX(n) registers.
1914+ * registers.
1915+ *
1916+ * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
1917+ * into MTK_WED_WPDMA_RING_TX(n) registers.
1918+ * It gets filled with packets picked up from WED TX ring and from
1919+ * WDMA RX.
1920+ */
1921+
1922+	BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring));
1923+
1924+ if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE))
1925+ return -ENOMEM;
1926+
1927+ if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
1928+ return -ENOMEM;
1929+
1930+ ring->reg_base = MTK_WED_RING_TX(idx);
1931+ ring->wpdma = regs;
1932+
1933+ /* WED -> WPDMA */
1934+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
1935+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
1936+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
1937+
1938+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
1939+ ring->desc_phys);
1940+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
1941+ MTK_WED_TX_RING_SIZE);
1942+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
1943+
1944+ return 0;
1945+}
1946+
1947+static int
1948+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
1949+{
1950+ struct mtk_wed_ring *ring = &dev->txfree_ring;
1951+ int i;
1952+
1953+ /*
1954+ * For txfree event handling, the same DMA ring is shared between WED
1955+ * and WLAN. The WLAN driver accesses the ring index registers through
1956+	 * WED.
1957+	 */
1958+ ring->reg_base = MTK_WED_RING_RX(1);
1959+ ring->wpdma = regs;
1960+
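+	/* Mirror the BASE/COUNT/CPU_IDX ring registers (offsets 0x0, 0x4,
+	 * 0x8) from the WLAN ring into both the WED RX(1) ring and its
+	 * WPDMA copy.
+	 */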
1961+ for (i = 0; i < 12; i += 4) {
1962+ u32 val = readl(regs + i);
1963+
1964+ wed_w32(dev, MTK_WED_RING_RX(1) + i, val);
1965+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(1) + i, val);
1966+ }
1967+
1968+ return 0;
1969+}
1970+
1971+static u32
1972+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
1973+{
1974+ u32 val;
1975+
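+	/* Acknowledge the extended (error) interrupt status by writing the
+	 * value back before collecting the regular interrupt status below.
+	 */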
1976+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
1977+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
1978+ val &= MTK_WED_EXT_INT_STATUS_ERROR_MASK;
1979+ if (!dev->hw->num_flows)
1980+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
1981+ if (val && net_ratelimit())
1982+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
1983+
1984+ val = wed_r32(dev, MTK_WED_INT_STATUS);
1985+ val &= mask;
1986+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
1987+
1988+ return val;
1989+}
1990+
1991+static void
1992+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
1993+{
1994+ if (!dev->running)
1995+ return;
1996+
1997+ mtk_wed_set_ext_int(dev, !!mask);
1998+ wed_w32(dev, MTK_WED_INT_MASK, mask);
1999+}
2000+
2001+int mtk_wed_flow_add(int index)
2002+{
2003+ struct mtk_wed_hw *hw = hw_list[index];
2004+ int ret;
2005+
2006+ if (!hw || !hw->wed_dev)
2007+ return -ENODEV;
2008+
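+	/* Fast path: offload is already enabled, just take another reference. */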
2009+ if (hw->num_flows) {
2010+ hw->num_flows++;
2011+ return 0;
2012+ }
2013+
2014+ mutex_lock(&hw_lock);
2015+ if (!hw->wed_dev) {
2016+ ret = -ENODEV;
2017+ goto out;
2018+ }
2019+
2020+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
2021+ if (!ret)
2022+ hw->num_flows++;
2023+ mtk_wed_set_ext_int(hw->wed_dev, true);
2024+
2025+out:
2026+ mutex_unlock(&hw_lock);
2027+
2028+ return ret;
2029+}
2030+
2031+void mtk_wed_flow_remove(int index)
2032+{
2033+ struct mtk_wed_hw *hw = hw_list[index];
2034+
2035+ if (!hw)
2036+ return;
2037+
2038+ if (--hw->num_flows)
2039+ return;
2040+
2041+ mutex_lock(&hw_lock);
2042+ if (!hw->wed_dev)
2043+ goto out;
2044+
2045+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
2046+ mtk_wed_set_ext_int(hw->wed_dev, true);
2047+
2048+out:
2049+ mutex_unlock(&hw_lock);
2050+}
2051+
2052+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2053+ void __iomem *wdma, int index)
2054+{
2055+ static const struct mtk_wed_ops wed_ops = {
2056+ .attach = mtk_wed_attach,
2057+ .tx_ring_setup = mtk_wed_tx_ring_setup,
2058+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
2059+ .start = mtk_wed_start,
2060+ .stop = mtk_wed_stop,
2061+ .reset_dma = mtk_wed_reset_dma,
2062+ .reg_read = wed_r32,
2063+ .reg_write = wed_w32,
2064+ .irq_get = mtk_wed_irq_get,
2065+ .irq_set_mask = mtk_wed_irq_set_mask,
2066+ .detach = mtk_wed_detach,
2067+ };
2068+ struct device_node *eth_np = eth->dev->of_node;
2069+ struct platform_device *pdev;
2070+ struct mtk_wed_hw *hw;
2071+ struct regmap *regs;
2072+ int irq;
2073+
2074+ if (!np)
2075+ return;
2076+
2077+ pdev = of_find_device_by_node(np);
2078+ if (!pdev)
2079+ return;
2080+
2081+ get_device(&pdev->dev);
2082+ irq = platform_get_irq(pdev, 0);
2083+	if (irq < 0) {
+		put_device(&pdev->dev);
2084+		return;
+	}
2085+
2086+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
2087+	if (IS_ERR(regs)) {
+		put_device(&pdev->dev);
2088+		return;
+	}
2089+
2090+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
2091+
2092+ mutex_lock(&hw_lock);
2093+
2094+ if (WARN_ON(hw_list[index]))
2095+ goto unlock;
2096+
2097+	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+	if (!hw)
+		goto unlock;
+
2098+	hw->node = np;
2099+ hw->regs = regs;
2100+ hw->eth = eth;
2101+ hw->dev = &pdev->dev;
2102+ hw->wdma = wdma;
2103+ hw->index = index;
2104+ hw->irq = irq;
2105+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
2106+ "mediatek,pcie-mirror");
2107+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
2108+ "mediatek,hifsys");
2109+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
2110+ kfree(hw);
2111+ goto unlock;
2112+ }
2113+
2114+ if (!index) {
2115+ regmap_write(hw->mirror, 0, 0);
2116+ regmap_write(hw->mirror, 4, 0);
2117+ }
2118+ mtk_wed_hw_add_debugfs(hw);
2119+
2120+ hw_list[index] = hw;
2121+
2122+unlock:
2123+ mutex_unlock(&hw_lock);
2124+}
2125+
2126+void mtk_wed_exit(void)
2127+{
2128+ int i;
2129+
2130+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
2131+
2132+ synchronize_rcu();
2133+
2134+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
2135+ struct mtk_wed_hw *hw;
2136+
2137+ hw = hw_list[i];
2138+ if (!hw)
2139+ continue;
2140+
2141+ hw_list[i] = NULL;
2142+ debugfs_remove(hw->debugfs_dir);
2143+ put_device(hw->dev);
2144+ kfree(hw);
2145+ }
2146+}
2147diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
2148new file mode 100644
2149index 000000000..981ec613f
2150--- /dev/null
2151+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
2152@@ -0,0 +1,135 @@
2153+// SPDX-License-Identifier: GPL-2.0-only
2154+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2155+
2156+#ifndef __MTK_WED_PRIV_H
2157+#define __MTK_WED_PRIV_H
2158+
2159+#include <linux/soc/mediatek/mtk_wed.h>
2160+#include <linux/debugfs.h>
2161+#include <linux/regmap.h>
2162+#include <linux/netdevice.h>
2163+
2164+struct mtk_eth;
2165+
2166+struct mtk_wed_hw {
2167+ struct device_node *node;
2168+ struct mtk_eth *eth;
2169+ struct regmap *regs;
2170+ struct regmap *hifsys;
2171+ struct device *dev;
2172+ void __iomem *wdma;
2173+ struct regmap *mirror;
2174+ struct dentry *debugfs_dir;
2175+ struct mtk_wed_device *wed_dev;
2176+ u32 debugfs_reg;
2177+ u32 num_flows;
2178+ char dirname[5];
2179+ int irq;
2180+ int index;
2181+};
2182+
2183+struct mtk_wdma_info {
2184+ u8 wdma_idx;
2185+ u8 queue;
2186+ u16 wcid;
2187+ u8 bss;
2188+};
2189+
2190+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
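+/* Register access helpers: wed_*() go through the WED syscon regmap,
+ * wdma_*() access the ethernet WDMA block via plain MMIO, and the
+ * wpdma_*() helpers touch the WLAN-owned WPDMA ring registers,
+ * quietly doing nothing until the corresponding ring is set up.
+ */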
2191+static inline void
2192+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2193+{
2194+ regmap_write(dev->hw->regs, reg, val);
2195+}
2196+
2197+static inline u32
2198+wed_r32(struct mtk_wed_device *dev, u32 reg)
2199+{
2200+ unsigned int val;
2201+
2202+ regmap_read(dev->hw->regs, reg, &val);
2203+
2204+ return val;
2205+}
2206+
2207+static inline void
2208+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2209+{
2210+ writel(val, dev->hw->wdma + reg);
2211+}
2212+
2213+static inline u32
2214+wdma_r32(struct mtk_wed_device *dev, u32 reg)
2215+{
2216+ return readl(dev->hw->wdma + reg);
2217+}
2218+
2219+static inline u32
2220+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
2221+{
2222+ if (!dev->tx_ring[ring].wpdma)
2223+ return 0;
2224+
2225+ return readl(dev->tx_ring[ring].wpdma + reg);
2226+}
2227+
2228+static inline void
2229+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
2230+{
2231+ if (!dev->tx_ring[ring].wpdma)
2232+ return;
2233+
2234+ writel(val, dev->tx_ring[ring].wpdma + reg);
2235+}
2236+
2237+static inline u32
2238+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
2239+{
2240+ if (!dev->txfree_ring.wpdma)
2241+ return 0;
2242+
2243+ return readl(dev->txfree_ring.wpdma + reg);
2244+}
2245+
2246+static inline void
2247+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
2248+{
2249+ if (!dev->txfree_ring.wpdma)
2250+ return;
2251+
2252+ writel(val, dev->txfree_ring.wpdma + reg);
2253+}
2254+
2255+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2256+ void __iomem *wdma, int index);
2257+void mtk_wed_exit(void);
2258+int mtk_wed_flow_add(int index);
2259+void mtk_wed_flow_remove(int index);
2260+#else
2261+static inline void
2262+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
2263+ void __iomem *wdma, int index)
2264+{
2265+}
2266+static inline void
2267+mtk_wed_exit(void)
2268+{
2269+}
2270+static inline int mtk_wed_flow_add(int index)
2271+{
2272+ return -EINVAL;
2273+}
2274+static inline void mtk_wed_flow_remove(int index)
2275+{
2276+}
2277+#endif
2278+
2279+#ifdef CONFIG_DEBUG_FS
2280+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
2281+#else
2282+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2283+{
2284+}
2285+#endif
2286+
2287+#endif
2288diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2289new file mode 100644
2290index 000000000..a81d3fd1a
2291--- /dev/null
2292+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
2293@@ -0,0 +1,175 @@
2294+// SPDX-License-Identifier: GPL-2.0-only
2295+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
2296+
2297+#include <linux/seq_file.h>
2298+#include "mtk_wed.h"
2299+#include "mtk_wed_regs.h"
2300+
2301+struct reg_dump {
2302+ const char *name;
2303+ u16 offset;
2304+ u8 type;
2305+ u8 base;
2306+};
2307+
2308+enum {
2309+ DUMP_TYPE_STRING,
2310+ DUMP_TYPE_WED,
2311+ DUMP_TYPE_WDMA,
2312+ DUMP_TYPE_WPDMA_TX,
2313+ DUMP_TYPE_WPDMA_TXFREE,
2314+};
2315+
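+/* Helpers for building reg_dump tables; DUMP_RING() expands to four
+ * entries covering the BASE/CNT/CIDX/DIDX ring registers at offsets
+ * 0x0/0x4/0x8/0xc from the ring base.
+ */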
2316+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
2317+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
2318+#define DUMP_RING(_prefix, _base, ...) \
2319+ { _prefix " BASE", _base, __VA_ARGS__ }, \
2320+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
2321+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
2322+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
2323+
2324+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
2325+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
2326+
2327+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
2328+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
2329+
2330+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
2331+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
2332+
2333+static void
2334+print_reg_val(struct seq_file *s, const char *name, u32 val)
2335+{
2336+ seq_printf(s, "%-32s %08x\n", name, val);
2337+}
2338+
2339+static void
2340+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
2341+ const struct reg_dump *regs, int n_regs)
2342+{
2343+ const struct reg_dump *cur;
2344+ u32 val;
2345+
2346+ for (cur = regs; cur < &regs[n_regs]; cur++) {
2347+ switch (cur->type) {
2348+ case DUMP_TYPE_STRING:
2349+ seq_printf(s, "%s======== %s:\n",
2350+ cur > regs ? "\n" : "",
2351+ cur->name);
2352+ continue;
2353+ case DUMP_TYPE_WED:
2354+ val = wed_r32(dev, cur->offset);
2355+ break;
2356+ case DUMP_TYPE_WDMA:
2357+ val = wdma_r32(dev, cur->offset);
2358+ break;
2359+ case DUMP_TYPE_WPDMA_TX:
2360+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
2361+ break;
2362+ case DUMP_TYPE_WPDMA_TXFREE:
2363+ val = wpdma_txfree_r32(dev, cur->offset);
2364+ break;
2365+ }
2366+ print_reg_val(s, cur->name, val);
2367+ }
2368+}
2369+
2371+static int
2372+wed_txinfo_show(struct seq_file *s, void *data)
2373+{
2374+ static const struct reg_dump regs[] = {
2375+ DUMP_STR("WED TX"),
2376+ DUMP_WED(WED_TX_MIB(0)),
2377+ DUMP_WED_RING(WED_RING_TX(0)),
2378+
2379+ DUMP_WED(WED_TX_MIB(1)),
2380+ DUMP_WED_RING(WED_RING_TX(1)),
2381+
2382+ DUMP_STR("WPDMA TX"),
2383+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
2384+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
2385+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
2386+
2387+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
2388+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
2389+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
2390+
2391+ DUMP_STR("WPDMA TX"),
2392+ DUMP_WPDMA_TX_RING(0),
2393+ DUMP_WPDMA_TX_RING(1),
2394+
2395+ DUMP_STR("WED WDMA RX"),
2396+ DUMP_WED(WED_WDMA_RX_MIB(0)),
2397+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
2398+ DUMP_WED(WED_WDMA_RX_THRES(0)),
2399+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
2400+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
2401+
2402+ DUMP_WED(WED_WDMA_RX_MIB(1)),
2403+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
2404+ DUMP_WED(WED_WDMA_RX_THRES(1)),
2405+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
2406+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
2407+
2408+ DUMP_STR("WDMA RX"),
2409+ DUMP_WDMA(WDMA_GLO_CFG),
2410+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
2411+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
2412+ };
2413+ struct mtk_wed_hw *hw = s->private;
2414+ struct mtk_wed_device *dev = hw->wed_dev;
2415+
2416+ if (!dev)
2417+ return 0;
2418+
2419+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
2420+
2421+ return 0;
2422+}
2423+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
2424+
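+/* "regidx"/"regval" work as a pair: write a WED register offset to
+ * regidx, then read or write regval to access that register through
+ * the syscon regmap.
+ */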
2426+static int
2427+mtk_wed_reg_set(void *data, u64 val)
2428+{
2429+ struct mtk_wed_hw *hw = data;
2430+
2431+ regmap_write(hw->regs, hw->debugfs_reg, val);
2432+
2433+ return 0;
2434+}
2435+
2436+static int
2437+mtk_wed_reg_get(void *data, u64 *val)
2438+{
2439+ struct mtk_wed_hw *hw = data;
2440+ unsigned int regval;
2441+ int ret;
2442+
2443+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
2444+ if (ret)
2445+ return ret;
2446+
2447+ *val = regval;
2448+
2449+ return 0;
2450+}
2451+
2452+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
2453+ "0x%08llx\n");
2454+
2455+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
2456+{
2457+ struct dentry *dir;
2458+
2459+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
2460+ dir = debugfs_create_dir(hw->dirname, NULL);
2461+ if (!dir)
2462+ return;
2463+
2464+ hw->debugfs_dir = dir;
2465+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
2466+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
2467+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
2468+}
2469diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2470new file mode 100644
2471index 000000000..a5d9d8a5b
2472--- /dev/null
2473+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
2474@@ -0,0 +1,8 @@
2475+// SPDX-License-Identifier: GPL-2.0-only
2476+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2477+
2478+#include <linux/kernel.h>
2479+#include <linux/soc/mediatek/mtk_wed.h>
2480+
2481+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2482+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
2483diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2484new file mode 100644
2485index 000000000..0a0465ea5
2486--- /dev/null
2487+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
2488@@ -0,0 +1,251 @@
2489+// SPDX-License-Identifier: GPL-2.0-only
2490+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
2491+
2492+#ifndef __MTK_WED_REGS_H
2493+#define __MTK_WED_REGS_H
2494+
2495+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
2496+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
2497+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
2498+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
2499+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
2500+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
2501+
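+/* Hardware DMA descriptor shared with WDMA: two buffer pointers whose
+ * lengths and last-segment flags live in "ctrl" (see the fields above),
+ * plus a hardware/driver defined "info" word.
+ */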
2502+struct mtk_wdma_desc {
2503+ __le32 buf0;
2504+ __le32 ctrl;
2505+ __le32 buf1;
2506+ __le32 info;
2507+} __packed __aligned(4);
2508+
2509+#define MTK_WED_RESET 0x008
2510+#define MTK_WED_RESET_TX_BM BIT(0)
2511+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
2512+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
2513+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
2514+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
2515+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
2516+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
2517+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
2518+#define MTK_WED_RESET_WED BIT(31)
2519+
2520+#define MTK_WED_CTRL 0x00c
2521+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
2522+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
2523+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
2524+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
2525+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
2526+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
2527+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
2528+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
2529+#define MTK_WED_CTRL_RESERVE_EN BIT(12)
2530+#define MTK_WED_CTRL_RESERVE_BUSY BIT(13)
2531+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
2532+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
2533+
2534+#define MTK_WED_EXT_INT_STATUS 0x020
2535+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
2536+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
2537+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
2538+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
2539+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
2540+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
2541+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
2542+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
2543+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
2544+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
2545+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
2546+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
2547+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
2548+#define MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR BIT(22)
2549+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
2550+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
2551+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
2552+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
2553+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
2554+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
2555+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
2556+ MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR | \
2557+ MTK_WED_EXT_INT_STATUS_TX_DRV_W_RESP_ERR)
2558+
2559+#define MTK_WED_EXT_INT_MASK 0x028
2560+
2561+#define MTK_WED_STATUS 0x060
2562+#define MTK_WED_STATUS_TX GENMASK(15, 8)
2563+
2564+#define MTK_WED_TX_BM_CTRL 0x080
2565+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
2566+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
2567+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
2568+
2569+#define MTK_WED_TX_BM_BASE 0x084
2570+
2571+#define MTK_WED_TX_BM_TKID 0x088
2572+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
2573+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
2574+
2575+#define MTK_WED_TX_BM_BUF_LEN 0x08c
2576+
2577+#define MTK_WED_TX_BM_INTF 0x09c
2578+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
2579+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
2580+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
2581+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
2582+
2583+#define MTK_WED_TX_BM_DYN_THR 0x0a0
2584+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
2585+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
2586+
2587+#define MTK_WED_INT_STATUS 0x200
2588+#define MTK_WED_INT_MASK 0x204
2589+
2590+#define MTK_WED_GLO_CFG 0x208
2591+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
2592+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
2593+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
2594+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
2595+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2596+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
2597+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
2598+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2599+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2600+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2601+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2602+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2603+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2604+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
2605+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2606+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
2607+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
2608+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
2609+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
2610+
2611+#define MTK_WED_RESET_IDX 0x20c
2612+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
2613+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
2614+
2615+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
2616+
2617+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
2618+
2619+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
2620+
2621+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
2622+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
2623+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
2624+
2625+#define MTK_WED_WPDMA_GLO_CFG 0x508
2626+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
2627+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
2628+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
2629+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2630+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
2631+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2632+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
2633+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
2634+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
2635+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
2636+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
2637+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
2638+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
2639+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
2640+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
2641+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
2642+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
2643+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
2644+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
2645+
2646+#define MTK_WED_WPDMA_RESET_IDX 0x50c
2647+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
2648+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
2649+
2650+#define MTK_WED_WPDMA_INT_CTRL 0x520
2651+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
2652+
2653+#define MTK_WED_WPDMA_INT_MASK 0x524
2654+
2655+#define MTK_WED_PCIE_CFG_BASE 0x560
2656+
2657+#define MTK_WED_PCIE_INT_TRIGGER 0x570
2658+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
2659+
2660+#define MTK_WED_WPDMA_CFG_BASE 0x580
2661+
2662+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
2663+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
2664+
2665+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
2666+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
2667+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
2668+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
2669+
2670+#define MTK_WED_WDMA_GLO_CFG 0xa04
2671+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
2672+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
2673+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
2674+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
2675+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
2676+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
2677+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
2678+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
2679+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
2680+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
2681+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
2682+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
2683+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
2684+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
2685+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
2686+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
2687+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
2688+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
2689+
2690+#define MTK_WED_WDMA_RESET_IDX 0xa08
2691+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
2692+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
2693+
2694+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
2695+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
2696+
2697+#define MTK_WED_WDMA_INT_CTRL 0xa2c
2698+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
2699+
2700+#define MTK_WED_WDMA_OFFSET0 0xaa4
2701+#define MTK_WED_WDMA_OFFSET1 0xaa8
2702+
2703+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
2704+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
2705+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
2706+
2707+#define MTK_WED_RING_OFS_BASE 0x00
2708+#define MTK_WED_RING_OFS_COUNT 0x04
2709+#define MTK_WED_RING_OFS_CPU_IDX 0x08
2710+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
2711+
2712+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
2713+
2714+#define MTK_WDMA_GLO_CFG 0x204
2715+#define MTK_WDMA_GLO_CFG_RX_INFO_PRERES GENMASK(28, 26)
2716+
2717+#define MTK_WDMA_RESET_IDX 0x208
2718+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
2719+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
2720+
2721+#define MTK_WDMA_INT_MASK 0x228
2722+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
2723+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
2724+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
2725+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
2726+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
2727+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
2728+
2729+#define MTK_WDMA_INT_GRP1 0x250
2730+#define MTK_WDMA_INT_GRP2 0x254
2731+
2732+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
2733+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
2734+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
2735+
2736+/* DMA channel mapping */
2737+#define HIFSYS_DMA_AG_MAP 0x008
2738+
2739+#endif
2740diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2741index 9f64504ac..35998b1a7 100644
2742--- a/include/linux/netdevice.h
2743+++ b/include/linux/netdevice.h
2744@@ -835,6 +835,7 @@ enum net_device_path_type {
2745 DEV_PATH_BRIDGE,
2746 DEV_PATH_PPPOE,
2747 DEV_PATH_DSA,
2748+ DEV_PATH_MTK_WDMA,
2749 };
2750
2751 struct net_device_path {
2752@@ -860,6 +861,12 @@ struct net_device_path {
2753 int port;
2754 u16 proto;
2755 } dsa;
2756+ struct {
2757+ u8 wdma_idx;
2758+ u8 queue;
2759+ u16 wcid;
2760+ u8 bss;
2761+ } mtk_wdma;
2762 };
2763 };
2764
2765diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
2766new file mode 100644
2767index 000000000..7e00cca06
2768--- /dev/null
2769+++ b/include/linux/soc/mediatek/mtk_wed.h
2770@@ -0,0 +1,131 @@
2771+#ifndef __MTK_WED_H
2772+#define __MTK_WED_H
2773+
2774+#include <linux/kernel.h>
2775+#include <linux/rcupdate.h>
2776+#include <linux/regmap.h>
2777+#include <linux/pci.h>
2778+
2779+#define MTK_WED_TX_QUEUES 2
2780+
2781+struct mtk_wed_hw;
2782+struct mtk_wdma_desc;
2783+
2784+struct mtk_wed_ring {
2785+ struct mtk_wdma_desc *desc;
2786+ dma_addr_t desc_phys;
2787+ int size;
2788+
2789+ u32 reg_base;
2790+ void __iomem *wpdma;
2791+};
2792+
2793+struct mtk_wed_device {
2794+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2795+ const struct mtk_wed_ops *ops;
2796+ struct device *dev;
2797+ struct mtk_wed_hw *hw;
2798+ bool init_done, running;
2799+ int wdma_idx;
2800+ int irq;
2801+
2802+ struct mtk_wed_ring tx_ring[MTK_WED_TX_QUEUES];
2803+ struct mtk_wed_ring txfree_ring;
2804+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
2805+
2806+ struct {
2807+ int size;
2808+ void **pages;
2809+ struct mtk_wdma_desc *desc;
2810+ dma_addr_t desc_phys;
2811+ } buf_ring;
2812+
2813+ /* filled by driver: */
2814+ struct {
2815+ struct pci_dev *pci_dev;
2816+
2817+ u32 wpdma_phys;
2818+
2819+ u16 token_start;
2820+ unsigned int nbuf;
2821+
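+		/* init_buf() prepares one tx buffer (at ptr/phys) for the
+		 * given token id; offload_enable()/offload_disable() toggle
+		 * flow offload in the WLAN driver as flows are added or
+		 * removed.
+		 */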
2822+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
2823+ int (*offload_enable)(struct mtk_wed_device *wed);
2824+ void (*offload_disable)(struct mtk_wed_device *wed);
2825+ } wlan;
2826+#endif
2827+};
2828+
2829+struct mtk_wed_ops {
2830+ int (*attach)(struct mtk_wed_device *dev);
2831+ int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
2832+ void __iomem *regs);
2833+ int (*txfree_ring_setup)(struct mtk_wed_device *dev,
2834+ void __iomem *regs);
2835+ void (*detach)(struct mtk_wed_device *dev);
2836+
2837+ void (*stop)(struct mtk_wed_device *dev);
2838+ void (*start)(struct mtk_wed_device *dev, u32 irq_mask);
2839+ void (*reset_dma)(struct mtk_wed_device *dev);
2840+
2841+ u32 (*reg_read)(struct mtk_wed_device *dev, u32 reg);
2842+ void (*reg_write)(struct mtk_wed_device *dev, u32 reg, u32 val);
2843+
2844+ u32 (*irq_get)(struct mtk_wed_device *dev, u32 mask);
2845+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
2846+};
2847+
2848+extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
2849+
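+/* Look up the WED ops under rcu_read_lock() and attach to a WED unit.
+ * Note the asymmetric locking: ops->attach() drops the RCU read lock
+ * itself (it is annotated __releases(RCU)); this helper only unlocks
+ * when no ops are registered.
+ */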
2850+static inline int
2851+mtk_wed_device_attach(struct mtk_wed_device *dev)
2852+{
2853+ int ret = -ENODEV;
2854+
2855+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2856+ rcu_read_lock();
2857+ dev->ops = rcu_dereference(mtk_soc_wed_ops);
2858+ if (dev->ops)
2859+ ret = dev->ops->attach(dev);
2860+ else
2861+ rcu_read_unlock();
2862+
2863+ if (ret)
2864+ dev->ops = NULL;
2865+#endif
2866+
2867+ return ret;
2868+}
2869+
2870+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
2871+#define mtk_wed_device_active(_dev) !!(_dev)->ops
2872+#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
2873+#define mtk_wed_device_start(_dev, _mask) (_dev)->ops->start(_dev, _mask)
2874+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) \
2875+ (_dev)->ops->tx_ring_setup(_dev, _ring, _regs)
2876+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) \
2877+ (_dev)->ops->txfree_ring_setup(_dev, _regs)
2878+#define mtk_wed_device_reg_read(_dev, _reg) \
2879+ (_dev)->ops->reg_read(_dev, _reg)
2880+#define mtk_wed_device_reg_write(_dev, _reg, _val) \
2881+ (_dev)->ops->reg_write(_dev, _reg, _val)
2882+#define mtk_wed_device_irq_get(_dev, _mask) \
2883+ (_dev)->ops->irq_get(_dev, _mask)
2884+#define mtk_wed_device_irq_set_mask(_dev, _mask) \
2885+ (_dev)->ops->irq_set_mask(_dev, _mask)
2886+#else
2887+static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
2888+{
2889+ return false;
2890+}
2891+#define mtk_wed_device_detach(_dev) do {} while (0)
2892+#define mtk_wed_device_start(_dev, _mask) do {} while (0)
2893+#define mtk_wed_device_tx_ring_setup(_dev, _ring, _regs) -ENODEV
2894+#define mtk_wed_device_txfree_ring_setup(_dev, _regs) -ENODEV
2895+#define mtk_wed_device_reg_read(_dev, _reg) 0
2896+#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
2897+#define mtk_wed_device_irq_get(_dev, _mask) 0
2898+#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
2899+#endif
2900+
2901+#endif
2902diff --git a/net/core/dev.c b/net/core/dev.c
2903index 4f0edb218..031ac7c6f 100644
2904--- a/net/core/dev.c
2905+++ b/net/core/dev.c
2906@@ -675,6 +675,10 @@ int dev_fill_forward_path(const struct net_device *dev, const u8 *daddr,
2907 if (WARN_ON_ONCE(last_dev == ctx.dev))
2908 return -1;
2909 }
2910+
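+	/* A path walk may be terminated early by a driver clearing ctx.dev
+	 * (e.g. when the path ends in a WDMA queue instead of another
+	 * net_device); treat the partial path as complete in that case.
+	 */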
2911+ if (!ctx.dev)
2912+ return ret;
2913+
2914 path = dev_fwd_path(stack);
2915 if (!path)
2916 return -1;
2917--
29182.18.0
2919