blob: f1c1b02cf95d7ac5f640ac7ee9f2edbe075055c7 [file] [log] [blame]
developerc1b2cd12022-07-28 18:35:24 +08001From 6a2341ff1a75100d530982d0c021a133124174cc Mon Sep 17 00:00:00 2001
2From: Sujuan Chen <sujuan.chen@mediatek.com>
3Date: Thu, 28 Jul 2022 11:16:15 +0800
4Subject: [PATCH 3/3] mt76 add ser support when wed on
5
6Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
7---
8 dma.c | 26 ++++++++++++++++++--------
9 dma.h | 2 +-
10 mt7915/dma.c | 26 +++++++++++++++++++++-----
11 mt7915/mac.c | 6 ++++++
12 4 files changed, 46 insertions(+), 14 deletions(-)
13
14diff --git a/dma.c b/dma.c
15index 4d4d4046..9c821442 100644
16--- a/dma.c
17+++ b/dma.c
18@@ -624,14 +624,18 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
19 return frames;
20 }
21
22-static int
23-mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
24+int
25+mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
26 {
27 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
28 struct mtk_wed_device *wed = &dev->mmio.wed;
29 int ret, type, ring;
30- u8 flags = q->flags;
31+ u8 flags;
32+
33+ if (!q || !q->ndesc)
34+ return -EINVAL;
35
36+ flags = q->flags;
37 if (!mtk_wed_device_active(wed))
38 q->flags &= ~MT_QFLAG_WED;
39
40@@ -643,7 +647,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
41
42 switch (type) {
43 case MT76_WED_Q_TX:
44- ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs);
45+ ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
46 if (!ret)
47 q->wed_regs = wed->tx_ring[ring].reg_base;
48 break;
49@@ -659,7 +663,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
50 q->wed_regs = wed->txfree_ring.reg_base;
51 break;
52 case MT76_WED_Q_RX:
53- ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs);
54+ ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
55 if (!ret)
56 q->wed_regs = wed->rx_ring[ring].reg_base;
57 break;
58@@ -672,6 +676,7 @@ mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
59 return 0;
60 #endif
61 }
62+EXPORT_SYMBOL_GPL(mt76_dma_wed_setup);
63
64 static int
65 mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
66@@ -704,7 +709,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
67 if (!q->entry)
68 return -ENOMEM;
69
70- ret = mt76_dma_wed_setup(dev, q);
71+ ret = mt76_dma_wed_setup(dev, q, false);
72 if (ret)
73 return ret;
74
75@@ -755,8 +760,13 @@ mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
76 q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
77
78 mt76_dma_rx_cleanup(dev, q);
79- mt76_dma_sync_idx(dev, q);
80- mt76_dma_rx_fill(dev, q);
81+
82+ mt76_dma_wed_setup(dev, q, true);
83+
84+ if (q->flags != MT_WED_Q_TXFREE) {
85+ mt76_dma_sync_idx(dev, q);
86+ mt76_dma_rx_fill(dev, q);
87+ }
88
89 if (!q->rx_head)
90 return;
91diff --git a/dma.h b/dma.h
92index 90370d12..083cbca4 100644
93--- a/dma.h
94+++ b/dma.h
95@@ -58,5 +58,5 @@ enum mt76_mcu_evt_type {
96 int mt76_dma_rx_poll(struct napi_struct *napi, int budget);
97 void mt76_dma_attach(struct mt76_dev *dev);
98 void mt76_dma_cleanup(struct mt76_dev *dev);
99-
100+int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
101 #endif
102diff --git a/mt7915/dma.c b/mt7915/dma.c
103index 7d8d60bb..8df7d0ee 100644
104--- a/mt7915/dma.c
105+++ b/mt7915/dma.c
106@@ -549,28 +549,44 @@ int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
107 mt7915_wfsys_reset(dev);
108
109 /* disable wfdma */
110+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
111+ mtk_wed_device_dma_reset(&dev->mt76.mmio.wed);
112 mt7915_dma_disable(dev, force);
113
114 /* reset hw queues */
115 for (i = 0; i < __MT_TXQ_MAX; i++) {
116 mt76_queue_reset(dev, dev->mphy.q_tx[i]);
117- if (mphy_ext)
118+ if (mphy_ext) {
119 mt76_queue_reset(dev, mphy_ext->q_tx[i]);
120+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
121+ mt76_dma_wed_setup(&dev->mt76,
122+ mphy_ext->q_tx[i],
123+ true);
124+ }
125+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
126+ mt76_dma_wed_setup(&dev->mt76, dev->mphy.q_tx[i],
127+ true);
128 }
129
130 for (i = 0; i < __MT_MCUQ_MAX; i++)
131 mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
132
133- mt76_for_each_q_rx(&dev->mt76, i)
134- mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
135+ mt76_for_each_q_rx(&dev->mt76, i) {
136+ if (dev->mt76.q_rx[i].flags != MT_WED_Q_TXFREE)
137+ mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
138+ }
139
140 mt76_tx_status_check(&dev->mt76, true);
141
142- mt7915_dma_enable(dev);
143-
144 mt76_for_each_q_rx(&dev->mt76, i)
145 mt76_queue_rx_reset(dev, i);
146
147+ if(mtk_wed_device_active(&dev->mt76.mmio.wed) && is_mt7915(&dev->mt76))
148+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
149+ MT_WFDMA0_EXT0_RXWB_KEEP);
150+
151+ mt7915_dma_enable(dev);
152+
153 return 0;
154 }
155
156diff --git a/mt7915/mac.c b/mt7915/mac.c
157index 1f8e1230..f53de870 100644
158--- a/mt7915/mac.c
159+++ b/mt7915/mac.c
160@@ -1674,6 +1674,12 @@ void mt7915_mac_reset_work(struct work_struct *work)
161 if (!(READ_ONCE(dev->reset_state) & MT_MCU_CMD_STOP_DMA))
162 return;
163
164+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
165+ mtk_wed_device_stop(&dev->mt76.mmio.wed, true);
166+ if (!is_mt7986(&dev->mt76))
167+ mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
168+ }
169+
170 ieee80211_stop_queues(mt76_hw(dev));
171 if (ext_phy)
172 ieee80211_stop_queues(ext_phy->hw);
173--
1742.18.0
175