From 73693f3833455addeaa13764ba39917f4fe8e171 Mon Sep 17 00:00:00 2001
From: "sujuan.chen" <sujuan.chen@mediatek.com>
Date: Thu, 12 Oct 2023 10:04:54 +0800
Subject: [PATCH 073/199] mtk: mt76: mt7996: add SER0.5 support w/ wed3.0

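Support SER level 0.5 recovery when WED 3.0 hardware RRO is in use:

- let mt76_dma_queue_reset() and the reset_q queue op take an explicit
  reset flag, so RRO data rings can be reset without clearing their ring
  indices during a forced DMA reset
- add mt76_queue_is_wed_rro_msdu_pg() and stop treating MSDU page rings
  as RRO data rings
- move the RRO hardware programming out of mt7996_wed_rro_init() into
  mt7996_rro_hw_init(), including the mt7992-specific RRO 3.0/3.1
  registers, so mt7996_mac_restart() can re-program it, reset the RRO
  ind/MSDU page rings, and restart WED HWRRO and the hif2 WED instance
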
Signed-off-by: sujuan.chen <sujuan.chen@mediatek.com>
---
 dma.c           |   9 ++--
 dma.h           |   4 +-
 mt76.h          |  14 ++++--
 mt792x_dma.c    |   6 +--
 mt7996/dma.c    |  20 ++++++--
 mt7996/init.c   | 127 +++++++++++++++++++++++++++++++-----------------
 mt7996/mac.c    |  25 ++++++++++
 mt7996/mt7996.h |   1 +
 wed.c           |   4 +-
 9 files changed, 146 insertions(+), 64 deletions(-)

diff --git a/dma.c b/dma.c
index da21f641..e23b744b 100644
--- a/dma.c
+++ b/dma.c
@@ -218,9 +218,9 @@ void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
 	mt76_dma_sync_idx(dev, q);
 }
 
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 {
-	__mt76_dma_queue_reset(dev, q, true);
+	__mt76_dma_queue_reset(dev, q, reset);
 }
 
 static int
@@ -540,7 +540,8 @@ mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 	if (!q->queued)
 		return NULL;
 
-	if (mt76_queue_is_wed_rro_data(q))
+	if (mt76_queue_is_wed_rro_data(q) ||
+	    mt76_queue_is_wed_rro_msdu_pg(q))
 		return NULL;
 
 	if (!mt76_queue_is_wed_rro_ind(q)) {
@@ -792,7 +793,7 @@ mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
 		return 0;
 	}
 
-	mt76_dma_queue_reset(dev, q);
+	mt76_dma_queue_reset(dev, q, true);
 
 	return 0;
 }
diff --git a/dma.h b/dma.h
index 1de5a2b2..3a8c2e55 100644
--- a/dma.h
+++ b/dma.h
@@ -83,12 +83,12 @@ int mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q,
 		     bool allow_direct);
 void __mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
 			    bool reset_idx);
-void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q);
+void mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
 
 static inline void
 mt76_dma_reset_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
-	dev->queue_ops->reset_q(dev, q);
+	dev->queue_ops->reset_q(dev, q, true);
 	if (mtk_wed_device_active(&dev->mmio.wed))
 		mt76_wed_dma_setup(dev, q, true);
 }
diff --git a/mt76.h b/mt76.h
index f2052cf7..1236ddb4 100644
--- a/mt76.h
+++ b/mt76.h
@@ -301,7 +301,7 @@ struct mt76_queue_ops {
 
 	void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
 
-	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q);
+	void (*reset_q)(struct mt76_dev *dev, struct mt76_queue *q, bool reset);
 };
 
 enum mt76_phy_type {
@@ -1742,8 +1742,13 @@ static inline bool mt76_queue_is_wed_rro_ind(struct mt76_queue *q)
 static inline bool mt76_queue_is_wed_rro_data(struct mt76_queue *q)
 {
 	return mt76_queue_is_wed_rro(q) &&
-	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA ||
-		FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
+	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_DATA);
+}
+
+static inline bool mt76_queue_is_wed_rro_msdu_pg(struct mt76_queue *q)
+{
+	return mt76_queue_is_wed_rro(q) &&
+	       (FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_RRO_Q_MSDU_PG);
 }
 
 static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
@@ -1752,7 +1757,8 @@ static inline bool mt76_queue_is_wed_rx(struct mt76_queue *q)
 		return false;
 
 	return FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX ||
-	       mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q);
+	       mt76_queue_is_wed_rro_ind(q) || mt76_queue_is_wed_rro_data(q) ||
+	       mt76_queue_is_wed_rro_msdu_pg(q);
 
 }
diff --git a/mt792x_dma.c b/mt792x_dma.c
index 5cc2d59b..c224bcc8 100644
--- a/mt792x_dma.c
+++ b/mt792x_dma.c
@@ -181,13 +181,13 @@ mt792x_dma_reset(struct mt792x_dev *dev, bool force)
 
 	/* reset hw queues */
 	for (i = 0; i < __MT_TXQ_MAX; i++)
-		mt76_queue_reset(dev, dev->mphy.q_tx[i]);
+		mt76_queue_reset(dev, dev->mphy.q_tx[i], true);
 
 	for (i = 0; i < __MT_MCUQ_MAX; i++)
-		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+		mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
 
 	mt76_for_each_q_rx(&dev->mt76, i)
-		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+		mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
 
 	mt76_tx_status_check(&dev->mt76, true);
 
diff --git a/mt7996/dma.c b/mt7996/dma.c
index 5d85e9ea..d9e1b17f 100644
--- a/mt7996/dma.c
+++ b/mt7996/dma.c
@@ -711,21 +711,31 @@ void mt7996_dma_reset(struct mt7996_dev *dev, bool force)
 	}
 
 	for (i = 0; i < __MT_MCUQ_MAX; i++)
-		mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+		mt76_queue_reset(dev, dev->mt76.q_mcu[i], true);
 
 	mt76_for_each_q_rx(&dev->mt76, i) {
-		if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
 			if (mt76_queue_is_wed_rro(&dev->mt76.q_rx[i]) ||
-			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i]))
+			    mt76_queue_is_wed_tx_free(&dev->mt76.q_rx[i])) {
+				if (force && mt76_queue_is_wed_rro_data(&dev->mt76.q_rx[i]))
+					mt76_queue_reset(dev, &dev->mt76.q_rx[i], false);
 				continue;
+			}
+		}
 
-		mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+		mt76_queue_reset(dev, &dev->mt76.q_rx[i], true);
 	}
 
 	mt76_tx_status_check(&dev->mt76, true);
 
-	mt76_for_each_q_rx(&dev->mt76, i)
+	mt76_for_each_q_rx(&dev->mt76, i) {
+		if (mtk_wed_device_active(&dev->mt76.mmio.wed) && force &&
+		    (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+		     mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i])))
+			continue;
+
 		mt76_queue_rx_reset(dev, i);
+	}
 
 	mt7996_dma_enable(dev, !force);
 }
diff --git a/mt7996/init.c b/mt7996/init.c
index 440c4b7c..3b1f4273 100644
--- a/mt7996/init.c
+++ b/mt7996/init.c
@@ -737,11 +737,91 @@ void mt7996_wfsys_reset(struct mt7996_dev *dev)
 	msleep(20);
 }
 
-static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+void mt7996_rro_hw_init(struct mt7996_dev *dev)
 {
 #ifdef CONFIG_NET_MEDIATEK_SOC_WED
 	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 	u32 reg = MT_RRO_ADDR_ELEM_SEG_ADDR0;
+	int i;
+
+	if (!dev->has_rro)
+		return;
+
+	if (is_mt7992(&dev->mt76)) {
+		/* set emul 3.0 function */
+		mt76_wr(dev, MT_RRO_3_0_EMU_CONF,
+			MT_RRO_3_0_EMU_CONF_EN_MASK);
+
+		mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE0,
+			dev->wed_rro.addr_elem[0].phy_addr);
+	} else {
+		/* TODO: remove line after WM has set */
+		mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
+
+		/* setup BA bitmap cache address */
+		mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
+			dev->wed_rro.ba_bitmap[0].phy_addr);
+		mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
+		mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
+			dev->wed_rro.ba_bitmap[1].phy_addr);
+		mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
+
+		/* setup Address element address */
+		for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
+			mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
+			reg += 4;
+		}
+
+		/* setup Address element address - separate address segment mode */
+		mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
+			MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
+	}
+	wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
+	if (is_mt7996(&dev->mt76))
+		wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
+	else
+		wed->wlan.ind_cmd.particular_sid = 1;
+	wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
+	wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
+	wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
+
+	mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
+	mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
+		 MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
+
+	/* particular session configure */
+	/* use max session idx + 1 as particular session id */
+	mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
+
+	if (is_mt7992(&dev->mt76)) {
+		reg = MT_RRO_MSDU_PG_SEG_ADDR0;
+
+		mt76_set(dev, MT_RRO_3_1_GLOBAL_CONFIG,
+			 MT_RRO_3_1_GLOBAL_CONFIG_INTERLEAVE_EN);
+
+		/* setup Msdu page address */
+		for (i = 0; i < MT7996_RRO_MSDU_PG_CR_CNT; i++) {
+			mt76_wr(dev, reg, dev->wed_rro.msdu_pg[i].phy_addr >> 4);
+			reg += 4;
+		}
+		mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
+			MT_RRO_PARTICULAR_CONFG_EN |
+			FIELD_PREP(MT_RRO_PARTICULAR_SID, 1));
+	} else {
+		mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
+			MT_RRO_PARTICULAR_CONFG_EN |
+			FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
+	}
+	/* interrupt enable */
+	mt76_wr(dev, MT_RRO_HOST_INT_ENA,
+		MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
+#endif
+}
+
+static int mt7996_wed_rro_init(struct mt7996_dev *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+	struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
 	struct mt7996_wed_rro_addr *addr;
 	void *ptr;
 	int i;
@@ -801,50 +881,9 @@ static int mt7996_wed_rro_init(struct mt7996_dev *dev)
 		addr++;
 	}
 
-	/* rro hw init */
-	/* TODO: remove line after WM has set */
-	mt76_clear(dev, WF_RRO_AXI_MST_CFG, WF_RRO_AXI_MST_CFG_DIDX_OK);
-
-	/* setup BA bitmap cache address */
-	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE0,
-		dev->wed_rro.ba_bitmap[0].phy_addr);
-	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE1, 0);
-	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT0,
-		dev->wed_rro.ba_bitmap[1].phy_addr);
-	mt76_wr(dev, MT_RRO_BA_BITMAP_BASE_EXT1, 0);
-
-	/* setup Address element address */
-	for (i = 0; i < ARRAY_SIZE(dev->wed_rro.addr_elem); i++) {
-		mt76_wr(dev, reg, dev->wed_rro.addr_elem[i].phy_addr >> 4);
-		reg += 4;
-	}
-
-	/* setup Address element address - separate address segment mode */
-	mt76_wr(dev, MT_RRO_ADDR_ARRAY_BASE1,
-		MT_RRO_ADDR_ARRAY_ELEM_ADDR_SEG_MODE);
-
-	wed->wlan.ind_cmd.win_size = ffs(MT7996_RRO_WINDOW_MAX_LEN) - 6;
-	wed->wlan.ind_cmd.particular_sid = MT7996_RRO_MAX_SESSION;
-	wed->wlan.ind_cmd.particular_se_phys = dev->wed_rro.session.phy_addr;
-	wed->wlan.ind_cmd.se_group_nums = MT7996_RRO_ADDR_ELEM_LEN;
-	wed->wlan.ind_cmd.ack_sn_addr = MT_RRO_ACK_SN_CTRL;
-
-	mt76_wr(dev, MT_RRO_IND_CMD_SIGNATURE_BASE0, 0x15010e00);
-	mt76_set(dev, MT_RRO_IND_CMD_SIGNATURE_BASE1,
-		 MT_RRO_IND_CMD_SIGNATURE_BASE1_EN);
-
-	/* particular session configure */
-	/* use max session idx + 1 as particular session id */
-	mt76_wr(dev, MT_RRO_PARTICULAR_CFG0, dev->wed_rro.session.phy_addr);
-	mt76_wr(dev, MT_RRO_PARTICULAR_CFG1,
-		MT_RRO_PARTICULAR_CONFG_EN |
-		FIELD_PREP(MT_RRO_PARTICULAR_SID, MT7996_RRO_MAX_SESSION));
-
-	/* interrupt enable */
-	mt76_wr(dev, MT_RRO_HOST_INT_ENA,
-		MT_RRO_HOST_INT_ENA_HOST_RRO_DONE_ENA);
-
 	/* rro ind cmd queue init */
+	mt7996_rro_hw_init(dev);
+
 	return mt7996_dma_rro_init(dev);
 #else
 	return 0;
diff --git a/mt7996/mac.c b/mt7996/mac.c
index dfc68a19..be3fea21 100644
--- a/mt7996/mac.c
+++ b/mt7996/mac.c
@@ -1761,6 +1761,31 @@ mt7996_mac_restart(struct mt7996_dev *dev)
 	if (ret)
 		goto out;
 
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed) && dev->has_rro) {
+		u32 wed_irq_mask = dev->mt76.mmio.irqmask |
+				   MT_INT_RRO_RX_DONE |
+				   MT_INT_TX_DONE_BAND2;
+
+		mt7996_rro_hw_init(dev);
+		mt76_for_each_q_rx(&dev->mt76, i) {
+			if (mt76_queue_is_wed_rro_ind(&dev->mt76.q_rx[i]) ||
+			    mt76_queue_is_wed_rro_msdu_pg(&dev->mt76.q_rx[i]))
+				mt76_queue_rx_reset(dev, i);
+		}
+
+		mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+		mtk_wed_device_start_hwrro(&dev->mt76.mmio.wed, wed_irq_mask, false);
+		mt7996_irq_enable(dev, wed_irq_mask);
+		mt7996_irq_disable(dev, 0);
+	}
+
+	if (mtk_wed_device_active(&dev->mt76.mmio.wed_hif2)) {
+		mt76_wr(dev, MT_INT_PCIE1_MASK_CSR,
+			MT_INT_TX_RX_DONE_EXT);
+		mtk_wed_device_start(&dev->mt76.mmio.wed_hif2,
+				     MT_INT_TX_RX_DONE_EXT);
+	}
+
 	/* set the necessary init items */
 	ret = mt7996_mcu_set_eeprom(dev);
 	if (ret)
diff --git a/mt7996/mt7996.h b/mt7996/mt7996.h
index a933e739..f0288cca 100644
--- a/mt7996/mt7996.h
+++ b/mt7996/mt7996.h
@@ -719,6 +719,7 @@ extern const struct mt76_testmode_ops mt7996_testmode_ops;
 struct mt7996_dev *mt7996_mmio_probe(struct device *pdev,
 				     void __iomem *mem_base, u32 device_id);
 void mt7996_wfsys_reset(struct mt7996_dev *dev);
+void mt7996_rro_hw_init(struct mt7996_dev *dev);
 irqreturn_t mt7996_irq_handler(int irq, void *dev_instance);
 u64 __mt7996_get_tsf(struct ieee80211_hw *hw, struct mt7996_vif *mvif);
 int mt7996_register_device(struct mt7996_dev *dev);
diff --git a/wed.c b/wed.c
index 1c6d53c8..61a6badf 100644
--- a/wed.c
+++ b/wed.c
@@ -155,7 +155,7 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 	case MT76_WED_Q_TXFREE:
 		/* WED txfree queue needs ring to be initialized before setup */
 		q->flags = 0;
-		mt76_dma_queue_reset(dev, q);
+		mt76_dma_queue_reset(dev, q, true);
 		mt76_dma_rx_fill(dev, q, false);
 
 		ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
@@ -184,7 +184,7 @@ int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
 		break;
 	case MT76_WED_RRO_Q_IND:
 		q->flags &= ~MT_QFLAG_WED;
-		mt76_dma_queue_reset(dev, q);
+		mt76_dma_queue_reset(dev, q, true);
 		mt76_dma_rx_fill(dev, q, false);
 		mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
 		break;
-- 
2.18.0
