// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Based on the Linux airoha_eth.c driver, heavily rewritten
 * and simplified for U-Boot usage with a single TX/RX ring.
 *
 * Copyright (c) 2024 AIROHA Inc
 * Author: Lorenzo Bianconi <lorenzo@kernel.org>
 *         Christian Marangi <ansuelsmth@gmail.com>
 */

#include <dm.h>
#include <dm/devres.h>
#include <mapmem.h>
#include <net.h>
#include <regmap.h>
#include <reset.h>
#include <syscon.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/time.h>

#define AIROHA_MAX_NUM_GDM_PORTS	1
#define AIROHA_MAX_NUM_QDMA		1
#define AIROHA_MAX_NUM_RSTS		3
#define AIROHA_MAX_NUM_XSI_RSTS		4

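/*
 * Ring geometry for the U-Boot port: a single TX and a single RX ring.
 * "DSCP" here is the descriptor naming inherited from the Linux driver
 * (not DiffServ): TX_DSCP_NUM and RX_DSCP_NUM are descriptor counts.
 * RX buffers come straight from the net_rx_packets[] pool, hence
 * RX_DSCP_NUM matches PKTBUFSRX.
 */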
#define AIROHA_MAX_PACKET_SIZE	2048
#define AIROHA_NUM_TX_RING	1
#define AIROHA_NUM_RX_RING	1
#define AIROHA_NUM_TX_IRQ	1
#define HW_DSCP_NUM		32
#define IRQ_QUEUE_LEN		1
#define TX_DSCP_NUM		16
#define RX_DSCP_NUM		PKTBUFSRX

/* SCU */
#define SCU_SHARE_FEMEM_SEL	0x958

/* SWITCH */
#define SWITCH_MFC		0x10
#define SWITCH_BC_FFP		GENMASK(31, 24)
#define SWITCH_UNM_FFP		GENMASK(23, 16)
#define SWITCH_UNU_FFP		GENMASK(15, 8)
#define SWITCH_PMCR(_n)		(0x3000 + ((_n) * 0x100))
#define SWITCH_IPG_CFG		GENMASK(19, 18)
#define SWITCH_IPG_CFG_NORMAL	FIELD_PREP(SWITCH_IPG_CFG, 0x0)
#define SWITCH_IPG_CFG_SHORT	FIELD_PREP(SWITCH_IPG_CFG, 0x1)
#define SWITCH_IPG_CFG_SHRINK	FIELD_PREP(SWITCH_IPG_CFG, 0x2)
#define SWITCH_MAC_MODE		BIT(16)
#define SWITCH_FORCE_MODE	BIT(15)
#define SWITCH_MAC_TX_EN	BIT(14)
#define SWITCH_MAC_RX_EN	BIT(13)
#define SWITCH_BKOFF_EN		BIT(9)
#define SWITCH_BKPR_EN		BIT(8)
#define SWITCH_FORCE_RX_FC	BIT(5)
#define SWITCH_FORCE_TX_FC	BIT(4)
#define SWITCH_FORCE_SPD	GENMASK(3, 2)
#define SWITCH_FORCE_SPD_10	FIELD_PREP(SWITCH_FORCE_SPD, 0x0)
#define SWITCH_FORCE_SPD_100	FIELD_PREP(SWITCH_FORCE_SPD, 0x1)
#define SWITCH_FORCE_SPD_1000	FIELD_PREP(SWITCH_FORCE_SPD, 0x2)
#define SWITCH_FORCE_DPX	BIT(1)
#define SWITCH_FORCE_LNK	BIT(0)
#define SWITCH_SMACCR0		0x30e4
#define SMACCR0_MAC2		GENMASK(31, 24)
#define SMACCR0_MAC3		GENMASK(23, 16)
#define SMACCR0_MAC4		GENMASK(15, 8)
#define SMACCR0_MAC5		GENMASK(7, 0)
#define SWITCH_SMACCR1		0x30e8
#define SMACCR1_MAC0		GENMASK(15, 8)
#define SMACCR1_MAC1		GENMASK(7, 0)
#define SWITCH_PHY_POLL		0x7018
#define SWITCH_PHY_AP_EN	GENMASK(30, 24)
#define SWITCH_EEE_POLL_EN	GENMASK(22, 16)
#define SWITCH_PHY_PRE_EN	BIT(15)
#define SWITCH_PHY_END_ADDR	GENMASK(12, 8)
#define SWITCH_PHY_ST_ADDR	GENMASK(4, 0)

/* FE */
#define PSE_BASE		0x0100
#define CSR_IFC_BASE		0x0200
#define CDM1_BASE		0x0400
#define GDM1_BASE		0x0500
#define PPE1_BASE		0x0c00

#define CDM2_BASE		0x1400
#define GDM2_BASE		0x1500

#define GDM3_BASE		0x1100
#define GDM4_BASE		0x2500

#define GDM_BASE(_n) \
	((_n) == 4 ? GDM4_BASE : \
	 (_n) == 3 ? GDM3_BASE : \
	 (_n) == 2 ? GDM2_BASE : GDM1_BASE)

#define REG_GDM_FWD_CFG(_n)	GDM_BASE(_n)
#define GDM_DROP_CRC_ERR	BIT(23)
#define GDM_IP4_CKSUM		BIT(22)
#define GDM_TCP_CKSUM		BIT(21)
#define GDM_UDP_CKSUM		BIT(20)
#define GDM_UCFQ_MASK		GENMASK(15, 12)
#define GDM_BCFQ_MASK		GENMASK(11, 8)
#define GDM_MCFQ_MASK		GENMASK(7, 4)
#define GDM_OCFQ_MASK		GENMASK(3, 0)

/* QDMA */
#define REG_QDMA_GLOBAL_CFG			0x0004
#define GLOBAL_CFG_RX_2B_OFFSET_MASK		BIT(31)
#define GLOBAL_CFG_DMA_PREFERENCE_MASK		GENMASK(30, 29)
#define GLOBAL_CFG_CPU_TXR_RR_MASK		BIT(28)
#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK		BIT(27)
#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK	BIT(26)
#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK	BIT(25)
#define GLOBAL_CFG_OAM_MODIFY_MASK		BIT(24)
#define GLOBAL_CFG_RESET_MASK			BIT(23)
#define GLOBAL_CFG_RESET_DONE_MASK		BIT(22)
#define GLOBAL_CFG_MULTICAST_EN_MASK		BIT(21)
#define GLOBAL_CFG_IRQ1_EN_MASK			BIT(20)
#define GLOBAL_CFG_IRQ0_EN_MASK			BIT(19)
#define GLOBAL_CFG_LOOPCNT_EN_MASK		BIT(18)
#define GLOBAL_CFG_RD_BYPASS_WR_MASK		BIT(17)
#define GLOBAL_CFG_QDMA_LOOPBACK_MASK		BIT(16)
#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK		GENMASK(13, 8)
#define GLOBAL_CFG_CHECK_DONE_MASK		BIT(7)
#define GLOBAL_CFG_TX_WB_DONE_MASK		BIT(6)
#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK		GENMASK(5, 4)
#define GLOBAL_CFG_RX_DMA_BUSY_MASK		BIT(3)
#define GLOBAL_CFG_RX_DMA_EN_MASK		BIT(2)
#define GLOBAL_CFG_TX_DMA_BUSY_MASK		BIT(1)
#define GLOBAL_CFG_TX_DMA_EN_MASK		BIT(0)

#define REG_FWD_DSCP_BASE	0x0010
#define REG_FWD_BUF_BASE	0x0014

#define REG_HW_FWD_DSCP_CFG			0x0018
#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK		GENMASK(29, 28)
#define HW_FWD_DSCP_SCATTER_LEN_MASK		GENMASK(17, 16)
#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK	GENMASK(15, 0)

#define REG_INT_STATUS(_n) \
	(((_n) == 4) ? 0x0730 : \
	 ((_n) == 3) ? 0x0724 : \
	 ((_n) == 2) ? 0x0720 : \
	 ((_n) == 1) ? 0x0024 : 0x0020)

#define REG_TX_IRQ_BASE(_n)	((_n) ? 0x0048 : 0x0050)

#define REG_TX_IRQ_CFG(_n)	((_n) ? 0x004c : 0x0054)
#define TX_IRQ_THR_MASK		GENMASK(27, 16)
#define TX_IRQ_DEPTH_MASK	GENMASK(11, 0)

#define REG_IRQ_CLEAR_LEN(_n)	((_n) ? 0x0064 : 0x0058)
#define IRQ_CLEAR_LEN_MASK	GENMASK(7, 0)

#define REG_TX_RING_BASE(_n) \
	(((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))

#define REG_TX_CPU_IDX(_n) \
	(((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))

#define TX_RING_CPU_IDX_MASK	GENMASK(15, 0)

#define REG_TX_DMA_IDX(_n) \
	(((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))

#define TX_RING_DMA_IDX_MASK	GENMASK(15, 0)

#define IRQ_RING_IDX_MASK	GENMASK(20, 16)
#define IRQ_DESC_IDX_MASK	GENMASK(15, 0)

#define REG_RX_RING_BASE(_n) \
	(((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))

#define REG_RX_RING_SIZE(_n) \
	(((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))

#define RX_RING_THR_MASK	GENMASK(31, 16)
#define RX_RING_SIZE_MASK	GENMASK(15, 0)

#define REG_RX_CPU_IDX(_n) \
	(((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))

#define RX_RING_CPU_IDX_MASK	GENMASK(15, 0)

#define REG_RX_DMA_IDX(_n) \
	(((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))

#define REG_RX_DELAY_INT_IDX(_n) \
	(((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))

#define RX_DELAY_INT_MASK	GENMASK(15, 0)

#define RX_RING_DMA_IDX_MASK	GENMASK(15, 0)

#define REG_LMGR_INIT_CFG		0x1000
#define LMGR_INIT_START			BIT(31)
#define LMGR_SRAM_MODE_MASK		BIT(30)
#define HW_FWD_PKTSIZE_OVERHEAD_MASK	GENMASK(27, 20)
#define HW_FWD_DESC_NUM_MASK		GENMASK(16, 0)

/* CTRL */
#define QDMA_DESC_DONE_MASK	BIT(31)
#define QDMA_DESC_DROP_MASK	BIT(30) /* tx: drop - rx: overflow */
#define QDMA_DESC_MORE_MASK	BIT(29) /* more SG elements */
#define QDMA_DESC_DEI_MASK	BIT(25)
#define QDMA_DESC_NO_DROP_MASK	BIT(24)
#define QDMA_DESC_LEN_MASK	GENMASK(15, 0)
/* DATA */
#define QDMA_DESC_NEXT_ID_MASK	GENMASK(15, 0)
/* TX MSG0 */
#define QDMA_ETH_TXMSG_MIC_IDX_MASK	BIT(30)
#define QDMA_ETH_TXMSG_SP_TAG_MASK	GENMASK(29, 14)
#define QDMA_ETH_TXMSG_ICO_MASK		BIT(13)
#define QDMA_ETH_TXMSG_UCO_MASK		BIT(12)
#define QDMA_ETH_TXMSG_TCO_MASK		BIT(11)
#define QDMA_ETH_TXMSG_TSO_MASK		BIT(10)
#define QDMA_ETH_TXMSG_FAST_MASK	BIT(9)
#define QDMA_ETH_TXMSG_OAM_MASK		BIT(8)
#define QDMA_ETH_TXMSG_CHAN_MASK	GENMASK(7, 3)
#define QDMA_ETH_TXMSG_QUEUE_MASK	GENMASK(2, 0)
/* TX MSG1 */
#define QDMA_ETH_TXMSG_NO_DROP		BIT(31)
#define QDMA_ETH_TXMSG_METER_MASK	GENMASK(30, 24) /* 0x7f no meters */
#define QDMA_ETH_TXMSG_FPORT_MASK	GENMASK(23, 20)
#define QDMA_ETH_TXMSG_NBOQ_MASK	GENMASK(19, 15)
#define QDMA_ETH_TXMSG_HWF_MASK		BIT(14)
#define QDMA_ETH_TXMSG_HOP_MASK		BIT(13)
#define QDMA_ETH_TXMSG_PTP_MASK		BIT(12)
#define QDMA_ETH_TXMSG_ACNT_G1_MASK	GENMASK(10, 6) /* 0x1f do not count */
#define QDMA_ETH_TXMSG_ACNT_G0_MASK	GENMASK(5, 0) /* 0x3f do not count */

/* RX MSG1 */
#define QDMA_ETH_RXMSG_DEI_MASK		BIT(31)
#define QDMA_ETH_RXMSG_IP6_MASK		BIT(30)
#define QDMA_ETH_RXMSG_IP4_MASK		BIT(29)
#define QDMA_ETH_RXMSG_IP4F_MASK	BIT(28)
#define QDMA_ETH_RXMSG_L4_VALID_MASK	BIT(27)
#define QDMA_ETH_RXMSG_L4F_MASK		BIT(26)
#define QDMA_ETH_RXMSG_SPORT_MASK	GENMASK(25, 21)
#define QDMA_ETH_RXMSG_CRSN_MASK	GENMASK(20, 16)
#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK	GENMASK(15, 0)

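/*
 * QDMA hardware descriptor, shared between TX and RX rings: ctrl holds
 * the DONE flag and the buffer length, addr the DMA buffer address,
 * data the next-descriptor index, and msg0..msg3 carry the per-direction
 * metadata described by the TX MSG0/MSG1 and RX MSG1 fields above.
 */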
struct airoha_qdma_desc {
	__le32 rsv;
	__le32 ctrl;
	__le32 addr;
	__le32 data;
	__le32 msg0;
	__le32 msg1;
	__le32 msg2;
	__le32 msg3;
};

struct airoha_qdma_fwd_desc {
	__le32 addr;
	__le32 ctrl0;
	__le32 ctrl1;
	__le32 ctrl2;
	__le32 msg0;
	__le32 msg1;
	__le32 rsv0;
	__le32 rsv1;
};

struct airoha_queue {
	struct airoha_qdma_desc *desc;
	u16 head;

	int ndesc;
};

struct airoha_tx_irq_queue {
	struct airoha_qdma *qdma;

	int size;
	u32 *q;
};

struct airoha_qdma {
	struct airoha_eth *eth;
	void __iomem *regs;

	struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];

	struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
	struct airoha_queue q_rx[AIROHA_NUM_RX_RING];

	/* descriptor and packet buffers for qdma hw forward */
	struct {
		void *desc;
		void *q;
	} hfwd;
};

struct airoha_gdm_port {
	struct airoha_qdma *qdma;
	int id;
};

struct airoha_eth {
	void __iomem *fe_regs;
	void __iomem *switch_regs;

	struct reset_ctl_bulk rsts;
	struct reset_ctl_bulk xsi_rsts;

	struct airoha_qdma qdma[AIROHA_MAX_NUM_QDMA];
	struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
};

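/*
 * Plain MMIO accessors plus a read-modify-write helper; the airoha_fe_*,
 * airoha_qdma_* and airoha_switch_* macros below simply bind them to the
 * corresponding register block.
 */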
static u32 airoha_rr(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void airoha_wr(void __iomem *base, u32 offset, u32 val)
{
	writel(val, base + offset);
}

static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
{
	val |= (airoha_rr(base, offset) & ~mask);
	airoha_wr(base, offset, val);

	return val;
}

#define airoha_fe_rr(eth, offset) \
	airoha_rr((eth)->fe_regs, (offset))
#define airoha_fe_wr(eth, offset, val) \
	airoha_wr((eth)->fe_regs, (offset), (val))
#define airoha_fe_rmw(eth, offset, mask, val) \
	airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
#define airoha_fe_set(eth, offset, val) \
	airoha_rmw((eth)->fe_regs, (offset), 0, (val))
#define airoha_fe_clear(eth, offset, val) \
	airoha_rmw((eth)->fe_regs, (offset), (val), 0)

#define airoha_qdma_rr(qdma, offset) \
	airoha_rr((qdma)->regs, (offset))
#define airoha_qdma_wr(qdma, offset, val) \
	airoha_wr((qdma)->regs, (offset), (val))
#define airoha_qdma_rmw(qdma, offset, mask, val) \
	airoha_rmw((qdma)->regs, (offset), (mask), (val))
#define airoha_qdma_set(qdma, offset, val) \
	airoha_rmw((qdma)->regs, (offset), 0, (val))
#define airoha_qdma_clear(qdma, offset, val) \
	airoha_rmw((qdma)->regs, (offset), (val), 0)

#define airoha_switch_wr(eth, offset, val) \
	airoha_wr((eth)->switch_regs, (offset), (val))

static void airoha_fe_maccr_init(struct airoha_eth *eth)
{
	int p;

	for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
		/* Disable any kind of CRC drop or offload */
		airoha_fe_wr(eth, REG_GDM_FWD_CFG(p), 0);
	}
}

static int airoha_fe_init(struct airoha_eth *eth)
{
	airoha_fe_maccr_init(eth);

	return 0;
}

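/*
 * Re-arm one RX descriptor with a fresh packet buffer: flush the buffer
 * from cache, point the descriptor at it, chain it to the next ring slot
 * and hand it back to the hardware with the full PKTSIZE_ALIGN length,
 * then flush the descriptor itself.
 */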
static void airoha_qdma_reset_rx_desc(struct airoha_queue *q, int index,
				      uchar *rx_packet)
{
	struct airoha_qdma_desc *desc;
	u32 val;

	desc = &q->desc[index];
	index = (index + 1) % q->ndesc;

	dma_map_single(rx_packet, PKTSIZE_ALIGN, DMA_TO_DEVICE);

	WRITE_ONCE(desc->msg0, cpu_to_le32(0));
	WRITE_ONCE(desc->msg1, cpu_to_le32(0));
	WRITE_ONCE(desc->msg2, cpu_to_le32(0));
	WRITE_ONCE(desc->msg3, cpu_to_le32(0));
	WRITE_ONCE(desc->addr, cpu_to_le32(virt_to_phys(rx_packet)));
	WRITE_ONCE(desc->data, cpu_to_le32(index));
	val = FIELD_PREP(QDMA_DESC_LEN_MASK, PKTSIZE_ALIGN);
	WRITE_ONCE(desc->ctrl, cpu_to_le32(val));

	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);
}

static void airoha_qdma_init_rx_desc(struct airoha_queue *q)
{
	int i;

	for (i = 0; i < q->ndesc; i++)
		airoha_qdma_reset_rx_desc(q, i, net_rx_packets[i]);
}

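/*
 * Allocate the RX descriptor ring and program its base, size and initial
 * CPU/DMA indices. The CPU index is parked at the last slot while the DMA
 * index starts at zero, so the whole ring is initially hardware-owned.
 */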
static int airoha_qdma_init_rx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int ndesc)
{
	int qid = q - &qdma->q_rx[0];
	unsigned long dma_addr;

	q->ndesc = ndesc;
	q->head = 0;

	q->desc = dma_alloc_coherent(q->ndesc * sizeof(*q->desc), &dma_addr);
	if (!q->desc)
		return -ENOMEM;

	memset(q->desc, 0, q->ndesc * sizeof(*q->desc));
	dma_map_single(q->desc, q->ndesc * sizeof(*q->desc), DMA_TO_DEVICE);

	airoha_qdma_wr(qdma, REG_RX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid),
			RX_RING_SIZE_MASK,
			FIELD_PREP(RX_RING_SIZE_MASK, ndesc));

	airoha_qdma_rmw(qdma, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
			FIELD_PREP(RX_RING_THR_MASK, 0));
	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->ndesc - 1));
	airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
			FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));

	return 0;
}

static int airoha_qdma_init_rx(struct airoha_qdma *qdma)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		int err;

		err = airoha_qdma_init_rx_queue(&qdma->q_rx[i], qdma,
						RX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
				     struct airoha_qdma *qdma, int size)
{
	int qid = q - &qdma->q_tx[0];
	unsigned long dma_addr;

	q->ndesc = size;
	q->head = 0;

	q->desc = dma_alloc_coherent(q->ndesc * sizeof(*q->desc), &dma_addr);
	if (!q->desc)
		return -ENOMEM;

	memset(q->desc, 0, q->ndesc * sizeof(*q->desc));
	dma_map_single(q->desc, q->ndesc * sizeof(*q->desc), DMA_TO_DEVICE);

	airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
	airoha_qdma_rmw(qdma, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
			FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));

	return 0;
}

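/*
 * The TX "IRQ" queue is a ring of u32 completion slots that the hardware
 * fills as TX descriptors are consumed (see IRQ_RING_IDX_MASK and
 * IRQ_DESC_IDX_MASK). This U-Boot port never parses the entries; it only
 * sizes the ring here and acknowledges completions via REG_IRQ_CLEAR_LEN.
 */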
static int airoha_qdma_tx_irq_init(struct airoha_tx_irq_queue *irq_q,
				   struct airoha_qdma *qdma, int size)
{
	int id = irq_q - &qdma->q_tx_irq[0];
	unsigned long dma_addr;

	irq_q->q = dma_alloc_coherent(size * sizeof(u32), &dma_addr);
	if (!irq_q->q)
		return -ENOMEM;

	/* memset() uses only the low byte of its value, so 0xff fills the
	 * whole queue with all-ones markers.
	 */
	memset(irq_q->q, 0xff, size * sizeof(u32));
	irq_q->size = size;
	irq_q->qdma = qdma;

	dma_map_single(irq_q->q, size * sizeof(u32), DMA_TO_DEVICE);

	airoha_qdma_wr(qdma, REG_TX_IRQ_BASE(id), dma_addr);
	airoha_qdma_rmw(qdma, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
			FIELD_PREP(TX_IRQ_DEPTH_MASK, size));

	return 0;
}

static int airoha_qdma_init_tx(struct airoha_qdma *qdma)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
		err = airoha_qdma_tx_irq_init(&qdma->q_tx_irq[i], qdma,
					      IRQ_QUEUE_LEN);
		if (err)
			return err;
	}

	for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
		err = airoha_qdma_init_tx_queue(&qdma->q_tx[i], qdma,
						TX_DSCP_NUM);
		if (err)
			return err;
	}

	return 0;
}

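/*
 * Set up the buffers used by the QDMA hardware-forwarding engine: an
 * array of forward descriptors and a packet buffer area. Afterwards kick
 * the link manager and poll until LMGR_INIT_START clears again.
 */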
static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
{
	unsigned long dma_addr;
	u32 status;
	int size;

	size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
	qdma->hfwd.desc = dma_alloc_coherent(size, &dma_addr);
	if (!qdma->hfwd.desc)
		return -ENOMEM;

	memset(qdma->hfwd.desc, 0, size);
	dma_map_single(qdma->hfwd.desc, size, DMA_TO_DEVICE);

	airoha_qdma_wr(qdma, REG_FWD_DSCP_BASE, dma_addr);

	size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
	qdma->hfwd.q = dma_alloc_coherent(size, &dma_addr);
	if (!qdma->hfwd.q)
		return -ENOMEM;

	memset(qdma->hfwd.q, 0, size);
	dma_map_single(qdma->hfwd.q, size, DMA_TO_DEVICE);

	airoha_qdma_wr(qdma, REG_FWD_BUF_BASE, dma_addr);

	airoha_qdma_rmw(qdma, REG_HW_FWD_DSCP_CFG,
			HW_FWD_DSCP_PAYLOAD_SIZE_MASK |
			HW_FWD_DSCP_MIN_SCATTER_LEN_MASK,
			FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0) |
			FIELD_PREP(HW_FWD_DSCP_MIN_SCATTER_LEN_MASK, 1));
	airoha_qdma_rmw(qdma, REG_LMGR_INIT_CFG,
			LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
			HW_FWD_DESC_NUM_MASK,
			FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
			LMGR_INIT_START);

	udelay(1000);
	return read_poll_timeout(airoha_qdma_rr, status,
				 !(status & LMGR_INIT_START), USEC_PER_MSEC,
				 30 * USEC_PER_MSEC, qdma,
				 REG_LMGR_INIT_CFG);
}

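/*
 * One-time QDMA global setup: clear stale interrupt status, program the
 * global config (TX ring round-robin, payload byte swap, write-back on
 * done) and mask the RX delay interrupts, which are unused when polling.
 */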
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
	int i;

	/* clear pending irqs */
	for (i = 0; i < 2; i++)
		airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);

	airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
		       GLOBAL_CFG_CPU_TXR_RR_MASK |
		       GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
		       GLOBAL_CFG_IRQ0_EN_MASK |
		       GLOBAL_CFG_TX_WB_DONE_MASK |
		       FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 3));

	/* disable qdma rx delay interrupt */
	for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
		if (!qdma->q_rx[i].ndesc)
			continue;

		airoha_qdma_clear(qdma, REG_RX_DELAY_INT_IDX(i),
				  RX_DELAY_INT_MASK);
	}

	return 0;
}

static int airoha_qdma_init(struct udevice *dev,
			    struct airoha_eth *eth,
			    struct airoha_qdma *qdma)
{
	int err;

	qdma->eth = eth;
	/* dev_remap_addr_name() returns NULL on failure, not an ERR_PTR */
	qdma->regs = dev_remap_addr_name(dev, "qdma0");
	if (!qdma->regs)
		return -ENODEV;

	err = airoha_qdma_init_rx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_tx(qdma);
	if (err)
		return err;

	err = airoha_qdma_init_hfwd_queues(qdma);
	if (err)
		return err;

	return airoha_qdma_hw_init(qdma);
}

static int airoha_hw_init(struct udevice *dev,
			  struct airoha_eth *eth)
{
	int ret, i;

	/* disable xsi */
	ret = reset_assert_bulk(&eth->xsi_rsts);
	if (ret)
		return ret;

	ret = reset_assert_bulk(&eth->rsts);
	if (ret)
		return ret;

	mdelay(20);

	ret = reset_deassert_bulk(&eth->rsts);
	if (ret)
		return ret;

	mdelay(20);

	ret = airoha_fe_init(eth);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(eth->qdma); i++) {
		ret = airoha_qdma_init(dev, eth, &eth->qdma[i]);
		if (ret)
			return ret;
	}

	return 0;
}

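/*
 * The embedded switch has no driver of its own in U-Boot: look up its
 * node by compatible, map the registers directly and do the minimal
 * setup needed for the CPU port to pass traffic (flooding, forced-link
 * PMCR, PHY polling).
 */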
static int airoha_switch_init(struct udevice *dev, struct airoha_eth *eth)
{
	ofnode switch_node;
	fdt_addr_t addr;

	switch_node = ofnode_by_compatible(ofnode_null(), "airoha,en7581-switch");
	if (!ofnode_valid(switch_node))
		return -EINVAL;

	addr = ofnode_get_addr(switch_node);
	if (addr == FDT_ADDR_T_NONE)
		return -ENOMEM;

	/* The switch has no udevice; map its address and set up flooding
	 * and the CPU port by hand.
	 */
	eth->switch_regs = map_sysmem(addr, 0);

	/* Flood broadcast, unknown-multicast and unknown-unicast frames
	 * to every port.
	 */
	airoha_switch_wr(eth, SWITCH_MFC, SWITCH_BC_FFP | SWITCH_UNM_FFP |
			 SWITCH_UNU_FFP);

	/* Force the CPU port (port 6) PMCR: link up, 1000M full duplex,
	 * TX/RX flow control, MAC TX/RX enabled.
	 */
	airoha_switch_wr(eth, SWITCH_PMCR(6),
			 SWITCH_IPG_CFG_SHORT | SWITCH_MAC_MODE |
			 SWITCH_FORCE_MODE | SWITCH_MAC_TX_EN |
			 SWITCH_MAC_RX_EN | SWITCH_BKOFF_EN | SWITCH_BKPR_EN |
			 SWITCH_FORCE_RX_FC | SWITCH_FORCE_TX_FC |
			 SWITCH_FORCE_SPD_1000 | SWITCH_FORCE_DPX |
			 SWITCH_FORCE_LNK);

	/* Port 3 suffers from a sideband signal error, so enable PHY
	 * auto polling instead.
	 */
	airoha_switch_wr(eth, SWITCH_PHY_POLL,
			 FIELD_PREP(SWITCH_PHY_AP_EN, 0x7f) |
			 FIELD_PREP(SWITCH_EEE_POLL_EN, 0x7f) |
			 SWITCH_PHY_PRE_EN |
			 FIELD_PREP(SWITCH_PHY_END_ADDR, 0xc) |
			 FIELD_PREP(SWITCH_PHY_ST_ADDR, 0x8));

	return 0;
}

static int airoha_eth_probe(struct udevice *dev)
{
	struct airoha_eth *eth = dev_get_priv(dev);
	struct regmap *scu_regmap;
	ofnode scu_node;
	int ret;

	scu_node = ofnode_by_compatible(ofnode_null(), "airoha,en7581-scu");
	if (!ofnode_valid(scu_node))
		return -EINVAL;

	scu_regmap = syscon_node_to_regmap(scu_node);
	if (IS_ERR(scu_regmap))
		return PTR_ERR(scu_regmap);

	/* FEMEM_SEL apparently defaults to Memory (0x1), which blocks all
	 * access to the QDMA and Frame Engine registers and makes every
	 * read return 0xdeadbeef; select the register space instead.
	 */
	regmap_write(scu_regmap, SCU_SHARE_FEMEM_SEL, 0x0);

	eth->fe_regs = dev_remap_addr_name(dev, "fe");
	if (!eth->fe_regs)
		return -ENOMEM;

	eth->rsts.resets = devm_kcalloc(dev, AIROHA_MAX_NUM_RSTS,
					sizeof(struct reset_ctl), GFP_KERNEL);
	if (!eth->rsts.resets)
		return -ENOMEM;
	eth->rsts.count = AIROHA_MAX_NUM_RSTS;

	eth->xsi_rsts.resets = devm_kcalloc(dev, AIROHA_MAX_NUM_XSI_RSTS,
					    sizeof(struct reset_ctl), GFP_KERNEL);
	if (!eth->xsi_rsts.resets)
		return -ENOMEM;
	eth->xsi_rsts.count = AIROHA_MAX_NUM_XSI_RSTS;

	ret = reset_get_by_name(dev, "fe", &eth->rsts.resets[0]);
	if (ret)
		return ret;

	ret = reset_get_by_name(dev, "pdma", &eth->rsts.resets[1]);
	if (ret)
		return ret;

	ret = reset_get_by_name(dev, "qdma", &eth->rsts.resets[2]);
	if (ret)
		return ret;

	ret = reset_get_by_name(dev, "hsi0-mac", &eth->xsi_rsts.resets[0]);
	if (ret)
		return ret;

	ret = reset_get_by_name(dev, "hsi1-mac", &eth->xsi_rsts.resets[1]);
	if (ret)
		return ret;

	ret = reset_get_by_name(dev, "hsi-mac", &eth->xsi_rsts.resets[2]);
	if (ret)
		return ret;

	ret = reset_get_by_name(dev, "xfp-mac", &eth->xsi_rsts.resets[3]);
	if (ret)
		return ret;

	ret = airoha_hw_init(dev, eth);
	if (ret)
		return ret;

	return airoha_switch_init(dev, eth);
}

static int airoha_eth_init(struct udevice *dev)
{
	struct airoha_eth *eth = dev_get_priv(dev);
	struct airoha_qdma *qdma = &eth->qdma[0];
	struct airoha_queue *q;
	int qid;

	qid = 0;
	q = &qdma->q_rx[qid];

	airoha_qdma_init_rx_desc(q);

	airoha_qdma_set(qdma, REG_QDMA_GLOBAL_CFG,
			GLOBAL_CFG_TX_DMA_EN_MASK |
			GLOBAL_CFG_RX_DMA_EN_MASK);

	return 0;
}

static void airoha_eth_stop(struct udevice *dev)
{
	struct airoha_eth *eth = dev_get_priv(dev);
	struct airoha_qdma *qdma = &eth->qdma[0];

	airoha_qdma_clear(qdma, REG_QDMA_GLOBAL_CFG,
			  GLOBAL_CFG_TX_DMA_EN_MASK |
			  GLOBAL_CFG_RX_DMA_EN_MASK);
}

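/*
 * Queue a single frame on TX ring 0 with forward port 1 and metering
 * disabled (0x7f), then busy-poll the descriptor DONE bit for up to
 * ~100us and acknowledge one completion entry before returning.
 */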
static int airoha_eth_send(struct udevice *dev, void *packet, int length)
{
	struct airoha_eth *eth = dev_get_priv(dev);
	struct airoha_qdma *qdma = &eth->qdma[0];
	struct airoha_qdma_desc *desc;
	struct airoha_queue *q;
	dma_addr_t dma_addr;
	u32 msg0, msg1;
	int qid, index;
	u8 fport;
	u32 val;
	int i;

	dma_addr = dma_map_single(packet, length, DMA_TO_DEVICE);

	qid = 0;
	q = &qdma->q_tx[qid];
	desc = &q->desc[q->head];
	index = (q->head + 1) % q->ndesc;

	fport = 1;

	msg0 = 0;
	msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
	       FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);

	val = FIELD_PREP(QDMA_DESC_LEN_MASK, length);
	WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
	WRITE_ONCE(desc->addr, cpu_to_le32(dma_addr));
	val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
	WRITE_ONCE(desc->data, cpu_to_le32(val));
	WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
	WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
	WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));

	dma_map_single(desc, sizeof(*desc), DMA_TO_DEVICE);

	airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
			FIELD_PREP(TX_RING_CPU_IDX_MASK, index));

	for (i = 0; i < 100; i++) {
		dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
				 DMA_FROM_DEVICE);
		if (desc->ctrl & cpu_to_le32(QDMA_DESC_DONE_MASK))
			break;

		udelay(1);
	}

	/* Return an error if the descriptor was never acknowledged */
	if (!(desc->ctrl & cpu_to_le32(QDMA_DESC_DONE_MASK)))
		return -EAGAIN;

	q->head = index;
	airoha_qdma_rmw(qdma, REG_IRQ_CLEAR_LEN(0),
			IRQ_CLEAR_LEN_MASK, 1);

	return 0;
}

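/*
 * Poll RX ring 0: invalidate the current descriptor, bail out with
 * -EAGAIN if the hardware has not marked it DONE yet, otherwise
 * invalidate the payload and hand the buffer to the network stack.
 * The descriptor is recycled later in the free_pkt callback.
 */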
static int airoha_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct airoha_eth *eth = dev_get_priv(dev);
	struct airoha_qdma *qdma = &eth->qdma[0];
	struct airoha_qdma_desc *desc;
	struct airoha_queue *q;
	u16 length;
	int qid;

	qid = 0;
	q = &qdma->q_rx[qid];
	desc = &q->desc[q->head];

	dma_unmap_single(virt_to_phys(desc), sizeof(*desc),
			 DMA_FROM_DEVICE);

	if (!(desc->ctrl & cpu_to_le32(QDMA_DESC_DONE_MASK)))
		return -EAGAIN;

	length = FIELD_GET(QDMA_DESC_LEN_MASK, le32_to_cpu(desc->ctrl));
	dma_unmap_single(le32_to_cpu(desc->addr), length,
			 DMA_FROM_DEVICE);

	*packetp = phys_to_virt(le32_to_cpu(desc->addr));

	return length;
}

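/*
 * Called once the stack is done with a received packet: re-arm the
 * consumed descriptor with the same buffer, return ring ownership to the
 * hardware by advancing the CPU index, and move our head pointer on.
 */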
static int arht_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct airoha_eth *eth = dev_get_priv(dev);
	struct airoha_qdma *qdma = &eth->qdma[0];
	struct airoha_queue *q;
	int qid;

	if (!packet)
		return 0;

	qid = 0;
	q = &qdma->q_rx[qid];

	dma_map_single(packet, length, DMA_TO_DEVICE);

	airoha_qdma_reset_rx_desc(q, q->head, packet);

	airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
			FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
	q->head = (q->head + 1) % q->ndesc;

	return 0;
}

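/*
 * Program the station MAC address into the switch SMACCR0/SMACCR1
 * registers; mac[0] is the most significant byte of the address.
 */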
static int arht_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct airoha_eth *eth = dev_get_priv(dev);
	unsigned char *mac = pdata->enetaddr;
	u32 macaddr_lsb, macaddr_msb;

	macaddr_lsb = FIELD_PREP(SMACCR0_MAC2, mac[2]) |
		      FIELD_PREP(SMACCR0_MAC3, mac[3]) |
		      FIELD_PREP(SMACCR0_MAC4, mac[4]) |
		      FIELD_PREP(SMACCR0_MAC5, mac[5]);
	macaddr_msb = FIELD_PREP(SMACCR1_MAC1, mac[1]) |
		      FIELD_PREP(SMACCR1_MAC0, mac[0]);

	/* Set MAC for Switch */
	airoha_switch_wr(eth, SWITCH_SMACCR0, macaddr_lsb);
	airoha_switch_wr(eth, SWITCH_SMACCR1, macaddr_msb);

	return 0;
}

static const struct udevice_id airoha_eth_ids[] = {
	{ .compatible = "airoha,en7581-eth" },
	{ /* sentinel */ }
};

static const struct eth_ops airoha_eth_ops = {
	.start = airoha_eth_init,
	.stop = airoha_eth_stop,
	.send = airoha_eth_send,
	.recv = airoha_eth_recv,
	.free_pkt = arht_eth_free_pkt,
	.write_hwaddr = arht_eth_write_hwaddr,
};

U_BOOT_DRIVER(airoha_eth) = {
	.name = "airoha-eth",
	.id = UCLASS_ETH,
	.of_match = airoha_eth_ids,
	.probe = airoha_eth_probe,
	.ops = &airoha_eth_ops,
	.priv_auto = sizeof(struct airoha_eth),
	.plat_auto = sizeof(struct eth_pdata),
};