// SPDX-License-Identifier: GPL-2.0+
/*
 * drivers/net/ravb.c
 * This file is the driver for the Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 */

#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <wait_bit.h>
#include <asm/io.h>
#include <asm/global_data.h>
#include <asm/gpio.h>

/* Registers */
#define RAVB_REG_CCC		0x000
#define RAVB_REG_DBAT		0x004
#define RAVB_REG_CSR		0x00C
#define RAVB_REG_APSR		0x08C
#define RAVB_REG_RCR		0x090
#define RAVB_REG_TGC		0x300
#define RAVB_REG_TCCR		0x304
#define RAVB_REG_RIC0		0x360
#define RAVB_REG_RIC1		0x368
#define RAVB_REG_RIC2		0x370
#define RAVB_REG_TIC		0x378
#define RAVB_REG_ECMR		0x500
#define RAVB_REG_RFLR		0x508
#define RAVB_REG_ECSIPR		0x518
#define RAVB_REG_PIR		0x520
#define RAVB_REG_GECMR		0x5b0
#define RAVB_REG_MAHR		0x5c0
#define RAVB_REG_MALR		0x5c8

#define CCC_OPC_CONFIG		BIT(0)
#define CCC_OPC_OPERATION	BIT(1)
#define CCC_BOC			BIT(20)

#define CSR_OPS			0x0000000F
#define CSR_OPS_CONFIG		BIT(1)

#define APSR_RDM		BIT(13)
#define APSR_TDM		BIT(14)

#define TCCR_TSRQ0		BIT(0)

#define RFLR_RFL_MIN		0x05EE

#define PIR_MDI			BIT(3)
#define PIR_MDO			BIT(2)
#define PIR_MMD			BIT(1)
#define PIR_MDC			BIT(0)

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)
#define ECMR_TE			BIT(5)
#define ECMR_DM			BIT(1)
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

/* DMA Descriptors */
#define RAVB_NUM_BASE_DESC		16
#define RAVB_NUM_TX_DESC		8
#define RAVB_NUM_RX_DESC		8

#define RAVB_TX_QUEUE_OFFSET		0
#define RAVB_RX_QUEUE_OFFSET		4

#define RAVB_DESC_DT(n)			((n) << 28)
#define RAVB_DESC_DT_FSINGLE		RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX		RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS		RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY		RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY		RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK		RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)			(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK		0xfff

#define RAVB_RX_DESC_MSC_MC		BIT(23)
#define RAVB_RX_DESC_MSC_CEEF		BIT(22)
#define RAVB_RX_DESC_MSC_CRL		BIT(21)
#define RAVB_RX_DESC_MSC_FRE		BIT(20)
#define RAVB_RX_DESC_MSC_RTLF		BIT(19)
#define RAVB_RX_DESC_MSC_RTSF		BIT(18)
#define RAVB_RX_DESC_MSC_RFE		BIT(17)
#define RAVB_RX_DESC_MSC_CRC		BIT(16)
#define RAVB_RX_DESC_MSC_MASK		(0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS		1000

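/*
 * Per-SoC operations: E-MAC init, AVB-DMAC init and link configuration
 * hooks that differ between the supported SoC families. A pointer to
 * this structure is attached as driver data to each compatible entry
 * and looked up with dev_get_driver_data().
 */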
struct ravb_device_ops {
	void (*mac_init)(struct udevice *dev);
	void (*dmac_init)(struct udevice *dev);
	void (*config)(struct udevice *dev);
};

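/*
 * Single AVB-DMAC descriptor: the ctrl word carries the descriptor type
 * (RAVB_DESC_DT_*) and data size (RAVB_DESC_DS), dptr carries the buffer
 * or link-target address.
 */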
struct ravb_desc {
	u32 ctrl;
	u32 dptr;
};

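/*
 * One RX ring entry: a data descriptor for the embedded packet buffer, a
 * link descriptor chaining to the next entry, and padding so the packet
 * buffer starts 64 bytes (8 + 8 + 48) into the entry, which should keep
 * each buffer cache-line aligned for ARCH_DMA_MINALIGN of 64 or less.
 */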
struct ravb_rxdesc {
	struct ravb_desc data;
	struct ravb_desc link;
	u8 __pad[48];
	u8 packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];
	u32 rx_desc_idx;
	u32 tx_desc_idx;

	struct phy_device *phydev;
	struct mii_dev *bus;
	void __iomem *iobase;
	struct clk_bulk clks;
};

static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
	invalidate_dcache_range(start, end);
}

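/*
 * eth_ops .send callback: queue one packet on a single FSINGLE TX
 * descriptor, request transmission and poll the descriptor type until
 * the AVB-DMAC has consumed it or RAVB_TX_TIMEOUT_MS expires.
 */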
static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	unsigned int start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);
	return 0;
}

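/*
 * eth_ops .recv callback: if the AVB-DMAC has filled the current RX
 * descriptor, hand its packet buffer to the network stack; frames with
 * receive errors are reported with zero length.
 */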
static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len = 0;
	u8 *packet;

	/* Check if the rx descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK)
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
	else
		len = desc->data.ctrl & RAVB_DESC_DS_MASK;

	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);

	*packetp = packet;
	return len;
}

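/*
 * eth_ops .free_pkt callback: hand the just-processed RX descriptor back
 * to the AVB-DMAC as FEMPTY and advance to the next ring entry.
 */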
static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

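/*
 * Put the AVB-DMAC back into CONFIG mode and wait for the mode change to
 * be reflected in the CSR operating-status bits.
 */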
static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check that the operating mode has changed to config mode. */
	return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
				 CSR_OPS_CONFIG, true, 100, true);
}

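/*
 * Initialize the descriptor base address table (one entry per queue),
 * mark every entry as EOS until a queue is attached, and register the
 * table address in DBAT.
 */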
static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

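/*
 * Build the TX descriptor ring: all entries start out EEMPTY, the last
 * entry is a LINKFIX descriptor wrapping back to the start, and the TX
 * queue entry of the base table is linked to the ring.
 */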
static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

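/*
 * Build the RX descriptor ring: each entry advertises its embedded
 * packet buffer as EEMPTY and links to the next entry, the last link
 * wraps back to the start, and the RX queue entry of the base table is
 * linked to the ring.
 */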
static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the rx descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

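/*
 * Connect to the PHY over the bitbanged MDIO bus, restrict the
 * advertised link modes and apply the generic PHY configuration.
 */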
static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct phy_device *phydev;
	int reg;

	phydev = phy_connect(eth->bus, -1, dev, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	eth->phydev = phydev;

	phydev->supported &= SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			     SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
			     SUPPORTED_Asym_Pause;

	if (pdata->max_speed != 1000) {
		phydev->supported &= ~SUPPORTED_1000baseT_Full;
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	phy_config(phydev);

	return 0;
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static void ravb_mac_init(struct udevice *dev)
{
	struct ravb_device_ops *device_ops =
		(struct ravb_device_ops *)dev_get_driver_data(dev);
	struct ravb_priv *eth = dev_get_priv(dev);

	device_ops->mac_init(dev);

	/*
	 * Set receive frame length
	 *
	 * The length set here describes the frame from the destination address
	 * up to and including the CRC data. However only the frame data,
	 * excluding the CRC, are transferred to memory. To allow for the
	 * largest frames add the CRC length to the maximum Rx descriptor size.
	 */
	writel(RFLR_RFL_MIN + ETH_FCS_LEN, eth->iobase + RAVB_REG_RFLR);
}

static void ravb_mac_init_rcar(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Disable MAC Interrupt */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_device_ops *device_ops =
		(struct ravb_device_ops *)dev_get_driver_data(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	device_ops->dmac_init(dev);
	return 0;
}

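/*
 * R-Car specific AVB-DMAC setup: RX and FIFO configuration plus the
 * internal RGMII RX/TX clock delays, taken from the optional
 * {rx,tx}-internal-delay-ps DT properties or, failing that, derived from
 * the rgmii-*id PHY mode. R-Car E3/D3 do not provide these delays.
 */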
static void ravb_dmac_init_rcar(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	int mode = 0;
	unsigned int delay;
	bool explicit_delay = false;

	/* AVB rx set */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
	if ((renesas_get_cpu_type() == RENESAS_CPU_TYPE_R8A77990) ||
	    (renesas_get_cpu_type() == RENESAS_CPU_TYPE_R8A77995))
		return;

	if (!dev_read_u32(dev, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		if (delay) {
			mode |= APSR_RDM;
			explicit_delay = true;
		}
	}

	if (!dev_read_u32(dev, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		if (delay) {
			mode |= APSR_TDM;
			explicit_delay = true;
		}
	}

	if (!explicit_delay) {
		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
			mode |= APSR_RDM;

		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			mode |= APSR_TDM;
	}

	writel(mode, eth->iobase + RAVB_REG_APSR);
}

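/*
 * Common link bring-up: program the AVB-DMAC and E-MAC, set the MAC
 * address, start the PHY and finally apply the SoC-specific speed and
 * duplex settings once the link is up.
 */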
static int ravb_config(struct udevice *dev)
{
	struct ravb_device_ops *device_ops =
		(struct ravb_device_ops *)dev_get_driver_data(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	int ret;

	/* Configure AVB-DMAC register */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(dev);
	ravb_write_hwaddr(dev);

	ret = phy_startup(phy);
	if (ret)
		return ret;

	device_ops->config(dev);
	return 0;
}

static void ravb_config_rcar(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);
}

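/*
 * eth_ops .start callback: reset the controller, rebuild the descriptor
 * rings, configure the MAC, DMAC and PHY, then switch the AVB-DMAC to
 * OPERATION mode.
 */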
static int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = ravb_reset(dev);
	if (ret)
		return ret;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		return ret;

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;
}

static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	phy_shutdown(eth->phydev);
	ravb_reset(dev);
}

/* Bitbang MDIO access */
static int ravb_bb_mdio_active(struct mii_dev *miidev)
{
	struct ravb_priv *eth = miidev->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

static int ravb_bb_mdio_tristate(struct mii_dev *miidev)
{
	struct ravb_priv *eth = miidev->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

static int ravb_bb_set_mdio(struct mii_dev *miidev, int v)
{
	struct ravb_priv *eth = miidev->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

static int ravb_bb_get_mdio(struct mii_dev *miidev, int *v)
{
	struct ravb_priv *eth = miidev->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

static int ravb_bb_set_mdc(struct mii_dev *miidev, int v)
{
	struct ravb_priv *eth = miidev->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

static int ravb_bb_delay(struct mii_dev *miidev)
{
	udelay(10);

	return 0;
}

static const struct bb_miiphy_bus_ops ravb_bb_miiphy_bus_ops = {
	.mdio_active	= ravb_bb_mdio_active,
	.mdio_tristate	= ravb_bb_mdio_tristate,
	.set_mdio	= ravb_bb_set_mdio,
	.get_mdio	= ravb_bb_get_mdio,
	.set_mdc	= ravb_bb_set_mdc,
	.delay		= ravb_bb_delay,
};

static int ravb_bb_miiphy_read(struct mii_dev *miidev, int addr,
			       int devad, int reg)
{
	return bb_miiphy_read(miidev, &ravb_bb_miiphy_bus_ops,
			      addr, devad, reg);
}

static int ravb_bb_miiphy_write(struct mii_dev *miidev, int addr,
				int devad, int reg, u16 value)
{
	return bb_miiphy_write(miidev, &ravb_bb_miiphy_bus_ops,
			       addr, devad, reg, value);
}

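/*
 * Map the controller registers, get the clocks, register the bitbanged
 * MDIO bus, then enable the clocks, reset the controller and connect to
 * the PHY. Failures unwind in reverse order.
 */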
static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	ret = clk_get_bulk(dev, &eth->clks);
	if (ret < 0)
		goto err_clk_get;

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = ravb_bb_miiphy_read;
	mdiodev->write = ravb_bb_miiphy_write;
	mdiodev->priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), "%s", dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = mdiodev;

	/* Bring up PHY */
	ret = clk_enable_bulk(&eth->clks);
	if (ret)
		goto err_clk_enable;

	ret = ravb_reset(dev);
	if (ret)
		goto err_clk_enable;

	ret = ravb_phy_config(dev);
	if (ret)
		goto err_clk_enable;

	return 0;

err_clk_enable:
	mdio_unregister(mdiodev);
err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	clk_release_bulk(&eth->clks);
err_clk_get:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	clk_release_bulk(&eth->clks);

	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}

static const struct eth_ops ravb_ops = {
	.start			= ravb_start,
	.send			= ravb_send,
	.recv			= ravb_recv,
	.free_pkt		= ravb_free_pkt,
	.stop			= ravb_stop,
	.write_hwaddr		= ravb_write_hwaddr,
};

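/*
 * Parse the common DT properties: register base, PHY mode and the
 * optional "max-speed" limit (defaults to 1000 Mbit/s).
 */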
int ravb_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

	pdata->iobase = dev_read_addr(dev);

	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 1000);

	return 0;
}

static const struct ravb_device_ops ravb_device_ops_rcar = {
	.mac_init	= ravb_mac_init_rcar,
	.dmac_init	= ravb_dmac_init_rcar,
	.config		= ravb_config_rcar,
};

static const struct udevice_id ravb_ids[] = {
	{
		.compatible = "renesas,etheravb-rcar-gen3",
		.data = (ulong)&ravb_device_ops_rcar,
	},
	{
		.compatible = "renesas,etheravb-rcar-gen4",
		.data = (ulong)&ravb_device_ops_rcar,
	},
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name		= "ravb",
	.id		= UCLASS_ETH,
	.of_match	= ravb_ids,
	.of_to_plat	= ravb_of_to_plat,
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.ops		= &ravb_ops,
	.priv_auto	= sizeof(struct ravb_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};