blob: a0264dc386de97196154de5886b9d87a84f28d4b [file] [log] [blame]
Amit Singh Tomar8f656c52020-01-27 01:14:42 +00001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Copyright (C) 2019 Amit Singh Tomar <amittomer25@gmail.com>
4 *
5 * Driver for Broadcom GENETv5 Ethernet controller (as found on the RPi4)
6 * This driver is based on the Linux driver:
7 * drivers/net/ethernet/broadcom/genet/bcmgenet.c
8 * which is: Copyright (c) 2014-2017 Broadcom
9 *
10 * The hardware supports multiple queues (16 priority queues and one
11 * default queue), both for RX and TX. There are 256 DMA descriptors (both
12 * for TX and RX), and they live in MMIO registers. The hardware allows
13 * assigning descriptor ranges to queues, but we choose the most simple setup:
14 * All 256 descriptors are assigned to the default queue (#16).
15 * Also the Linux driver supports multiple generations of the MAC, whereas
16 * we only support v5, as used in the Raspberry Pi 4.
17 */
18
Simon Glass0f2af882020-05-10 11:40:05 -060019#include <log.h>
Simon Glass274e0b02020-05-10 11:39:56 -060020#include <asm/cache.h>
Amit Singh Tomar8f656c52020-01-27 01:14:42 +000021#include <asm/io.h>
22#include <clk.h>
23#include <cpu_func.h>
24#include <dm.h>
25#include <fdt_support.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060026#include <linux/bitops.h>
Simon Glassdbd79542020-05-10 11:40:11 -060027#include <linux/delay.h>
Amit Singh Tomar8f656c52020-01-27 01:14:42 +000028#include <linux/err.h>
29#include <malloc.h>
30#include <miiphy.h>
31#include <net.h>
32#include <dm/of_access.h>
33#include <dm/ofnode.h>
34#include <linux/iopoll.h>
35#include <linux/sizes.h>
36#include <asm/dma-mapping.h>
37#include <wait_bit.h>
38
/* Register definitions derived from Linux source */
#define SYS_REV_CTRL			0x00	/* HW revision, read in probe() */

#define SYS_PORT_CTRL			0x04
#define PORT_MODE_EXT_GPHY		3	/* external GPHY via RGMII */

#define GENET_SYS_OFF			0x0000
#define SYS_RBUF_FLUSH_CTRL		(GENET_SYS_OFF  + 0x08)
#define SYS_TBUF_FLUSH_CTRL		(GENET_SYS_OFF  + 0x0c)

/* "Ext" block: RGMII out-of-band control */
#define GENET_EXT_OFF			0x0080
#define EXT_RGMII_OOB_CTRL		(GENET_EXT_OFF + 0x0c)
#define RGMII_LINK			BIT(4)
#define OOB_DISABLE			BIT(5)
#define RGMII_MODE_EN			BIT(6)
#define ID_MODE_DIS			BIT(16)

/* Receive buffer block */
#define GENET_RBUF_OFF			0x0300
#define RBUF_TBUF_SIZE_CTRL		(GENET_RBUF_OFF + 0xb4)
#define RBUF_CTRL			(GENET_RBUF_OFF + 0x00)
#define RBUF_ALIGN_2B			BIT(1)

/* UniMAC block: MAC address, command and MDIO registers */
#define GENET_UMAC_OFF			0x0800
#define UMAC_MIB_CTRL			(GENET_UMAC_OFF + 0x580)
#define UMAC_MAX_FRAME_LEN		(GENET_UMAC_OFF + 0x014)
#define UMAC_MAC0			(GENET_UMAC_OFF + 0x00c)
#define UMAC_MAC1			(GENET_UMAC_OFF + 0x010)
#define UMAC_CMD			(GENET_UMAC_OFF + 0x008)
#define MDIO_CMD			(GENET_UMAC_OFF + 0x614)
#define UMAC_TX_FLUSH			(GENET_UMAC_OFF + 0x334)
/* MDIO_CMD register bits */
#define MDIO_START_BUSY			BIT(29)
#define MDIO_READ_FAIL			BIT(28)
#define MDIO_RD				(2 << 26)
#define MDIO_WR				BIT(26)
#define MDIO_PMD_SHIFT			21
#define MDIO_PMD_MASK			0x1f
#define MDIO_REG_SHIFT			16
#define MDIO_REG_MASK			0x1f
/* UMAC_CMD register bits
 * (CMD_TX_EN/CMD_RX_EN were previously defined twice; duplicates removed)
 */
#define CMD_TX_EN			BIT(0)
#define CMD_RX_EN			BIT(1)
#define UMAC_SPEED_10			0
#define UMAC_SPEED_100			1
#define UMAC_SPEED_1000			2
#define UMAC_SPEED_2500			3
#define CMD_SPEED_SHIFT			2
#define CMD_SPEED_MASK			3
#define CMD_SW_RESET			BIT(13)
#define CMD_LCL_LOOP_EN			BIT(15)

/* UMAC_MIB_CTRL register bits: reset HW packet counters */
#define MIB_RESET_RX			BIT(0)
#define MIB_RESET_RUNT			BIT(1)
#define MIB_RESET_TX			BIT(2)
94
/* total number of Buffer Descriptors, same for Rx/Tx */
#define TOTAL_DESCS			256
#define RX_DESCS			TOTAL_DESCS
#define TX_DESCS			TOTAL_DESCS

/* All descriptors are assigned to the default queue, #16 */
#define DEFAULT_Q			0x10

/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528.
 * 1536 is multiple of 256 bytes
 */
#define ENET_BRCM_TAG_LEN		6
#define ENET_PAD			8
#define ENET_MAX_MTU_SIZE		(ETH_DATA_LEN + ETH_HLEN + \
					 VLAN_HLEN + ENET_BRCM_TAG_LEN + \
					 ETH_FCS_LEN + ENET_PAD)

/* Tx/Rx Dma Descriptor common bits */
#define DMA_EN				BIT(0)
#define DMA_RING_BUF_EN_SHIFT		0x01
#define DMA_RING_BUF_EN_MASK		0xffff
#define DMA_BUFLENGTH_MASK		0x0fff
#define DMA_BUFLENGTH_SHIFT		16
#define DMA_RING_SIZE_SHIFT		16
#define DMA_OWN				0x8000
#define DMA_EOP				0x4000
#define DMA_SOP				0x2000
#define DMA_WRAP			0x1000
#define DMA_MAX_BURST_LENGTH		0x8
/* Tx specific DMA descriptor bits */
#define DMA_TX_UNDERRUN			0x0200
#define DMA_TX_APPEND_CRC		0x0040
#define DMA_TX_OW_CRC			0x0020
#define DMA_TX_DO_CSUM			0x0010
#define DMA_TX_QTAG_SHIFT		7

/* DMA rings size */
#define DMA_RING_SIZE			0x40
#define DMA_RINGS_SIZE			(DMA_RING_SIZE * (DEFAULT_Q + 1))

/* DMA descriptor: three 32-bit words per descriptor, living in MMIO space */
#define DMA_DESC_LENGTH_STATUS		0x00
#define DMA_DESC_ADDRESS_LO		0x04
#define DMA_DESC_ADDRESS_HI		0x08
#define DMA_DESC_SIZE			12

/* descriptor arrays sit at the start of each RX/TX block; ring registers follow */
#define GENET_RX_OFF			0x2000
#define GENET_RDMA_REG_OFF \
	(GENET_RX_OFF + TOTAL_DESCS * DMA_DESC_SIZE)
#define GENET_TX_OFF			0x4000
#define GENET_TDMA_REG_OFF \
	(GENET_TX_OFF + TOTAL_DESCS * DMA_DESC_SIZE)

/* flow-control thresholds programmed into RDMA_XON_XOFF_THRESH */
#define DMA_FC_THRESH_HI		(RX_DESCS >> 4)
#define DMA_FC_THRESH_LO		5
#define DMA_FC_THRESH_VALUE		((DMA_FC_THRESH_LO << 16) | \
					 DMA_FC_THRESH_HI)

#define DMA_XOFF_THRESHOLD_SHIFT	16

/* per-ring TX registers for the default queue */
#define TDMA_RING_REG_BASE \
	(GENET_TDMA_REG_OFF + DEFAULT_Q * DMA_RING_SIZE)
#define TDMA_READ_PTR			(TDMA_RING_REG_BASE + 0x00)
#define TDMA_CONS_INDEX			(TDMA_RING_REG_BASE + 0x08)
#define TDMA_PROD_INDEX			(TDMA_RING_REG_BASE + 0x0c)
#define DMA_RING_BUF_SIZE		0x10
#define DMA_START_ADDR			0x14
#define DMA_END_ADDR			0x1c
#define DMA_MBUF_DONE_THRESH		0x24
#define TDMA_FLOW_PERIOD		(TDMA_RING_REG_BASE + 0x28)
#define TDMA_WRITE_PTR			(TDMA_RING_REG_BASE + 0x2c)

/* per-ring RX registers for the default queue */
#define RDMA_RING_REG_BASE \
	(GENET_RDMA_REG_OFF + DEFAULT_Q * DMA_RING_SIZE)
#define RDMA_WRITE_PTR			(RDMA_RING_REG_BASE + 0x00)
#define RDMA_PROD_INDEX			(RDMA_RING_REG_BASE + 0x08)
#define RDMA_CONS_INDEX			(RDMA_RING_REG_BASE + 0x0c)
#define RDMA_XON_XOFF_THRESH		(RDMA_RING_REG_BASE + 0x28)
#define RDMA_READ_PTR			(RDMA_RING_REG_BASE + 0x2c)

/* global TDMA/RDMA channel registers, after all per-ring register blocks */
#define TDMA_REG_BASE			(GENET_TDMA_REG_OFF + DMA_RINGS_SIZE)
#define RDMA_REG_BASE			(GENET_RDMA_REG_OFF + DMA_RINGS_SIZE)
#define DMA_RING_CFG			0x00
#define DMA_CTRL			0x04
#define DMA_SCB_BURST_SIZE		0x0c

#define RX_BUF_LENGTH			2048
#define RX_TOTAL_BUFSIZE		(RX_BUF_LENGTH * RX_DESCS)
#define RX_BUF_OFFSET			2	/* HW 2-byte IP header alignment (RBUF_ALIGN_2B) */
183
/* Per-device driver state. Allocated DMA-able (DM_FLAG_ALLOC_PRIV_DMA)
 * because rxbuffer is handed to the hardware.
 */
struct bcmgenet_eth_priv {
	/* receive packet buffers, one RX_BUF_LENGTH slot per RX descriptor */
	char rxbuffer[RX_TOTAL_BUFSIZE] __aligned(ARCH_DMA_MINALIGN);
	void *mac_reg;		/* mapped base of the GENET register block */
	void *tx_desc_base;	/* MMIO TX descriptor array (mac_reg + GENET_TX_OFF) */
	void *rx_desc_base;	/* MMIO RX descriptor array (mac_reg + GENET_RX_OFF) */
	int tx_index;		/* next TX descriptor to use */
	int rx_index;		/* next RX descriptor to read */
	int c_index;		/* last RX consumer index written to HW */
	int phyaddr;		/* PHY address from the "phy-handle" node */
	u32 interface;		/* phy_interface_t from DT "phy-mode" */
	u32 speed;		/* optional "max-speed" limit from DT, 0 if unset */
	struct phy_device *phydev;
	struct mii_dev *bus;
};
198
/* Reset the UniMAC and bring its core registers to a known state:
 * flush the RX buffer block, soft-reset the MAC, clear the MIB
 * counters, program the max frame length and enable 2-byte RX
 * alignment. Register/delay ordering follows the Linux driver.
 */
static void bcmgenet_umac_reset(struct bcmgenet_eth_priv *priv)
{
	u32 reg;

	/* pulse bit 1 of the RBUF flush control, then clear the register */
	reg = readl(priv->mac_reg + SYS_RBUF_FLUSH_CTRL);
	reg |= BIT(1);
	writel(reg, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
	udelay(10);

	reg &= ~BIT(1);
	writel(reg, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
	udelay(10);

	writel(0, (priv->mac_reg + SYS_RBUF_FLUSH_CTRL));
	udelay(10);

	/* disable MAC, then soft-reset with local loopback for a stable rxclk */
	writel(0, priv->mac_reg + UMAC_CMD);

	writel(CMD_SW_RESET | CMD_LCL_LOOP_EN, priv->mac_reg + UMAC_CMD);
	udelay(2);
	writel(0, priv->mac_reg + UMAC_CMD);

	/* clear tx/rx counter */
	writel(MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
	       priv->mac_reg + UMAC_MIB_CTRL);
	writel(0, priv->mac_reg + UMAC_MIB_CTRL);

	writel(ENET_MAX_MTU_SIZE, priv->mac_reg + UMAC_MAX_FRAME_LEN);

	/* init rx registers, enable ip header optimization */
	reg = readl(priv->mac_reg + RBUF_CTRL);
	reg |= RBUF_ALIGN_2B;
	writel(reg, (priv->mac_reg + RBUF_CTRL));

	writel(1, (priv->mac_reg + RBUF_TBUF_SIZE_CTRL));
}
235
236static int bcmgenet_gmac_write_hwaddr(struct udevice *dev)
237{
238 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700239 struct eth_pdata *pdata = dev_get_plat(dev);
Amit Singh Tomar8f656c52020-01-27 01:14:42 +0000240 uchar *addr = pdata->enetaddr;
241 u32 reg;
242
243 reg = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
244 writel_relaxed(reg, priv->mac_reg + UMAC_MAC0);
245
246 reg = addr[4] << 8 | addr[5];
247 writel_relaxed(reg, priv->mac_reg + UMAC_MAC1);
248
249 return 0;
250}
251
/* Stop both DMA engines and flush any pending TX data. */
static void bcmgenet_disable_dma(struct bcmgenet_eth_priv *priv)
{
	clrbits_32(priv->mac_reg + TDMA_REG_BASE + DMA_CTRL, DMA_EN);
	clrbits_32(priv->mac_reg + RDMA_REG_BASE + DMA_CTRL, DMA_EN);

	/* pulse the TX flush bit to drop anything still queued */
	writel(1, priv->mac_reg + UMAC_TX_FLUSH);
	udelay(10);
	writel(0, priv->mac_reg + UMAC_TX_FLUSH);
}
261
/* Enable the TX and RX DMA engines with the default queue's ring
 * buffer enabled. Note TDMA is written outright while RDMA is
 * OR-ed in, matching the original code.
 */
static void bcmgenet_enable_dma(struct bcmgenet_eth_priv *priv)
{
	u32 dma_ctrl = (1 << (DEFAULT_Q + DMA_RING_BUF_EN_SHIFT)) | DMA_EN;

	writel(dma_ctrl, priv->mac_reg + TDMA_REG_BASE + DMA_CTRL);

	setbits_32(priv->mac_reg + RDMA_REG_BASE + DMA_CTRL, dma_ctrl);
}
270
/* Transmit one packet: flush it from the data cache, fill the next
 * MMIO TX descriptor, bump the producer index and busy-wait until
 * the hardware's consumer index catches up.
 *
 * Returns 0 on success or -ETIMEDOUT if the MAC does not consume
 * the descriptor within ~100 polls.
 */
static int bcmgenet_gmac_eth_send(struct udevice *dev, void *packet, int length)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	void *desc_base = priv->tx_desc_base + priv->tx_index * DMA_DESC_SIZE;
	u32 len_stat = length << DMA_BUFLENGTH_SHIFT;
	ulong packet_aligned = rounddown((ulong)packet, ARCH_DMA_MINALIGN);
	u32 prod_index, cons;
	u32 tries = 100;

	prod_index = readl(priv->mac_reg + TDMA_PROD_INDEX);

	/* There is actually no reason for the rounding here, but the ARMv7
	 * implementation of flush_dcache_range() checks for aligned
	 * boundaries of the flushed range.
	 * Adjust them here to pass that check and avoid misleading messages.
	 */
	flush_dcache_range(packet_aligned,
			   packet_aligned + roundup(length, ARCH_DMA_MINALIGN));

	/* whole frame in one descriptor: SOP+EOP, let HW append the CRC */
	len_stat |= 0x3F << DMA_TX_QTAG_SHIFT;
	len_stat |= DMA_TX_APPEND_CRC | DMA_SOP | DMA_EOP;

	/* Set-up packet for transmission */
	writel(lower_32_bits((ulong)packet), (desc_base + DMA_DESC_ADDRESS_LO));
	writel(upper_32_bits((ulong)packet), (desc_base + DMA_DESC_ADDRESS_HI));
	writel(len_stat, (desc_base + DMA_DESC_LENGTH_STATUS));

	/* Increment index and start transmission */
	if (++priv->tx_index >= TX_DESCS)
		priv->tx_index = 0;

	prod_index++;

	/* Start Transmisson */
	writel(prod_index, priv->mac_reg + TDMA_PROD_INDEX);

	/* NOTE(review): the masked compare assumes prod_index does not wrap
	 * past 0xffff between read and poll — same as the original code.
	 */
	do {
		cons = readl(priv->mac_reg + TDMA_CONS_INDEX);
	} while ((cons & 0xffff) < prod_index && --tries);
	if (!tries)
		return -ETIMEDOUT;

	return 0;
}
315
316/* Check whether all cache lines affected by an invalidate are within
317 * the buffer, to make sure we don't accidentally lose unrelated dirty
318 * data stored nearby.
319 * Alignment of the buffer start address will be checked in the implementation
320 * of invalidate_dcache_range().
321 */
322static void invalidate_dcache_check(unsigned long addr, size_t size,
323 size_t buffer_size)
324{
325 size_t inval_size = roundup(size, ARCH_DMA_MINALIGN);
326
327 if (unlikely(inval_size > buffer_size))
328 printf("WARNING: Cache invalidate area exceeds buffer size\n");
329
330 invalidate_dcache_range(addr, addr + inval_size);
331}
332
/* Receive one packet, zero-copy: if the hardware producer index has
 * advanced past our consumer index, read length and buffer address
 * from the current RX descriptor, invalidate the cache over it and
 * hand the buffer (skipping the 2-byte alignment pad) to the caller.
 *
 * Returns the payload length, or -EAGAIN when nothing is pending.
 * The descriptor is recycled later in free_pkt().
 */
static int bcmgenet_gmac_eth_recv(struct udevice *dev,
				  int flags, uchar **packetp)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	void *desc_base = priv->rx_desc_base + priv->rx_index * DMA_DESC_SIZE;
	u32 prod_index = readl(priv->mac_reg + RDMA_PROD_INDEX);
	u32 length, addr;

	if (prod_index == priv->c_index)
		return -EAGAIN;

	length = readl(desc_base + DMA_DESC_LENGTH_STATUS);
	length = (length >> DMA_BUFLENGTH_SHIFT) & DMA_BUFLENGTH_MASK;
	/* NOTE(review): only the low 32 address bits are used here; the RX
	 * buffers live in priv, which is assumed to be below 4 GiB.
	 */
	addr = readl(desc_base + DMA_DESC_ADDRESS_LO);

	invalidate_dcache_check(addr, length, RX_BUF_LENGTH);

	/* To cater for the IP header alignment the hardware does.
	 * This would actually not be needed if we don't program
	 * RBUF_ALIGN_2B
	 */
	*packetp = (uchar *)(ulong)addr + RX_BUF_OFFSET;

	return length - RX_BUF_OFFSET;
}
358
/* Return the buffer of the packet delivered by recv() to the hardware:
 * flush its cache range so no dirty lines can be evicted over fresh DMA
 * data, advance the consumer index in HW and our descriptor pointer.
 */
static int bcmgenet_gmac_free_pkt(struct udevice *dev, uchar *packet,
				  int length)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	void *desc_base = priv->rx_desc_base + priv->rx_index * DMA_DESC_SIZE;
	u32 addr = readl(desc_base + DMA_DESC_ADDRESS_LO);

	flush_dcache_range(addr, addr + RX_BUF_LENGTH);

	/* Tell the MAC we have consumed that last receive buffer. */
	priv->c_index = (priv->c_index + 1) & 0xFFFF;
	writel(priv->c_index, priv->mac_reg + RDMA_CONS_INDEX);

	/* Forward our descriptor pointer, wrapping around if needed. */
	if (++priv->rx_index >= RX_DESCS)
		priv->rx_index = 0;

	return 0;
}
378
379static void rx_descs_init(struct bcmgenet_eth_priv *priv)
380{
381 char *rxbuffs = &priv->rxbuffer[0];
382 u32 len_stat, i;
383 void *desc_base = priv->rx_desc_base;
384
Amit Singh Tomar8f656c52020-01-27 01:14:42 +0000385 len_stat = (RX_BUF_LENGTH << DMA_BUFLENGTH_SHIFT) | DMA_OWN;
386
387 for (i = 0; i < RX_DESCS; i++) {
388 writel(lower_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]),
389 desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_LO);
390 writel(upper_32_bits((uintptr_t)&rxbuffs[i * RX_BUF_LENGTH]),
391 desc_base + i * DMA_DESC_SIZE + DMA_DESC_ADDRESS_HI);
392 writel(len_stat,
393 desc_base + i * DMA_DESC_SIZE + DMA_DESC_LENGTH_STATUS);
394 }
395}
396
/* Configure the default RX ring: burst size, descriptor window,
 * producer/consumer alignment, buffer size and flow-control thresholds.
 */
static void rx_ring_init(struct bcmgenet_eth_priv *priv)
{
	writel(DMA_MAX_BURST_LENGTH,
	       priv->mac_reg + RDMA_REG_BASE + DMA_SCB_BURST_SIZE);

	/* ring covers all 256 descriptors (END_ADDR is in 32-bit words) */
	writel(0x0, priv->mac_reg + RDMA_RING_REG_BASE + DMA_START_ADDR);
	writel(0x0, priv->mac_reg + RDMA_READ_PTR);
	writel(0x0, priv->mac_reg + RDMA_WRITE_PTR);
	writel(RX_DESCS * DMA_DESC_SIZE / 4 - 1,
	       priv->mac_reg + RDMA_RING_REG_BASE + DMA_END_ADDR);

	/* cannot init RDMA_PROD_INDEX to 0, so align RDMA_CONS_INDEX on it instead */
	priv->c_index = readl(priv->mac_reg + RDMA_PROD_INDEX);
	writel(priv->c_index, priv->mac_reg + RDMA_CONS_INDEX);
	/* descriptor index wraps at RX_DESCS (256), hence the 0xFF mask */
	priv->rx_index = priv->c_index;
	priv->rx_index &= 0xFF;
	writel((RX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH,
	       priv->mac_reg + RDMA_RING_REG_BASE + DMA_RING_BUF_SIZE);
	writel(DMA_FC_THRESH_VALUE, priv->mac_reg + RDMA_XON_XOFF_THRESH);
	writel(1 << DEFAULT_Q, priv->mac_reg + RDMA_REG_BASE + DMA_RING_CFG);
}
418
/* Configure the default TX ring, mirroring rx_ring_init() for the
 * transmit direction.
 */
static void tx_ring_init(struct bcmgenet_eth_priv *priv)
{
	writel(DMA_MAX_BURST_LENGTH,
	       priv->mac_reg + TDMA_REG_BASE + DMA_SCB_BURST_SIZE);

	writel(0x0, priv->mac_reg + TDMA_RING_REG_BASE + DMA_START_ADDR);
	writel(0x0, priv->mac_reg + TDMA_READ_PTR);
	writel(0x0, priv->mac_reg + TDMA_WRITE_PTR);
	writel(TX_DESCS * DMA_DESC_SIZE / 4 - 1,
	       priv->mac_reg + TDMA_RING_REG_BASE + DMA_END_ADDR);
	/* cannot init TDMA_CONS_INDEX to 0, so align TDMA_PROD_INDEX on it instead */
	priv->tx_index = readl(priv->mac_reg + TDMA_CONS_INDEX);
	writel(priv->tx_index, priv->mac_reg + TDMA_PROD_INDEX);
	/* descriptor index wraps at TX_DESCS (256), hence the 0xFF mask */
	priv->tx_index &= 0xFF;
	writel(0x1, priv->mac_reg + TDMA_RING_REG_BASE + DMA_MBUF_DONE_THRESH);
	writel(0x0, priv->mac_reg + TDMA_FLOW_PERIOD);
	writel((TX_DESCS << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH,
	       priv->mac_reg + TDMA_RING_REG_BASE + DMA_RING_BUF_SIZE);

	writel(1 << DEFAULT_Q, priv->mac_reg + TDMA_REG_BASE + DMA_RING_CFG);
}
440
441static int bcmgenet_adjust_link(struct bcmgenet_eth_priv *priv)
442{
443 struct phy_device *phy_dev = priv->phydev;
444 u32 speed;
445
446 switch (phy_dev->speed) {
447 case SPEED_1000:
448 speed = UMAC_SPEED_1000;
449 break;
450 case SPEED_100:
451 speed = UMAC_SPEED_100;
452 break;
453 case SPEED_10:
454 speed = UMAC_SPEED_10;
455 break;
456 default:
457 printf("bcmgenet: Unsupported PHY speed: %d\n", phy_dev->speed);
458 return -EINVAL;
459 }
460
461 clrsetbits_32(priv->mac_reg + EXT_RGMII_OOB_CTRL, OOB_DISABLE,
Nicolas Saenz Julienne138f5952020-02-20 17:36:31 +0100462 RGMII_LINK | RGMII_MODE_EN);
463
Jason Wessel165c06a2020-07-17 06:32:00 -0700464 if (phy_dev->interface == PHY_INTERFACE_MODE_RGMII ||
465 phy_dev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
Nicolas Saenz Julienne138f5952020-02-20 17:36:31 +0100466 setbits_32(priv->mac_reg + EXT_RGMII_OOB_CTRL, ID_MODE_DIS);
Amit Singh Tomar8f656c52020-01-27 01:14:42 +0000467
468 writel(speed << CMD_SPEED_SHIFT, (priv->mac_reg + UMAC_CMD));
469
470 return 0;
471}
472
/* eth_ops.start: full bring-up of MAC, DMA rings and PHY.
 * Returns 0 on success or a negative error from PHY startup /
 * link adjustment.
 */
static int bcmgenet_gmac_eth_start(struct udevice *dev)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	int ret;

	/* descriptor arrays live in MMIO space at fixed offsets */
	priv->tx_desc_base = priv->mac_reg + GENET_TX_OFF;
	priv->rx_desc_base = priv->mac_reg + GENET_RX_OFF;

	bcmgenet_umac_reset(priv);

	bcmgenet_gmac_write_hwaddr(dev);

	/* Disable RX/TX DMA and flush TX queues */
	bcmgenet_disable_dma(priv);

	rx_ring_init(priv);
	rx_descs_init(priv);

	tx_ring_init(priv);

	/* Enable RX/TX DMA */
	bcmgenet_enable_dma(priv);

	/* read PHY properties over the wire from generic PHY set-up */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("bcmgenet: PHY startup failed: %d\n", ret);
		return ret;
	}

	/* Update MAC registers based on PHY property */
	ret = bcmgenet_adjust_link(priv);
	if (ret) {
		printf("bcmgenet: adjust PHY link failed: %d\n", ret);
		return ret;
	}

	/* Enable Rx/Tx */
	setbits_32(priv->mac_reg + UMAC_CMD, CMD_TX_EN | CMD_RX_EN);

	return 0;
}
515
516static int bcmgenet_phy_init(struct bcmgenet_eth_priv *priv, void *dev)
517{
518 struct phy_device *phydev;
519 int ret;
520
521 phydev = phy_connect(priv->bus, priv->phyaddr, dev, priv->interface);
522 if (!phydev)
523 return -ENODEV;
524
525 phydev->supported &= PHY_GBIT_FEATURES;
526 if (priv->speed) {
527 ret = phy_set_supported(priv->phydev, priv->speed);
528 if (ret)
529 return ret;
530 }
531 phydev->advertising = phydev->supported;
532
Amit Singh Tomar8f656c52020-01-27 01:14:42 +0000533 priv->phydev = phydev;
534 phy_config(priv->phydev);
535
536 return 0;
537}
538
/* Kick off the MDIO transaction previously set up in MDIO_CMD. */
static void bcmgenet_mdio_start(struct bcmgenet_eth_priv *priv)
{
	setbits_32(priv->mac_reg + MDIO_CMD, MDIO_START_BUSY);
}
543
/* MDIO bus write: write a 16-bit value to register 'reg' of the PHY at
 * 'addr' ('devad' is ignored — clause-22 only). Returns 0 on success or
 * -ETIMEDOUT if MDIO_START_BUSY does not clear within 20 ms.
 */
static int bcmgenet_mdio_write(struct mii_dev *bus, int addr, int devad,
			       int reg, u16 value)
{
	struct udevice *dev = bus->priv;
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	u32 val;

	/* Prepare the write operation */
	val = MDIO_WR | (addr << MDIO_PMD_SHIFT) |
		(reg << MDIO_REG_SHIFT) | (0xffff & value);
	writel_relaxed(val,  priv->mac_reg + MDIO_CMD);

	/* Start MDIO transaction */
	bcmgenet_mdio_start(priv);

	return wait_for_bit_32(priv->mac_reg + MDIO_CMD,
			       MDIO_START_BUSY, false, 20, true);
}
562
563static int bcmgenet_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
564{
565 struct udevice *dev = bus->priv;
566 struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
567 u32 val;
568 int ret;
569
570 /* Prepare the read operation */
571 val = MDIO_RD | (addr << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
572 writel_relaxed(val, priv->mac_reg + MDIO_CMD);
573
574 /* Start MDIO transaction */
575 bcmgenet_mdio_start(priv);
576
577 ret = wait_for_bit_32(priv->mac_reg + MDIO_CMD,
578 MDIO_START_BUSY, false, 20, true);
579 if (ret)
580 return ret;
581
582 val = readl_relaxed(priv->mac_reg + MDIO_CMD);
583
584 return val & 0xffff;
585}
586
587static int bcmgenet_mdio_init(const char *name, struct udevice *priv)
588{
589 struct mii_dev *bus = mdio_alloc();
590
591 if (!bus) {
592 debug("Failed to allocate MDIO bus\n");
593 return -ENOMEM;
594 }
595
596 bus->read = bcmgenet_mdio_read;
597 bus->write = bcmgenet_mdio_write;
598 snprintf(bus->name, sizeof(bus->name), name);
599 bus->priv = (void *)priv;
600
601 return mdio_register(bus);
602}
603
604/* We only support RGMII (as used on the RPi4). */
605static int bcmgenet_interface_set(struct bcmgenet_eth_priv *priv)
606{
607 phy_interface_t phy_mode = priv->interface;
608
609 switch (phy_mode) {
610 case PHY_INTERFACE_MODE_RGMII:
611 case PHY_INTERFACE_MODE_RGMII_RXID:
612 writel(PORT_MODE_EXT_GPHY, priv->mac_reg + SYS_PORT_CTRL);
613 break;
614 default:
615 printf("unknown phy mode: %d\n", priv->interface);
616 return -EINVAL;
617 }
618
619 return 0;
620}
621
/* Driver probe: map registers, verify the hardware is GENETv5,
 * pre-reset the MAC, then register the MDIO bus and connect the PHY.
 * Returns 0 on success or a negative error code.
 */
static int bcmgenet_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	ofnode mdio_node;
	const char *name;
	u32 reg;
	int ret;
	u8 major;

	priv->mac_reg = map_physmem(pdata->iobase, SZ_64K, MAP_NOCACHE);
	priv->interface = pdata->phy_interface;
	priv->speed = pdata->max_speed;

	/* Read GENET HW version */
	reg = readl_relaxed(priv->mac_reg + SYS_REV_CTRL);
	major = (reg >> 24) & 0x0f;
	/* per this check, v5 silicon reports 6 in the major field; remap the
	 * raw value to the marketing version only for the error message
	 */
	if (major != 6) {
		if (major == 5)
			major = 4;
		else if (major == 0)
			major = 1;

		printf("Unsupported GENETv%d.%d\n", major, (reg >> 16) & 0x0f);
		return -ENODEV;
	}

	ret = bcmgenet_interface_set(priv);
	if (ret)
		return ret;

	writel(0, priv->mac_reg + SYS_RBUF_FLUSH_CTRL);
	udelay(10);
	/* disable MAC while updating its registers */
	writel(0, priv->mac_reg + UMAC_CMD);
	/* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
	writel(CMD_SW_RESET | CMD_LCL_LOOP_EN, priv->mac_reg + UMAC_CMD);

	/* NOTE(review): assumes the first subnode is the MDIO node — verify
	 * against the device trees this driver is used with
	 */
	mdio_node = dev_read_first_subnode(dev);
	name = ofnode_get_name(mdio_node);

	ret = bcmgenet_mdio_init(name, dev);
	if (ret)
		return ret;

	priv->bus = miiphy_get_dev_by_name(name);

	return bcmgenet_phy_init(priv, dev);
}
671
/* eth_ops.stop: disable the MAC TX/RX paths, then stop and flush DMA. */
static void bcmgenet_gmac_eth_stop(struct udevice *dev)
{
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);

	clrbits_32(priv->mac_reg + UMAC_CMD, CMD_TX_EN | CMD_RX_EN);

	bcmgenet_disable_dma(priv);
}
680
/* UCLASS_ETH operations implemented by this driver */
static const struct eth_ops bcmgenet_gmac_eth_ops = {
	.start                  = bcmgenet_gmac_eth_start,
	.write_hwaddr           = bcmgenet_gmac_write_hwaddr,
	.send                   = bcmgenet_gmac_eth_send,
	.recv                   = bcmgenet_gmac_eth_recv,
	.free_pkt               = bcmgenet_gmac_free_pkt,
	.stop                   = bcmgenet_gmac_eth_stop,
};
689
/* Extract platform data from the device tree: register base, PHY mode
 * (mandatory) and, when a "phy-handle" exists, the PHY address and
 * optional speed limit. Returns 0 on success, -EINVAL if "phy-mode"
 * is missing or unknown.
 */
static int bcmgenet_eth_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct bcmgenet_eth_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args phy_node;
	int ret;

	pdata->iobase = dev_read_addr(dev);

	/* Get phy mode from DT */
	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	/* "phy-handle" is optional; on lookup failure the defaults stay 0 */
	ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					 &phy_node);
	if (!ret) {
		ofnode_read_s32(phy_node.node, "reg", &priv->phyaddr);
		ofnode_read_s32(phy_node.node, "max-speed", &pdata->max_speed);
	}

	return 0;
}
713
714/* The BCM2711 implementation has a limited burst length compared to a generic
715 * GENETv5 version, but we go with that shorter value (8) in both cases, for
716 * the sake of simplicity.
717 */
/* Device tree compatibles: generic GENETv5 and the BCM2711 (RPi4) variant */
static const struct udevice_id bcmgenet_eth_ids[] = {
	{.compatible = "brcm,genet-v5"},
	{.compatible = "brcm,bcm2711-genet-v5"},
	{}
};
723
/* Driver registration; priv is DMA-able since it embeds the RX buffers */
U_BOOT_DRIVER(eth_bcmgenet) = {
	.name   = "eth_bcmgenet",
	.id     = UCLASS_ETH,
	.of_match = bcmgenet_eth_ids,
	.of_to_plat = bcmgenet_eth_of_to_plat,
	.probe  = bcmgenet_eth_probe,
	.ops    = &bcmgenet_gmac_eth_ops,
	.priv_auto	= sizeof(struct bcmgenet_eth_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
734};