// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023, Intel Corporation.
 *
 * Portions based on U-Boot's dwc_eth_qos.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet XGMAC (10G Ethernet
 * MAC) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id xgmac_ids[]'s .data
 * field.
 *
 * This configuration uses an AXI master/DMA bus, an AHB slave/register bus,
 * contains the DMA, MTL, and MAC sub-blocks, and supports a single RGMII PHY.
 * This configuration also has SW control over all clock and reset signals to
 * the HW block.
 */

#define LOG_CATEGORY UCLASS_ETH

#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <eth_phy.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <linux/delay.h>
#include "dwc_eth_xgmac.h"

static void *xgmac_alloc_descs(struct xgmac_priv *xgmac, unsigned int num)
{
	return memalign(ARCH_DMA_MINALIGN, num * xgmac->desc_size);
}

static void xgmac_free_descs(void *descs)
{
	free(descs);
}

static struct xgmac_desc *xgmac_get_desc(struct xgmac_priv *xgmac,
					 unsigned int num, bool rx)
{
	return (rx ? xgmac->rx_descs : xgmac->tx_descs) +
	       (num * xgmac->desc_size);
}

void xgmac_inval_desc_generic(void *desc)
{
	unsigned long start;
	unsigned long end;

	if (!desc) {
		pr_err("%s invalid input descriptor\n", __func__);
		return;
	}

	start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN(start + sizeof(struct xgmac_desc),
		    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

void xgmac_flush_desc_generic(void *desc)
{
	unsigned long start;
	unsigned long end;

	if (!desc) {
		pr_err("%s invalid input descriptor\n", __func__);
		return;
	}

	start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN(start + sizeof(struct xgmac_desc),
		    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

void xgmac_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start;
	unsigned long end;

	if (!buf) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)buf + size,
		    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

void xgmac_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start;
	unsigned long end;

	if (!buf) {
		pr_err("%s invalid input buffer\n", __func__);
		return;
	}

	start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	end = ALIGN((unsigned long)buf + size,
		    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}
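
/*
 * Editor's note, a worked example of the rounding in the helpers above
 * (assuming ARCH_DMA_MINALIGN == 64): for buf == 0x1010 and size == 100,
 * start is rounded down to 0x1000 and end is rounded up to
 * ALIGN(0x1074, 64) == 0x1080, so both whole cachelines overlapping the
 * buffer are flushed or invalidated.
 */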

static int xgmac_mdio_wait_idle(struct xgmac_priv *xgmac)
{
	return wait_for_bit_le32(&xgmac->mac_regs->mdio_data,
				 XGMAC_MAC_MDIO_ADDRESS_SBUSY, false,
				 XGMAC_TIMEOUT_100MS, true);
}

static int xgmac_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg)
{
	struct xgmac_priv *xgmac = bus->priv;
	u32 val;
	u32 hw_addr;
	int ret;

	debug("%s(dev=%p, addr=0x%x, reg=%d):\n", __func__, xgmac->dev,
	      mdio_addr, mdio_reg);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("MDIO not idle at entry: %d\n", ret);
		return ret;
	}

	/* Set Clause 22 format */
	val = BIT(mdio_addr);
	writel(val, &xgmac->mac_regs->mdio_clause_22_port);

	hw_addr = (mdio_addr << XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
		  (mdio_reg & XGMAC_MAC_MDIO_REG_ADDR_C22P_MASK);

	val = xgmac->config->config_mac_mdio <<
	      XGMAC_MAC_MDIO_ADDRESS_CR_SHIFT;

	val |= XGMAC_MAC_MDIO_ADDRESS_SADDR |
	       XGMAC_MDIO_SINGLE_CMD_ADDR_CMD_READ |
	       XGMAC_MAC_MDIO_ADDRESS_SBUSY;

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("MDIO not idle before command: %d\n", ret);
		return ret;
	}

	writel(hw_addr, &xgmac->mac_regs->mdio_address);
	writel(val, &xgmac->mac_regs->mdio_data);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("MDIO read didn't complete: %d\n", ret);
		return ret;
	}

	val = readl(&xgmac->mac_regs->mdio_data);
	val &= XGMAC_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=0x%x\n", __func__, val);

	return val;
}
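
/*
 * Editor's sketch of how the accessor above is typically reached: the MII
 * layer calls it through the mii_dev registered in xgmac_probe(), e.g.
 * (PHY address 0 is an assumption for illustration):
 *
 *	struct mii_dev *bus = miiphy_get_dev_by_name(dev->name);
 *	int physid1 = bus->read(bus, 0, MDIO_DEVAD_NONE, MII_PHYSID1);
 *
 * A negative return is an error; otherwise the low 16 bits hold the
 * register value masked by XGMAC_MAC_MDIO_DATA_GD_MASK.
 */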

static int xgmac_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			    int mdio_reg, u16 mdio_val)
{
	struct xgmac_priv *xgmac = bus->priv;
	u32 val;
	u32 hw_addr;
	int ret;

	debug("%s(dev=%p, addr=0x%x, reg=%d, val=0x%x):\n", __func__,
	      xgmac->dev, mdio_addr, mdio_reg, mdio_val);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("MDIO not idle at entry: %d\n", ret);
		return ret;
	}

	/* Set Clause 22 format */
	val = BIT(mdio_addr);
	writel(val, &xgmac->mac_regs->mdio_clause_22_port);

	hw_addr = (mdio_addr << XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
		  (mdio_reg & XGMAC_MAC_MDIO_REG_ADDR_C22P_MASK);

	hw_addr |= (mdio_reg >> XGMAC_MAC_MDIO_ADDRESS_PA_SHIFT) <<
		   XGMAC_MAC_MDIO_ADDRESS_DA_SHIFT;

	val = (xgmac->config->config_mac_mdio <<
	       XGMAC_MAC_MDIO_ADDRESS_CR_SHIFT);

	val |= XGMAC_MAC_MDIO_ADDRESS_SADDR |
	       mdio_val | XGMAC_MDIO_SINGLE_CMD_ADDR_CMD_WRITE |
	       XGMAC_MAC_MDIO_ADDRESS_SBUSY;

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("MDIO not idle before command: %d\n", ret);
		return ret;
	}

	writel(hw_addr, &xgmac->mac_regs->mdio_address);
	writel(val, &xgmac->mac_regs->mdio_data);

	ret = xgmac_mdio_wait_idle(xgmac);
	if (ret) {
		pr_err("MDIO write didn't complete: %d\n", ret);
		return ret;
	}

	return 0;
}

static int xgmac_set_full_duplex(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&xgmac->mac_regs->mac_extended_conf, XGMAC_MAC_EXT_CONF_HD);

	return 0;
}

static int xgmac_set_half_duplex(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&xgmac->mac_regs->mac_extended_conf, XGMAC_MAC_EXT_CONF_HD);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
		     XGMAC_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int xgmac_set_gmii_speed(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_1G_GMII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_set_mii_speed_100(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_100M_MII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}

static int xgmac_set_mii_speed_10(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	debug("%s(dev=%p):\n", __func__, dev);

	val = XGMAC_MAC_CONF_SS_2_10M_MII << XGMAC_MAC_CONF_SS_SHIFT;
	writel(val, &xgmac->mac_regs->tx_configuration);

	return 0;
}
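
/*
 * Editor's note: the three helpers above differ only in the Speed Select
 * (SS) value written into tx_configuration; the XGMAC_MAC_CONF_SS_*
 * constants (presumably defined in dwc_eth_xgmac.h alongside the register
 * layout) encode 1G GMII, 100M MII, and 10M MII respectively, and
 * xgmac_adjust_link() below picks one based on the negotiated PHY speed.
 */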

static int xgmac_adjust_link(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (xgmac->phy->duplex)
		ret = xgmac_set_full_duplex(dev);
	else
		ret = xgmac_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("xgmac_set_*_duplex() failed: %d\n", ret);
		return ret;
	}

	switch (xgmac->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = xgmac_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = xgmac_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = xgmac_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d\n", xgmac->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("xgmac_set_*mii_speed*() failed: %d\n", ret);
		return ret;
	}

	if (en_calibration) {
		ret = xgmac->config->ops->xgmac_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("xgmac_calibrate_pads() failed: %d\n",
			       ret);
			return ret;
		}
	} else {
		ret = xgmac->config->ops->xgmac_disable_calibration(dev);
		if (ret < 0) {
			pr_err("xgmac_disable_calibration() failed: %d\n",
			       ret);
			return ret;
		}
	}

	return 0;
}

static int xgmac_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 val;

	/*
	 * This function may be called before start() or after stop(). At
	 * those times, on at least some configurations of the XGMAC HW, all
	 * clocks to the XGMAC HW block will be stopped, and a reset signal
	 * applied. If any register access is attempted in this state, bus
	 * timeouts or CPU hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the XGMAC registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the XGMAC device, and hence
	 * irrespective of whether start() was ever called.
	 */
	if (!xgmac->config->reg_access_always_ok && !xgmac->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
	      (plat->enetaddr[4]);
	writel(val, &xgmac->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
	      (plat->enetaddr[2] << 16) |
	      (plat->enetaddr[1] << 8) |
	      (plat->enetaddr[0]);
	writel(val, &xgmac->mac_regs->address0_low);
	return 0;
}
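
/*
 * Editor's illustration of the register packing above: for the address
 * 00:11:22:33:44:55 (enetaddr[0] == 0x00 ... enetaddr[5] == 0x55),
 * address0_high is written with 0x5544 and address0_low with 0x33221100,
 * i.e. the six bytes are laid out little-endian across the two registers.
 */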

static int xgmac_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;

	ret = xgmac->config->ops->xgmac_get_enetaddr(dev);
	if (ret < 0)
		return ret;

	return !is_valid_ethaddr(pdata->enetaddr);
}

static int xgmac_get_phy_addr(struct xgmac_priv *priv, struct udevice *dev)
{
	struct ofnode_phandle_args phandle_args;
	int reg;

	if (dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
				       &phandle_args)) {
		debug("Failed to find phy-handle\n");
		return -ENODEV;
	}

	priv->phy_of_node = phandle_args.node;

	reg = ofnode_read_u32_default(phandle_args.node, "reg", 0);

	return reg;
}
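
/*
 * Editor's illustration of the device tree shape xgmac_get_phy_addr()
 * expects (node names and the unit addresses are hypothetical):
 *
 *	ethernet@10830000 {
 *		compatible = "intel,socfpga-dwxgmac";
 *		phy-handle = <&phy0>;
 *	};
 *
 *	phy0: ethernet-phy@4 {
 *		reg = <4>;
 *	};
 *
 * The helper resolves phy-handle, caches the PHY node for the later
 * phy_config() call, and returns the "reg" value (4 here) as the MDIO
 * station address.
 */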

static int xgmac_start(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret, i;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad;

	struct xgmac_desc *tx_desc = NULL;
	struct xgmac_desc *rx_desc = NULL;
	int addr = -1;

	debug("%s(dev=%p):\n", __func__, dev);

	xgmac->tx_desc_idx = 0;
	xgmac->rx_desc_idx = 0;

	ret = xgmac->config->ops->xgmac_start_resets(dev);
	if (ret < 0) {
		pr_err("xgmac_start_resets() failed: %d\n", ret);
		goto err;
	}

	xgmac->reg_access_ok = true;

	ret = wait_for_bit_le32(&xgmac->dma_regs->mode,
				XGMAC_DMA_MODE_SWR, false,
				xgmac->config->swr_wait, false);
	if (ret) {
		pr_err("XGMAC_DMA_MODE_SWR stuck: %d\n", ret);
		goto err_stop_resets;
	}

	ret = xgmac->config->ops->xgmac_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("xgmac_calibrate_pads() failed: %d\n", ret);
		goto err_stop_resets;
	}

	/*
	 * If the PHY was already connected and configured, there is no need
	 * to reconnect/reconfigure it again.
	 */
	if (!xgmac->phy) {
		addr = xgmac_get_phy_addr(xgmac, dev);
		xgmac->phy = phy_connect(xgmac->mii, addr, dev,
					 xgmac->config->interface(dev));
		if (!xgmac->phy) {
			pr_err("phy_connect() failed\n");
			ret = -ENODEV;
			goto err_stop_resets;
		}

		if (xgmac->max_speed) {
			ret = phy_set_supported(xgmac->phy, xgmac->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d\n", ret);
				goto err_shutdown_phy;
			}
		}

		xgmac->phy->node = xgmac->phy_of_node;
		ret = phy_config(xgmac->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d\n", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(xgmac->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d\n", ret);
		goto err_shutdown_phy;
	}

	if (!xgmac->phy->link) {
		pr_err("No link\n");
		ret = -EAGAIN;
		goto err_shutdown_phy;
	}

	ret = xgmac_adjust_link(dev);
	if (ret < 0) {
		pr_err("xgmac_adjust_link() failed: %d\n", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX; program the TX operating mode */
	setbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
		     XGMAC_MTL_TXQ0_OPERATION_MODE_TSF |
		     (XGMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      XGMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit queue weight */
	writel(0x10, &xgmac->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since no jumbo frames */
	setbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
		     XGMAC_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/receive queue FIFO size; use all RAM for 1 queue */
	val = readl(&xgmac->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> XGMAC_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		     XGMAC_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> XGMAC_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		     XGMAC_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
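
	/*
	 * Editor's worked example of the encodings above (the field value is
	 * an assumption; real hardware reports it in hw_feature1): if
	 * tx_fifo_sz reads back as 5, the TX FIFO is 128 << 5 == 4096 bytes,
	 * and tqs == 4096 / 256 - 1 == 15, which is the value programmed
	 * into the TQS field below.
	 */
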
	clrsetbits_le32(&xgmac->mtl_regs->txq0_operation_mode,
			XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << XGMAC_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
			XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << XGMAC_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	setbits_le32(&xgmac->mtl_regs->rxq0_operation_mode,
		     XGMAC_MTL_RXQ0_OPERATION_MODE_EHFC);

	/* Configure MAC */
	clrsetbits_le32(&xgmac->mac_regs->rxq_ctrl0,
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			xgmac->config->config_mac <<
			XGMAC_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and broadcast queue enable */
	setbits_le32(&xgmac->mac_regs->rxq_ctrl1,
		     XGMAC_MAC_RXQ_CTRL1_MCBCQEN);

	/* Enable promiscuous mode and receive-all mode */
	setbits_le32(&xgmac->mac_regs->mac_packet_filter,
		     XGMAC_MAC_PACKET_FILTER_RA |
		     XGMAC_MAC_PACKET_FILTER_PR);

	/* Set TX flow control parameters: set the pause time */
	setbits_le32(&xgmac->mac_regs->q0_tx_flow_ctrl,
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_PT_MASK <<
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);

	/* Assign priority for RX flow control */
	clrbits_le32(&xgmac->mac_regs->rxq_ctrl2,
		     XGMAC_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     XGMAC_MAC_RXQ_CTRL2_PSRQ0_SHIFT);

	/* Enable flow control */
	setbits_le32(&xgmac->mac_regs->q0_tx_flow_ctrl,
		     XGMAC_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&xgmac->mac_regs->rx_flow_ctrl,
		     XGMAC_MAC_RX_FLOW_CTRL_RFE);

	clrbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_JD);

	clrbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_JE |
		     XGMAC_MAC_CONF_GPSLCE |
		     XGMAC_MAC_CONF_WD);

	setbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_ACS |
		     XGMAC_MAC_CONF_CST);

	ret = xgmac_write_hwaddr(dev);
	if (ret < 0) {
		pr_err("xgmac_write_hwaddr() failed: %d\n", ret);
		goto err_shutdown_phy;
	}

	/* Configure DMA */
	clrsetbits_le32(&xgmac->dma_regs->sysbus_mode,
			XGMAC_DMA_SYSBUS_MODE_AAL,
			XGMAC_DMA_SYSBUS_MODE_EAME |
			XGMAC_DMA_SYSBUS_MODE_UNDEF);

	/* Enable OSP mode */
	setbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&xgmac->dma_regs->ch0_rx_control,
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			XGMAC_MAX_PACKET_SIZE <<
			XGMAC_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	desc_pad = (xgmac->desc_size - sizeof(struct xgmac_desc)) /
		   xgmac->config->axi_bus_width;

	setbits_le32(&xgmac->dma_regs->ch0_control,
		     XGMAC_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << XGMAC_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;

	clrsetbits_le32(&xgmac->dma_regs->ch0_tx_control,
			XGMAC_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			XGMAC_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << XGMAC_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&xgmac->dma_regs->ch0_rx_control,
			XGMAC_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			XGMAC_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << XGMAC_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (XGMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK <<
	       XGMAC_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
	      (XGMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_MASK <<
	       XGMAC_DMA_SYSBUS_MODE_WR_OSR_LMT_SHIFT) |
	      XGMAC_DMA_SYSBUS_MODE_EAME |
	      XGMAC_DMA_SYSBUS_MODE_BLEN16 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN8 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN4 |
	      XGMAC_DMA_SYSBUS_MODE_BLEN32;

	writel(val, &xgmac->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(xgmac->tx_descs, 0, xgmac->desc_size * XGMAC_DESCRIPTORS_TX);
	memset(xgmac->rx_descs, 0, xgmac->desc_size * XGMAC_DESCRIPTORS_RX);

	for (i = 0; i < XGMAC_DESCRIPTORS_TX; i++) {
		tx_desc = (struct xgmac_desc *)xgmac_get_desc(xgmac, i, false);

		xgmac->config->ops->xgmac_flush_desc(tx_desc);
	}

	for (i = 0; i < XGMAC_DESCRIPTORS_RX; i++) {
		rx_desc = (struct xgmac_desc *)xgmac_get_desc(xgmac, i, true);

		rx_desc->des0 = (uintptr_t)(xgmac->rx_dma_buf +
					    (i * XGMAC_MAX_PACKET_SIZE));
		rx_desc->des3 = XGMAC_DESC3_OWN;
		/* Flush the cache to memory */
		mb();
		xgmac->config->ops->xgmac_flush_desc(rx_desc);
		xgmac->config->ops->xgmac_inval_buffer(xgmac->rx_dma_buf +
						       (i * XGMAC_MAX_PACKET_SIZE),
						       XGMAC_MAX_PACKET_SIZE);
	}
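
	/*
	 * Editor's note on the ring just built: each RX descriptor's des0
	 * points at its own XGMAC_MAX_PACKET_SIZE slice of rx_dma_buf, so
	 * descriptor i owns bytes [i * XGMAC_MAX_PACKET_SIZE,
	 * (i + 1) * XGMAC_MAX_PACKET_SIZE), and setting XGMAC_DESC3_OWN
	 * hands that slice to the DMA engine until xgmac_recv() observes
	 * the bit cleared again.
	 */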

	writel(0, &xgmac->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)xgmac_get_desc(xgmac, 0, false),
	       &xgmac->dma_regs->ch0_txdesc_list_address);
	writel(XGMAC_DESCRIPTORS_TX - 1,
	       &xgmac->dma_regs->ch0_txdesc_ring_length);
	writel(0, &xgmac->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)xgmac_get_desc(xgmac, 0, true),
	       &xgmac->dma_regs->ch0_rxdesc_list_address);
	writel(XGMAC_DESCRIPTORS_RX - 1,
	       &xgmac->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&xgmac->dma_regs->ch0_rx_control,
		     XGMAC_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_TE);
	setbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_RE);

	/* The TX tail pointer is not written until we need to TX a packet */
	/*
	 * Point the RX tail pointer at the last descriptor. Ideally, we'd
	 * point at the first descriptor, implying all descriptors were
	 * available. However, that's not distinguishable from none of the
	 * descriptors being available.
	 */
	last_rx_desc = (ulong)xgmac_get_desc(xgmac, XGMAC_DESCRIPTORS_RX - 1, true);
	writel(last_rx_desc, &xgmac->dma_regs->ch0_rxdesc_tail_pointer);

	xgmac->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(xgmac->phy);
err_stop_resets:
	xgmac->config->ops->xgmac_stop_resets(dev);
err:
	pr_err("FAILED: %d\n", ret);
	return ret;
}

static void xgmac_stop(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	unsigned long start_time;
	u32 val;
	u32 trcsts;
	u32 txqsts;
	u32 prxq;
	u32 rxqsts;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!xgmac->started)
		return;
	xgmac->started = false;
	xgmac->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&xgmac->dma_regs->ch0_tx_control,
		     XGMAC_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		val = readl(&xgmac->mtl_regs->txq0_debug);

		trcsts = (val >> XGMAC_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			 XGMAC_MTL_TXQ0_DEBUG_TRCSTS_MASK;

		txqsts = val & XGMAC_MTL_TXQ0_DEBUG_TXQSTS;

		if (trcsts != XGMAC_MTL_TXQ0_DEBUG_TRCSTS_READ_STATE && !txqsts)
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&xgmac->mac_regs->tx_configuration,
		     XGMAC_MAC_CONF_TE);
	clrbits_le32(&xgmac->mac_regs->rx_configuration,
		     XGMAC_MAC_CONF_RE);

	/* Wait for all RX packets to drain out of MTL */
	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		val = readl(&xgmac->mtl_regs->rxq0_debug);

		prxq = (val >> XGMAC_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
		       XGMAC_MTL_RXQ0_DEBUG_PRXQ_MASK;

		rxqsts = (val >> XGMAC_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			 XGMAC_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if (!prxq && !rxqsts)
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&xgmac->dma_regs->ch0_rx_control,
		     XGMAC_DMA_CH0_RX_CONTROL_SR);

	if (xgmac->phy)
		phy_shutdown(xgmac->phy);

	xgmac->config->ops->xgmac_stop_resets(dev);

	debug("%s: OK\n", __func__);
}

static int xgmac_send(struct udevice *dev, void *packet, int length)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	struct xgmac_desc *tx_desc;
	unsigned long start_time;

	debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
	      length);

	memcpy(xgmac->tx_dma_buf, packet, length);
	xgmac->config->ops->xgmac_flush_buffer(xgmac->tx_dma_buf, length);

	tx_desc = xgmac_get_desc(xgmac, xgmac->tx_desc_idx, false);
	xgmac->tx_desc_idx++;
	xgmac->tx_desc_idx %= XGMAC_DESCRIPTORS_TX;

	tx_desc->des0 = (ulong)xgmac->tx_dma_buf;
	tx_desc->des1 = 0;
	tx_desc->des2 = length;
	/*
	 * Make sure that if HW sees the _OWN write below, it will see all the
	 * writes to the rest of the descriptor too.
	 */
	mb();
	tx_desc->des3 = XGMAC_DESC3_OWN | XGMAC_DESC3_FD | XGMAC_DESC3_LD | length;
	xgmac->config->ops->xgmac_flush_desc(tx_desc);

	writel((ulong)xgmac_get_desc(xgmac, xgmac->tx_desc_idx, false),
	       &xgmac->dma_regs->ch0_txdesc_tail_pointer);

	start_time = get_timer(0);

	while (get_timer(start_time) < XGMAC_TIMEOUT_100MS) {
		xgmac->config->ops->xgmac_inval_desc(tx_desc);
		if (!(readl(&tx_desc->des3) & XGMAC_DESC3_OWN))
			return 0;
	}
	debug("%s: TX timeout\n", __func__);

	return -ETIMEDOUT;
}

static int xgmac_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	struct xgmac_desc *rx_desc;
	int length;

	debug("%s(dev=%p, flags=0x%x):\n", __func__, dev, flags);

	rx_desc = xgmac_get_desc(xgmac, xgmac->rx_desc_idx, true);
	xgmac->config->ops->xgmac_inval_desc(rx_desc);
	if (rx_desc->des3 & XGMAC_DESC3_OWN) {
		debug("%s: RX packet not available\n", __func__);
		return -EAGAIN;
	}

	*packetp = xgmac->rx_dma_buf +
		   (xgmac->rx_desc_idx * XGMAC_MAX_PACKET_SIZE);
	length = rx_desc->des3 & XGMAC_RDES3_PKT_LENGTH_MASK;
	debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);

	xgmac->config->ops->xgmac_inval_buffer(*packetp, length);

	return length;
}

static int xgmac_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	u32 idx, idx_mask = xgmac->desc_per_cacheline - 1;
	uchar *packet_expected;
	struct xgmac_desc *rx_desc;

	debug("%s(packet=%p, length=%d)\n", __func__, packet, length);

	packet_expected = xgmac->rx_dma_buf +
			  (xgmac->rx_desc_idx * XGMAC_MAX_PACKET_SIZE);
	if (packet != packet_expected) {
		debug("%s: Unexpected packet (expected %p)\n", __func__,
		      packet_expected);
		return -EINVAL;
	}

	xgmac->config->ops->xgmac_inval_buffer(packet, length);

	if ((xgmac->rx_desc_idx & idx_mask) == idx_mask) {
		for (idx = xgmac->rx_desc_idx - idx_mask;
		     idx <= xgmac->rx_desc_idx;
		     idx++) {
			rx_desc = xgmac_get_desc(xgmac, idx, true);
			rx_desc->des0 = 0;
			/* Flush the cache to memory */
			mb();
			xgmac->config->ops->xgmac_flush_desc(rx_desc);
			xgmac->config->ops->xgmac_inval_buffer(packet, length);
			rx_desc->des0 = (u32)(ulong)(xgmac->rx_dma_buf +
						     (idx * XGMAC_MAX_PACKET_SIZE));
			rx_desc->des1 = 0;
			rx_desc->des2 = 0;
			/*
			 * Make sure that if HW sees the _OWN write below,
			 * it will see all the writes to the rest of the
			 * descriptor too.
			 */
			mb();
			rx_desc->des3 = XGMAC_DESC3_OWN;
			xgmac->config->ops->xgmac_flush_desc(rx_desc);
		}
		writel((ulong)rx_desc, &xgmac->dma_regs->ch0_rxdesc_tail_pointer);
	}

	xgmac->rx_desc_idx++;
	xgmac->rx_desc_idx %= XGMAC_DESCRIPTORS_RX;

	return 0;
}
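
/*
 * Editor's worked example of the recycling scheme above, assuming a 64-byte
 * ARCH_DMA_MINALIGN and a 32-byte descriptor (so desc_per_cacheline == 2 and
 * idx_mask == 1): descriptors are handed back to the hardware only in pairs.
 * After descriptor 0 is freed, nothing is written back; after descriptor 1
 * is freed, descriptors 0 and 1 are re-armed and flushed together, and the
 * tail pointer is advanced past the pair. This avoids flushing a cacheline
 * while the DMA engine may still be writing a neighboring descriptor in it.
 */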

static int xgmac_probe_resources_core(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	unsigned int desc_step;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	/* Maximum distance between neighboring descriptors, in bytes. */
	desc_step = sizeof(struct xgmac_desc);

	if (desc_step < ARCH_DMA_MINALIGN) {
		/*
		 * The hardware implementation cannot place one descriptor
		 * per cacheline, so multiple descriptors must share each
		 * cacheline in memory and cache management must be done
		 * carefully.
		 */
		xgmac->desc_size = BIT(fls(desc_step) - 1);
	} else {
		xgmac->desc_size = ALIGN(sizeof(struct xgmac_desc),
					 (unsigned int)ARCH_DMA_MINALIGN);
	}
	xgmac->desc_per_cacheline = ARCH_DMA_MINALIGN / xgmac->desc_size;

	xgmac->tx_descs = xgmac_alloc_descs(xgmac, XGMAC_DESCRIPTORS_TX);
	if (!xgmac->tx_descs) {
		debug("%s: xgmac_alloc_descs(tx) failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	xgmac->rx_descs = xgmac_alloc_descs(xgmac, XGMAC_DESCRIPTORS_RX);
	if (!xgmac->rx_descs) {
		debug("%s: xgmac_alloc_descs(rx) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_descs;
	}

	xgmac->tx_dma_buf = memalign(XGMAC_BUFFER_ALIGN, XGMAC_MAX_PACKET_SIZE);
	if (!xgmac->tx_dma_buf) {
		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_descs;
	}
	debug("%s: tx_dma_buf=%p\n", __func__, xgmac->tx_dma_buf);

	xgmac->rx_dma_buf = memalign(XGMAC_BUFFER_ALIGN, XGMAC_RX_BUFFER_SIZE);
	if (!xgmac->rx_dma_buf) {
		debug("%s: memalign(rx_dma_buf) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_tx_dma_buf;
	}
	debug("%s: rx_dma_buf=%p\n", __func__, xgmac->rx_dma_buf);

	xgmac->rx_pkt = malloc(XGMAC_MAX_PACKET_SIZE);
	if (!xgmac->rx_pkt) {
		debug("%s: malloc(rx_pkt) failed\n", __func__);
		ret = -ENOMEM;
		goto err_free_rx_dma_buf;
	}
	debug("%s: rx_pkt=%p\n", __func__, xgmac->rx_pkt);

	xgmac->config->ops->xgmac_inval_buffer(xgmac->rx_dma_buf,
					       XGMAC_MAX_PACKET_SIZE * XGMAC_DESCRIPTORS_RX);

	debug("%s: OK\n", __func__);
	return 0;

err_free_rx_dma_buf:
	free(xgmac->rx_dma_buf);
err_free_tx_dma_buf:
	free(xgmac->tx_dma_buf);
err_free_descs:
	xgmac_free_descs(xgmac->rx_descs);
err_free_tx_descs:
	xgmac_free_descs(xgmac->tx_descs);
err:
	debug("%s: returns %d\n", __func__, ret);
	return ret;
}
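
/*
 * Editor's worked example of the sizing above (the 32-byte descriptor is an
 * assumption for illustration): with sizeof(struct xgmac_desc) == 32 and
 * ARCH_DMA_MINALIGN == 64, desc_step < ARCH_DMA_MINALIGN, so desc_size ==
 * BIT(fls(32) - 1) == 32 and desc_per_cacheline == 64 / 32 == 2. Were the
 * descriptor 64 bytes or larger, each one would instead be padded up to a
 * whole number of cachelines.
 */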

static int xgmac_remove_resources_core(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	free(xgmac->rx_pkt);
	free(xgmac->rx_dma_buf);
	free(xgmac->tx_dma_buf);
	xgmac_free_descs(xgmac->rx_descs);
	xgmac_free_descs(xgmac->tx_descs);

	debug("%s: OK\n", __func__);
	return 0;
}

/* Board-specific Ethernet interface initialization. */
__weak int board_interface_eth_init(struct udevice *dev,
				    phy_interface_t interface_type)
{
	return 0;
}
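
/*
 * A board can override the weak hook above to validate or program its pin
 * mux for the requested interface; a minimal sketch (the supported mode is
 * an assumption, not taken from a real board file):
 *
 *	int board_interface_eth_init(struct udevice *dev,
 *				     phy_interface_t interface_type)
 *	{
 *		if (interface_type != PHY_INTERFACE_MODE_RGMII)
 *			return -EINVAL;
 *		return 0;
 *	}
 */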

static int xgmac_probe(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	xgmac->dev = dev;
	xgmac->config = (void *)dev_get_driver_data(dev);

	xgmac->regs = dev_read_addr(dev);
	if (xgmac->regs == FDT_ADDR_T_NONE) {
		pr_err("dev_read_addr() failed\n");
		return -ENODEV;
	}
	xgmac->mac_regs = (void *)(xgmac->regs + XGMAC_MAC_REGS_BASE);
	xgmac->mtl_regs = (void *)(xgmac->regs + XGMAC_MTL_REGS_BASE);
	xgmac->dma_regs = (void *)(xgmac->regs + XGMAC_DMA_REGS_BASE);

	xgmac->max_speed = dev_read_u32_default(dev, "max-speed", 0);

	ret = xgmac_probe_resources_core(dev);
	if (ret < 0) {
		pr_err("xgmac_probe_resources_core() failed: %d\n", ret);
		return ret;
	}

	ret = xgmac->config->ops->xgmac_probe_resources(dev);
	if (ret < 0) {
		pr_err("xgmac_probe_resources() failed: %d\n", ret);
		goto err_remove_resources_core;
	}

	ret = xgmac->config->ops->xgmac_start_clks(dev);
	if (ret < 0) {
		pr_err("xgmac_start_clks() failed: %d\n", ret);
		goto err_remove_resources_core;
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		xgmac->mii = eth_phy_get_mdio_bus(dev);

	if (!xgmac->mii) {
		xgmac->mii = mdio_alloc();
		if (!xgmac->mii) {
			pr_err("mdio_alloc() failed\n");
			ret = -ENOMEM;
			goto err_stop_clks;
		}
		xgmac->mii->read = xgmac_mdio_read;
		xgmac->mii->write = xgmac_mdio_write;
		xgmac->mii->priv = xgmac;
		strcpy(xgmac->mii->name, dev->name);

		ret = mdio_register(xgmac->mii);
		if (ret < 0) {
			pr_err("mdio_register() failed: %d\n", ret);
			goto err_free_mdio;
		}
	}

	if (IS_ENABLED(CONFIG_DM_ETH_PHY))
		eth_phy_set_mdio_bus(dev, xgmac->mii);

	debug("%s: OK\n", __func__);
	return 0;

err_free_mdio:
	mdio_free(xgmac->mii);
err_stop_clks:
	xgmac->config->ops->xgmac_stop_clks(dev);
err_remove_resources_core:
	xgmac_remove_resources_core(dev);

	debug("%s: returns %d\n", __func__, ret);
	return ret;
}

static int xgmac_remove(struct udevice *dev)
{
	struct xgmac_priv *xgmac = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	mdio_unregister(xgmac->mii);
	mdio_free(xgmac->mii);
	xgmac->config->ops->xgmac_stop_clks(dev);
	xgmac->config->ops->xgmac_remove_resources(dev);

	xgmac_remove_resources_core(dev);

	debug("%s: OK\n", __func__);
	return 0;
}

int xgmac_null_ops(struct udevice *dev)
{
	return 0;
}

static const struct eth_ops xgmac_ops = {
	.start = xgmac_start,
	.stop = xgmac_stop,
	.send = xgmac_send,
	.recv = xgmac_recv,
	.free_pkt = xgmac_free_pkt,
	.write_hwaddr = xgmac_write_hwaddr,
	.read_rom_hwaddr = xgmac_read_rom_hwaddr,
};

static const struct udevice_id xgmac_ids[] = {
	{
		.compatible = "intel,socfpga-dwxgmac",
		.data = (ulong)&xgmac_socfpga_config
	},
	{ }
};
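
/*
 * Per the note at the top of the file, supporting another configuration
 * would mean adding an entry here whose .data points at a different config
 * structure with its own ops; a hypothetical example (the compatible and
 * config symbol are illustrative, not real):
 *
 *	{
 *		.compatible = "vendor,board-dwxgmac",
 *		.data = (ulong)&xgmac_vendor_config
 *	},
 */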

U_BOOT_DRIVER(eth_xgmac) = {
	.name = "eth_xgmac",
	.id = UCLASS_ETH,
	.of_match = of_match_ptr(xgmac_ids),
	.probe = xgmac_probe,
	.remove = xgmac_remove,
	.ops = &xgmac_ops,
	.priv_auto = sizeof(struct xgmac_priv),
	.plat_auto = sizeof(struct eth_pdata),
};