blob: abfdc75ad8d4da1c42e7a2a316b637960d3d7b57 [file] [log] [blame]
Stefan Roesef9947682018-10-26 14:53:27 +02001// SPDX-License-Identifier: GPL-2.0+
2/*
3 * MediaTek ethernet IP driver for U-Boot
4 *
5 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
6 *
7 * This code is mostly based on the code extracted from this MediaTek
8 * github repository:
9 *
10 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
11 *
12 * I was not able to find a specific license or other developers
13 * copyrights here, so I can't add them here.
14 */
15
16#include <common.h>
17#include <dm.h>
18#include <malloc.h>
19#include <miiphy.h>
20#include <net.h>
developere4387bd2019-09-25 17:45:32 +080021#include <reset.h>
Stefan Roesef9947682018-10-26 14:53:27 +020022#include <wait_bit.h>
23#include <asm/io.h>
24#include <linux/bitfield.h>
25#include <linux/err.h>
26
Stefan Roesef9947682018-10-26 14:53:27 +020027/* Ethernet frame engine register */
28#define PDMA_RELATED 0x0800
29
30#define TX_BASE_PTR0 (PDMA_RELATED + 0x000)
31#define TX_MAX_CNT0 (PDMA_RELATED + 0x004)
32#define TX_CTX_IDX0 (PDMA_RELATED + 0x008)
33#define TX_DTX_IDX0 (PDMA_RELATED + 0x00c)
34
35#define RX_BASE_PTR0 (PDMA_RELATED + 0x100)
36#define RX_MAX_CNT0 (PDMA_RELATED + 0x104)
37#define RX_CALC_IDX0 (PDMA_RELATED + 0x108)
38
39#define PDMA_GLO_CFG (PDMA_RELATED + 0x204)
40#define PDMA_RST_IDX (PDMA_RELATED + 0x208)
41#define DLY_INT_CFG (PDMA_RELATED + 0x20c)
42
43#define SDM_RELATED 0x0c00
44
45#define SDM_MAC_ADRL (SDM_RELATED + 0x0c) /* MAC address LSB */
46#define SDM_MAC_ADRH (SDM_RELATED + 0x10) /* MAC Address MSB */
47
48#define RST_DTX_IDX0 BIT(0)
49#define RST_DRX_IDX0 BIT(16)
50
51#define TX_DMA_EN BIT(0)
52#define TX_DMA_BUSY BIT(1)
53#define RX_DMA_EN BIT(2)
54#define RX_DMA_BUSY BIT(3)
55#define TX_WB_DDONE BIT(6)
56
57/* Ethernet switch register */
58#define MT7628_SWITCH_FCT0 0x0008
59#define MT7628_SWITCH_PFC1 0x0014
60#define MT7628_SWITCH_FPA 0x0084
61#define MT7628_SWITCH_SOCPC 0x008c
62#define MT7628_SWITCH_POC0 0x0090
63#define MT7628_SWITCH_POC2 0x0098
64#define MT7628_SWITCH_SGC 0x009c
65#define MT7628_SWITCH_PCR0 0x00c0
66#define PCR0_PHY_ADDR GENMASK(4, 0)
67#define PCR0_PHY_REG GENMASK(12, 8)
68#define PCR0_WT_PHY_CMD BIT(13)
69#define PCR0_RD_PHY_CMD BIT(14)
70#define PCR0_WT_DATA GENMASK(31, 16)
71
72#define MT7628_SWITCH_PCR1 0x00c4
73#define PCR1_WT_DONE BIT(0)
74#define PCR1_RD_RDY BIT(1)
75#define PCR1_RD_DATA GENMASK(31, 16)
76
77#define MT7628_SWITCH_FPA1 0x00c8
78#define MT7628_SWITCH_FCT2 0x00cc
79#define MT7628_SWITCH_SGC2 0x00e4
80#define MT7628_SWITCH_BMU_CTRL 0x0110
81
82/* rxd2 */
83#define RX_DMA_DONE BIT(31)
84#define RX_DMA_LSO BIT(30)
85#define RX_DMA_PLEN0 GENMASK(29, 16)
86#define RX_DMA_TAG BIT(15)
87
/*
 * PDMA receive descriptor (hardware layout, 16 bytes).
 *
 * rxd1: physical address of the receive buffer (set from rx_buf[]).
 * rxd2: status word — RX_DMA_DONE/RX_DMA_LSO flags and the received
 *       frame length in RX_DMA_PLEN0 (see bit definitions above).
 * rxd3/rxd4: not interpreted by this driver (rxd4 is only zeroed).
 */
struct fe_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);
94
95#define TX_DMA_PLEN0 GENMASK(29, 16)
96#define TX_DMA_LS1 BIT(14)
97#define TX_DMA_LS0 BIT(30)
98#define TX_DMA_DONE BIT(31)
99
100#define TX_DMA_INS_VLAN_MT7621 BIT(16)
101#define TX_DMA_INS_VLAN BIT(7)
102#define TX_DMA_INS_PPPOE BIT(12)
103#define TX_DMA_PN GENMASK(26, 24)
104
/*
 * PDMA transmit descriptor (hardware layout, 16 bytes).
 *
 * txd1: physical address of the frame to send.
 * txd2: control/status word — frame length in TX_DMA_PLEN0, last-segment
 *       flag TX_DMA_LS0, and the hardware-owned TX_DMA_DONE bit.
 * txd4: forward-port field (TX_DMA_PN); txd3 is unused by this driver.
 */
struct fe_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);
111
112#define NUM_RX_DESC 256
113#define NUM_TX_DESC 4
developera5644112019-09-25 17:45:33 +0800114#define NUM_PHYS 5
Stefan Roesef9947682018-10-26 14:53:27 +0200115
116#define PADDING_LENGTH 60
117
118#define MTK_QDMA_PAGE_SIZE 2048
119
120#define CONFIG_MDIO_TIMEOUT 100
121#define CONFIG_DMA_STOP_TIMEOUT 100
122#define CONFIG_TX_DMA_TIMEOUT 100
123
Stefan Roesef9947682018-10-26 14:53:27 +0200124struct mt7628_eth_dev {
125 void __iomem *base; /* frame engine base address */
126 void __iomem *eth_sw_base; /* switch base address */
Stefan Roesef9947682018-10-26 14:53:27 +0200127
128 struct mii_dev *bus;
129
130 struct fe_tx_dma *tx_ring;
131 struct fe_rx_dma *rx_ring;
132
133 u8 *rx_buf[NUM_RX_DESC];
134
135 /* Point to the next RXD DMA wants to use in RXD Ring0 */
136 int rx_dma_idx;
137 /* Point to the next TXD in TXD Ring0 CPU wants to use */
138 int tx_dma_idx;
developere4387bd2019-09-25 17:45:32 +0800139
140 struct reset_ctl rst_ephy;
developera5644112019-09-25 17:45:33 +0800141
142 struct phy_device *phy;
Stefan Roesef9947682018-10-26 14:53:27 +0200143};
144
145static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
146{
147 void __iomem *base = priv->eth_sw_base;
148 int ret;
149
150 ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
151 CONFIG_MDIO_TIMEOUT, false);
152 if (ret) {
153 printf("MDIO operation timeout!\n");
154 return -ETIMEDOUT;
155 }
156
157 return 0;
158}
159
160static int mii_mgr_read(struct mt7628_eth_dev *priv,
161 u32 phy_addr, u32 phy_register, u32 *read_data)
162{
163 void __iomem *base = priv->eth_sw_base;
164 u32 status = 0;
165 u32 ret;
166
167 *read_data = 0xffff;
168 /* Make sure previous read operation is complete */
169 ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
170 if (ret)
171 return ret;
172
173 writel(PCR0_RD_PHY_CMD |
174 FIELD_PREP(PCR0_PHY_REG, phy_register) |
175 FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
176 base + MT7628_SWITCH_PCR0);
177
178 /* Make sure previous read operation is complete */
179 ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
180 if (ret)
181 return ret;
182
183 status = readl(base + MT7628_SWITCH_PCR1);
184 *read_data = FIELD_GET(PCR1_RD_DATA, status);
185
186 return 0;
187}
188
189static int mii_mgr_write(struct mt7628_eth_dev *priv,
190 u32 phy_addr, u32 phy_register, u32 write_data)
191{
192 void __iomem *base = priv->eth_sw_base;
193 u32 data;
194 int ret;
195
196 /* Make sure previous write operation is complete */
197 ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
198 if (ret)
199 return ret;
200
201 data = FIELD_PREP(PCR0_WT_DATA, write_data) |
202 FIELD_PREP(PCR0_PHY_REG, phy_register) |
203 FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
204 PCR0_WT_PHY_CMD;
205 writel(data, base + MT7628_SWITCH_PCR0);
206
207 return mdio_wait_read(priv, PCR1_WT_DONE, true);
208}
209
210static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
211{
212 u32 val;
213 int ret;
214
215 ret = mii_mgr_read(bus->priv, addr, reg, &val);
216 if (ret)
217 return ret;
218
219 return val;
220}
221
222static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
223 u16 value)
224{
225 return mii_mgr_write(bus->priv, addr, reg, value);
226}
227
/*
 * Program the embedded ethernet PHYs with MediaTek's reference
 * initialization values.  The PHY registers are banked: a write to
 * register 31 selects a page and subsequent writes hit page-local
 * registers.  The magic values come from the MediaTek reference code
 * (see file header); their detailed meaning is undocumented.
 */
static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
{
	int i;

	mii_mgr_write(priv, 0, 31, 0x2000);	/* change G2 page */
	mii_mgr_write(priv, 0, 26, 0x0000);

	/* Per-port setup for all 5 embedded PHYs */
	for (i = 0; i < 5; i++) {
		mii_mgr_write(priv, i, 31, 0x8000);	/* change L0 page */
		mii_mgr_write(priv, i, 0, 0x3100);

		/* EEE disable */
		mii_mgr_write(priv, i, 30, 0xa000);
		mii_mgr_write(priv, i, 31, 0xa000);	/* change L2 page */
		mii_mgr_write(priv, i, 16, 0x0606);
		mii_mgr_write(priv, i, 23, 0x0f0e);
		mii_mgr_write(priv, i, 24, 0x1610);
		mii_mgr_write(priv, i, 30, 0x1f15);
		mii_mgr_write(priv, i, 28, 0x6111);
	}

	/* 100Base AOI setting */
	mii_mgr_write(priv, 0, 31, 0x5000);	/* change G5 page */
	mii_mgr_write(priv, 0, 19, 0x004a);
	mii_mgr_write(priv, 0, 20, 0x015a);
	mii_mgr_write(priv, 0, 21, 0x00ee);
	mii_mgr_write(priv, 0, 22, 0x0033);
	mii_mgr_write(priv, 0, 23, 0x020a);
	mii_mgr_write(priv, 0, 24, 0x0000);
	mii_mgr_write(priv, 0, 25, 0x024a);
	mii_mgr_write(priv, 0, 26, 0x035a);
	mii_mgr_write(priv, 0, 27, 0x02ee);
	mii_mgr_write(priv, 0, 28, 0x0233);
	mii_mgr_write(priv, 0, 29, 0x000a);
	mii_mgr_write(priv, 0, 30, 0x0000);

	/* Fix EPHY idle state abnormal behavior */
	mii_mgr_write(priv, 0, 31, 0x4000);	/* change G4 page */
	mii_mgr_write(priv, 0, 29, 0x000d);
	mii_mgr_write(priv, 0, 30, 0x0500);
}
269
/*
 * Bring up the integrated RT305x-class 10/100 switch: program flow
 * control, port/VLAN behavior and the buffer management unit, then
 * reset and re-initialize the embedded PHYs.  The register values are
 * taken from the MediaTek reference code; the write order is kept as-is.
 */
static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2);	/* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125Mhz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* Reset PHY via the dedicated reset line, then let it settle */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}
301
302static void eth_dma_start(struct mt7628_eth_dev *priv)
303{
304 void __iomem *base = priv->base;
305
306 setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
307}
308
309static void eth_dma_stop(struct mt7628_eth_dev *priv)
310{
311 void __iomem *base = priv->base;
312 int ret;
313
314 clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
315
316 /* Wait for DMA to stop */
317 ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
318 RX_DMA_BUSY | TX_DMA_BUSY, false,
319 CONFIG_DMA_STOP_TIMEOUT, false);
320 if (ret)
321 printf("DMA stop timeout error!\n");
322}
323
324static int mt7628_eth_write_hwaddr(struct udevice *dev)
325{
326 struct mt7628_eth_dev *priv = dev_get_priv(dev);
327 void __iomem *base = priv->base;
328 u8 *addr = ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr;
329 u32 val;
330
331 /* Set MAC address. */
332 val = addr[0];
333 val = (val << 8) | addr[1];
334 writel(val, base + SDM_MAC_ADRH);
335
336 val = addr[2];
337 val = (val << 8) | addr[3];
338 val = (val << 8) | addr[4];
339 val = (val << 8) | addr[5];
340 writel(val, base + SDM_MAC_ADRL);
341
342 return 0;
343}
344
345static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
346{
347 struct mt7628_eth_dev *priv = dev_get_priv(dev);
348 void __iomem *base = priv->base;
349 int ret;
350 int idx;
351 int i;
352
353 idx = priv->tx_dma_idx;
354
355 /* Pad message to a minimum length */
356 if (length < PADDING_LENGTH) {
357 char *p = (char *)packet;
358
359 for (i = 0; i < PADDING_LENGTH - length; i++)
360 p[length + i] = 0;
361 length = PADDING_LENGTH;
362 }
363
364 /* Check if buffer is ready for next TX DMA */
365 ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
366 CONFIG_TX_DMA_TIMEOUT, false);
367 if (ret) {
368 printf("TX: DMA still busy on buffer %d\n", idx);
369 return ret;
370 }
371
372 flush_dcache_range((u32)packet, (u32)packet + length);
373
374 priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
375 priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
376 priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
377 priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;
378
379 idx = (idx + 1) % NUM_TX_DESC;
380
381 /* Make sure the writes executed at this place */
382 wmb();
383 writel(idx, base + TX_CTX_IDX0);
384
385 priv->tx_dma_idx = idx;
386
387 return 0;
388}
389
/*
 * Fetch one received frame from RX ring 0, if any.
 *
 * Returns the frame length and points *packetp at the receive buffer
 * (with its cache lines invalidated), or -EAGAIN when no descriptor has
 * completed, or -EIO on an implausible length.  The descriptor is only
 * re-armed here; it is handed back to the hardware (RX_CALC_IDX0) later
 * in mt7628_eth_free_pkt().
 */
static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	/* DMA sets RX_DMA_DONE once the descriptor holds a complete frame */
	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		return -EIO;
	}

	/* Discard stale cache lines before the CPU reads the DMA'd data */
	*packetp = priv->rx_buf[idx];
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	/* Re-arm the descriptor for the next reception */
	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the writes executed at this place */
	wmb();

	return length;
}
420
421static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
422{
423 struct mt7628_eth_dev *priv = dev_get_priv(dev);
424 void __iomem *base = priv->base;
425 int idx;
426
427 idx = priv->rx_dma_idx;
428
429 /* Move point to next RXD which wants to alloc */
430 writel(idx, base + RX_CALC_IDX0);
431
432 /* Update to Next packet point that was received */
433 idx = (idx + 1) % NUM_RX_DESC;
434
435 priv->rx_dma_idx = idx;
436
437 return 0;
438}
439
Stefan Roesef9947682018-10-26 14:53:27 +0200440static int mt7628_eth_start(struct udevice *dev)
441{
442 struct mt7628_eth_dev *priv = dev_get_priv(dev);
443 void __iomem *base = priv->base;
444 uchar packet[MTK_QDMA_PAGE_SIZE];
445 uchar *packetp;
developera5644112019-09-25 17:45:33 +0800446 int ret;
Stefan Roesef9947682018-10-26 14:53:27 +0200447 int i;
448
449 for (i = 0; i < NUM_RX_DESC; i++) {
450 memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
451 priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
452 priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
453 }
454
455 for (i = 0; i < NUM_TX_DESC; i++) {
456 memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
457 priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
458 priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
459 }
460
461 priv->rx_dma_idx = 0;
462 priv->tx_dma_idx = 0;
463
464 /* Make sure the writes executed at this place */
465 wmb();
466
467 /* disable delay interrupt */
468 writel(0, base + DLY_INT_CFG);
469
470 clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);
471
472 /* Tell the adapter where the TX/RX rings are located. */
473 writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
474 writel(CPHYSADDR((u32)&priv->tx_ring[0]), base + TX_BASE_PTR0);
475
476 writel(NUM_RX_DESC, base + RX_MAX_CNT0);
477 writel(NUM_TX_DESC, base + TX_MAX_CNT0);
478
479 writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
480 writel(RST_DTX_IDX0, base + PDMA_RST_IDX);
481
482 writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
483 writel(RST_DRX_IDX0, base + PDMA_RST_IDX);
484
485 /* Make sure the writes executed at this place */
486 wmb();
487 eth_dma_start(priv);
488
developera5644112019-09-25 17:45:33 +0800489 if (priv->phy) {
490 ret = phy_startup(priv->phy);
491 if (ret)
492 return ret;
Stefan Roesef9947682018-10-26 14:53:27 +0200493
developera5644112019-09-25 17:45:33 +0800494 if (!priv->phy->link)
495 return -EAGAIN;
Stefan Roesef9947682018-10-26 14:53:27 +0200496 }
497
498 /*
499 * The integrated switch seems to queue some received ethernet
500 * packets in some FIFO. Lets read the already queued packets
501 * out by using the receive routine, so that these old messages
502 * are dropped before the new xfer starts.
503 */
504 packetp = &packet[0];
505 while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
506 mt7628_eth_free_pkt(dev, packetp, 0);
507
508 return 0;
509}
510
/* Bring the interface down by halting both PDMA directions */
static void mt7628_eth_stop(struct udevice *dev)
{
	eth_dma_stop(dev_get_priv(dev));
}
517
518static int mt7628_eth_probe(struct udevice *dev)
519{
520 struct mt7628_eth_dev *priv = dev_get_priv(dev);
Stefan Roesef9947682018-10-26 14:53:27 +0200521 struct mii_dev *bus;
developera5644112019-09-25 17:45:33 +0800522 int poll_link_phy;
Stefan Roesef9947682018-10-26 14:53:27 +0200523 int ret;
524 int i;
525
526 /* Save frame-engine base address for later use */
527 priv->base = dev_remap_addr_index(dev, 0);
528 if (IS_ERR(priv->base))
529 return PTR_ERR(priv->base);
530
531 /* Save switch base address for later use */
532 priv->eth_sw_base = dev_remap_addr_index(dev, 1);
533 if (IS_ERR(priv->eth_sw_base))
534 return PTR_ERR(priv->eth_sw_base);
535
developere4387bd2019-09-25 17:45:32 +0800536 /* Reset controller */
537 ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
Stefan Roesef9947682018-10-26 14:53:27 +0200538 if (ret) {
developere4387bd2019-09-25 17:45:32 +0800539 pr_err("unable to find reset controller for ethernet PHYs\n");
Stefan Roesef9947682018-10-26 14:53:27 +0200540 return ret;
541 }
542
Stefan Roesef9947682018-10-26 14:53:27 +0200543 /* Put rx and tx rings into KSEG1 area (uncached) */
544 priv->tx_ring = (struct fe_tx_dma *)
545 KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
546 sizeof(*priv->tx_ring) * NUM_TX_DESC));
547 priv->rx_ring = (struct fe_rx_dma *)
548 KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
549 sizeof(*priv->rx_ring) * NUM_RX_DESC));
550
551 for (i = 0; i < NUM_RX_DESC; i++)
552 priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);
553
554 bus = mdio_alloc();
555 if (!bus) {
556 printf("Failed to allocate MDIO bus\n");
557 return -ENOMEM;
558 }
559
560 bus->read = mt7628_mdio_read;
561 bus->write = mt7628_mdio_write;
562 snprintf(bus->name, sizeof(bus->name), dev->name);
563 bus->priv = (void *)priv;
564
565 ret = mdio_register(bus);
566 if (ret)
567 return ret;
568
developera5644112019-09-25 17:45:33 +0800569 poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
570 if (poll_link_phy >= 0) {
571 if (poll_link_phy >= NUM_PHYS) {
572 pr_err("invalid phy %d for poll-link-phy\n",
573 poll_link_phy);
574 return ret;
575 }
576
577 priv->phy = phy_connect(bus, poll_link_phy, dev,
578 PHY_INTERFACE_MODE_MII);
579 if (!priv->phy) {
580 pr_err("failed to probe phy %d\n", poll_link_phy);
581 return -ENODEV;
582 }
583
584 priv->phy->advertising = priv->phy->supported;
585 phy_config(priv->phy);
586 }
587
Stefan Roesef9947682018-10-26 14:53:27 +0200588 /* Switch configuration */
589 rt305x_esw_init(priv);
590
591 return 0;
592}
593
/* Driver-model ethernet operations */
static const struct eth_ops mt7628_eth_ops = {
	.start = mt7628_eth_start,
	.send = mt7628_eth_send,
	.recv = mt7628_eth_recv,
	.free_pkt = mt7628_eth_free_pkt,
	.stop = mt7628_eth_stop,
	.write_hwaddr = mt7628_eth_write_hwaddr,
};
602
/* Device-tree compatible strings this driver binds to */
static const struct udevice_id mt7628_eth_ids[] = {
	{ .compatible = "mediatek,mt7628-eth" },
	{ }
};
607
/* Driver-model registration for the MT7628/88 ethernet controller */
U_BOOT_DRIVER(mt7628_eth) = {
	.name = "mt7628_eth",
	.id = UCLASS_ETH,
	.of_match = mt7628_eth_ids,
	.probe = mt7628_eth_probe,
	.ops = &mt7628_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mt7628_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};