// SPDX-License-Identifier: GPL-2.0+
/*
 * MediaTek ethernet IP driver for U-Boot
 *
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * This code is mostly based on the code extracted from this MediaTek
 * github repository:
 *
 * https://github.com/MediaTek-Labs/linkit-smart-uboot.git
 *
 * I was not able to find a specific license or other developers'
 * copyrights here, so I can't add them here.
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <linux/bitfield.h>
#include <linux/err.h>

/* Ethernet frame engine registers */
#define PDMA_RELATED		0x0800

#define TX_BASE_PTR0		(PDMA_RELATED + 0x000)
#define TX_MAX_CNT0		(PDMA_RELATED + 0x004)
#define TX_CTX_IDX0		(PDMA_RELATED + 0x008)
#define TX_DTX_IDX0		(PDMA_RELATED + 0x00c)

#define RX_BASE_PTR0		(PDMA_RELATED + 0x100)
#define RX_MAX_CNT0		(PDMA_RELATED + 0x104)
#define RX_CALC_IDX0		(PDMA_RELATED + 0x108)

#define PDMA_GLO_CFG		(PDMA_RELATED + 0x204)
#define PDMA_RST_IDX		(PDMA_RELATED + 0x208)
#define DLY_INT_CFG		(PDMA_RELATED + 0x20c)

#define SDM_RELATED		0x0c00

#define SDM_MAC_ADRL		(SDM_RELATED + 0x0c)	/* MAC address LSB */
#define SDM_MAC_ADRH		(SDM_RELATED + 0x10)	/* MAC address MSB */

#define RST_DTX_IDX0		BIT(0)
#define RST_DRX_IDX0		BIT(16)

#define TX_DMA_EN		BIT(0)
#define TX_DMA_BUSY		BIT(1)
#define RX_DMA_EN		BIT(2)
#define RX_DMA_BUSY		BIT(3)
#define TX_WB_DDONE		BIT(6)

/* Ethernet switch registers */
#define MT7628_SWITCH_FCT0	0x0008
#define MT7628_SWITCH_PFC1	0x0014
#define MT7628_SWITCH_PVIDC0	0x0040
#define MT7628_SWITCH_PVIDC1	0x0044
#define MT7628_SWITCH_PVIDC2	0x0048
#define MT7628_SWITCH_PVIDC3	0x004c
#define MT7628_SWITCH_VMSC0	0x0070
#define MT7628_SWITCH_FPA	0x0084
#define MT7628_SWITCH_SOCPC	0x008c
#define MT7628_SWITCH_POC0	0x0090
#define MT7628_SWITCH_POC2	0x0098
#define MT7628_SWITCH_SGC	0x009c
#define MT7628_SWITCH_PCR0	0x00c0
#define PCR0_PHY_ADDR		GENMASK(4, 0)
#define PCR0_PHY_REG		GENMASK(12, 8)
#define PCR0_WT_PHY_CMD		BIT(13)
#define PCR0_RD_PHY_CMD		BIT(14)
#define PCR0_WT_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_PCR1	0x00c4
#define PCR1_WT_DONE		BIT(0)
#define PCR1_RD_RDY		BIT(1)
#define PCR1_RD_DATA		GENMASK(31, 16)

#define MT7628_SWITCH_FPA1	0x00c8
#define MT7628_SWITCH_FCT2	0x00cc
#define MT7628_SWITCH_SGC2	0x00e4
#define MT7628_SWITCH_BMU_CTRL	0x0110

/* rxd2 flags */
#define RX_DMA_DONE		BIT(31)
#define RX_DMA_LSO		BIT(30)
#define RX_DMA_PLEN0		GENMASK(29, 16)
#define RX_DMA_TAG		BIT(15)

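/*
 * PDMA RX descriptor as used by this driver: rxd1 holds the physical
 * address of the packet buffer, rxd2 carries the DMA-done flag and the
 * received packet length (RX_DMA_PLEN0). rxd3 and rxd4 are not
 * interpreted here.
 */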
struct fe_rx_dma {
	unsigned int rxd1;
	unsigned int rxd2;
	unsigned int rxd3;
	unsigned int rxd4;
} __packed __aligned(4);

#define TX_DMA_PLEN0		GENMASK(29, 16)
#define TX_DMA_LS1		BIT(14)
#define TX_DMA_LS0		BIT(30)
#define TX_DMA_DONE		BIT(31)

#define TX_DMA_INS_VLAN_MT7621	BIT(16)
#define TX_DMA_INS_VLAN		BIT(7)
#define TX_DMA_INS_PPPOE	BIT(12)
#define TX_DMA_PN		GENMASK(26, 24)

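/*
 * PDMA TX descriptor, mirroring the RX layout: txd1 takes the buffer
 * physical address, txd2 the packet length (TX_DMA_PLEN0) plus the
 * last-segment and DMA-done flags, and txd4 the destination port
 * number (TX_DMA_PN).
 */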
struct fe_tx_dma {
	unsigned int txd1;
	unsigned int txd2;
	unsigned int txd3;
	unsigned int txd4;
} __packed __aligned(4);

#define NUM_RX_DESC		256
#define NUM_TX_DESC		4
#define NUM_PHYS		5

#define PADDING_LENGTH		60

#define MTK_QDMA_PAGE_SIZE	2048

#define CONFIG_MDIO_TIMEOUT	100
#define CONFIG_DMA_STOP_TIMEOUT	100
#define CONFIG_TX_DMA_TIMEOUT	100

struct mt7628_eth_dev {
	void __iomem *base;		/* frame engine base address */
	void __iomem *eth_sw_base;	/* switch base address */

	struct mii_dev *bus;

	struct fe_tx_dma *tx_ring;
	struct fe_rx_dma *rx_ring;

	u8 *rx_buf[NUM_RX_DESC];

	/* Index of the next RXD the DMA engine will use in RXD ring 0 */
	int rx_dma_idx;
	/* Index of the next TXD the CPU will use in TXD ring 0 */
	int tx_dma_idx;

	struct reset_ctl rst_ephy;

	struct phy_device *phy;

	int wan_port;
};

static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length);

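/*
 * PHY registers are accessed indirectly through the switch: a command
 * written to PCR0 selects the PHY address and register (plus the write
 * data for writes), and PCR1 signals completion (PCR1_WT_DONE /
 * PCR1_RD_RDY) and returns the read data.
 */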
static int mdio_wait_read(struct mt7628_eth_dev *priv, u32 mask, bool mask_set)
{
	void __iomem *base = priv->eth_sw_base;
	int ret;

	ret = wait_for_bit_le32(base + MT7628_SWITCH_PCR1, mask, mask_set,
				CONFIG_MDIO_TIMEOUT, false);
	if (ret) {
		printf("MDIO operation timeout!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

static int mii_mgr_read(struct mt7628_eth_dev *priv,
			u32 phy_addr, u32 phy_register, u32 *read_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 status = 0;
	int ret;

	*read_data = 0xffff;
	/* Make sure the previous read operation is complete */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, false);
	if (ret)
		return ret;

	writel(PCR0_RD_PHY_CMD |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr),
	       base + MT7628_SWITCH_PCR0);

	/* Wait for the read data to become ready */
	ret = mdio_wait_read(priv, PCR1_RD_RDY, true);
	if (ret)
		return ret;

	status = readl(base + MT7628_SWITCH_PCR1);
	*read_data = FIELD_GET(PCR1_RD_DATA, status);

	return 0;
}

static int mii_mgr_write(struct mt7628_eth_dev *priv,
			 u32 phy_addr, u32 phy_register, u32 write_data)
{
	void __iomem *base = priv->eth_sw_base;
	u32 data;
	int ret;

	/* Make sure the previous write operation is complete */
	ret = mdio_wait_read(priv, PCR1_WT_DONE, false);
	if (ret)
		return ret;

	data = FIELD_PREP(PCR0_WT_DATA, write_data) |
	       FIELD_PREP(PCR0_PHY_REG, phy_register) |
	       FIELD_PREP(PCR0_PHY_ADDR, phy_addr) |
	       PCR0_WT_PHY_CMD;
	writel(data, base + MT7628_SWITCH_PCR0);

	return mdio_wait_read(priv, PCR1_WT_DONE, true);
}

static int mt7628_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	u32 val;
	int ret;

	ret = mii_mgr_read(bus->priv, addr, reg, &val);
	if (ret)
		return ret;

	return val;
}

static int mt7628_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			     u16 value)
{
	return mii_mgr_write(bus->priv, addr, reg, value);
}

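/*
 * The page/register values below are taken as-is from the MediaTek
 * reference code mentioned in the header above; the embedded PHY pages
 * do not appear to be publicly documented.
 */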
static void mt7628_ephy_init(struct mt7628_eth_dev *priv)
{
	int i;

	mii_mgr_write(priv, 0, 31, 0x2000);	/* change G2 page */
	mii_mgr_write(priv, 0, 26, 0x0000);

	for (i = 0; i < 5; i++) {
		mii_mgr_write(priv, i, 31, 0x8000);	/* change L0 page */
		mii_mgr_write(priv, i, 0, 0x3100);

		/* EEE disable */
		mii_mgr_write(priv, i, 30, 0xa000);
		mii_mgr_write(priv, i, 31, 0xa000);	/* change L2 page */
		mii_mgr_write(priv, i, 16, 0x0606);
		mii_mgr_write(priv, i, 23, 0x0f0e);
		mii_mgr_write(priv, i, 24, 0x1610);
		mii_mgr_write(priv, i, 30, 0x1f15);
		mii_mgr_write(priv, i, 28, 0x6111);
	}

	/* 100Base AOI setting */
	mii_mgr_write(priv, 0, 31, 0x5000);	/* change G5 page */
	mii_mgr_write(priv, 0, 19, 0x004a);
	mii_mgr_write(priv, 0, 20, 0x015a);
	mii_mgr_write(priv, 0, 21, 0x00ee);
	mii_mgr_write(priv, 0, 22, 0x0033);
	mii_mgr_write(priv, 0, 23, 0x020a);
	mii_mgr_write(priv, 0, 24, 0x0000);
	mii_mgr_write(priv, 0, 25, 0x024a);
	mii_mgr_write(priv, 0, 26, 0x035a);
	mii_mgr_write(priv, 0, 27, 0x02ee);
	mii_mgr_write(priv, 0, 28, 0x0233);
	mii_mgr_write(priv, 0, 29, 0x000a);
	mii_mgr_write(priv, 0, 30, 0x0000);

	/* Fix EPHY idle state abnormal behavior */
	mii_mgr_write(priv, 0, 31, 0x4000);	/* change G4 page */
	mii_mgr_write(priv, 0, 29, 0x000d);
	mii_mgr_write(priv, 0, 30, 0x0500);
}

static void rt305x_esw_init(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->eth_sw_base;
	void __iomem *reg;
	u32 val = 0, pvid;
	int i;

	/*
	 * FC_RLS_TH=200, FC_SET_TH=160
	 * DROP_RLS=120, DROP_SET_TH=80
	 */
	writel(0xc8a07850, base + MT7628_SWITCH_FCT0);
	writel(0x00000000, base + MT7628_SWITCH_SGC2);
	writel(0x00405555, base + MT7628_SWITCH_PFC1);
	writel(0x00007f7f, base + MT7628_SWITCH_POC0);
	writel(0x00007f7f, base + MT7628_SWITCH_POC2);	/* disable VLAN */
	writel(0x0002500c, base + MT7628_SWITCH_FCT2);
	/* hashing algorithm=XOR48, aging interval=300sec */
	writel(0x0008a301, base + MT7628_SWITCH_SGC);
	writel(0x02404040, base + MT7628_SWITCH_SOCPC);

	/* Ext PHY Addr=0x1f */
	writel(0x3f502b28, base + MT7628_SWITCH_FPA1);
	writel(0x00000000, base + MT7628_SWITCH_FPA);
	/* 1us cycle number=125 (FE's clock=125Mhz) */
	writel(0x7d000000, base + MT7628_SWITCH_BMU_CTRL);

	/* LAN/WAN partition; the WAN port is unusable in the U-Boot network */
	if (priv->wan_port >= 0 && priv->wan_port < 6) {
		/* Each PVIDC register packs two 12-bit PVIDs */
		for (i = 0; i < 8; i++) {
			pvid = i == priv->wan_port ? 2 : 1;
			reg = base + MT7628_SWITCH_PVIDC0 + (i / 2) * 4;
			if (i % 2 == 0) {
				val = pvid;
			} else {
				val |= (pvid << 12);
				writel(val, reg);
			}
		}

		/* VLAN member config: move the WAN port out of VLAN 1 into VLAN 2 */
		val = 0xffff407f;
		val |= 1 << (8 + priv->wan_port);
		val &= ~(1 << priv->wan_port);
		writel(val, base + MT7628_SWITCH_VMSC0);
	}

	/* Reset PHY */
	reset_assert(&priv->rst_ephy);
	reset_deassert(&priv->rst_ephy);
	mdelay(10);

	mt7628_ephy_init(priv);
}

static void eth_dma_start(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;

	setbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);
}

static void eth_dma_stop(struct mt7628_eth_dev *priv)
{
	void __iomem *base = priv->base;
	int ret;

	clrbits_le32(base + PDMA_GLO_CFG, TX_WB_DDONE | RX_DMA_EN | TX_DMA_EN);

	/* Wait for DMA to stop */
	ret = wait_for_bit_le32(base + PDMA_GLO_CFG,
				RX_DMA_BUSY | TX_DMA_BUSY, false,
				CONFIG_DMA_STOP_TIMEOUT, false);
	if (ret)
		printf("DMA stop timeout error!\n");
}

static int mt7628_eth_write_hwaddr(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	u8 *addr = ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr;
	u32 val;

	/* Set the MAC address: the two high bytes go into SDM_MAC_ADRH... */
	val = addr[0];
	val = (val << 8) | addr[1];
	writel(val, base + SDM_MAC_ADRH);

	/* ...and the remaining four bytes into SDM_MAC_ADRL */
	val = addr[2];
	val = (val << 8) | addr[3];
	val = (val << 8) | addr[4];
	val = (val << 8) | addr[5];
	writel(val, base + SDM_MAC_ADRL);

	return 0;
}

static int mt7628_eth_send(struct udevice *dev, void *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int ret;
	int idx;
	int i;

	idx = priv->tx_dma_idx;

	/* Pad the frame to the minimum length */
	if (length < PADDING_LENGTH) {
		char *p = (char *)packet;

		for (i = 0; i < PADDING_LENGTH - length; i++)
			p[length + i] = 0;
		length = PADDING_LENGTH;
	}

	/* Check if the buffer is ready for the next TX DMA */
	ret = wait_for_bit_le32(&priv->tx_ring[idx].txd2, TX_DMA_DONE, true,
				CONFIG_TX_DMA_TIMEOUT, false);
	if (ret) {
		printf("TX: DMA still busy on buffer %d\n", idx);
		return ret;
	}

	flush_dcache_range((u32)packet, (u32)packet + length);

	priv->tx_ring[idx].txd1 = CPHYSADDR(packet);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_PLEN0;
	priv->tx_ring[idx].txd2 |= FIELD_PREP(TX_DMA_PLEN0, length);
	priv->tx_ring[idx].txd2 &= ~TX_DMA_DONE;

	idx = (idx + 1) % NUM_TX_DESC;

	/* Make sure the descriptor writes complete before kicking the DMA */
	wmb();
	writel(idx, base + TX_CTX_IDX0);

	priv->tx_dma_idx = idx;

	return 0;
}

static int mt7628_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	u32 rxd_info;
	int length;
	int idx;

	idx = priv->rx_dma_idx;

	rxd_info = priv->rx_ring[idx].rxd2;
	if ((rxd_info & RX_DMA_DONE) == 0)
		return -EAGAIN;

	length = FIELD_GET(RX_DMA_PLEN0, priv->rx_ring[idx].rxd2);
	if (length == 0 || length > MTK_QDMA_PAGE_SIZE) {
		printf("%s: invalid length (%d bytes)\n", __func__, length);
		mt7628_eth_free_pkt(dev, NULL, 0);
		return -EIO;
	}

	*packetp = priv->rx_buf[idx];
	invalidate_dcache_range((u32)*packetp, (u32)*packetp + length);

	/* Reset the descriptor so the DMA engine can reuse it */
	priv->rx_ring[idx].rxd4 = 0;
	priv->rx_ring[idx].rxd2 = RX_DMA_LSO;

	/* Make sure the descriptor writes complete before handing it back */
	wmb();

	return length;
}

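/*
 * Writing the just-processed index to RX_CALC_IDX0 tells the DMA
 * engine that the CPU is done with this descriptor, making it
 * available for reception again.
 */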
static int mt7628_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	int idx;

	idx = priv->rx_dma_idx;

	/* Return the current RXD to the DMA engine */
	writel(idx, base + RX_CALC_IDX0);

	/* Advance to the next RXD to be received */
	idx = (idx + 1) % NUM_RX_DESC;

	priv->rx_dma_idx = idx;

	return 0;
}

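/*
 * The descriptor rings live in uncached KSEG1 memory (see the probe
 * routine), so they can be re-initialized here without any cache
 * maintenance; only the packet buffers themselves need to be flushed
 * or invalidated.
 */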
static int mt7628_eth_start(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	void __iomem *base = priv->base;
	uchar packet[MTK_QDMA_PAGE_SIZE];
	uchar *packetp;
	int ret;
	int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		memset((void *)&priv->rx_ring[i], 0, sizeof(priv->rx_ring[0]));
		priv->rx_ring[i].rxd2 |= RX_DMA_LSO;
		priv->rx_ring[i].rxd1 = CPHYSADDR(priv->rx_buf[i]);
	}

	for (i = 0; i < NUM_TX_DESC; i++) {
		memset((void *)&priv->tx_ring[i], 0, sizeof(priv->tx_ring[0]));
		priv->tx_ring[i].txd2 = TX_DMA_LS0 | TX_DMA_DONE;
		priv->tx_ring[i].txd4 = FIELD_PREP(TX_DMA_PN, 1);
	}

	priv->rx_dma_idx = 0;
	priv->tx_dma_idx = 0;

	/* Make sure the descriptor writes complete before programming the DMA */
	wmb();

	/* Disable the delayed interrupt */
	writel(0, base + DLY_INT_CFG);

	clrbits_le32(base + PDMA_GLO_CFG, 0xffff0000);

	/* Tell the adapter where the TX/RX rings are located */
	writel(CPHYSADDR(&priv->rx_ring[0]), base + RX_BASE_PTR0);
	writel(CPHYSADDR((u32)&priv->tx_ring[0]), base + TX_BASE_PTR0);

	writel(NUM_RX_DESC, base + RX_MAX_CNT0);
	writel(NUM_TX_DESC, base + TX_MAX_CNT0);

	writel(priv->tx_dma_idx, base + TX_CTX_IDX0);
	writel(RST_DTX_IDX0, base + PDMA_RST_IDX);

	writel(NUM_RX_DESC - 1, base + RX_CALC_IDX0);
	writel(RST_DRX_IDX0, base + PDMA_RST_IDX);

	/* Make sure the register writes complete before starting the DMA */
	wmb();
	eth_dma_start(priv);

	if (priv->phy) {
		ret = phy_startup(priv->phy);
		if (ret)
			return ret;

		if (!priv->phy->link)
			return -EAGAIN;
	}

	/*
	 * The integrated switch seems to queue some received ethernet
	 * packets in some FIFO. Let's read the already queued packets
	 * out by using the receive routine, so that these old messages
	 * are dropped before the new xfer starts.
	 */
	packetp = &packet[0];
	while (mt7628_eth_recv(dev, 0, &packetp) != -EAGAIN)
		mt7628_eth_free_pkt(dev, packetp, 0);

	return 0;
}

static void mt7628_eth_stop(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);

	eth_dma_stop(priv);
}

static int mt7628_eth_probe(struct udevice *dev)
{
	struct mt7628_eth_dev *priv = dev_get_priv(dev);
	struct mii_dev *bus;
	int poll_link_phy;
	int ret;
	int i;

	/* Save the frame-engine base address for later use */
	priv->base = dev_remap_addr_index(dev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Save the switch base address for later use */
	priv->eth_sw_base = dev_remap_addr_index(dev, 1);
	if (IS_ERR(priv->eth_sw_base))
		return PTR_ERR(priv->eth_sw_base);

	/* Reset controller */
	ret = reset_get_by_name(dev, "ephy", &priv->rst_ephy);
	if (ret) {
		pr_err("unable to find reset controller for ethernet PHYs\n");
		return ret;
	}

	/* The WAN port will be isolated from the LAN ports */
	priv->wan_port = dev_read_u32_default(dev, "mediatek,wan-port", -1);

	/* Put the rx and tx rings into the KSEG1 area (uncached) */
	priv->tx_ring = (struct fe_tx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->tx_ring) * NUM_TX_DESC));
	priv->rx_ring = (struct fe_rx_dma *)
			KSEG1ADDR(memalign(ARCH_DMA_MINALIGN,
					   sizeof(*priv->rx_ring) * NUM_RX_DESC));

	for (i = 0; i < NUM_RX_DESC; i++)
		priv->rx_buf[i] = memalign(PKTALIGN, MTK_QDMA_PAGE_SIZE);

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = mt7628_mdio_read;
	bus->write = mt7628_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = (void *)priv;

	ret = mdio_register(bus);
	if (ret)
		return ret;

	poll_link_phy = dev_read_u32_default(dev, "mediatek,poll-link-phy", -1);
	if (poll_link_phy >= 0) {
		if (poll_link_phy >= NUM_PHYS) {
			pr_err("invalid phy %d for poll-link-phy\n",
			       poll_link_phy);
			return -EINVAL;
		}

		priv->phy = phy_connect(bus, poll_link_phy, dev,
					PHY_INTERFACE_MODE_MII);
		if (!priv->phy) {
			pr_err("failed to probe phy %d\n", poll_link_phy);
			return -ENODEV;
		}

		priv->phy->advertising = priv->phy->supported;
		phy_config(priv->phy);
	}

	/* Switch configuration */
	rt305x_esw_init(priv);

	return 0;
}

static const struct eth_ops mt7628_eth_ops = {
	.start		= mt7628_eth_start,
	.send		= mt7628_eth_send,
	.recv		= mt7628_eth_recv,
	.free_pkt	= mt7628_eth_free_pkt,
	.stop		= mt7628_eth_stop,
	.write_hwaddr	= mt7628_eth_write_hwaddr,
};

static const struct udevice_id mt7628_eth_ids[] = {
	{ .compatible = "mediatek,mt7628-eth" },
	{ }
};

U_BOOT_DRIVER(mt7628_eth) = {
	.name	= "mt7628_eth",
	.id	= UCLASS_ETH,
	.of_match = mt7628_eth_ids,
	.probe	= mt7628_eth_probe,
	.ops	= &mt7628_eth_ops,
	.priv_auto_alloc_size = sizeof(struct mt7628_eth_dev),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};