/*
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <common.h>

/*
 * The U-Boot networking stack is a little weird.  It seems like the
 * networking core allocates receive buffers up front without any
 * regard to the hardware that's supposed to actually receive those
 * packets.
 *
 * The MACB receives packets into 128-byte receive buffers, so the
 * buffers allocated by the core aren't very practical to use.  We'll
 * allocate our own, but we still need one core-allocated buffer for
 * the case where a packet wraps around the end of the DMA ring and
 * has to be copied into a contiguous buffer.
 *
 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
 * configuration header.  This way, the core allocates one RX buffer
 * and one TX buffer, each of which can hold an Ethernet packet of
 * maximum size.
 *
 * For some reason, the networking core unconditionally specifies a
 * 32-byte packet "alignment" (which really should be called
 * "padding").  MACB shouldn't need that, but we'll refrain from any
 * core modifications here...
 */

#include <net.h>
#include <netdev.h>
#include <malloc.h>
#include <miiphy.h>

#include <linux/mii.h>
#include <asm/io.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clk.h>

#include "macb.h"

#define CONFIG_SYS_MACB_RX_BUFFER_SIZE		4096
#define CONFIG_SYS_MACB_RX_RING_SIZE		(CONFIG_SYS_MACB_RX_BUFFER_SIZE / 128)
#define CONFIG_SYS_MACB_TX_RING_SIZE		16
#define CONFIG_SYS_MACB_TX_TIMEOUT		1000
#define CONFIG_SYS_MACB_AUTONEG_TIMEOUT	5000000

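/*
 * Each ring entry is a two-word hardware DMA descriptor: a buffer
 * address (whose low bits carry the RXADDR_ flags on the receive ring)
 * and a control/status word holding the RXBUF_ or TXBUF_ flags below.
 */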
struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};

#define RXADDR_USED		0x00000001
#define RXADDR_WRAP		0x00000002

#define RXBUF_FRMLEN_MASK	0x00000fff
#define RXBUF_FRAME_START	0x00004000
#define RXBUF_FRAME_END		0x00008000
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

#define TXBUF_FRMLEN_MASK	0x000007ff
#define TXBUF_FRAME_END		0x00008000
#define TXBUF_NOCRC		0x00010000
#define TXBUF_EXHAUSTED		0x08000000
#define TXBUF_UNDERRUN		0x10000000
#define TXBUF_MAXRETRY		0x20000000
#define TXBUF_WRAP		0x40000000
#define TXBUF_USED		0x80000000

struct macb_device {
	void			*regs;

	unsigned int		rx_tail;
	unsigned int		tx_head;
	unsigned int		tx_tail;

	void			*rx_buffer;
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;
	struct mii_dev		*bus;
};
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)

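/*
 * Distinguish the Cadence GEM (gigabit) variant from the original
 * 10/100 MACB by checking the module ID field of the MID register.
 */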
static int macb_is_gem(struct macb_device *macb)
{
	return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
}

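/*
 * Write a PHY register over MDIO: enable the management port, push a
 * clause-22 write frame through the MAN register and busy-wait on
 * NSR.IDLE until the shift operation completes.
 */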
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}

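/*
 * Read a PHY register: same MAN/NSR handshake as the write path, but
 * with a clause-22 read frame; the result is the DATA field of MAN.
 */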
static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	frame = macb_readl(macb, MAN);

	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}

void __weak arch_get_mdio_control(const char *name)
{
	return;
}

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)

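/*
 * Glue between the generic miiphy layer and the MDIO helpers above.
 * Only accesses to the PHY address bound to this port are served.
 */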
int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	*value = macb_mdio_read(macb, reg);

	return 0;
}

int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct macb_device *macb = to_macb(dev);

	if (macb->phy_addr != phy_adr)
		return -1;

	arch_get_mdio_control(devname);
	macb_mdio_write(macb, reg, value);

	return 0;
}
#endif


#if defined(CONFIG_CMD_NET)

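/*
 * Transmit one packet: map the buffer for DMA, fill in the next TX
 * descriptor (setting the WRAP flag on the last ring entry), kick off
 * transmission with TSTART, then busy-wait until the controller hands
 * the descriptor back.
 */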
static int macb_send(struct eth_device *netdev, void *packet, int length)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (CONFIG_SYS_MACB_TX_RING_SIZE - 1)) {
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else
		macb->tx_head++;

	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * Wait for the controller to hand the descriptor back; the
	 * networking core may reuse the transmit buffer as soon as we
	 * return.
	 */
	for (i = 0; i <= CONFIG_SYS_MACB_TX_TIMEOUT; i++) {
		barrier();
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= CONFIG_SYS_MACB_TX_TIMEOUT) {
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", netdev->name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n",
			       netdev->name);
	} else {
		printf("%s: TX timeout\n", netdev->name);
	}

	/* No one cares about the return value anyway */
	return 0;
}

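/*
 * Return a range of RX descriptors to the controller by clearing their
 * USED flags, then advance the software tail pointer.
 */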
static void reclaim_rx_buffers(struct macb_device *macb,
			       unsigned int new_tail)
{
	unsigned int i;

	i = macb->rx_tail;
	while (i > new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
		if (i > CONFIG_SYS_MACB_RX_RING_SIZE)
			i = 0;
	}

	while (i < new_tail) {
		macb->rx_ring[i].addr &= ~RXADDR_USED;
		i++;
	}

	barrier();
	macb->rx_tail = new_tail;
}

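/*
 * Poll the RX ring for a complete frame.  Frames normally live in the
 * driver's own 128-byte buffers; a frame that wraps around the end of
 * the ring is reassembled into NetRxPackets[0] before being handed to
 * NetReceive().
 */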
static int macb_recv(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned int rx_tail = macb->rx_tail;
	void *buffer;
	int length;
	int wrapped = 0;
	u32 status;

	for (;;) {
		if (!(macb->rx_ring[rx_tail].addr & RXADDR_USED))
			return -1;

		status = macb->rx_ring[rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			if (rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, rx_tail);
			wrapped = 0;
		}

		if (status & RXBUF_FRAME_END) {
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;
			if (wrapped) {
				unsigned int headlen, taillen;

				headlen = 128 * (CONFIG_SYS_MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)NetRxPackets[0],
				       buffer, headlen);
				memcpy((void *)NetRxPackets[0] + headlen,
				       macb->rx_buffer, taillen);
				buffer = (void *)NetRxPackets[0];
			}

			NetReceive(buffer, length);
			if (++rx_tail >= CONFIG_SYS_MACB_RX_RING_SIZE)
				rx_tail = 0;
			reclaim_rx_buffers(macb, rx_tail);
		} else {
			if (++rx_tail >= CONFIG_SYS_MACB_RX_RING_SIZE) {
				wrapped = 1;
				rx_tail = 0;
			}
		}
		barrier();
	}

	return 0;
}

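/*
 * Restart autonegotiation, advertising all 10/100 modes, and wait for
 * it to complete or time out.
 */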
static void macb_phy_reset(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", netdev->name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	for (i = 0; i < CONFIG_SYS_MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", netdev->name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       netdev->name, status);
}

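/*
 * Scan all 32 MDIO addresses for a responding PHY and remember the
 * first address that answers.
 */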
#ifdef CONFIG_MACB_SEARCH_PHY
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */

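/*
 * Bring up the PHY and program NCFGR to match the negotiated link:
 * gigabit on a GEM if the link partner advertises it, otherwise 10/100
 * from the standard advertisement and link-partner registers.  Returns
 * 1 if the link is up, 0 otherwise.
 */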
static int macb_phy_init(struct macb_device *macb)
{
	struct eth_device *netdev = &macb->netdev;
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(netdev->name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb)) {
		return 0;
	}
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", netdev->name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* Attach via phylib; the interface mode is assumed RGMII here */
	phydev = phy_connect(macb->bus, macb->phy_addr, netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("%s: phy_connect failed\n", netdev->name);
		return 0;
	}
	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb);

		for (i = 0; i < CONFIG_SYS_MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       netdev->name, status);
		return 0;
	}

	/* First check for GMAC */
	if (macb_is_gem(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);
		if (lpa & (1 << 11)) {
			speed = 1000;
			duplex = 1;
		} else {
			if (lpa & (1 << 10)) {
				speed = 1000;
				duplex = 1;
			} else {
				speed = 0;
			}
		}

		if (speed == 1000) {
			printf("%s: link up, %dMbps %s-duplex (lpa: 0x%04x)\n",
			       netdev->name,
			       speed,
			       duplex ? "full" : "half",
			       lpa);

			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD));
			if (speed)
				ncfgr |= GEM_BIT(GBE);
			if (duplex)
				ncfgr |= MACB_BIT(FD);
			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* fall back for EMAC checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       netdev->name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}

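/*
 * (Re)initialize the RX and TX descriptor rings, point the controller
 * at them, select the MII/RMII/RGMII pin interface for the board, and
 * enable the transmitter and receiver once the PHY reports link.
 */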
static int macb_init(struct eth_device *netdev, bd_t *bd)
{
	struct macb_device *macb = to_macb(netdev);
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < CONFIG_SYS_MACB_RX_RING_SIZE; i++) {
		if (i == (CONFIG_SYS_MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;
	}
	for (i = 0; i < CONFIG_SYS_MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (CONFIG_SYS_MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb->rx_tail = macb->tx_head = macb->tx_tail = 0;

	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
#ifdef CONFIG_RGMII
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
	/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
	macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
	macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
	macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	if (!macb_phy_init(macb))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}

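/*
 * Stop the controller: halt transmission, wait for the frame in
 * progress to finish, then disable TX/RX and clear the statistics
 * counters.
 */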
static void macb_halt(struct eth_device *netdev)
{
	struct macb_device *macb = to_macb(netdev);
	u32 ncr, tsr;

	/* Halt the controller and wait for any ongoing transmission to end. */
	ncr = macb_readl(macb, NCR);
	ncr |= MACB_BIT(THALT);
	macb_writel(macb, NCR, ncr);

	do {
		tsr = macb_readl(macb, TSR);
	} while (tsr & MACB_BIT(TGO));

	/* Disable TX and RX, and clear statistics */
	macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
}

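/*
 * Program the station MAC address into specific-address register 1
 * (the low four bytes go into SA1B, the top two into SA1T).
 */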
static int macb_write_hwaddr(struct eth_device *dev)
{
	struct macb_device *macb = to_macb(dev);
	u32 hwaddr_bottom;
	u16 hwaddr_top;

	/* set hardware address */
	hwaddr_bottom = dev->enetaddr[0] | dev->enetaddr[1] << 8 |
			dev->enetaddr[2] << 16 | dev->enetaddr[3] << 24;
	macb_writel(macb, SA1B, hwaddr_bottom);
	hwaddr_top = dev->enetaddr[4] | dev->enetaddr[5] << 8;
	macb_writel(macb, SA1T, hwaddr_top);
	return 0;
}

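/*
 * Pick an MDC clock divider that keeps the MDIO clock within spec for
 * the given peripheral clock rate; MACB and GEM use different divider
 * encodings, hence the two helpers below.
 */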
static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = MACB_BF(CLK, MACB_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = MACB_BF(CLK, MACB_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = MACB_BF(CLK, MACB_CLK_DIV32);
	else
		config = MACB_BF(CLK, MACB_CLK_DIV64);

	return config;
}

static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
{
	u32 config;
	unsigned long macb_hz = get_macb_pclk_rate(id);

	if (macb_hz < 20000000)
		config = GEM_BF(CLK, GEM_CLK_DIV8);
	else if (macb_hz < 40000000)
		config = GEM_BF(CLK, GEM_CLK_DIV16);
	else if (macb_hz < 80000000)
		config = GEM_BF(CLK, GEM_CLK_DIV32);
	else if (macb_hz < 120000000)
		config = GEM_BF(CLK, GEM_CLK_DIV48);
	else if (macb_hz < 160000000)
		config = GEM_BF(CLK, GEM_CLK_DIV64);
	else
		config = GEM_BF(CLK, GEM_CLK_DIV96);

	return config;
}

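/*
 * Allocate and register one MACB/GEM port with the networking core.
 * Board code calls this once per port, typically from its
 * board_eth_init() hook; a minimal sketch (the base address macro and
 * PHY address are board-specific):
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return macb_eth_initialize(0, (void *)ATMEL_BASE_EMAC0, 0x00);
 *	}
 */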
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;
	u32 ncfgr;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->rx_buffer = dma_alloc_coherent(CONFIG_SYS_MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(CONFIG_SYS_MACB_RX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(CONFIG_SYS_MACB_TX_RING_SIZE
					   * sizeof(struct macb_dma_desc),
					   &macb->tx_ring_dma);

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= GEM_BF(DBW, 1);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}

#endif