blob: 84bae37f37e4681ba1b582e8da8f4e82340b4e13 [file] [log] [blame]
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +01001/*
2 * Copyright (C) 2005-2006 Atmel Corporation
3 *
Wolfgang Denkd79de1d2013-07-08 09:37:19 +02004 * SPDX-License-Identifier: GPL-2.0+
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +01005 */
6#include <common.h>
7
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +01008/*
9 * The u-boot networking stack is a little weird. It seems like the
10 * networking core allocates receive buffers up front without any
11 * regard to the hardware that's supposed to actually receive those
12 * packets.
13 *
14 * The MACB receives packets into 128-byte receive buffers, so the
15 * buffers allocated by the core isn't very practical to use. We'll
16 * allocate our own, but we need one such buffer in case a packet
17 * wraps around the DMA ring so that we have to copy it.
18 *
Jean-Christophe PLAGNIOL-VILLARD03836942008-10-16 15:01:15 +020019 * Therefore, define CONFIG_SYS_RX_ETH_BUFFER to 1 in the board-specific
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +010020 * configuration header. This way, the core allocates one RX buffer
21 * and one TX buffer, each of which can hold a ethernet packet of
22 * maximum size.
23 *
24 * For some reason, the networking core unconditionally specifies a
25 * 32-byte packet "alignment" (which really should be called
26 * "padding"). MACB shouldn't need that, but we'll refrain from any
27 * core modifications here...
28 */
29
30#include <net.h>
Ben Warren2f2b6b62008-08-31 22:22:04 -070031#include <netdev.h>
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +010032#include <malloc.h>
Semih Hazar790088e2009-12-17 15:07:15 +020033#include <miiphy.h>
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +010034
35#include <linux/mii.h>
36#include <asm/io.h>
37#include <asm/dma-mapping.h>
38#include <asm/arch/clk.h>
Bo Shene04fe552013-08-19 10:35:47 +080039#include <asm-generic/errno.h>
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +010040
41#include "macb.h"
42
/* Total bytes of the contiguous RX buffer area (one 128-byte slot per slot). */
#define MACB_RX_BUFFER_SIZE		4096
/* One descriptor per 128-byte hardware RX buffer. */
#define MACB_RX_RING_SIZE		(MACB_RX_BUFFER_SIZE / 128)
#define MACB_TX_RING_SIZE		16
/* TX completion poll limit, in 1 us steps (see _macb_send()). */
#define MACB_TX_TIMEOUT		1000
/* Autonegotiation poll budget, in ns-ish units consumed 100 at a time. */
#define MACB_AUTONEG_TIMEOUT	5000000
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +010048
/*
 * Hardware DMA buffer descriptor, shared with the controller.
 * addr: buffer physical address plus low control bits (RXADDR_* on RX);
 * ctrl: status/control bits (RXBUF_* / TXBUF_* below).
 */
struct macb_dma_desc {
	u32	addr;
	u32	ctrl;
};
53
/* Byte sizes of the descriptor rings, used for cache maintenance ranges. */
#define DMA_DESC_BYTES(n)	(n * sizeof(struct macb_dma_desc))
#define MACB_TX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_TX_RING_SIZE))
#define MACB_RX_DMA_DESC_SIZE	(DMA_DESC_BYTES(MACB_RX_RING_SIZE))
#define MACB_TX_DUMMY_DMA_DESC_SIZE	(DMA_DESC_BYTES(1))

/* Bits stored in the low part of an RX descriptor's addr word. */
#define RXADDR_USED		0x00000001	/* HW set: slot holds a frame */
#define RXADDR_WRAP		0x00000002	/* last descriptor in the ring */

/* RX descriptor ctrl-word bits. */
#define RXBUF_FRMLEN_MASK	0x00000fff	/* frame length in bytes */
#define RXBUF_FRAME_START	0x00004000	/* first 128-byte chunk of frame */
#define RXBUF_FRAME_END		0x00008000	/* last 128-byte chunk of frame */
#define RXBUF_TYPEID_MATCH	0x00400000
#define RXBUF_ADDR4_MATCH	0x00800000
#define RXBUF_ADDR3_MATCH	0x01000000
#define RXBUF_ADDR2_MATCH	0x02000000
#define RXBUF_ADDR1_MATCH	0x04000000
#define RXBUF_BROADCAST		0x80000000

/* TX descriptor ctrl-word bits. */
#define TXBUF_FRMLEN_MASK	0x000007ff	/* frame length in bytes */
#define TXBUF_FRAME_END		0x00008000	/* last buffer of the frame */
#define TXBUF_NOCRC		0x00010000	/* do not append CRC */
#define TXBUF_EXHAUSTED		0x08000000	/* error: buffers exhausted */
#define TXBUF_UNDERRUN		0x10000000	/* error: TX underrun */
#define TXBUF_MAXRETRY		0x20000000	/* error: retry limit hit */
#define TXBUF_WRAP		0x40000000	/* last descriptor in the ring */
#define TXBUF_USED		0x80000000	/* SW owns descriptor (TX done) */
80
/* Per-controller driver state. */
struct macb_device {
	void			*regs;		/* MMIO register base */

	unsigned int		rx_tail;	/* first unreclaimed RX slot */
	unsigned int		tx_head;	/* next TX descriptor to use */
	unsigned int		tx_tail;
	unsigned int		next_rx_tail;	/* RX walk position in _macb_recv() */
	bool			wrapped;	/* current RX frame wraps the ring */

	void			*rx_buffer;	/* CPU view of RX buffer area */
	void			*tx_buffer;
	struct macb_dma_desc	*rx_ring;
	struct macb_dma_desc	*tx_ring;

	unsigned long		rx_buffer_dma;	/* DMA addresses of the above */
	unsigned long		rx_ring_dma;
	unsigned long		tx_ring_dma;

	/* Parked descriptor fed to unused GEM TX queues (always USED). */
	struct macb_dma_desc	*dummy_desc;
	unsigned long		dummy_desc_dma;

	const struct device	*dev;
	struct eth_device	netdev;
	unsigned short		phy_addr;	/* PHY address on the MDIO bus */
	struct mii_dev		*bus;		/* MII bus for PHYLIB */
};
/* Map an embedded eth_device back to its owning macb_device. */
#define to_macb(_nd) container_of(_nd, struct macb_device, netdev)
108
Bo Shen6f7d7d92013-04-24 15:59:28 +0800109static int macb_is_gem(struct macb_device *macb)
110{
111 return MACB_BFEXT(IDNUM, macb_readl(macb, MID)) == 0x2;
112}
113
/*
 * SoC headers define these on SAMA5 platforms; provide false fallbacks
 * so gem_is_gigabit_capable() compiles everywhere else.
 */
#ifndef cpu_is_sama5d2
#define cpu_is_sama5d2() 0
#endif

#ifndef cpu_is_sama5d4
#define cpu_is_sama5d4() 0
#endif
121
/*
 * A GEM usually supports gigabit, but the instances embedded in the
 * SAMA5D2 and SAMA5D4 are synthesized for 10/100 only.
 */
static int gem_is_gigabit_capable(struct macb_device *macb)
{
	int gem = macb_is_gem(macb);

	if (cpu_is_sama5d2() || cpu_is_sama5d4())
		return 0;

	return gem;
}
130
/*
 * Write @value to PHY register @reg via the controller's MDIO state
 * machine (IEEE 802.3 clause-22 frame), busy-waiting for completion.
 */
static void macb_mdio_write(struct macb_device *macb, u8 reg, u16 value)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	/* Enable the management port for the duration of the access. */
	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	/* Clause-22 write frame: SOF=1, OP=write(1), turnaround code 2. */
	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 1)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2)
		 | MACB_BF(DATA, value));
	macb_writel(macb, MAN, frame);

	/* Wait for the shift to finish (no timeout: HW always completes). */
	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	/* Disable the management port again. */
	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);
}
157
/*
 * Read PHY register @reg via the MDIO state machine and return its
 * 16-bit value.  Mirrors macb_mdio_write() but with OP=read(2).
 */
static u16 macb_mdio_read(struct macb_device *macb, u8 reg)
{
	unsigned long netctl;
	unsigned long netstat;
	unsigned long frame;

	/* Enable the management port for the duration of the access. */
	netctl = macb_readl(macb, NCR);
	netctl |= MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	/* Clause-22 read frame: SOF=1, OP=read(2), turnaround code 2. */
	frame = (MACB_BF(SOF, 1)
		 | MACB_BF(RW, 2)
		 | MACB_BF(PHYA, macb->phy_addr)
		 | MACB_BF(REGA, reg)
		 | MACB_BF(CODE, 2));
	macb_writel(macb, MAN, frame);

	/* Wait for the shift to finish. */
	do {
		netstat = macb_readl(macb, NSR);
	} while (!(netstat & MACB_BIT(IDLE)));

	/* The PHY's reply is latched in the DATA field of MAN. */
	frame = macb_readl(macb, MAN);

	/* Disable the management port again. */
	netctl = macb_readl(macb, NCR);
	netctl &= ~MACB_BIT(MPE);
	macb_writel(macb, NCR, netctl);

	return MACB_BFEXT(DATA, frame);
}
187
Joe Hershberger9e5742b2013-06-24 19:06:38 -0500188void __weak arch_get_mdio_control(const char *name)
Shiraz Hashim77cdf0f2012-12-13 17:22:52 +0530189{
190 return;
191}
192
Bo Shen7d91deb2013-04-24 15:59:27 +0800193#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
Semih Hazar790088e2009-12-17 15:07:15 +0200194
Mike Frysinger5ff5fdb2010-07-27 18:35:08 -0400195int macb_miiphy_read(const char *devname, u8 phy_adr, u8 reg, u16 *value)
Semih Hazar790088e2009-12-17 15:07:15 +0200196{
197 struct eth_device *dev = eth_get_dev_by_name(devname);
198 struct macb_device *macb = to_macb(dev);
199
Andreas Bießmann1e868122014-05-26 22:55:18 +0200200 if (macb->phy_addr != phy_adr)
Semih Hazar790088e2009-12-17 15:07:15 +0200201 return -1;
202
Shiraz Hashim77cdf0f2012-12-13 17:22:52 +0530203 arch_get_mdio_control(devname);
Semih Hazar790088e2009-12-17 15:07:15 +0200204 *value = macb_mdio_read(macb, reg);
205
206 return 0;
207}
208
Mike Frysinger5ff5fdb2010-07-27 18:35:08 -0400209int macb_miiphy_write(const char *devname, u8 phy_adr, u8 reg, u16 value)
Semih Hazar790088e2009-12-17 15:07:15 +0200210{
211 struct eth_device *dev = eth_get_dev_by_name(devname);
212 struct macb_device *macb = to_macb(dev);
213
Andreas Bießmann1e868122014-05-26 22:55:18 +0200214 if (macb->phy_addr != phy_adr)
Semih Hazar790088e2009-12-17 15:07:15 +0200215 return -1;
216
Shiraz Hashim77cdf0f2012-12-13 17:22:52 +0530217 arch_get_mdio_control(devname);
Semih Hazar790088e2009-12-17 15:07:15 +0200218 macb_mdio_write(macb, reg, value);
219
220 return 0;
221}
222#endif
223
Wu, Josh18052402014-05-27 16:31:05 +0800224#define RX 1
225#define TX 0
226static inline void macb_invalidate_ring_desc(struct macb_device *macb, bool rx)
227{
228 if (rx)
229 invalidate_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
230 MACB_RX_DMA_DESC_SIZE);
231 else
232 invalidate_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
233 MACB_TX_DMA_DESC_SIZE);
234}
235
236static inline void macb_flush_ring_desc(struct macb_device *macb, bool rx)
237{
238 if (rx)
239 flush_dcache_range(macb->rx_ring_dma, macb->rx_ring_dma +
240 MACB_RX_DMA_DESC_SIZE);
241 else
242 flush_dcache_range(macb->tx_ring_dma, macb->tx_ring_dma +
243 MACB_TX_DMA_DESC_SIZE);
244}
245
246static inline void macb_flush_rx_buffer(struct macb_device *macb)
247{
248 flush_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
249 MACB_RX_BUFFER_SIZE);
250}
251
252static inline void macb_invalidate_rx_buffer(struct macb_device *macb)
253{
254 invalidate_dcache_range(macb->rx_buffer_dma, macb->rx_buffer_dma +
255 MACB_RX_BUFFER_SIZE);
256}
Semih Hazar790088e2009-12-17 15:07:15 +0200257
Jon Loeligerb1d408a2007-07-09 17:30:01 -0500258#if defined(CONFIG_CMD_NET)
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +0100259
/*
 * Queue one frame on the TX ring and busy-wait for the controller to
 * consume it.  @name is only used for diagnostics.  Always returns 0;
 * TX errors are reported on the console but not to the caller.
 */
static int _macb_send(struct macb_device *macb, const char *name, void *packet,
		      int length)
{
	unsigned long paddr, ctrl;
	unsigned int tx_head = macb->tx_head;
	int i;

	paddr = dma_map_single(packet, length, DMA_TO_DEVICE);

	/* Single-buffer frame: length plus end-of-frame marker. */
	ctrl = length & TXBUF_FRMLEN_MASK;
	ctrl |= TXBUF_FRAME_END;
	if (tx_head == (MACB_TX_RING_SIZE - 1)) {
		/* Last slot: tell HW to wrap, and wrap our head index too. */
		ctrl |= TXBUF_WRAP;
		macb->tx_head = 0;
	} else {
		macb->tx_head++;
	}

	/*
	 * Publish the descriptor: set ctrl before addr is not required here
	 * since the flush below makes both visible at once; the barrier
	 * keeps the compiler from reordering past the cache flush.
	 */
	macb->tx_ring[tx_head].ctrl = ctrl;
	macb->tx_ring[tx_head].addr = paddr;
	barrier();
	macb_flush_ring_desc(macb, TX);
	/* Do we need check paddr and length is dcache line aligned? */
	flush_dcache_range(paddr, paddr + ALIGN(length, ARCH_DMA_MINALIGN));
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE) | MACB_BIT(TSTART));

	/*
	 * I guess this is necessary because the networking core may
	 * re-use the transmit buffer as soon as we return...
	 */
	for (i = 0; i <= MACB_TX_TIMEOUT; i++) {
		barrier();
		/* Re-read the descriptor; HW sets TXBUF_USED when done. */
		macb_invalidate_ring_desc(macb, TX);
		ctrl = macb->tx_ring[tx_head].ctrl;
		if (ctrl & TXBUF_USED)
			break;
		udelay(1);
	}

	dma_unmap_single(packet, length, paddr);

	if (i <= MACB_TX_TIMEOUT) {
		/* Completed: report any error bits the controller latched. */
		if (ctrl & TXBUF_UNDERRUN)
			printf("%s: TX underrun\n", name);
		if (ctrl & TXBUF_EXHAUSTED)
			printf("%s: TX buffers exhausted in mid frame\n", name);
	} else {
		printf("%s: TX timeout\n", name);
	}

	/* No one cares anyway */
	return 0;
}
313
314static void reclaim_rx_buffers(struct macb_device *macb,
315 unsigned int new_tail)
316{
317 unsigned int i;
318
319 i = macb->rx_tail;
Wu, Josh18052402014-05-27 16:31:05 +0800320
321 macb_invalidate_ring_desc(macb, RX);
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +0100322 while (i > new_tail) {
323 macb->rx_ring[i].addr &= ~RXADDR_USED;
324 i++;
Andreas Bießmann1e868122014-05-26 22:55:18 +0200325 if (i > MACB_RX_RING_SIZE)
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +0100326 i = 0;
327 }
328
329 while (i < new_tail) {
330 macb->rx_ring[i].addr &= ~RXADDR_USED;
331 i++;
332 }
333
Haavard Skinnemoen996e1472007-05-02 13:22:38 +0200334 barrier();
Wu, Josh18052402014-05-27 16:31:05 +0800335 macb_flush_ring_desc(macb, RX);
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +0100336 macb->rx_tail = new_tail;
337}
338
/*
 * Scan the RX ring from macb->next_rx_tail for one complete frame.
 * Returns the frame length with *packetp pointing at its data, or
 * -EAGAIN when no complete frame is available.  A frame that wraps the
 * ring is linearized by copying into net_rx_packets[0].  Descriptors
 * are reclaimed by the caller via reclaim_rx_buffers(next_rx_tail).
 */
static int _macb_recv(struct macb_device *macb, uchar **packetp)
{
	unsigned int next_rx_tail = macb->next_rx_tail;
	void *buffer;
	int length;
	u32 status;

	macb->wrapped = false;
	for (;;) {
		/* Pick up descriptor updates written by the controller. */
		macb_invalidate_ring_desc(macb, RX);

		if (!(macb->rx_ring[next_rx_tail].addr & RXADDR_USED))
			return -EAGAIN;

		status = macb->rx_ring[next_rx_tail].ctrl;
		if (status & RXBUF_FRAME_START) {
			/* Drop any partial frame seen before this start. */
			if (next_rx_tail != macb->rx_tail)
				reclaim_rx_buffers(macb, next_rx_tail);
			macb->wrapped = false;
		}

		if (status & RXBUF_FRAME_END) {
			/* Frame data starts at the rx_tail slot (128 B each). */
			buffer = macb->rx_buffer + 128 * macb->rx_tail;
			length = status & RXBUF_FRMLEN_MASK;

			macb_invalidate_rx_buffer(macb);
			if (macb->wrapped) {
				/* Frame spans the ring end: copy both halves. */
				unsigned int headlen, taillen;

				headlen = 128 * (MACB_RX_RING_SIZE
						 - macb->rx_tail);
				taillen = length - headlen;
				memcpy((void *)net_rx_packets[0],
				       buffer, headlen);
				memcpy((void *)net_rx_packets[0] + headlen,
				       macb->rx_buffer, taillen);
				*packetp = (void *)net_rx_packets[0];
			} else {
				*packetp = buffer;
			}

			if (++next_rx_tail >= MACB_RX_RING_SIZE)
				next_rx_tail = 0;
			macb->next_rx_tail = next_rx_tail;
			return length;
		} else {
			/* Mid-frame slot; note if we cross the ring end. */
			if (++next_rx_tail >= MACB_RX_RING_SIZE) {
				macb->wrapped = true;
				next_rx_tail = 0;
			}
		}
		barrier();
	}
}
393
/*
 * Advertise all 10/100 modes and restart autonegotiation on the PHY,
 * then poll BMSR until negotiation completes or the timeout expires.
 * Result is reported on the console only.
 */
static void macb_phy_reset(struct macb_device *macb, const char *name)
{
	int i;
	u16 status, adv;

	adv = ADVERTISE_CSMA | ADVERTISE_ALL;
	macb_mdio_write(macb, MII_ADVERTISE, adv);
	printf("%s: Starting autonegotiation...\n", name);
	macb_mdio_write(macb, MII_BMCR, (BMCR_ANENABLE
					 | BMCR_ANRESTART));

	/* Poll in 100 us steps; loop always runs at least once. */
	for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
		status = macb_mdio_read(macb, MII_BMSR);
		if (status & BMSR_ANEGCOMPLETE)
			break;
		udelay(100);
	}

	if (status & BMSR_ANEGCOMPLETE)
		printf("%s: Autonegotiation complete\n", name);
	else
		printf("%s: Autonegotiation timed out (status=0x%04x)\n",
		       name, status);
}
418
#ifdef CONFIG_MACB_SEARCH_PHY
/*
 * Probe all 32 MDIO addresses for a responding PHY (PHYSID1 != 0xffff),
 * leaving macb->phy_addr set to the first hit.  Returns 1 when a PHY
 * was found, 0 otherwise (phy_addr is left at the last probed address).
 */
static int macb_phy_find(struct macb_device *macb)
{
	int i;
	u16 phy_id;

	/* Search for PHY... */
	for (i = 0; i < 32; i++) {
		macb->phy_addr = i;
		phy_id = macb_mdio_read(macb, MII_PHYSID1);
		if (phy_id != 0xffff) {
			printf("%s: PHY present at %d\n", macb->netdev.name, i);
			return 1;
		}
	}

	/* PHY isn't up to snuff */
	printf("%s: PHY not found\n", macb->netdev.name);

	return 0;
}
#endif /* CONFIG_MACB_SEARCH_PHY */
441
442
/*
 * Bring up the PHY and program NCFGR to match the negotiated link.
 * Tries gigabit first on GiB-capable GEMs, then falls back to 10/100.
 * Returns 1 on link up, 0 on no PHY / link down, -ENODEV if PHYLIB
 * connection fails.
 */
static int macb_phy_init(struct macb_device *macb, const char *name)
{
#ifdef CONFIG_PHYLIB
	struct phy_device *phydev;
#endif
	u32 ncfgr;
	u16 phy_id, status, adv, lpa;
	int media, speed, duplex;
	int i;

	arch_get_mdio_control(name);
#ifdef CONFIG_MACB_SEARCH_PHY
	/* Auto-detect phy_addr */
	if (!macb_phy_find(macb))
		return 0;
#endif /* CONFIG_MACB_SEARCH_PHY */

	/* Check if the PHY is up to snuff... */
	phy_id = macb_mdio_read(macb, MII_PHYSID1);
	if (phy_id == 0xffff) {
		printf("%s: No PHY present\n", name);
		return 0;
	}

#ifdef CONFIG_PHYLIB
	/* need to consider other phy interface mode */
	phydev = phy_connect(macb->bus, macb->phy_addr, &macb->netdev,
			     PHY_INTERFACE_MODE_RGMII);
	if (!phydev) {
		printf("phy_connect failed\n");
		return -ENODEV;
	}

	phy_config(phydev);
#endif

	status = macb_mdio_read(macb, MII_BMSR);
	if (!(status & BMSR_LSTATUS)) {
		/* Try to re-negotiate if we don't have link already. */
		macb_phy_reset(macb, name);

		for (i = 0; i < MACB_AUTONEG_TIMEOUT / 100; i++) {
			status = macb_mdio_read(macb, MII_BMSR);
			if (status & BMSR_LSTATUS)
				break;
			udelay(100);
		}
	}

	if (!(status & BMSR_LSTATUS)) {
		printf("%s: link down (status: 0x%04x)\n",
		       name, status);
		return 0;
	}

	/* First check for GMAC and that it is GiB capable */
	if (gem_is_gigabit_capable(macb)) {
		lpa = macb_mdio_read(macb, MII_STAT1000);

		if (lpa & (LPA_1000FULL | LPA_1000HALF)) {
			duplex = ((lpa & LPA_1000FULL) ? 1 : 0);

			printf("%s: link up, 1000Mbps %s-duplex (lpa: 0x%04x)\n",
			       name,
			       duplex ? "full" : "half",
			       lpa);

			/* Clear speed/duplex, select gigabit mode. */
			ncfgr = macb_readl(macb, NCFGR);
			ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
			ncfgr |= GEM_BIT(GBE);

			if (duplex)
				ncfgr |= MACB_BIT(FD);

			macb_writel(macb, NCFGR, ncfgr);

			return 1;
		}
	}

	/* fall back for EMAC checking */
	adv = macb_mdio_read(macb, MII_ADVERTISE);
	lpa = macb_mdio_read(macb, MII_LPA);
	media = mii_nway_result(lpa & adv);
	speed = (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)
		 ? 1 : 0);
	duplex = (media & ADVERTISE_FULL) ? 1 : 0;
	printf("%s: link up, %sMbps %s-duplex (lpa: 0x%04x)\n",
	       name,
	       speed ? "100" : "10",
	       duplex ? "full" : "half",
	       lpa);

	/* Program 10/100 speed and duplex, clearing any gigabit setting. */
	ncfgr = macb_readl(macb, NCFGR);
	ncfgr &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | GEM_BIT(GBE));
	if (speed)
		ncfgr |= MACB_BIT(SPD);
	if (duplex)
		ncfgr |= MACB_BIT(FD);
	macb_writel(macb, NCFGR, ncfgr);

	return 1;
}
546
/*
 * GEM controllers can expose multiple TX queues; u-boot only uses
 * queue 0.  Park every additional queue on a single永 "used" dummy
 * descriptor so the hardware never fetches from an uninitialized TBQP.
 * Always returns 0.
 */
static int gmac_init_multi_queues(struct macb_device *macb)
{
	int i, num_queues = 1;
	u32 queue_mask;

	/* bit 0 is never set but queue 0 always exists */
	queue_mask = gem_readl(macb, DCFG6) & 0xff;
	queue_mask |= 0x1;

	for (i = 1; i < MACB_MAX_QUEUES; i++)
		if (queue_mask & (1 << i))
			num_queues++;

	/* One permanently-USED descriptor shared by all unused queues. */
	macb->dummy_desc->ctrl = TXBUF_USED;
	macb->dummy_desc->addr = 0;
	flush_dcache_range(macb->dummy_desc_dma, macb->dummy_desc_dma +
			   MACB_TX_DUMMY_DMA_DESC_SIZE);

	for (i = 1; i < num_queues; i++)
		gem_writel_queue_TBQP(macb, macb->dummy_desc_dma, i - 1);

	return 0;
}
570
/*
 * Initialize descriptor rings and controller configuration, bring up
 * the PHY, and enable TX/RX.  Returns 0 on success, -1 when the PHY
 * fails to come up.
 */
static int _macb_init(struct macb_device *macb, const char *name)
{
	unsigned long paddr;
	int i;

	/*
	 * macb_halt should have been called at some point before now,
	 * so we'll assume the controller is idle.
	 */

	/* initialize DMA descriptors */
	paddr = macb->rx_buffer_dma;
	for (i = 0; i < MACB_RX_RING_SIZE; i++) {
		/* Mark the last descriptor so the controller wraps. */
		if (i == (MACB_RX_RING_SIZE - 1))
			paddr |= RXADDR_WRAP;
		macb->rx_ring[i].addr = paddr;
		macb->rx_ring[i].ctrl = 0;
		paddr += 128;	/* each RX slot is 128 bytes */
	}
	macb_flush_ring_desc(macb, RX);
	macb_flush_rx_buffer(macb);

	/* All TX descriptors start out owned by software (USED set). */
	for (i = 0; i < MACB_TX_RING_SIZE; i++) {
		macb->tx_ring[i].addr = 0;
		if (i == (MACB_TX_RING_SIZE - 1))
			macb->tx_ring[i].ctrl = TXBUF_USED | TXBUF_WRAP;
		else
			macb->tx_ring[i].ctrl = TXBUF_USED;
	}
	macb_flush_ring_desc(macb, TX);

	macb->rx_tail = 0;
	macb->tx_head = 0;
	macb->tx_tail = 0;
	macb->next_rx_tail = 0;

	/* Hand the ring base addresses to the controller. */
	macb_writel(macb, RBQP, macb->rx_ring_dma);
	macb_writel(macb, TBQP, macb->tx_ring_dma);

	if (macb_is_gem(macb)) {
		/* Check the multi queue and initialize the queue for tx */
		gmac_init_multi_queues(macb);

		/*
		 * When the GMAC IP with GE feature, this bit is used to
		 * select interface between RGMII and GMII.
		 * When the GMAC IP without GE feature, this bit is used
		 * to select interface between RMII and MII.
		 */
#if defined(CONFIG_RGMII) || defined(CONFIG_RMII)
		gem_writel(macb, UR, GEM_BIT(RGMII));
#else
		gem_writel(macb, UR, 0);
#endif
	} else {
		/* choose RMII or MII mode. This depends on the board */
#ifdef CONFIG_RMII
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(RMII) | MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, 0);
#endif
#else
#ifdef CONFIG_AT91FAMILY
		macb_writel(macb, USRIO, MACB_BIT(CLKEN));
#else
		macb_writel(macb, USRIO, MACB_BIT(MII));
#endif
#endif /* CONFIG_RMII */
	}

	if (!macb_phy_init(macb, name))
		return -1;

	/* Enable TX and RX */
	macb_writel(macb, NCR, MACB_BIT(TE) | MACB_BIT(RE));

	return 0;
}
650
Simon Glass5ad27512016-05-05 07:28:09 -0600651static void _macb_halt(struct macb_device *macb)
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +0100652{
Haavard Skinnemoen51c8f242006-01-20 10:03:34 +0100653 u32 ncr, tsr;
654
655 /* Halt the controller and wait for any ongoing transmission to end. */
656 ncr = macb_readl(macb, NCR);
657 ncr |= MACB_BIT(THALT);
658 macb_writel(macb, NCR, ncr);
659
660 do {
661 tsr = macb_readl(macb, TSR);
662 } while (tsr & MACB_BIT(TGO));
663
664 /* Disable TX and RX, and clear statistics */
665 macb_writel(macb, NCR, MACB_BIT(CLRSTAT));
666}
667
Simon Glass5ad27512016-05-05 07:28:09 -0600668static int _macb_write_hwaddr(struct macb_device *macb, unsigned char *enetaddr)
Ben Warren33f84312010-06-01 11:55:42 -0700669{
Ben Warren33f84312010-06-01 11:55:42 -0700670 u32 hwaddr_bottom;
671 u16 hwaddr_top;
672
673 /* set hardware address */
Simon Glass5ad27512016-05-05 07:28:09 -0600674 hwaddr_bottom = enetaddr[0] | enetaddr[1] << 8 |
675 enetaddr[2] << 16 | enetaddr[3] << 24;
Ben Warren33f84312010-06-01 11:55:42 -0700676 macb_writel(macb, SA1B, hwaddr_bottom);
Simon Glass5ad27512016-05-05 07:28:09 -0600677 hwaddr_top = enetaddr[4] | enetaddr[5] << 8;
Ben Warren33f84312010-06-01 11:55:42 -0700678 macb_writel(macb, SA1T, hwaddr_top);
679 return 0;
680}
681
Bo Shen6f7d7d92013-04-24 15:59:28 +0800682static u32 macb_mdc_clk_div(int id, struct macb_device *macb)
683{
684 u32 config;
685 unsigned long macb_hz = get_macb_pclk_rate(id);
686
687 if (macb_hz < 20000000)
688 config = MACB_BF(CLK, MACB_CLK_DIV8);
689 else if (macb_hz < 40000000)
690 config = MACB_BF(CLK, MACB_CLK_DIV16);
691 else if (macb_hz < 80000000)
692 config = MACB_BF(CLK, MACB_CLK_DIV32);
693 else
694 config = MACB_BF(CLK, MACB_CLK_DIV64);
695
696 return config;
697}
698
699static u32 gem_mdc_clk_div(int id, struct macb_device *macb)
700{
701 u32 config;
702 unsigned long macb_hz = get_macb_pclk_rate(id);
703
704 if (macb_hz < 20000000)
705 config = GEM_BF(CLK, GEM_CLK_DIV8);
706 else if (macb_hz < 40000000)
707 config = GEM_BF(CLK, GEM_CLK_DIV16);
708 else if (macb_hz < 80000000)
709 config = GEM_BF(CLK, GEM_CLK_DIV32);
710 else if (macb_hz < 120000000)
711 config = GEM_BF(CLK, GEM_CLK_DIV48);
712 else if (macb_hz < 160000000)
713 config = GEM_BF(CLK, GEM_CLK_DIV64);
714 else
715 config = GEM_BF(CLK, GEM_CLK_DIV96);
716
717 return config;
718}
719
Bo Shen0e6624a2013-09-18 15:07:44 +0800720/*
721 * Get the DMA bus width field of the network configuration register that we
722 * should program. We find the width from decoding the design configuration
723 * register to find the maximum supported data bus width.
724 */
725static u32 macb_dbw(struct macb_device *macb)
726{
727 switch (GEM_BFEXT(DBWDEF, gem_readl(macb, DCFG1))) {
728 case 4:
729 return GEM_BF(DBW, GEM_DBW128);
730 case 2:
731 return GEM_BF(DBW, GEM_DBW64);
732 case 1:
733 default:
734 return GEM_BF(DBW, GEM_DBW32);
735 }
Simon Glass5ad27512016-05-05 07:28:09 -0600736}
737
/*
 * Allocate coherent DMA memory for buffers, rings and the dummy
 * descriptor, and program the MDC clock divider (and DMA bus width on
 * GEM) so the PHY can be talked to before full init.
 */
static void _macb_eth_initialize(struct macb_device *macb)
{
	int id = 0;	/* This is not used by functions we call */
	u32 ncfgr;

	/* TODO: we need check the rx/tx_ring_dma is dcache line aligned */
	macb->rx_buffer = dma_alloc_coherent(MACB_RX_BUFFER_SIZE,
					     &macb->rx_buffer_dma);
	macb->rx_ring = dma_alloc_coherent(MACB_RX_DMA_DESC_SIZE,
					   &macb->rx_ring_dma);
	macb->tx_ring = dma_alloc_coherent(MACB_TX_DMA_DESC_SIZE,
					   &macb->tx_ring_dma);
	macb->dummy_desc = dma_alloc_coherent(MACB_TX_DUMMY_DMA_DESC_SIZE,
					      &macb->dummy_desc_dma);

	/*
	 * Do some basic initialization so that we at least can talk
	 * to the PHY
	 */
	if (macb_is_gem(macb)) {
		ncfgr = gem_mdc_clk_div(id, macb);
		ncfgr |= macb_dbw(macb);
	} else {
		ncfgr = macb_mdc_clk_div(id, macb);
	}

	macb_writel(macb, NCFGR, ncfgr);
}
766
767static int macb_send(struct eth_device *netdev, void *packet, int length)
768{
769 struct macb_device *macb = to_macb(netdev);
770
771 return _macb_send(macb, netdev->name, packet, length);
Bo Shen0e6624a2013-09-18 15:07:44 +0800772}
773
Simon Glass5ad27512016-05-05 07:28:09 -0600774static int macb_recv(struct eth_device *netdev)
775{
776 struct macb_device *macb = to_macb(netdev);
777 uchar *packet;
778 int length;
779
780 macb->wrapped = false;
781 for (;;) {
782 macb->next_rx_tail = macb->rx_tail;
783 length = _macb_recv(macb, &packet);
784 if (length >= 0) {
785 net_process_received_packet(packet, length);
786 reclaim_rx_buffers(macb, macb->next_rx_tail);
787 } else if (length < 0) {
788 return length;
789 }
790 }
791}
792
793static int macb_init(struct eth_device *netdev, bd_t *bd)
794{
795 struct macb_device *macb = to_macb(netdev);
796
797 return _macb_init(macb, netdev->name);
798}
799
/* eth_device .halt hook: stop the controller. */
static void macb_halt(struct eth_device *netdev)
{
	_macb_halt(to_macb(netdev));
}
806
807static int macb_write_hwaddr(struct eth_device *netdev)
808{
809 struct macb_device *macb = to_macb(netdev);
810
811 return _macb_write_hwaddr(macb, netdev->enetaddr);
812}
813
/*
 * Board entry point: allocate and register one MACB/GEM eth_device for
 * the controller at @regs with PHY address @phy_addr.  Returns 0 on
 * success, -1 on allocation failure.
 */
int macb_eth_initialize(int id, void *regs, unsigned int phy_addr)
{
	struct macb_device *macb;
	struct eth_device *netdev;

	macb = malloc(sizeof(struct macb_device));
	if (!macb) {
		printf("Error: Failed to allocate memory for MACB%d\n", id);
		return -1;
	}
	memset(macb, 0, sizeof(struct macb_device));

	netdev = &macb->netdev;

	macb->regs = regs;
	macb->phy_addr = phy_addr;

	/* Name reflects the detected IP flavour (GEM vs MACB). */
	if (macb_is_gem(macb))
		sprintf(netdev->name, "gmac%d", id);
	else
		sprintf(netdev->name, "macb%d", id);

	netdev->init = macb_init;
	netdev->halt = macb_halt;
	netdev->send = macb_send;
	netdev->recv = macb_recv;
	netdev->write_hwaddr = macb_write_hwaddr;

	/* DMA memory + MDC clock setup so the PHY is reachable. */
	_macb_eth_initialize(macb);

	eth_register(netdev);

#if defined(CONFIG_CMD_MII) || defined(CONFIG_PHYLIB)
	miiphy_register(netdev->name, macb_miiphy_read, macb_miiphy_write);
	macb->bus = miiphy_get_dev_by_name(netdev->name);
#endif
	return 0;
}
852
Jon Loeligerb1d408a2007-07-09 17:30:01 -0500853#endif