// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2017 Broadcom.
 */

#ifdef BCM_GMAC_DEBUG
#ifndef DEBUG
#define DEBUG
#endif
#endif

#include <config.h>
#include <common.h>
#include <malloc.h>
#include <net.h>
#include <asm/io.h>
#include <phy.h>

#include "bcm-sf2-eth.h"
#include "bcm-sf2-eth-gmac.h"

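/*
 * Poll 'exp' every 10 us until it goes false or roughly 'us' microseconds
 * have elapsed ('(us) + 9' rounds the budget up to whole 10 us ticks);
 * callers re-test the condition afterwards to tell success from timeout.
 */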
#define SPINWAIT(exp, us) { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) { \
		udelay(10); \
		countdown -= 10; \
	} \
}

#define RX_BUF_SIZE_ALIGNED	ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED	ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED	ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);

/* DMA Descriptor */
typedef struct {
	/* misc control bits */
	uint32_t ctrl1;
	/* buffer count and address extension */
	uint32_t ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t addrhigh;
} dma64dd_t;
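
/*
 * The engine walks these descriptors as a packed dma64dd_t array;
 * D64_CTRL1_EOT in ctrl1 marks the table's last entry so the hardware
 * wraps back to the base programmed into the ADDR_LOW/HIGH registers.
 */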

uint32_t g_dmactrlflags;

static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
	debug("%s enter\n", __func__);

	g_dmactrlflags &= ~mask;
	g_dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (g_dmactrlflags & DMA_CTRL_PEN) {
		uint32_t control;

		control = readl(GMAC0_DMA_TX_CTRL_ADDR);
		writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
		if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
			/*
			 * We *can* disable it, therefore it is supported;
			 * restore control register
			 */
			writel(control, GMAC0_DMA_TX_CTRL_ADDR);
		} else {
			/* Not supported, don't allow it to be enabled */
			g_dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	return g_dmactrlflags;
}

static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v &= ~(value);
	writel(v, reg);
}

static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v |= value;
	writel(v, reg);
}

#ifdef BCM_GMAC_DEBUG
static void dma_tx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("TX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_TX_CTRL_ADDR),
	       readl(GMAC0_DMA_TX_PTR_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_TX_STATUS0_ADDR),
	       readl(GMAC0_DMA_TX_STATUS1_ADDR));

	printf("TX Descriptors:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("TX Buffers:\n");
	/* Dump the TX buffer addresses */
	for (i = 0; i < TX_BUF_NUM; i++) {
		bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}

static void dma_rx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("RX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_RX_CTRL_ADDR),
	       readl(GMAC0_DMA_RX_PTR_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_RX_STATUS0_ADDR),
	       readl(GMAC0_DMA_RX_STATUS1_ADDR));

	printf("RX Descriptors:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("RX Buffers:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
#endif

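/*
 * Build the TX ring: zero TX_BUF_NUM descriptors and their per-slot buffers,
 * point each descriptor at its buffer, mark the last one EOT, flush both
 * regions to memory, then program the ring base into the DMA channel.
 */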
static int dma_tx_init(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;
	uint32_t ctrl;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->tx_desc_aligned), 0,
	       TX_BUF_NUM * DESCP_SIZE_ALIGNED);
	memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

	/* Initialize TX DMA descriptor table */
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
		/* clear buffer memory */
		memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (TX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = 0;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;
	}

	/* flush descriptor and buffer */
	descp = dma->tx_desc_aligned;
	bufp = dma->tx_buf;
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp +
			   TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
	       GMAC0_DMA_TX_PTR_ADDR);

	return 0;
}

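/*
 * Build the RX ring: every descriptor is pre-posted with an empty
 * RX_BUF_SIZE_ALIGNED buffer (length in ctrl2), and the last-descriptor
 * register is left pointing just past the final entry so the whole ring
 * is available to the hardware.
 */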
static int dma_rx_init(struct eth_dma *dma)
{
	uint32_t last_desc;
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	uint32_t ctrl;
	int i;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->rx_desc_aligned), 0,
	       RX_BUF_NUM * DESCP_SIZE_ALIGNED);
	/* clear buffer memory */
	memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

	/* Initialize RX DMA descriptor table */
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (RX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;

		last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
			+ sizeof(dma64dd_t);
	}

	descp = dma->rx_desc_aligned;
	bufp = dma->rx_buf;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)(bufp),
			   (unsigned long)bufp +
			   RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

	return 0;
}

static int dma_init(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	/*
	 * Default flags: For backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED.
	 */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	debug("rx burst len 0x%x\n",
	      (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
	      >> D64_RC_BL_SHIFT);
	debug("tx burst len 0x%x\n",
	      (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
	      >> D64_XC_BL_SHIFT);

	dma_tx_init(dma);
	dma_rx_init(dma);

	/* From end of chip_init() */
	/* enable the overflow continue feature and disable parity */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
		      DMA_CTRL_ROC /* value */);

	return 0;
}

static int dma_deinit(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	gmac_disable_dma(dma, MAC_DMA_RX);
	gmac_disable_dma(dma, MAC_DMA_TX);

	free(dma->tx_buf);
	dma->tx_buf = NULL;
	free(dma->tx_desc_aligned);
	dma->tx_desc_aligned = NULL;

	free(dma->rx_buf);
	dma->rx_buf = NULL;
	free(dma->rx_desc_aligned);
	dma->rx_desc_aligned = NULL;

	return 0;
}

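/*
 * Queue one frame for transmit: copy it into the slot buffer, describe it
 * with a single SOF|EOF|IOC descriptor, flush caches, then advance the
 * last-descriptor register so the engine picks it up.  The index wrap with
 * `& (TX_BUF_NUM - 1)` assumes TX_BUF_NUM is a power of two.
 */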
311int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
312{
Suji Velupillai07e6b052017-03-03 17:06:34 -0800313 uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700314
315 /* kick off the dma */
316 size_t len = length;
317 int txout = dma->cur_tx_index;
318 uint32_t flags;
319 dma64dd_t *descp = NULL;
320 uint32_t ctrl;
321 uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
322 sizeof(dma64dd_t)) & D64_XP_LD_MASK;
323 size_t buflen;
324
325 debug("%s enter\n", __func__);
326
327 /* load the buffer */
328 memcpy(bufp, packet, len);
329
330 /* Add 4 bytes for Ethernet FCS/CRC */
331 buflen = len + 4;
332
333 ctrl = (buflen & D64_CTRL2_BC_MASK);
334
335 /* the transmit will only be one frame or set SOF, EOF */
336 /* also set int on completion */
337 flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
338
339 /* txout points to the descriptor to uset */
340 /* if last descriptor then set EOT */
341 if (txout == (TX_BUF_NUM - 1)) {
342 flags |= D64_CTRL1_EOT;
343 last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
344 }
345
346 /* write the descriptor */
347 descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
348 descp->addrlow = (uint32_t)bufp;
349 descp->addrhigh = 0;
350 descp->ctrl1 = flags;
351 descp->ctrl2 = ctrl;
352
353 /* flush descriptor and buffer */
Suji Velupillai07e6b052017-03-03 17:06:34 -0800354 flush_dcache_range((unsigned long)dma->tx_desc_aligned,
355 (unsigned long)dma->tx_desc_aligned +
356 DESCP_SIZE_ALIGNED * TX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700357 flush_dcache_range((unsigned long)bufp,
Suji Velupillai07e6b052017-03-03 17:06:34 -0800358 (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700359
360 /* now update the dma last descriptor */
361 writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);
362
363 /* tx dma should be enabled so packet should go out */
364
365 /* update txout */
366 dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);
367
368 return 0;
369}
370
bool gmac_check_tx_done(struct eth_dma *dma)
{
	/* wait for tx to complete */
	uint32_t intstatus;
	bool xfrdone = false;

	debug("%s enter\n", __func__);

	intstatus = readl(GMAC0_INT_STATUS_ADDR);

	debug("int(0x%x)\n", intstatus);
	if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
		xfrdone = true;
		/* clear the int bits */
		intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
		writel(intstatus, GMAC0_INT_STATUS_ADDR);
	} else {
		debug("Tx int(0x%x)\n", intstatus);
	}

	return xfrdone;
}

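/*
 * Poll for a received frame.  The hardware prepends an HWRXOFF-byte receive
 * status header to each buffer; the code below assumes its first 16-bit word
 * holds the frame length, and reads the actual Rx offset back from the RO
 * field of the RX control register.
 */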
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF*2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * This routine checks if a packet has been received.  If so, it
	 * returns the address of the buffer and advances the current
	 * descriptor index to the next descriptor.  Once done with the
	 * frame, the buffer is added back onto the ring and the lastdscr
	 * register is updated to point at this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* suppress unused-variable warning for 'active' */
	if (index == active)
		;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}

static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}

static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
	uint32_t control;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		dma->cur_tx_index = 0;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control = readl(GMAC0_DMA_TX_CTRL_ADDR);

		control |= D64_XC_XE;
		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_XC_PD;

		writel(control, GMAC0_DMA_TX_CTRL_ADDR);

		/* initialize the DMA channel */
		writel((uint32_t)(dma->tx_desc_aligned),
		       GMAC0_DMA_TX_ADDR_LOW_ADDR);
		writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
	} else {
		dma->cur_rx_index = 0;

		control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
			   D64_RC_AE) | D64_RC_RE;

		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (g_dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control &= ~D64_RC_BL_MASK;
		/* Keep default Rx burstlen */
		control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
		control |= HWRXOFF << D64_RC_RO_SHIFT;

		writel(control, GMAC0_DMA_RX_CTRL_ADDR);

		/*
		 * the rx descriptor ring should have the addresses set
		 * properly; set the lastdscr for the rx ring.  Descriptors
		 * are sizeof(dma64dd_t) apart, so the last one sits at
		 * (RX_BUF_NUM - 1) * sizeof(dma64dd_t) from the base, not
		 * at a buffer-size stride.
		 */
		writel(((uint32_t)(dma->rx_desc_aligned) +
			(RX_BUF_NUM - 1) * sizeof(dma64dd_t)) &
		       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
	}

	return 0;
}

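/* Return non-zero if the MII interface is still busy after ~timeout us */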
bool gmac_mii_busywait(unsigned int timeout)
{
	uint32_t tmp = 0;

	while (timeout > 10) {
		tmp = readl(GMAC_MII_CTRL_ADDR);
		if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
			udelay(10);
			timeout -= 10;
		} else {
			break;
		}
	}
	return tmp & (1 << GMAC_MII_BUSY_SHIFT);
}

int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
	uint32_t tmp = 0;
	u16 value = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Read operation */
	tmp = GMAC_MII_DATA_READ_CMD;
	tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT);
	debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
	debug("MII read data 0x%x\n", value);
	return value;
}

int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
		      u16 value)
{
	uint32_t tmp = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Write operation */
	tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
	tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT));
	debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
	      tmp, phyaddr, reg, value);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	return 0;
}

void gmac_init_reset(void)
{
	debug("%s enter\n", __func__);

	/* set command config reg CC_SR */
	reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

void gmac_clear_reset(void)
{
	debug("%s enter\n", __func__);

	/* clear command config reg CC_SR */
	reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

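/*
 * Enable or disable the MAC transmit/receive paths.  Command-config updates
 * are bracketed by gmac_init_reset()/gmac_clear_reset() because the UniMAC
 * appears to require CC_RE/CC_TE changes to be made while in software reset.
 */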
static void gmac_enable_local(bool en)
{
	uint32_t cmdcfg;

	debug("%s enter\n", __func__);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

	/* put mac in reset */
	gmac_init_reset();

	cmdcfg |= CC_SR;

	/* first deassert rx_ena and tx_ena while in reset */
	cmdcfg &= ~(CC_RE | CC_TE);
	/* write command config reg */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	/* bring mac out of reset */
	gmac_clear_reset();

	/* if not enabling, exit now */
	if (!en)
		return;

	/* enable the mac transmit and receive paths now */
	udelay(2);
	cmdcfg &= ~CC_SR;
	cmdcfg |= (CC_RE | CC_TE);

	/* assert rx_ena and tx_ena when out of reset to enable the mac */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	return;
}

int gmac_enable(void)
{
	gmac_enable_local(1);

	/* clear interrupts */
	writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
	return 0;
}

int gmac_disable(void)
{
	gmac_enable_local(0);
	return 0;
}

int gmac_set_speed(int speed, int duplex)
{
	uint32_t cmdcfg;
	uint32_t hd_ena;
	uint32_t speed_cfg;

	hd_ena = duplex ? 0 : CC_HD;
	if (speed == 1000) {
		speed_cfg = 2;
	} else if (speed == 100) {
		speed_cfg = 1;
	} else if (speed == 10) {
		speed_cfg = 0;
	} else {
		pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
		return -1;
	}

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);

	printf("Change GMAC speed to %d Mb/s\n", speed);
	debug("GMAC speed cfg 0x%x\n", cmdcfg);
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	return 0;
}

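/*
 * The 6-byte station address is split across two registers: the MSB register
 * takes the first four bytes and the LSB register the remaining two, both in
 * network byte order (hence the htonl/htons swaps).
 */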
int gmac_set_mac_addr(unsigned char *mac)
{
	/* set our local address */
	debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	      mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
	writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);

	return 0;
}

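/*
 * One-time MAC bring-up: reset the AMAC core, select the GMII clocking mode,
 * program the UniMAC command config under software reset, initialize the DMA
 * rings, put the internal switch into bypass mode, route MDIO to the on-chip
 * GPHY with a ~2.5 MHz MDC, and set interrupt coalescing and max frame length.
 */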
int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);

	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * As AMAC is just reset, NO need?
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		pr_err("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus */
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * give bit[6:0] (MDCDIV) the required divisor to set
	 * the MDC clock frequency, 66 MHz / 0x1A = ~2.5 MHz
	 */
	tmp |= 0x1A;

	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}

int gmac_add(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);
	void *tmp;

	/*
	 * Desc has to be 16-byte aligned. But for dcache flush it must be
	 * aligned to ARCH_DMA_MINALIGN.
	 */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX desc Buffer\n", __func__);
		return -1;
	}

	dma->tx_desc_aligned = (void *)tmp;
	debug("TX Descriptor Buffer: %p; length: 0x%x\n",
	      dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		return -1;
	}
	dma->tx_buf = (uint8_t *)tmp;
	debug("TX Data Buffer: %p; length: 0x%x\n",
	      dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* Desc has to be 16-byte aligned */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Descriptor\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		return -1;
	}
	dma->rx_desc_aligned = (void *)tmp;
	debug("RX Descriptor Buffer: %p, length: 0x%x\n",
	      dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		free(dma->rx_desc_aligned);
		return -1;
	}
	dma->rx_buf = (uint8_t *)tmp;
	debug("RX Data Buffer: %p; length: 0x%x\n",
	      dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	g_dmactrlflags = 0;

	eth->phy_interface = PHY_INTERFACE_MODE_GMII;

	dma->tx_packet = gmac_tx_packet;
	dma->check_tx_done = gmac_check_tx_done;

	dma->check_rx_done = gmac_check_rx_done;

	dma->enable_dma = gmac_enable_dma;
	dma->disable_dma = gmac_disable_dma;

	eth->miiphy_read = gmac_miiphy_read;
	eth->miiphy_write = gmac_miiphy_write;

	eth->mac_init = gmac_mac_init;
	eth->disable_mac = gmac_disable;
	eth->enable_mac = gmac_enable;
	eth->set_mac_addr = gmac_set_mac_addr;
	eth->set_mac_speed = gmac_set_speed;

	return 0;
}