// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2017 Broadcom.
 */

#ifdef BCM_GMAC_DEBUG
#ifndef DEBUG
#define DEBUG
#include <linux/printk.h>
#endif
#endif

#include <config.h>
#include <cpu_func.h>
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>
#include <linux/delay.h>
#include <linux/bitops.h>

#include "bcm-sf2-eth.h"
#include "bcm-sf2-eth-gmac.h"

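/*
 * Poll until @exp becomes false or roughly @us microseconds have elapsed,
 * checking every 10 us. On exit the caller re-evaluates the condition (or
 * a status variable assigned inside it) to tell success from timeout.
 */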
#define SPINWAIT(exp, us) { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) {\
		udelay(10); \
		countdown -= 10; \
	} \
}

#define RX_BUF_SIZE_ALIGNED ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);

/* DMA Descriptor */
typedef struct {
	/* misc control bits */
	uint32_t ctrl1;
	/* buffer count and address extension */
	uint32_t ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t addrhigh;
} dma64dd_t;

uint32_t g_dmactrlflags;

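/*
 * Update the bits of g_dmactrlflags selected by @mask with @flags. If
 * parity (DMA_CTRL_PEN) is requested, probe the TX control register first
 * and keep the flag only when the hardware actually honors the parity
 * disable bit.
 */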
static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
	debug("%s enter\n", __func__);

	g_dmactrlflags &= ~mask;
	g_dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (g_dmactrlflags & DMA_CTRL_PEN) {
		uint32_t control;

		control = readl(GMAC0_DMA_TX_CTRL_ADDR);
		writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
		if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
			/*
			 * We *can* disable it, therefore it is supported;
			 * restore control register
			 */
			writel(control, GMAC0_DMA_TX_CTRL_ADDR);
		} else {
			/* Not supported, don't allow it to be enabled */
			g_dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	return g_dmactrlflags;
}

static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v &= ~(value);
	writel(v, reg);
}

static inline void reg32_set_bits(uint32_t reg, uint32_t value)
{
	uint32_t v = readl(reg);
	v |= value;
	writel(v, reg);
}

#ifdef BCM_GMAC_DEBUG
static void dma_tx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("TX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_TX_CTRL_ADDR),
	       readl(GMAC0_DMA_TX_PTR_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_TX_STATUS0_ADDR),
	       readl(GMAC0_DMA_TX_STATUS1_ADDR));

	printf("TX Descriptors:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("TX Buffers:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}

static void dma_rx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("RX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_RX_CTRL_ADDR),
	       readl(GMAC0_DMA_RX_PTR_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_RX_STATUS0_ADDR),
	       readl(GMAC0_DMA_RX_STATUS1_ADDR));

	printf("RX Descriptors:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("RX Buffers:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
#endif

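/*
 * Build the TX descriptor ring: clear descriptors and buffers, point each
 * descriptor at its aligned buffer, mark the last descriptor end-of-table,
 * then program the ring base and the "last descriptor" pointer into the
 * TX DMA channel registers.
 */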
static int dma_tx_init(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;
	uint32_t ctrl;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->tx_desc_aligned), 0,
	       TX_BUF_NUM * DESCP_SIZE_ALIGNED);
	memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

	/* Initialize TX DMA descriptor table */
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
		/* clear buffer memory */
		memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (TX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = 0;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;
	}

	/* flush descriptor and buffer */
	descp = dma->tx_desc_aligned;
	bufp = dma->tx_buf;
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp +
			   TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
	       GMAC0_DMA_TX_PTR_ADDR);

	return 0;
}

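/*
 * Build the RX descriptor ring. Each descriptor advertises a full
 * RX_BUF_SIZE_ALIGNED buffer in ctrl2; the RX pointer register is finally
 * set one descriptor past the last initialized entry so the whole ring is
 * available to the hardware.
 */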
static int dma_rx_init(struct eth_dma *dma)
{
	uint32_t last_desc;
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	uint32_t ctrl;
	int i;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->rx_desc_aligned), 0,
	       RX_BUF_NUM * DESCP_SIZE_ALIGNED);
	/* clear buffer memory */
	memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

	/* Initialize RX DMA descriptor table */
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (RX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;

		last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
			    + sizeof(dma64dd_t);
	}

	descp = dma->rx_desc_aligned;
	bufp = dma->rx_buf;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)(bufp),
			   (unsigned long)bufp +
			   RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

	return 0;
}

static int dma_init(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	/*
	 * Default flags: For backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED.
	 */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	debug("rx burst len 0x%x\n",
	      (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
	      >> D64_RC_BL_SHIFT);
	debug("tx burst len 0x%x\n",
	      (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
	      >> D64_XC_BL_SHIFT);

	dma_tx_init(dma);
	dma_rx_init(dma);

	/* From end of chip_init() */
	/* enable the overflow continue feature and disable parity */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
		      DMA_CTRL_ROC /* value */);

	return 0;
}

static int dma_deinit(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	gmac_disable_dma(dma, MAC_DMA_RX);
	gmac_disable_dma(dma, MAC_DMA_TX);

	free(dma->tx_buf);
	dma->tx_buf = NULL;
	free(dma->tx_desc_aligned);
	dma->tx_desc_aligned = NULL;

	free(dma->rx_buf);
	dma->rx_buf = NULL;
	free(dma->rx_desc_aligned);
	dma->rx_desc_aligned = NULL;

	return 0;
}

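/*
 * Queue one frame on the TX ring: copy it into the ring buffer for the
 * current descriptor, fill in SOF/EOF (and EOT on the last descriptor),
 * flush caches, then advance the TX "last descriptor" pointer so the DMA
 * engine picks the frame up.
 */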
316int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
317{
Suji Velupillai07e6b052017-03-03 17:06:34 -0800318 uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700319
320 /* kick off the dma */
321 size_t len = length;
322 int txout = dma->cur_tx_index;
323 uint32_t flags;
324 dma64dd_t *descp = NULL;
325 uint32_t ctrl;
326 uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
327 sizeof(dma64dd_t)) & D64_XP_LD_MASK;
328 size_t buflen;
329
330 debug("%s enter\n", __func__);
331
332 /* load the buffer */
333 memcpy(bufp, packet, len);
334
335 /* Add 4 bytes for Ethernet FCS/CRC */
336 buflen = len + 4;
337
338 ctrl = (buflen & D64_CTRL2_BC_MASK);
339
340 /* the transmit will only be one frame or set SOF, EOF */
341 /* also set int on completion */
342 flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
343
344 /* txout points to the descriptor to uset */
345 /* if last descriptor then set EOT */
346 if (txout == (TX_BUF_NUM - 1)) {
347 flags |= D64_CTRL1_EOT;
348 last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
349 }
350
351 /* write the descriptor */
352 descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
353 descp->addrlow = (uint32_t)bufp;
354 descp->addrhigh = 0;
355 descp->ctrl1 = flags;
356 descp->ctrl2 = ctrl;
357
358 /* flush descriptor and buffer */
Suji Velupillai07e6b052017-03-03 17:06:34 -0800359 flush_dcache_range((unsigned long)dma->tx_desc_aligned,
360 (unsigned long)dma->tx_desc_aligned +
361 DESCP_SIZE_ALIGNED * TX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700362 flush_dcache_range((unsigned long)bufp,
Suji Velupillai07e6b052017-03-03 17:06:34 -0800363 (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700364
365 /* now update the dma last descriptor */
366 writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);
367
368 /* tx dma should be enabled so packet should go out */
369
370 /* update txout */
371 dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);
372
373 return 0;
374}
375
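/*
 * Poll the GMAC interrupt status for TX completion; clears the TX
 * interrupt bits once a transfer has finished.
 */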
bool gmac_check_tx_done(struct eth_dma *dma)
{
	/* wait for tx to complete */
	uint32_t intstatus;
	bool xfrdone = false;

	debug("%s enter\n", __func__);

	intstatus = readl(GMAC0_INT_STATUS_ADDR);

	debug("int(0x%x)\n", intstatus);
	if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
		xfrdone = true;
		/* clear the int bits */
		intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
		writel(intstatus, GMAC0_INT_STATUS_ADDR);
	} else {
		debug("Tx int(0x%x)\n", intstatus);
	}

	return xfrdone;
}

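/*
 * Poll for a received frame. The software descriptor index is compared
 * against the hardware's current descriptor; when they differ, the frame
 * is copied out (skipping the receive-status header at the programmed
 * HWRXOFF offset) and its descriptor is recycled back onto the ring.
 */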
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF * 2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * This API checks whether a packet has been received. If so, it
	 * returns the address of the buffer and the current descriptor
	 * index is incremented to the next descriptor. Once done with the
	 * frame, the buffer should be added back onto the ring and the
	 * lastdscr should be updated to this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* silence set-but-unused warning for 'active' */
	if (index == active)
		;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}

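/*
 * Stop a DMA engine. TX is suspended first (the PR8249/PR7577 issue),
 * then the engine is disabled and polled until it reports the disabled
 * state; returns non-zero once that state is reached.
 */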
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}

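/*
 * Start a DMA engine in the given direction: reset the software ring
 * index, program the control register (enable bit, parity disable, and
 * for RX the overflow-continue flag and HWRXOFF receive offset), then
 * hand the descriptor ring to the hardware.
 */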
static int gmac_enable_dma(struct eth_dma *dma, int dir)
{
	uint32_t control;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		dma->cur_tx_index = 0;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control = readl(GMAC0_DMA_TX_CTRL_ADDR);

		control |= D64_XC_XE;
		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_XC_PD;

		writel(control, GMAC0_DMA_TX_CTRL_ADDR);

		/* initialize the DMA channel */
		writel((uint32_t)(dma->tx_desc_aligned),
		       GMAC0_DMA_TX_ADDR_LOW_ADDR);
		writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
	} else {
		dma->cur_rx_index = 0;

		control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
			   D64_RC_AE) | D64_RC_RE;

		if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
			control |= D64_RC_PD;

		if (g_dmactrlflags & DMA_CTRL_ROC)
			control |= D64_RC_OC;

		/*
		 * These bits 20:18 (burstLen) of control register can be
		 * written but will take effect only if these bits are
		 * valid. So this will not affect previous versions
		 * of the DMA. They will continue to have those bits set to 0.
		 */
		control &= ~D64_RC_BL_MASK;
		/* Keep default Rx burstlen */
		control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
		control |= HWRXOFF << D64_RC_RO_SHIFT;

		writel(control, GMAC0_DMA_RX_CTRL_ADDR);

		/*
		 * the rx descriptor ring should have
		 * the addresses set properly;
		 * set the lastdscr for the rx ring
		 */
		writel(((uint32_t)(dma->rx_desc_aligned) +
			(RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
		       D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
	}

	return 0;
}

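/*
 * Poll the MII busy bit for up to @timeout microseconds (in 10 us steps);
 * returns true if the bus is still busy when time runs out.
 */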
bool gmac_mii_busywait(unsigned int timeout)
{
	uint32_t tmp = 0;

	while (timeout > 10) {
		tmp = readl(GMAC_MII_CTRL_ADDR);
		if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
			udelay(10);
			timeout -= 10;
		} else {
			break;
		}
	}
	return tmp & (1 << GMAC_MII_BUSY_SHIFT);
}

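/*
 * Read a 16-bit PHY register over MDIO: wait for the bus to go idle,
 * issue the read command, wait again, then return the data (or -1 on a
 * busy timeout).
 */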
int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
	uint32_t tmp = 0;
	u16 value = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Read operation */
	tmp = GMAC_MII_DATA_READ_CMD;
	tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
	       (reg << GMAC_MII_PHY_REG_SHIFT);
	debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
	debug("MII read data 0x%x\n", value);
	return value;
}

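/*
 * Write a 16-bit value to a PHY register over MDIO; returns -1 if the
 * bus stays busy before or after the write command.
 */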
int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
		      u16 value)
{
	uint32_t tmp = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Write operation */
	tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
	tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT));
	debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
	      tmp, phyaddr, reg, value);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	return 0;
}

void gmac_init_reset(void)
{
	debug("%s enter\n", __func__);

	/* set command config reg CC_SR */
	reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

void gmac_clear_reset(void)
{
	debug("%s enter\n", __func__);

	/* clear command config reg CC_SR */
	reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
	udelay(GMAC_RESET_DELAY);
}

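/*
 * Toggle the MAC receive/transmit paths: rx_ena/tx_ena are always
 * deasserted across a soft reset first, and reasserted afterwards only
 * when enabling.
 */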
static void gmac_enable_local(bool en)
{
	uint32_t cmdcfg;

	debug("%s enter\n", __func__);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

	/* put mac in reset */
	gmac_init_reset();

	cmdcfg |= CC_SR;

	/* first deassert rx_ena and tx_ena while in reset */
	cmdcfg &= ~(CC_RE | CC_TE);
	/* write command config reg */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	/* bring mac out of reset */
	gmac_clear_reset();

	/* if not enabling, exit now */
	if (!en)
		return;

	/* enable the mac transmit and receive paths now */
	udelay(2);
	cmdcfg &= ~CC_SR;
	cmdcfg |= (CC_RE | CC_TE);

	/* assert rx_ena and tx_ena when out of reset to enable the mac */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
}

int gmac_enable(void)
{
	gmac_enable_local(1);

	/* clear interrupts */
	writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
	return 0;
}

int gmac_disable(void)
{
	gmac_enable_local(0);
	return 0;
}

int gmac_set_speed(int speed, int duplex)
{
	uint32_t cmdcfg;
	uint32_t hd_ena;
	uint32_t speed_cfg;

	hd_ena = duplex ? 0 : CC_HD;
	if (speed == 1000) {
		speed_cfg = 2;
	} else if (speed == 100) {
		speed_cfg = 1;
	} else if (speed == 10) {
		speed_cfg = 0;
	} else {
		pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
		return -1;
	}

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);

	printf("Change GMAC speed to %dMbps\n", speed);
	debug("GMAC speed cfg 0x%x\n", cmdcfg);
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	return 0;
}

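/*
 * The UniMAC splits the station address across two registers: the first
 * four bytes go into the MSB register and the remaining two into the LSB
 * register, both in network byte order.
 */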
int gmac_set_mac_addr(unsigned char *mac)
{
	/* set our local address */
	debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
	      mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
	writew(htons(*(uint16_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);

	return 0;
}

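/*
 * Bring up AMAC0/GMAC0: reset the core, select GMII mode and clocks,
 * program the UniMAC command config (promiscuous, 1Gbps full duplex by
 * default), set up the DMA rings, route MDIO to the internal GPHY with
 * the MDC divisor, then configure interrupt coalescing and the maximum
 * frame length.
 */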
int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);

	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * As AMAC is just reset, no need?
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		pr_err("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus */
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * program bits [6:0] (MDCDIV) with the divisor that sets the
	 * MDC clock frequency: 66 MHz / 0x1A is roughly 2.5 MHz
	 */
	tmp |= 0x1A;

	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}

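/*
 * Allocate the cache-aligned descriptor rings and packet buffers and wire
 * the GMAC operations into the generic eth_info/eth_dma hooks.
 */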
int gmac_add(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);
	void *tmp;

	/*
	 * Desc has to be 16-byte aligned. But for dcache flush it must be
	 * aligned to ARCH_DMA_MINALIGN.
	 */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX desc Buffer\n", __func__);
		return -1;
	}

	dma->tx_desc_aligned = (void *)tmp;
	debug("TX Descriptor Buffer: %p; length: 0x%x\n",
	      dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate TX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		return -1;
	}
	dma->tx_buf = (uint8_t *)tmp;
	debug("TX Data Buffer: %p; length: 0x%x\n",
	      dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* Desc has to be 16-byte aligned */
	tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Descriptor\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		return -1;
	}
	dma->rx_desc_aligned = (void *)tmp;
	debug("RX Descriptor Buffer: %p, length: 0x%x\n",
	      dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
	if (tmp == NULL) {
		printf("%s: Failed to allocate RX Data Buffer\n", __func__);
		free(dma->tx_desc_aligned);
		free(dma->tx_buf);
		free(dma->rx_desc_aligned);
		return -1;
	}
	dma->rx_buf = (uint8_t *)tmp;
	debug("RX Data Buffer: %p; length: 0x%x\n",
	      dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	g_dmactrlflags = 0;

	eth->phy_interface = PHY_INTERFACE_MODE_GMII;

	dma->tx_packet = gmac_tx_packet;
	dma->check_tx_done = gmac_check_tx_done;

	dma->check_rx_done = gmac_check_rx_done;

	dma->enable_dma = gmac_enable_dma;
	dma->disable_dma = gmac_disable_dma;

	eth->miiphy_read = gmac_miiphy_read;
	eth->miiphy_write = gmac_miiphy_write;

	eth->mac_init = gmac_mac_init;
	eth->disable_mac = gmac_disable;
	eth->enable_mac = gmac_enable;
	eth->set_mac_addr = gmac_set_mac_addr;
	eth->set_mac_speed = gmac_set_speed;

	return 0;
}