blob: cbe1e85222fa29d1d9dcf36b38c28a7fe51bc44f [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Jiandong Zhengc36e42e2014-08-01 20:37:16 -07002/*
Suji Velupillai07e6b052017-03-03 17:06:34 -08003 * Copyright 2014-2017 Broadcom.
Jiandong Zhengc36e42e2014-08-01 20:37:16 -07004 */
5
6#ifdef BCM_GMAC_DEBUG
7#ifndef DEBUG
8#define DEBUG
Simon Glassbdd5f812023-09-14 18:21:46 -06009#include <linux/printk.h>
Jiandong Zhengc36e42e2014-08-01 20:37:16 -070010#endif
11#endif
12
13#include <config.h>
14#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -070015#include <cpu_func.h>
Simon Glass0f2af882020-05-10 11:40:05 -060016#include <log.h>
Jiandong Zhengc36e42e2014-08-01 20:37:16 -070017#include <malloc.h>
18#include <net.h>
Simon Glass274e0b02020-05-10 11:39:56 -060019#include <asm/cache.h>
Jiandong Zhengc36e42e2014-08-01 20:37:16 -070020#include <asm/io.h>
21#include <phy.h>
Simon Glassdbd79542020-05-10 11:40:11 -060022#include <linux/delay.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060023#include <linux/bitops.h>
Jiandong Zhengc36e42e2014-08-01 20:37:16 -070024
25#include "bcm-sf2-eth.h"
26#include "bcm-sf2-eth-gmac.h"
27
/*
 * Poll until @exp becomes false or roughly @us microseconds elapse,
 * checking every 10us.
 *
 * Wrapped in do { } while (0) so an invocation followed by ';' behaves
 * as a single statement inside un-braced if/else bodies; the previous
 * bare-brace form produced a stray ';' that breaks if/else pairing.
 */
#define SPINWAIT(exp, us) do { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) {\
		udelay(10); \
		countdown -= 10; \
	} \
} while (0)
35
/*
 * Buffer and descriptor sizes rounded up to ARCH_DMA_MINALIGN so each
 * element can be cache-flushed without touching its neighbours.
 */
#define RX_BUF_SIZE_ALIGNED ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define TX_BUF_SIZE_ALIGNED ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
#define DESCP_SIZE_ALIGNED ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)

/* forward declarations: referenced by dma_deinit() before definition */
static int gmac_disable_dma(struct eth_dma *dma, int dir);
static int gmac_enable_dma(struct eth_dma *dma, int dir);
42
/* DMA descriptor, 16 bytes, laid out as the DMA64 engine expects */
typedef struct {
	/* misc control bits */
	uint32_t ctrl1;
	/* buffer count and address extension */
	uint32_t ctrl2;
	/* memory address of the data buffer, bits 31:0 */
	uint32_t addrlow;
	/* memory address of the data buffer, bits 63:32 */
	uint32_t addrhigh;
} dma64dd_t;
54
/* cached DMA control flags (DMA_CTRL_*) last programmed via dma_ctrlflags() */
uint32_t g_dmactrlflags;
56
/*
 * Update the cached DMA control flags.
 *
 * @mask:  flag bits to clear first
 * @flags: flag bits to set afterwards
 *
 * If DMA_CTRL_PEN ends up set, probe the TX control register to see
 * whether the parity-disable bit is actually writable; if it is not,
 * parity is unsupported and DMA_CTRL_PEN is dropped again.
 *
 * Returns the resulting flag word.
 */
static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
{
	debug("%s enter\n", __func__);

	g_dmactrlflags &= ~mask;
	g_dmactrlflags |= flags;

	/* If trying to enable parity, check if parity is actually supported */
	if (g_dmactrlflags & DMA_CTRL_PEN) {
		uint32_t control;

		control = readl(GMAC0_DMA_TX_CTRL_ADDR);
		writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
		if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
			/*
			 * We *can* disable it, therefore it is supported;
			 * restore control register
			 */
			writel(control, GMAC0_DMA_TX_CTRL_ADDR);
		} else {
			/* Not supported, don't allow it to be enabled */
			g_dmactrlflags &= ~DMA_CTRL_PEN;
		}
	}

	return g_dmactrlflags;
}
84
85static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
86{
87 uint32_t v = readl(reg);
88 v &= ~(value);
89 writel(v, reg);
90}
91
92static inline void reg32_set_bits(uint32_t reg, uint32_t value)
93{
94 uint32_t v = readl(reg);
95 v |= value;
96 writel(v, reg);
97}
98
99#ifdef BCM_GMAC_DEBUG
/* Debug helper: dump TX DMA registers, descriptor ring and buffer addresses */
static void dma_tx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("TX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_TX_CTRL_ADDR),
	       readl(GMAC0_DMA_TX_PTR_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_TX_STATUS0_ADDR),
	       readl(GMAC0_DMA_TX_STATUS1_ADDR));

	printf("TX Descriptors:\n");
	for (i = 0; i < TX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("TX Buffers:\n");
	/* print the address of each per-descriptor TX packet buffer */
	for (i = 0; i < TX_BUF_NUM; i++) {
		bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
131
/* Debug helper: dump RX DMA registers, descriptor ring and buffer addresses */
static void dma_rx_dump(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;

	printf("RX DMA Register:\n");
	printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
	       readl(GMAC0_DMA_RX_CTRL_ADDR),
	       readl(GMAC0_DMA_RX_PTR_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
	       readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
	       readl(GMAC0_DMA_RX_STATUS0_ADDR),
	       readl(GMAC0_DMA_RX_STATUS1_ADDR));

	printf("RX Descriptors:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
		       descp->ctrl1, descp->ctrl2,
		       descp->addrhigh, descp->addrlow);
	}

	printf("RX Buffers:\n");
	for (i = 0; i < RX_BUF_NUM; i++) {
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		printf("buf%d:0x%x; ", i, (uint32_t)bufp);
	}
	printf("\n");
}
162#endif
163
/*
 * Set up the TX descriptor ring.
 *
 * Zeroes descriptors and buffers, points every descriptor at its fixed
 * per-index buffer, marks the final descriptor D64_CTRL1_EOT so the
 * ring wraps, flushes everything to memory, then programs the ring
 * base and the initial (empty) last-descriptor pointer.
 *
 * Always returns 0.
 */
static int dma_tx_init(struct eth_dma *dma)
{
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	int i;
	uint32_t ctrl;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->tx_desc_aligned), 0,
	       TX_BUF_NUM * DESCP_SIZE_ALIGNED);
	memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);

	/* Initialize TX DMA descriptor table */
	for (i = 0; i < TX_BUF_NUM; i++) {
		/* descriptors are packed at sizeof(dma64dd_t) stride */
		descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
		bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
		/* clear buffer memory */
		memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);

		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (TX_BUF_NUM-1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = 0;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;
	}

	/* flush descriptor and buffer */
	descp = dma->tx_desc_aligned;
	bufp = dma->tx_buf;
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * TX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp +
			   TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);

	/* initialize the DMA channel */
	writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
	       GMAC0_DMA_TX_PTR_ADDR);

	return 0;
}
215
/*
 * Set up the RX descriptor ring.
 *
 * Zeroes descriptors and buffers, points every descriptor at its fixed
 * per-index buffer with a full RX_BUF_SIZE_ALIGNED byte count, marks
 * the final descriptor D64_CTRL1_EOT, flushes everything to memory and
 * posts the whole ring to the hardware: last_desc ends up one
 * descriptor past the final one, making every buffer available.
 *
 * Always returns 0.
 */
static int dma_rx_init(struct eth_dma *dma)
{
	uint32_t last_desc;
	dma64dd_t *descp = NULL;
	uint8_t *bufp;
	uint32_t ctrl;
	int i;

	debug("%s enter\n", __func__);

	/* clear descriptor memory */
	memset((void *)(dma->rx_desc_aligned), 0,
	       RX_BUF_NUM * DESCP_SIZE_ALIGNED);
	/* clear buffer memory */
	memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);

	/* Initialize RX DMA descriptor table */
	for (i = 0; i < RX_BUF_NUM; i++) {
		descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
		bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
		ctrl = 0;
		/* if last descr set endOfTable */
		if (i == (RX_BUF_NUM - 1))
			ctrl = D64_CTRL1_EOT;
		descp->ctrl1 = ctrl;
		descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
		descp->addrlow = (uint32_t)bufp;
		descp->addrhigh = 0;

		/* tracks the address just past the final descriptor */
		last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
			+ sizeof(dma64dd_t);
	}

	descp = dma->rx_desc_aligned;
	bufp = dma->rx_buf;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)descp,
			   (unsigned long)descp +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)(bufp),
			   (unsigned long)bufp +
			   RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);

	/* initailize the DMA channel */
	writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
	writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);

	/* now update the dma last descriptor */
	writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);

	return 0;
}
268
/*
 * Initialize both DMA rings and the global control flags.
 *
 * Clears DMA_CTRL_ROC/DMA_CTRL_PEN first (backward-compatible default),
 * builds the TX and RX rings, then re-enables Rx Overflow Continue
 * while keeping parity disabled (mirrors the tail of chip_init()).
 *
 * Always returns 0 (dma_tx_init/dma_rx_init cannot fail).
 */
static int dma_init(struct eth_dma *dma)
{
	debug(" %s enter\n", __func__);

	/*
	 * Default flags: For backwards compatibility both
	 * Rx Overflow Continue and Parity are DISABLED.
	 */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);

	debug("rx burst len 0x%x\n",
	      (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
	      >> D64_RC_BL_SHIFT);
	debug("tx burst len 0x%x\n",
	      (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
	      >> D64_XC_BL_SHIFT);

	dma_tx_init(dma);
	dma_rx_init(dma);

	/* From end of chip_init() */
	/* enable the overflow continue feature and disable parity */
	dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
		      DMA_CTRL_ROC /* value */);

	return 0;
}
296
297static int dma_deinit(struct eth_dma *dma)
298{
299 debug(" %s enter\n", __func__);
300
301 gmac_disable_dma(dma, MAC_DMA_RX);
302 gmac_disable_dma(dma, MAC_DMA_TX);
303
304 free(dma->tx_buf);
305 dma->tx_buf = NULL;
Suji Velupillai07e6b052017-03-03 17:06:34 -0800306 free(dma->tx_desc_aligned);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700307 dma->tx_desc_aligned = NULL;
308
309 free(dma->rx_buf);
310 dma->rx_buf = NULL;
Suji Velupillai07e6b052017-03-03 17:06:34 -0800311 free(dma->rx_desc_aligned);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700312 dma->rx_desc_aligned = NULL;
313
314 return 0;
315}
316
317int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
318{
Suji Velupillai07e6b052017-03-03 17:06:34 -0800319 uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700320
321 /* kick off the dma */
322 size_t len = length;
323 int txout = dma->cur_tx_index;
324 uint32_t flags;
325 dma64dd_t *descp = NULL;
326 uint32_t ctrl;
327 uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
328 sizeof(dma64dd_t)) & D64_XP_LD_MASK;
329 size_t buflen;
330
331 debug("%s enter\n", __func__);
332
333 /* load the buffer */
334 memcpy(bufp, packet, len);
335
336 /* Add 4 bytes for Ethernet FCS/CRC */
337 buflen = len + 4;
338
339 ctrl = (buflen & D64_CTRL2_BC_MASK);
340
341 /* the transmit will only be one frame or set SOF, EOF */
342 /* also set int on completion */
343 flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
344
345 /* txout points to the descriptor to uset */
346 /* if last descriptor then set EOT */
347 if (txout == (TX_BUF_NUM - 1)) {
348 flags |= D64_CTRL1_EOT;
349 last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
350 }
351
352 /* write the descriptor */
353 descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
354 descp->addrlow = (uint32_t)bufp;
355 descp->addrhigh = 0;
356 descp->ctrl1 = flags;
357 descp->ctrl2 = ctrl;
358
359 /* flush descriptor and buffer */
Suji Velupillai07e6b052017-03-03 17:06:34 -0800360 flush_dcache_range((unsigned long)dma->tx_desc_aligned,
361 (unsigned long)dma->tx_desc_aligned +
362 DESCP_SIZE_ALIGNED * TX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700363 flush_dcache_range((unsigned long)bufp,
Suji Velupillai07e6b052017-03-03 17:06:34 -0800364 (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700365
366 /* now update the dma last descriptor */
367 writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);
368
369 /* tx dma should be enabled so packet should go out */
370
371 /* update txout */
372 dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);
373
374 return 0;
375}
376
/*
 * Poll for TX completion.
 *
 * Reads the GMAC interrupt status; any of the TX interrupt bits
 * (I_XI0..I_XI3) indicates a completed transmit.
 *
 * NOTE(review): the acknowledge writes back the status with the TX
 * bits cleared. If this register is write-1-to-clear, that write acks
 * every *other* pending bit instead — confirm against the GMAC
 * register documentation.
 *
 * Returns true when a transmit has completed.
 */
bool gmac_check_tx_done(struct eth_dma *dma)
{
	/* wait for tx to complete */
	uint32_t intstatus;
	bool xfrdone = false;

	debug("%s enter\n", __func__);

	intstatus = readl(GMAC0_INT_STATUS_ADDR);

	debug("int(0x%x)\n", intstatus);
	if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
		xfrdone = true;
		/* clear the int bits */
		intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
		writel(intstatus, GMAC0_INT_STATUS_ADDR);
	} else {
		debug("Tx int(0x%x)\n", intstatus);
	}

	return xfrdone;
}
399
/*
 * Poll for and retrieve one received frame.
 *
 * @dma: DMA context
 * @buf: destination for the received frame payload
 *
 * Compares the software ring index against the hardware's current
 * descriptor; if they differ a frame is ready. The first HWRXOFF-area
 * bytes of the buffer hold hardware status (the first 16-bit word is
 * the received length); the payload follows at the receive offset
 * programmed in the RX control register. After copying the payload
 * out, the descriptor is recycled and re-posted to the ring.
 *
 * NOTE(review): flush_dcache_range() is used before *reading* the
 * DMA-written descriptor/buffer — this assumes flush also invalidates
 * on this architecture; confirm, otherwise stale data may be read.
 *
 * Returns the received length in bytes, or -1 if no frame is pending.
 */
int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
{
	void *bufp, *datap;
	size_t rcvlen = 0, buflen = 0;
	uint32_t stat0 = 0, stat1 = 0;
	uint32_t control, offset;
	uint8_t statbuf[HWRXOFF*2];

	int index, curr, active;
	dma64dd_t *descp = NULL;

	/* udelay(50); */

	/*
	 * this api will check if a packet has been received.
	 * If so it will return the address of the buffer and current
	 * descriptor index will be incremented to the
	 * next descriptor. Once done with the frame the buffer should be
	 * added back onto the descriptor and the lastdscr should be updated
	 * to this descriptor.
	 */
	index = dma->cur_rx_index;
	offset = (uint32_t)(dma->rx_desc_aligned);
	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
	/* convert byte offsets within the ring to descriptor indices */
	curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
	active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);

	/* check if any frame */
	if (index == curr)
		return -1;

	debug("received packet\n");
	debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
	/* remove warning */
	if (index == active)
		;

	/* get the packet pointer that corresponds to the rx descriptor */
	bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;

	descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
	/* flush descriptor and buffer */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);
	flush_dcache_range((unsigned long)bufp,
			   (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);

	buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);

	stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
	stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);

	debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
	      (uint32_t)bufp, index, buflen, stat0, stat1);

	dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);

	/* get buffer offset */
	control = readl(GMAC0_DMA_RX_CTRL_ADDR);
	offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
	/* first 16-bit word of the hardware status area is the length */
	rcvlen = *(uint16_t *)bufp;

	debug("Received %d bytes\n", rcvlen);
	/* copy status into temp buf then copy data from rx buffer */
	memcpy(statbuf, bufp, offset);
	datap = (void *)((uint32_t)bufp + offset);
	memcpy(buf, datap, rcvlen);

	/* update descriptor that is being added back on ring */
	descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
	descp->addrlow = (uint32_t)bufp;
	descp->addrhigh = 0;
	/* flush descriptor */
	flush_dcache_range((unsigned long)dma->rx_desc_aligned,
			   (unsigned long)dma->rx_desc_aligned +
			   DESCP_SIZE_ALIGNED * RX_BUF_NUM);

	/* set the lastdscr for the rx ring */
	writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);

	return (int)rcvlen;
}
484
/*
 * Disable one DMA channel.
 *
 * @dma: DMA context (unused; channels are fixed registers)
 * @dir: MAC_DMA_TX or MAC_DMA_RX
 *
 * TX is first suspended, then fully disabled, each with a bounded
 * busy-wait on the channel state (hardware-errata workarounds noted
 * inline). RX is disabled directly with a bounded busy-wait.
 *
 * Returns non-zero if the channel reached the disabled state.
 */
static int gmac_disable_dma(struct eth_dma *dma, int dir)
{
	int status;

	debug("%s enter\n", __func__);

	if (dir == MAC_DMA_TX) {
		/* address PR8249/PR7577 issue */
		/* suspend tx DMA first */
		writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED) &&
			 (status != D64_XS0_XS_IDLE) &&
			 (status != D64_XS0_XS_STOPPED), 10000);

		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_TX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
				     D64_XS0_XS_MASK)) !=
			  D64_XS0_XS_DISABLED), 10000);

		/* wait for the last transaction to complete */
		udelay(2);

		status = (status == D64_XS0_XS_DISABLED);
	} else {
		/*
		 * PR2414 WAR: DMA engines are not disabled until
		 * transfer finishes
		 */
		writel(0, GMAC0_DMA_RX_CTRL_ADDR);
		SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
				     D64_RS0_RS_MASK)) !=
			  D64_RS0_RS_DISABLED), 10000);

		status = (status == D64_RS0_RS_DISABLED);
	}

	return status;
}
529
530static int gmac_enable_dma(struct eth_dma *dma, int dir)
531{
532 uint32_t control;
533
534 debug("%s enter\n", __func__);
535
536 if (dir == MAC_DMA_TX) {
537 dma->cur_tx_index = 0;
538
539 /*
540 * These bits 20:18 (burstLen) of control register can be
541 * written but will take effect only if these bits are
542 * valid. So this will not affect previous versions
543 * of the DMA. They will continue to have those bits set to 0.
544 */
545 control = readl(GMAC0_DMA_TX_CTRL_ADDR);
546
547 control |= D64_XC_XE;
548 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
549 control |= D64_XC_PD;
550
551 writel(control, GMAC0_DMA_TX_CTRL_ADDR);
552
553 /* initailize the DMA channel */
554 writel((uint32_t)(dma->tx_desc_aligned),
555 GMAC0_DMA_TX_ADDR_LOW_ADDR);
556 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
557 } else {
558 dma->cur_rx_index = 0;
559
560 control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
561 D64_RC_AE) | D64_RC_RE;
562
563 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
564 control |= D64_RC_PD;
565
566 if (g_dmactrlflags & DMA_CTRL_ROC)
567 control |= D64_RC_OC;
568
569 /*
570 * These bits 20:18 (burstLen) of control register can be
571 * written but will take effect only if these bits are
572 * valid. So this will not affect previous versions
573 * of the DMA. They will continue to have those bits set to 0.
574 */
575 control &= ~D64_RC_BL_MASK;
576 /* Keep default Rx burstlen */
577 control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
578 control |= HWRXOFF << D64_RC_RO_SHIFT;
579
580 writel(control, GMAC0_DMA_RX_CTRL_ADDR);
581
582 /*
583 * the rx descriptor ring should have
584 * the addresses set properly;
585 * set the lastdscr for the rx ring
586 */
587 writel(((uint32_t)(dma->rx_desc_aligned) +
Suji Velupillai07e6b052017-03-03 17:06:34 -0800588 (RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700589 D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
590 }
591
592 return 0;
593}
594
595bool gmac_mii_busywait(unsigned int timeout)
596{
597 uint32_t tmp = 0;
598
599 while (timeout > 10) {
600 tmp = readl(GMAC_MII_CTRL_ADDR);
601 if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
602 udelay(10);
603 timeout -= 10;
604 } else {
605 break;
606 }
607 }
608 return tmp & (1 << GMAC_MII_BUSY_SHIFT);
609}
610
/*
 * Read a PHY register over MDIO.
 *
 * @bus:     MII bus handle (unused; single fixed controller)
 * @phyaddr: PHY address
 * @devad:   MMD device address (unused; clause-22 access only)
 * @reg:     PHY register number
 *
 * Returns the 16-bit register value, or -1 if the MDIO controller
 * stayed busy for more than 1ms before or after the transaction.
 */
int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
{
	uint32_t tmp = 0;
	u16 value = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Read operation */
	tmp = GMAC_MII_DATA_READ_CMD;
	tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT);
	debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
	debug("MII read data 0x%x\n", value);
	return value;
}
638
/*
 * Write a PHY register over MDIO.
 *
 * @bus:     MII bus handle (unused; single fixed controller)
 * @phyaddr: PHY address
 * @devad:   MMD device address (unused; clause-22 access only)
 * @reg:     PHY register number
 * @value:   16-bit value to write
 *
 * Returns 0 on success, or -1 if the MDIO controller stayed busy for
 * more than 1ms before or after the transaction.
 */
int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
		      u16 value)
{
	uint32_t tmp = 0;

	/* Busy wait timeout is 1ms */
	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
		return -1;
	}

	/* Write operation */
	tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
	tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
		(reg << GMAC_MII_PHY_REG_SHIFT));
	debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
	      tmp, phyaddr, reg, value);
	writel(tmp, GMAC_MII_DATA_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
		return -1;
	}

	return 0;
}
665
666void gmac_init_reset(void)
667{
668 debug("%s enter\n", __func__);
669
670 /* set command config reg CC_SR */
671 reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
672 udelay(GMAC_RESET_DELAY);
673}
674
675void gmac_clear_reset(void)
676{
677 debug("%s enter\n", __func__);
678
679 /* clear command config reg CC_SR */
680 reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
681 udelay(GMAC_RESET_DELAY);
682}
683
/*
 * Core MAC enable/disable sequence.
 *
 * @en: true to enable the RX/TX paths, false to leave them disabled
 *
 * The RX/TX enables are always cleared first while the MAC is held in
 * software reset; when enabling, a short settle delay follows the
 * reset release before CC_RE/CC_TE are asserted. The ordering of the
 * reset and register writes is required by the hardware — keep it.
 */
static void gmac_enable_local(bool en)
{
	uint32_t cmdcfg;

	debug("%s enter\n", __func__);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);

	/* put mac in reset */
	gmac_init_reset();

	cmdcfg |= CC_SR;

	/* first deassert rx_ena and tx_ena while in reset */
	cmdcfg &= ~(CC_RE | CC_TE);
	/* write command config reg */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	/* bring mac out of reset */
	gmac_clear_reset();

	/* if not enable exit now */
	if (!en)
		return;

	/* enable the mac transmit and receive paths now */
	udelay(2);
	cmdcfg &= ~CC_SR;
	cmdcfg |= (CC_RE | CC_TE);

	/* assert rx_ena and tx_ena when out of reset to enable the mac */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);

	return;
}
720
721int gmac_enable(void)
722{
723 gmac_enable_local(1);
724
725 /* clear interrupts */
726 writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
727 return 0;
728}
729
730int gmac_disable(void)
731{
732 gmac_enable_local(0);
733 return 0;
734}
735
736int gmac_set_speed(int speed, int duplex)
737{
738 uint32_t cmdcfg;
739 uint32_t hd_ena;
740 uint32_t speed_cfg;
741
742 hd_ena = duplex ? 0 : CC_HD;
743 if (speed == 1000) {
744 speed_cfg = 2;
745 } else if (speed == 100) {
746 speed_cfg = 1;
747 } else if (speed == 10) {
748 speed_cfg = 0;
749 } else {
Masahiro Yamada81e10422017-09-16 14:10:41 +0900750 pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700751 return -1;
752 }
753
754 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
755 cmdcfg &= ~(CC_ES_MASK | CC_HD);
756 cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);
757
758 printf("Change GMAC speed to %dMB\n", speed);
759 debug("GMAC speed cfg 0x%x\n", cmdcfg);
760 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
761
762 return 0;
763}
764
765int gmac_set_mac_addr(unsigned char *mac)
766{
767 /* set our local address */
768 debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
769 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
770 writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
771 writew(htons(*(uint32_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);
772
773 return 0;
774}
775
/*
 * One-time bring-up of the AMAC/GMAC block.
 *
 * @dev: ethernet device whose priv holds the eth_info/eth_dma context
 *
 * Resets the AMAC core, selects clocking/GMII mode, programs the
 * UniMAC command config (promiscuous, no loopback), initializes both
 * DMA rings, puts the switch into bypass mode, routes MDIO to the
 * internal GPHY with a 2.5MHz MDC, and sets interrupt lazy-count and
 * max frame length.
 *
 * Returns 0 on success, -1 on failure (DMA already torn down).
 */
int gmac_mac_init(struct eth_device *dev)
{
	struct eth_info *eth = (struct eth_info *)(dev->priv);
	struct eth_dma *dma = &(eth->dma);

	uint32_t tmp;
	uint32_t cmdcfg;
	int chipid;

	debug("%s enter\n", __func__);

	/* Always use GMAC0 */
	printf("Using GMAC%d\n", 0);

	/* Reset AMAC0 core */
	writel(0, AMAC0_IDM_RESET_ADDR);
	tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
	/* Set clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
	tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
	/* Set Tx clock */
	tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
	writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);

	/* reset gmac */
	/*
	 * As AMAC is just reset, NO need?
	 * set eth_data into loopback mode to ensure no rx traffic
	 * gmac_loopback(eth_data, TRUE);
	 * ET_TRACE(("%s gmac loopback\n", __func__));
	 * udelay(1);
	 */

	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
		    CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
		    CC_PAD_EN | CC_PF);
	cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
	/* put mac in reset */
	gmac_init_reset();
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	gmac_clear_reset();

	/* enable clear MIB on read */
	reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
	/* PHY: set smi_master to drive mdc_clk */
	reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);

	/* clear persistent sw intstatus */
	writel(0, GMAC0_INT_STATUS_ADDR);

	if (dma_init(dma) < 0) {
		pr_err("%s: GMAC dma_init failed\n", __func__);
		goto err_exit;
	}

	chipid = CHIPID;
	printf("%s: Chip ID: 0x%x\n", __func__, chipid);

	/* set switch bypass mode */
	tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
	tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);

	/* Switch mode */
	/* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */

	writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);

	tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
	tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
	writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);

	/* Set MDIO to internal GPHY */
	tmp = readl(GMAC_MII_CTRL_ADDR);
	/* Select internal MDC/MDIO bus*/
	tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
	/* select MDC/MDIO connecting to on-chip internal PHYs */
	tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
	/*
	 * give bit[6:0](MDCDIV) with required divisor to set
	 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
	 */
	tmp |= 0x1A;

	writel(tmp, GMAC_MII_CTRL_ADDR);

	if (gmac_mii_busywait(1000)) {
		pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
		goto err_exit;
	}

	/* Configure GMAC0 */
	/* enable one rx interrupt per received frame */
	writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);

	/* read command config reg */
	cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
	/* enable 802.3x tx flow control (honor received PAUSE frames) */
	cmdcfg &= ~CC_RPI;
	/* enable promiscuous mode */
	cmdcfg |= CC_PROM;
	/* Disable loopback mode */
	cmdcfg &= ~CC_ML;
	/* set the speed */
	cmdcfg &= ~(CC_ES_MASK | CC_HD);
	/* Set to 1Gbps and full duplex by default */
	cmdcfg |= (2 << CC_ES_SHIFT);

	/* put mac in reset */
	gmac_init_reset();
	/* write register */
	writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
	/* bring mac out of reset */
	gmac_clear_reset();

	/* set max frame lengths; account for possible vlan tag */
	writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);

	return 0;

err_exit:
	dma_deinit(dma);
	return -1;
}
900
901int gmac_add(struct eth_device *dev)
902{
903 struct eth_info *eth = (struct eth_info *)(dev->priv);
904 struct eth_dma *dma = &(eth->dma);
905 void *tmp;
906
907 /*
Suji Velupillai07e6b052017-03-03 17:06:34 -0800908 * Desc has to be 16-byte aligned. But for dcache flush it must be
909 * aligned to ARCH_DMA_MINALIGN.
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700910 */
Suji Velupillai07e6b052017-03-03 17:06:34 -0800911 tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700912 if (tmp == NULL) {
913 printf("%s: Failed to allocate TX desc Buffer\n", __func__);
914 return -1;
915 }
916
Suji Velupillai07e6b052017-03-03 17:06:34 -0800917 dma->tx_desc_aligned = (void *)tmp;
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700918 debug("TX Descriptor Buffer: %p; length: 0x%x\n",
Suji Velupillai07e6b052017-03-03 17:06:34 -0800919 dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700920
Suji Velupillai07e6b052017-03-03 17:06:34 -0800921 tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700922 if (tmp == NULL) {
923 printf("%s: Failed to allocate TX Data Buffer\n", __func__);
Suji Velupillai07e6b052017-03-03 17:06:34 -0800924 free(dma->tx_desc_aligned);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700925 return -1;
926 }
927 dma->tx_buf = (uint8_t *)tmp;
928 debug("TX Data Buffer: %p; length: 0x%x\n",
Suji Velupillai07e6b052017-03-03 17:06:34 -0800929 dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700930
Suji Velupillai07e6b052017-03-03 17:06:34 -0800931 /* Desc has to be 16-byte aligned */
932 tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700933 if (tmp == NULL) {
934 printf("%s: Failed to allocate RX Descriptor\n", __func__);
Suji Velupillai07e6b052017-03-03 17:06:34 -0800935 free(dma->tx_desc_aligned);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700936 free(dma->tx_buf);
937 return -1;
938 }
Suji Velupillai07e6b052017-03-03 17:06:34 -0800939 dma->rx_desc_aligned = (void *)tmp;
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700940 debug("RX Descriptor Buffer: %p, length: 0x%x\n",
Suji Velupillai07e6b052017-03-03 17:06:34 -0800941 dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700942
Suji Velupillai07e6b052017-03-03 17:06:34 -0800943 tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700944 if (tmp == NULL) {
945 printf("%s: Failed to allocate RX Data Buffer\n", __func__);
Suji Velupillai07e6b052017-03-03 17:06:34 -0800946 free(dma->tx_desc_aligned);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700947 free(dma->tx_buf);
Suji Velupillai07e6b052017-03-03 17:06:34 -0800948 free(dma->rx_desc_aligned);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700949 return -1;
950 }
Suji Velupillai07e6b052017-03-03 17:06:34 -0800951 dma->rx_buf = (uint8_t *)tmp;
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700952 debug("RX Data Buffer: %p; length: 0x%x\n",
Suji Velupillai07e6b052017-03-03 17:06:34 -0800953 dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
Jiandong Zhengc36e42e2014-08-01 20:37:16 -0700954
955 g_dmactrlflags = 0;
956
957 eth->phy_interface = PHY_INTERFACE_MODE_GMII;
958
959 dma->tx_packet = gmac_tx_packet;
960 dma->check_tx_done = gmac_check_tx_done;
961
962 dma->check_rx_done = gmac_check_rx_done;
963
964 dma->enable_dma = gmac_enable_dma;
965 dma->disable_dma = gmac_disable_dma;
966
967 eth->miiphy_read = gmac_miiphy_read;
968 eth->miiphy_write = gmac_miiphy_write;
969
970 eth->mac_init = gmac_mac_init;
971 eth->disable_mac = gmac_disable;
972 eth->enable_mac = gmac_enable;
973 eth->set_mac_addr = gmac_set_mac_addr;
974 eth->set_mac_speed = gmac_set_speed;
975
976 return 0;
977}