/*
2 * Xilinx xps_ll_temac ethernet driver for u-boot
3 *
4 * SDMA sub-controller
5 *
6 * Copyright (C) 2011 - 2012 Stephan Linz <linz@li-pro.net>
7 * Copyright (C) 2008 - 2011 Michal Simek <monstr@monstr.eu>
8 * Copyright (C) 2008 - 2011 PetaLogix
9 *
10 * Based on Yoshio Kashiwagi kashiwagi@co-nss.co.jp driver
11 * Copyright (C) 2008 Nissin Systems Co.,Ltd.
12 * March 2008 created
13 *
14 * CREDITS: tsec driver
15 *
16 * This program is free software; you can redistribute it and/or modify it
17 * under the terms of the GNU General Public License as published by the
18 * Free Software Foundation; either version 2 of the License, or (at your
19 * option) any later version.
20 *
21 * [0]: http://www.xilinx.com/support/documentation
22 *
23 * [M]: [0]/ip_documentation/mpmc.pdf
24 * [S]: [0]/ip_documentation/xps_ll_temac.pdf
25 * [A]: [0]/application_notes/xapp1041.pdf
26 */
27
28#include <config.h>
29#include <common.h>
30#include <net.h>
31
32#include <asm/types.h>
33#include <asm/io.h>
34
35#include "xilinx_ll_temac.h"
36#include "xilinx_ll_temac_sdma.h"
37
38#define TX_BUF_CNT 2
39
40static unsigned int rx_idx; /* index of the current RX buffer */
41static unsigned int tx_idx; /* index of the current TX buffer */
42
43struct rtx_cdmac_bd {
44 struct cdmac_bd rx[PKTBUFSRX];
45 struct cdmac_bd tx[TX_BUF_CNT];
46};
47
48/*
49 * DMA Buffer Descriptor alignment
50 *
51 * If the address contained in the Next Descriptor Pointer register is not
52 * 8-word aligned or reaches beyond the range of available memory, the SDMA
53 * halts processing and sets the CDMAC_BD_STCTRL_ERROR bit in the respective
54 * status register (tx_chnl_sts or rx_chnl_sts).
55 *
56 * [1]: [0]/ip_documentation/mpmc.pdf
57 * page 161, Next Descriptor Pointer
58 */
59static struct rtx_cdmac_bd cdmac_bd __aligned(32);
60
61#if defined(CONFIG_XILINX_440) || defined(CONFIG_XILINX_405)
62
63/*
64 * Indirect DCR access operations mi{ft}dcr_xilinx() espacialy
65 * for Xilinx PowerPC implementations on FPGA.
66 *
67 * FIXME: This part should go up to arch/powerpc -- but where?
68 */
69#include <asm/processor.h>
70#define XILINX_INDIRECT_DCR_ADDRESS_REG 0
71#define XILINX_INDIRECT_DCR_ACCESS_REG 1
72inline unsigned mifdcr_xilinx(const unsigned dcrn)
73{
74 mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG, dcrn);
75 return mfdcr(XILINX_INDIRECT_DCR_ACCESS_REG);
76}
77inline void mitdcr_xilinx(const unsigned dcrn, int val)
78{
79 mtdcr(XILINX_INDIRECT_DCR_ADDRESS_REG, dcrn);
80 mtdcr(XILINX_INDIRECT_DCR_ACCESS_REG, val);
81}
82
83/* Xilinx Device Control Register (DCR) in/out accessors */
84inline unsigned ll_temac_xldcr_in32(phys_addr_t addr)
85{
86 return mifdcr_xilinx((const unsigned)addr);
87}
88inline void ll_temac_xldcr_out32(phys_addr_t addr, unsigned value)
89{
90 mitdcr_xilinx((const unsigned)addr, value);
91}
92
93void ll_temac_collect_xldcr_sdma_reg_addr(struct eth_device *dev)
94{
95 struct ll_temac *ll_temac = dev->priv;
96 phys_addr_t dmac_ctrl = ll_temac->ctrladdr;
97 phys_addr_t *ra = ll_temac->sdma_reg_addr;
98
99 ra[TX_NXTDESC_PTR] = dmac_ctrl + TX_NXTDESC_PTR;
100 ra[TX_CURBUF_ADDR] = dmac_ctrl + TX_CURBUF_ADDR;
101 ra[TX_CURBUF_LENGTH] = dmac_ctrl + TX_CURBUF_LENGTH;
102 ra[TX_CURDESC_PTR] = dmac_ctrl + TX_CURDESC_PTR;
103 ra[TX_TAILDESC_PTR] = dmac_ctrl + TX_TAILDESC_PTR;
104 ra[TX_CHNL_CTRL] = dmac_ctrl + TX_CHNL_CTRL;
105 ra[TX_IRQ_REG] = dmac_ctrl + TX_IRQ_REG;
106 ra[TX_CHNL_STS] = dmac_ctrl + TX_CHNL_STS;
107 ra[RX_NXTDESC_PTR] = dmac_ctrl + RX_NXTDESC_PTR;
108 ra[RX_CURBUF_ADDR] = dmac_ctrl + RX_CURBUF_ADDR;
109 ra[RX_CURBUF_LENGTH] = dmac_ctrl + RX_CURBUF_LENGTH;
110 ra[RX_CURDESC_PTR] = dmac_ctrl + RX_CURDESC_PTR;
111 ra[RX_TAILDESC_PTR] = dmac_ctrl + RX_TAILDESC_PTR;
112 ra[RX_CHNL_CTRL] = dmac_ctrl + RX_CHNL_CTRL;
113 ra[RX_IRQ_REG] = dmac_ctrl + RX_IRQ_REG;
114 ra[RX_CHNL_STS] = dmac_ctrl + RX_CHNL_STS;
115 ra[DMA_CONTROL_REG] = dmac_ctrl + DMA_CONTROL_REG;
116}
117
#endif /* CONFIG_XILINX_440 || CONFIG_XILINX_405 */
119
120/* Xilinx Processor Local Bus (PLB) in/out accessors */
121inline unsigned ll_temac_xlplb_in32(phys_addr_t addr)
122{
123 return in_be32((void *)addr);
124}
125inline void ll_temac_xlplb_out32(phys_addr_t addr, unsigned value)
126{
127 out_be32((void *)addr, value);
128}
129
130/* collect all register addresses for Xilinx PLB in/out accessors */
131void ll_temac_collect_xlplb_sdma_reg_addr(struct eth_device *dev)
132{
133 struct ll_temac *ll_temac = dev->priv;
134 struct sdma_ctrl *sdma_ctrl = (void *)ll_temac->ctrladdr;
135 phys_addr_t *ra = ll_temac->sdma_reg_addr;
136
137 ra[TX_NXTDESC_PTR] = (phys_addr_t)&sdma_ctrl->tx_nxtdesc_ptr;
138 ra[TX_CURBUF_ADDR] = (phys_addr_t)&sdma_ctrl->tx_curbuf_addr;
139 ra[TX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->tx_curbuf_length;
140 ra[TX_CURDESC_PTR] = (phys_addr_t)&sdma_ctrl->tx_curdesc_ptr;
141 ra[TX_TAILDESC_PTR] = (phys_addr_t)&sdma_ctrl->tx_taildesc_ptr;
142 ra[TX_CHNL_CTRL] = (phys_addr_t)&sdma_ctrl->tx_chnl_ctrl;
143 ra[TX_IRQ_REG] = (phys_addr_t)&sdma_ctrl->tx_irq_reg;
144 ra[TX_CHNL_STS] = (phys_addr_t)&sdma_ctrl->tx_chnl_sts;
145 ra[RX_NXTDESC_PTR] = (phys_addr_t)&sdma_ctrl->rx_nxtdesc_ptr;
146 ra[RX_CURBUF_ADDR] = (phys_addr_t)&sdma_ctrl->rx_curbuf_addr;
147 ra[RX_CURBUF_LENGTH] = (phys_addr_t)&sdma_ctrl->rx_curbuf_length;
148 ra[RX_CURDESC_PTR] = (phys_addr_t)&sdma_ctrl->rx_curdesc_ptr;
149 ra[RX_TAILDESC_PTR] = (phys_addr_t)&sdma_ctrl->rx_taildesc_ptr;
150 ra[RX_CHNL_CTRL] = (phys_addr_t)&sdma_ctrl->rx_chnl_ctrl;
151 ra[RX_IRQ_REG] = (phys_addr_t)&sdma_ctrl->rx_irq_reg;
152 ra[RX_CHNL_STS] = (phys_addr_t)&sdma_ctrl->rx_chnl_sts;
153 ra[DMA_CONTROL_REG] = (phys_addr_t)&sdma_ctrl->dma_control_reg;
154}
155
156/* Check for TX and RX channel errors. */
157static inline int ll_temac_sdma_error(struct eth_device *dev)
158{
159 int err;
160 struct ll_temac *ll_temac = dev->priv;
161 phys_addr_t *ra = ll_temac->sdma_reg_addr;
162
163 err = ll_temac->in32(ra[TX_CHNL_STS]) & CHNL_STS_ERROR;
164 err |= ll_temac->in32(ra[RX_CHNL_STS]) & CHNL_STS_ERROR;
165
166 return err;
167}
168
169int ll_temac_init_sdma(struct eth_device *dev)
170{
171 struct ll_temac *ll_temac = dev->priv;
172 struct cdmac_bd *rx_dp;
173 struct cdmac_bd *tx_dp;
174 phys_addr_t *ra = ll_temac->sdma_reg_addr;
175 int i;
176
177 printf("%s: SDMA: %d Rx buffers, %d Tx buffers\n",
178 dev->name, PKTBUFSRX, TX_BUF_CNT);
179
180 /* Initialize the Rx Buffer descriptors */
181 for (i = 0; i < PKTBUFSRX; i++) {
182 rx_dp = &cdmac_bd.rx[i];
183 memset(rx_dp, 0, sizeof(*rx_dp));
184 rx_dp->next_p = rx_dp;
185 rx_dp->buf_len = PKTSIZE_ALIGN;
186 rx_dp->phys_buf_p = (u8 *)NetRxPackets[i];
187 flush_cache((u32)rx_dp->phys_buf_p, PKTSIZE_ALIGN);
188 }
189 flush_cache((u32)cdmac_bd.rx, sizeof(cdmac_bd.rx));
190
191 /* Initialize the TX Buffer Descriptors */
192 for (i = 0; i < TX_BUF_CNT; i++) {
193 tx_dp = &cdmac_bd.tx[i];
194 memset(tx_dp, 0, sizeof(*tx_dp));
195 tx_dp->next_p = tx_dp;
196 }
197 flush_cache((u32)cdmac_bd.tx, sizeof(cdmac_bd.tx));
198
199 /* Reset index counter to the Rx and Tx Buffer descriptors */
200 rx_idx = tx_idx = 0;
201
202 /* initial Rx DMA start by writing to respective TAILDESC_PTR */
203 ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
204 ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
205
206 return 0;
207}
208
209int ll_temac_halt_sdma(struct eth_device *dev)
210{
211 unsigned timeout = 50; /* 1usec * 50 = 50usec */
212 struct ll_temac *ll_temac = dev->priv;
213 phys_addr_t *ra = ll_temac->sdma_reg_addr;
214
215 /*
216 * Soft reset the DMA
217 *
218 * Quote from MPMC documentation: Writing a 1 to this field
219 * forces the DMA engine to shutdown and reset itself. After
220 * setting this bit, software must poll it until the bit is
221 * cleared by the DMA. This indicates that the reset process
222 * is done and the pipeline has been flushed.
223 */
224 ll_temac->out32(ra[DMA_CONTROL_REG], DMA_CONTROL_RESET);
225 while (timeout && (ll_temac->in32(ra[DMA_CONTROL_REG])
226 & DMA_CONTROL_RESET)) {
227 timeout--;
228 udelay(1);
229 }
230
231 if (!timeout) {
232 printf("%s: Timeout\n", __func__);
233 return -1;
234 }
235
236 return 0;
237}
238
239int ll_temac_reset_sdma(struct eth_device *dev)
240{
241 u32 r;
242 struct ll_temac *ll_temac = dev->priv;
243 phys_addr_t *ra = ll_temac->sdma_reg_addr;
244
245 /* Soft reset the DMA. */
246 if (ll_temac_halt_sdma(dev))
247 return -1;
248
249 /* Now clear the interrupts. */
250 r = ll_temac->in32(ra[TX_CHNL_CTRL]);
251 r &= ~CHNL_CTRL_IRQ_MASK;
252 ll_temac->out32(ra[TX_CHNL_CTRL], r);
253
254 r = ll_temac->in32(ra[RX_CHNL_CTRL]);
255 r &= ~CHNL_CTRL_IRQ_MASK;
256 ll_temac->out32(ra[RX_CHNL_CTRL], r);
257
258 /* Now ACK pending IRQs. */
259 ll_temac->out32(ra[TX_IRQ_REG], IRQ_REG_IRQ_MASK);
260 ll_temac->out32(ra[RX_IRQ_REG], IRQ_REG_IRQ_MASK);
261
262 /* Set tail-ptr mode, disable errors for both channels. */
263 ll_temac->out32(ra[DMA_CONTROL_REG],
264 /* Enable use of tail pointer register */
265 DMA_CONTROL_TPE |
266 /* Disable error when 2 or 4 bit coalesce cnt overfl */
267 DMA_CONTROL_RXOCEID |
268 /* Disable error when 2 or 4 bit coalesce cnt overfl */
269 DMA_CONTROL_TXOCEID);
270
271 return 0;
272}
273
274int ll_temac_recv_sdma(struct eth_device *dev)
275{
276 int length, pb_idx;
277 struct cdmac_bd *rx_dp = &cdmac_bd.rx[rx_idx];
278 struct ll_temac *ll_temac = dev->priv;
279 phys_addr_t *ra = ll_temac->sdma_reg_addr;
280
281 if (ll_temac_sdma_error(dev)) {
282
283 if (ll_temac_reset_sdma(dev))
284 return -1;
285
286 ll_temac_init_sdma(dev);
287 }
288
289 flush_cache((u32)rx_dp, sizeof(*rx_dp));
290
291 if (!(rx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED))
292 return 0;
293
294 if (rx_dp->sca.stctrl & (CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP)) {
295 pb_idx = rx_idx;
296 length = rx_dp->sca.app[4] & CDMAC_BD_APP4_RXBYTECNT_MASK;
297 } else {
298 pb_idx = -1;
299 length = 0;
300 printf("%s: Got part of package, unsupported (%x)\n",
301 __func__, rx_dp->sca.stctrl);
302 }
303
304 /* flip the buffer */
305 flush_cache((u32)rx_dp->phys_buf_p, length);
306
307 /* reset the current descriptor */
308 rx_dp->sca.stctrl = 0;
309 rx_dp->sca.app[4] = 0;
310 flush_cache((u32)rx_dp, sizeof(*rx_dp));
311
312 /* Find next empty buffer descriptor, preparation for next iteration */
313 rx_idx = (rx_idx + 1) % PKTBUFSRX;
314 rx_dp = &cdmac_bd.rx[rx_idx];
315 flush_cache((u32)rx_dp, sizeof(*rx_dp));
316
317 /* DMA start by writing to respective TAILDESC_PTR */
318 ll_temac->out32(ra[RX_CURDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
319 ll_temac->out32(ra[RX_TAILDESC_PTR], (int)&cdmac_bd.rx[rx_idx]);
320
321 if (length > 0 && pb_idx != -1)
322 NetReceive(NetRxPackets[pb_idx], length);
323
324 return 0;
325}
326
Stephan Linz458e4fe2012-05-22 12:18:09 +0000327int ll_temac_send_sdma(struct eth_device *dev, void *packet, int length)
Stephan Linze1fd4be2012-02-25 00:48:31 +0000328{
329 unsigned timeout = 50; /* 1usec * 50 = 50usec */
330 struct cdmac_bd *tx_dp = &cdmac_bd.tx[tx_idx];
331 struct ll_temac *ll_temac = dev->priv;
332 phys_addr_t *ra = ll_temac->sdma_reg_addr;
333
334 if (ll_temac_sdma_error(dev)) {
335
336 if (ll_temac_reset_sdma(dev))
337 return -1;
338
339 ll_temac_init_sdma(dev);
340 }
341
342 tx_dp->phys_buf_p = (u8 *)packet;
343 tx_dp->buf_len = length;
344 tx_dp->sca.stctrl = CDMAC_BD_STCTRL_SOP | CDMAC_BD_STCTRL_EOP |
345 CDMAC_BD_STCTRL_STOP_ON_END;
346
347 flush_cache((u32)packet, length);
348 flush_cache((u32)tx_dp, sizeof(*tx_dp));
349
350 /* DMA start by writing to respective TAILDESC_PTR */
351 ll_temac->out32(ra[TX_CURDESC_PTR], (int)tx_dp);
352 ll_temac->out32(ra[TX_TAILDESC_PTR], (int)tx_dp);
353
354 /* Find next empty buffer descriptor, preparation for next iteration */
355 tx_idx = (tx_idx + 1) % TX_BUF_CNT;
356 tx_dp = &cdmac_bd.tx[tx_idx];
357
358 do {
359 flush_cache((u32)tx_dp, sizeof(*tx_dp));
360 udelay(1);
361 } while (timeout-- && !(tx_dp->sca.stctrl & CDMAC_BD_STCTRL_COMPLETED));
362
363 if (!timeout) {
364 printf("%s: Timeout\n", __func__);
365 return -1;
366 }
367
368 return 0;
369}