/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 * MA 02110-1301 USA
 */

#include <common.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <asm/errno.h>
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/arch/kirkwood.h>
#include "kirkwood_egiga.h"

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Reads the 16bit phy register value into *data.
 * Returns 0 on success, -EFAULT on invalid parameters or SMI timeout.
 */
static int smi_reg_read(char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
	struct kwgbe_registers *regs = dkwgbe->regs;
	u32 smi_reg;
	volatile u32 timeout;

	/* Phyadr read request */
	if (phy_adr == 0xEE && reg_ofs == 0xEE) {
		*data = (u16) (KWGBEREG_RD(regs->phyadr) & PHYADR_MASK);
		return 0;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__FUNCTION__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__FUNCTION__, reg_ofs);
		return -EFAULT;
	}

	timeout = KWGBE_PHY_SMI_TIMEOUT;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = KWGBEREG_RD(regs->smi);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
			return -EFAULT;
		}
	} while (smi_reg & KWGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << KWGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << KWGBE_SMI_REG_ADDR_OFFS)
		| KWGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	KWGBEREG_WR(regs->smi, smi_reg);

	/* wait till read value is ready */
	timeout = KWGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = KWGBEREG_RD(regs->smi);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__FUNCTION__);
			return -EFAULT;
		}
	} while (!(smi_reg & KWGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < KWGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	*data = (u16) (KWGBEREG_RD(regs->smi) & KWGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __FUNCTION__, phy_adr,
		reg_ofs, *data);

	return 0;
}
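
/*
 * Illustrative use of the SMI callbacks above (a sketch, not part of the
 * driver): passing 0xEE as both the PHY address and the register offset is
 * a driver-private convention meaning "access the port's PHY address
 * register" rather than performing an SMI transaction; kwgbe_init() below
 * relies on it to discover the PHY address before checking the link.
 * Assuming the port was registered as "egiga0":
 *
 *	u16 phyadr, status;
 *	miiphy_read("egiga0", 0xEE, 0xEE, &phyadr);
 *	miiphy_read("egiga0", phyadr, 1, &status);
 *
 * where register 1 is the standard PHY status register.
 */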

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EINVAL on bad parameters
 * or -ETIME on SMI busy timeout.
 */
static int smi_reg_write(char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
	struct kwgbe_registers *regs = dkwgbe->regs;
	u32 smi_reg;
	volatile u32 timeout;

	/* Phyadr write request */
	if (phy_adr == 0xEE && reg_ofs == 0xEE) {
		KWGBEREG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __FUNCTION__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __FUNCTION__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	timeout = KWGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = KWGBEREG_RD(regs->smi);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
			return -ETIME;
		}
	} while (smi_reg & KWGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << KWGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << KWGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << KWGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~KWGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	KWGBEREG_WR(regs->smi, smi_reg);

	return 0;
}

/* Stop all active queues and wait until they are idle */
static void stop_queue(u32 * qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct kwgbe_registers *regs,
				struct kwgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = KWGBEREG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	KWGBEREG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	KWGBEREG_WR(regs->barsz[param->win].size,
		    (((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	KWGBEREG_WR(regs->barsz[param->win].bar,
		    (param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		KWGBEREG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		KWGBEREG_BITS_RESET(regs->bare, (1 << param->win));
	else
		KWGBEREG_BITS_SET(regs->bare, (1 << param->win));
}
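
/*
 * Worked example of the window encoding above (arithmetic taken from the
 * code): a 256 MiB DRAM bank (param->size = 0x10000000) is programmed as
 * (0x10000000 / 0x10000 - 1) << 16 = 0x0fff0000, i.e. the window length in
 * 64 KiB units minus one placed in bits [31:16] of the size register, while
 * the bar register carries target | attrib | base_addr for that window.
 */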

static void set_dram_access(struct kwgbe_registers *regs)
{
	struct kwgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = KWGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base */
		win_param.base_addr = kw_sdram_bar(i);
		win_param.size = kw_sdram_bs(i);	/* Get bank size */
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Select the DRAM chip-select attribute for this bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct kwgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		KWGBEREG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		KWGBEREG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		KWGBEREG_WR(regs->dfomt[table_index], 0);
	}
}
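
/*
 * Note on table geometry: the unicast table packs four 8-bit entries per
 * 32-bit register (see port_uc_addr() below), so its 4 registers cover the
 * 16 possible values of a MAC address's last nibble; the two multicast
 * tables span 64 registers each and are assumed to use the same
 * four-entries-per-register packing.
 */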

/*
 * port_uc_addr - Set the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to the function
 * parameters.
 * It adds/removes MAC addresses from the port unicast address table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option	ACCEPT_MAC_ADDR to add the address, REJECT_MAC_ADDR to
 *		remove it.
 *
 * RETURN: 1 on success, 0 if the option parameter is invalid.
 */
static int port_uc_addr(struct kwgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = KWGBEREG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		KWGBEREG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = KWGBEREG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		KWGBEREG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}
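
/*
 * Worked example of the lookup above: for a MAC address ending in 0x5A the
 * last nibble is 0xA, so tbl_offset = 0xA / 4 = 2 and reg_offset = 0xA % 4
 * = 2, i.e. the entry is byte 2 of dfut[2]; accepting it writes
 * (0x01 | (RXUQ << 1)) into that byte - the accept bit plus the receive
 * queue the frame should be delivered to.
 */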

/*
 * port_uc_addr_set - Set the port Unicast address.
 */
static void port_uc_addr_set(struct kwgbe_registers *regs, u8 * p_addr)
{
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	KWGBEREG_WR(regs->macal, mac_l);
	KWGBEREG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}
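
/*
 * Packing example (arithmetic only): for the address 00:50:43:01:02:03 the
 * code above programs macah = 0x00504301 (first four bytes) and
 * macal = 0x0203 (last two bytes), then whitelists the final nibble (0x3)
 * in the unicast filter table.
 */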

/*
 * kwgbe_init_rx_desc_ring - Create the Rx descriptor ring and buffers.
 */
static void kwgbe_init_rx_desc_ring(struct kwgbe_device *dkwgbe)
{
	volatile struct kwgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dkwgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			KWGBE_BUFFER_OWNED_BY_DMA | KWGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dkwgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1))
			p_rx_desc->nxtdesc_p = dkwgbe->p_rxdesc;
		else {
			p_rx_desc->nxtdesc_p = (struct kwgbe_rxdesc *)
				((u32) p_rx_desc + KW_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dkwgbe->p_rxdesc_curr = dkwgbe->p_rxdesc;
}
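
/*
 * Resulting layout (sketch): RINGSZ descriptors, each initially DMA-owned
 * and pointing at its own PKTSIZE_ALIGN slice of p_rxbuf, chained into a
 * ring:
 *
 *	desc[0] -> desc[1] -> ... -> desc[RINGSZ - 1]
 *	   ^                                |
 *	   +--------------------------------+
 *
 * p_rxdesc_curr tracks the descriptor the software expects the DMA engine
 * to hand back next in kwgbe_recv().
 */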

static int kwgbe_init(struct eth_device *dev)
{
	struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
	struct kwgbe_registers *regs = dkwgbe->regs;

	/* setup RX rings */
	kwgbe_init_rx_desc_ring(dkwgbe);

	/* Clear the ethernet port interrupts */
	KWGBEREG_WR(regs->ic, 0);
	KWGBEREG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	KWGBEREG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	KWGBEREG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dkwgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	KWGBEREG_WR(regs->pxc, PRT_CFG_VAL);
	KWGBEREG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	KWGBEREG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);
	/* Enable the serial port */
	KWGBEREG_BITS_SET(regs->psc0, KWGBE_SERIAL_PORT_EN);

	/* Assign port SDMA configuration */
	KWGBEREG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	KWGBEREG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	KWGBEREG_WR(regs->tqx[0].tqxtbc, (QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	KWGBEREG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	KWGBEREG_WR(regs->psc0, KWGBE_MAX_RX_PACKET_9700BYTE
		    | (KWGBEREG_RD(regs->psc0) & MRU_MASK));

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	KWGBEREG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	KWGBEREG_WR(regs->rxcdp[RXUQ].rxcdp, (u32) dkwgbe->p_rxdesc_curr);
	/* Enable port Rx. */
	KWGBEREG_WR(regs->rqc, (1 << RXUQ));

#if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
	&& defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	u16 phyadr;
	miiphy_read(dev->name, 0xEE, 0xEE, &phyadr);
	if (!miiphy_link(dev->name, phyadr)) {
		printf("%s: No link on %s\n", __FUNCTION__, dev->name);
		return -1;
	}
#endif
	return 0;
}

static int kwgbe_halt(struct eth_device *dev)
{
	struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
	struct kwgbe_registers *regs = dkwgbe->regs;

	/* Disable all gigE address decode windows */
	KWGBEREG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable the serial port */
	KWGBEREG_BITS_RESET(regs->psc0, KWGBE_SERIAL_PORT_EN);
	/* Mark the port as not in reset */
	KWGBEREG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	KWGBEREG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	KWGBEREG_WR(regs->ic, 0);
	KWGBEREG_WR(regs->ice, 0);
	KWGBEREG_WR(regs->pim, 0);
	KWGBEREG_WR(regs->peim, 0);

	return 0;
}

static int kwgbe_send(struct eth_device *dev, volatile void *dataptr,
		      int datasize)
{
	struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
	struct kwgbe_registers *regs = dkwgbe->regs;
	struct kwgbe_txdesc *p_txdesc = dkwgbe->p_txdesc;

	if ((u32) dataptr & 0x07) {
		printf("Err..(%s) xmit dataptr not 64bit aligned\n",
			__FUNCTION__);
		return -1;
	}
	p_txdesc->cmd_sts = KWGBE_ZERO_PADDING | KWGBE_GEN_CRC;
	p_txdesc->cmd_sts |= KWGBE_TX_FIRST_DESC | KWGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= KWGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= KWGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) dataptr;
	p_txdesc->byte_cnt = datasize;

	/* Apply send command using the zeroth TXUQ */
	KWGBEREG_WR(regs->tcqdp[TXUQ], (u32) p_txdesc);
	KWGBEREG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	while (p_txdesc->cmd_sts & KWGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if (p_txdesc->cmd_sts & (KWGBE_UR_ERROR | KWGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __FUNCTION__);
			return -1;
		}
	}
	return 0;
}
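
/*
 * Note on the alignment check above: the transmit buffer pointer must be
 * 64-bit aligned, so e.g. a dataptr of 0x700004 is rejected
 * ((0x700004 & 0x07) != 0) while 0x700008 is accepted; U-Boot's network
 * stack is expected to hand over suitably aligned packet buffers, so the
 * check should only trip on caller bugs.
 */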

static int kwgbe_recv(struct eth_device *dev)
{
	volatile struct kwgbe_device *dkwgbe = to_dkwgbe(dev);
	volatile struct kwgbe_rxdesc *p_rxdesc_curr = dkwgbe->p_rxdesc_curr;
	volatile u32 timeout = 0;

	/* wait until an rx packet is available or the timeout is reached */
	do {
		if (timeout < KWGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __FUNCTION__);
			return -1;
		}
	} while (p_rxdesc_curr->cmd_sts & KWGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__FUNCTION__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If the packet was received without the first/last descriptor
	 * bits set, or the error summary bit is set, the packet needs
	 * to be dropped.
	 */
	if ((p_rxdesc_curr->cmd_sts &
		(KWGBE_RX_FIRST_DESC | KWGBE_RX_LAST_DESC))
		!= (KWGBE_RX_FIRST_DESC | KWGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __FUNCTION__);

	} else if (p_rxdesc_curr->cmd_sts & KWGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__FUNCTION__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (NetReceive)\n", __FUNCTION__);

		/* let the upper layer handle the packet */
		NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
			   (int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
	}
	/*
	 * free this descriptor and point to the next one in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		KWGBE_BUFFER_OWNED_BY_DMA | KWGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	dkwgbe->p_rxdesc_curr = p_rxdesc_curr->nxtdesc_p;
	return 0;
}

int kirkwood_egiga_initialize(bd_t * bis)
{
	struct kwgbe_device *dkwgbe;
	struct eth_device *dev;
	int devnum;
	char *s, buf[NAMESIZE * 2];
	u8 used_ports[MAX_KWGBE_DEVS] = CONFIG_KIRKWOOD_EGIGA_PORTS;

	for (devnum = 0; devnum < MAX_KWGBE_DEVS; devnum++) {
		/* skip if the port is configured not to be used */
		if (used_ports[devnum] == 0)
			continue;

		if (!(dkwgbe = malloc(sizeof(struct kwgbe_device))))
			goto error1;

		memset(dkwgbe, 0, sizeof(struct kwgbe_device));

		if (!(dkwgbe->p_rxdesc =
		      (struct kwgbe_rxdesc *)memalign(PKTALIGN,
						      KW_RXQ_DESC_ALIGNED_SIZE
						      * RINGSZ + 1)))
			goto error2;

		if (!(dkwgbe->p_rxbuf = (u8 *) memalign(PKTALIGN, RINGSZ
							* PKTSIZE_ALIGN + 1)))
			goto error3;

		if (!(dkwgbe->p_txdesc = (struct kwgbe_txdesc *)
		      memalign(PKTALIGN, sizeof(struct kwgbe_txdesc) + 1))) {
			free(dkwgbe->p_rxbuf);
error3:
			free(dkwgbe->p_rxdesc);
error2:
			free(dkwgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__FUNCTION__);
			return -1;
		}

		dev = &dkwgbe->dev;

		/* must be less than NAMESIZE (16) */
		sprintf(dev->name, "egiga%d", devnum);

		/* Extract the MAC address from the environment */
		switch (devnum) {
		case 0:
			dkwgbe->regs = (void *)KW_EGIGA0_BASE;
			s = "ethaddr";
			break;
		case 1:
			dkwgbe->regs = (void *)KW_EGIGA1_BASE;
			s = "eth1addr";
			break;
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__FUNCTION__, devnum);
			return -1;
		}

		while (!eth_getenv_enetaddr(s, dev->enetaddr)) {
			/* Generate a random MAC address if it is not set */
			sprintf(buf, "00:50:43:%02x:%02x:%02x",
				get_random_hex(), get_random_hex(),
				get_random_hex());
			setenv(s, buf);
		}

		dev->init = (void *)kwgbe_init;
		dev->halt = (void *)kwgbe_halt;
		dev->send = (void *)kwgbe_send;
		dev->recv = (void *)kwgbe_recv;

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, smi_reg_read, smi_reg_write);
		/* Set phy address of the port */
		miiphy_write(dev->name, 0xEE, 0xEE, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}
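
/*
 * Example of how a board enables this driver (a sketch; the exact call
 * site of the init function is board/SoC specific and assumed here): the
 * board config selects the ports to bring up, e.g.
 *
 *	#define CONFIG_KIRKWOOD_EGIGA_PORTS	{1, 0}
 *
 * to use only egiga0, and the board's ethernet init path then calls
 * kirkwood_egiga_initialize(bis) so that eth_register() above exposes the
 * ports to the generic U-Boot network code.
 */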