/*
 * (C) Copyright 2005-2006
 * Stefan Roese, DENX Software Engineering, sr@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#if 0
#define DEBUG		/* define for debug output */
#endif

#include <config.h>
#include <common.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/processor.h>
#include <asm/arch-ixp/ixp425.h>

#include <IxOsal.h>
#include <IxEthAcc.h>
#include <IxEthDB.h>
#include <IxNpeDl.h>
#include <IxQMgr.h>
#include <IxNpeMh.h>
#include <ix_ossl.h>
#include <IxFeatureCtrl.h>

#include <npe.h>

static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;
static int npe_exists[NPE_NUM_PORTS];
static int npe_used[NPE_NUM_PORTS];

/* A little extra so we can align to cacheline. */
static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CONFIG_SYS_CACHELINE_SIZE - 1];
static u8 *npe_alloc_end;
static u8 *npe_alloc_free;

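/*
 * Trivial bump allocator: carve cacheline-aligned chunks out of the
 * static npe_alloc_pool. Memory is never freed; when the pool is
 * exhausted, NULL is returned.
 */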
static void *npe_alloc(int size)
{
	static int count = 0;
	void *p = NULL;

	size = (size + (CONFIG_SYS_CACHELINE_SIZE-1)) & ~(CONFIG_SYS_CACHELINE_SIZE-1);
	count++;

	if ((npe_alloc_free + size) < npe_alloc_end) {
		p = npe_alloc_free;
		npe_alloc_free += size;
	} else {
		printf("npe_alloc: failed (count=%d, size=%d)!\n", count, size);
	}
	return p;
}

/* Not interrupt safe! */
static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new)
{
	IX_OSAL_MBUF *m = *q;

	IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL;

	if (m) {
		while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m))
			m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
		IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new;
	} else
		*q = new;
}

/* Not interrupt safe! */
static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q)
{
	IX_OSAL_MBUF *m = *q;
	if (m)
		*q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
	return m;
}

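/* Reset all TX descriptors: clear each mbuf, attach its packet buffer and queue it on txQHead. */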
static void reset_tx_mbufs(struct npe* p_npe)
{
	IX_OSAL_MBUF *m;
	int i;

	p_npe->txQHead = NULL;

	for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) {
		m = &p_npe->tx_mbufs[i];

		memset(m, 0, sizeof(*m));

		IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE];
		IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
		mbuf_enqueue(&p_npe->txQHead, m);
	}
}

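/* Reset all RX descriptors and hand them back to the NPE via ixEthAccPortRxFreeReplenish(). */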
static void reset_rx_mbufs(struct npe* p_npe)
{
	IX_OSAL_MBUF *m;
	int i;

	p_npe->rxQHead = NULL;

	HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE *
			      CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);

	for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) {
		m = &p_npe->rx_mbufs[i];

		memset(m, 0, sizeof(*m));

		IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE];
		IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;

		if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
			printf("ixEthAccPortRxFreeReplenish failed for port %d\n", p_npe->eth_id);
			break;
		}
	}
}

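/* Allocate RX packet buffers and mbuf descriptors from the NPE pool, then replenish the port. */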
static void init_rx_mbufs(struct npe* p_npe)
{
	p_npe->rxQHead = NULL;

	p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE *
				   CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
	if (p_npe->rx_pkts == NULL) {
		printf("alloc of packets failed.\n");
		return;
	}

	p_npe->rx_mbufs = (IX_OSAL_MBUF *)
		npe_alloc(sizeof(IX_OSAL_MBUF) *
			  CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
	if (p_npe->rx_mbufs == NULL) {
		printf("alloc of mbufs failed.\n");
		return;
	}

	reset_rx_mbufs(p_npe);
}

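/* Allocate TX packet buffers and mbuf descriptors from the NPE pool and queue them for transmit. */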
static void init_tx_mbufs(struct npe* p_npe)
{
	p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE *
				   CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
	if (p_npe->tx_pkts == NULL) {
		printf("alloc of packets failed.\n");
		return;
	}

	p_npe->tx_mbufs = (IX_OSAL_MBUF *)
		npe_alloc(sizeof(IX_OSAL_MBUF) *
			  CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
	if (p_npe->tx_mbufs == NULL) {
		printf("alloc of mbufs failed.\n");
		return;
	}

	reset_tx_mbufs(p_npe);
}

/* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */
static int __eth_to_npe(int eth_id)
{
	switch(eth_id) {
	case IX_ETH_PORT_1:
		return IX_NPEMH_NPEID_NPEB;

	case IX_ETH_PORT_2:
		return IX_NPEMH_NPEID_NPEC;

	case IX_ETH_PORT_3:
		return IX_NPEMH_NPEID_NPEA;
	}
	return 0;
}

/* Poll the CSR machinery. */
static void npe_poll(int eth_id)
{
	if (qDispatcherFunc != NULL) {
		ixNpeMhMessagesReceive(__eth_to_npe(eth_id));
		(*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP);
	}
}

/* ethAcc RX callback */
static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	struct npe* p_npe = (struct npe *)cbTag;

	if (IX_OSAL_MBUF_MLEN(m) > 0) {
		mbuf_enqueue(&p_npe->rxQHead, m);

		if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
			debug("Rx overflow: rx_write=%d rx_read=%d\n",
			      p_npe->rx_write, p_npe->rx_read);
		} else {
			debug("Received message #%d (len=%d)\n", p_npe->rx_write,
			      IX_OSAL_MBUF_MLEN(m));
			memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
			       IX_OSAL_MBUF_MLEN(m));
			p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
			p_npe->rx_write++;
			if (p_npe->rx_write == PKTBUFSRX)
				p_npe->rx_write = 0;

#ifdef CONFIG_PRINT_RX_FRAMES
			{
				u8 *ptr = IX_OSAL_MBUF_MDATA(m);
				int i;

				for (i=0; i<60; i++) {
					debug("%02x ", *ptr++);
				}
				debug("\n");
			}
#endif
		}

		m = mbuf_dequeue(&p_npe->rxQHead);
	} else {
		debug("Received frame with length 0!!!\n");
		m = mbuf_dequeue(&p_npe->rxQHead);
	}

	/* Now return mbuf to NPE */
	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
		debug("npe_rx_callback: Error returning mbuf.\n");
	}
}

/* ethAcc TX callback */
static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	struct npe* p_npe = (struct npe *)cbTag;

	debug("%s\n", __FUNCTION__);

	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	mbuf_enqueue(&p_npe->txQHead, m);
}


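/* Program the port's unicast MAC address from dev->enetaddr. */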
static int npe_set_mac_address(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	IxEthAccMacAddr npeMac;

	debug("%s\n", __FUNCTION__);

	/* Set MAC address */
	memcpy(npeMac.macAddress, dev->enetaddr, 6);

	if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) {
		printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n",
		       npeMac.macAddress[0], npeMac.macAddress[1],
		       npeMac.macAddress[2], npeMac.macAddress[3],
		       npeMac.macAddress[4], npeMac.macAddress[5]);
		return 0;
	}

	return 1;
}

/* Boot-time CSR library initialization. */
static int npe_csr_load(void)
{
	int i;

	if (ixQMgrInit() != IX_SUCCESS) {
		debug("Error initialising queue manager!\n");
		return 0;
	}

	ixQMgrDispatcherLoopGet(&qDispatcherFunc);

	if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
		printf("Error initialising NPE Message handler!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-B!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-C!\n");
		return 0;
	}

	/* don't need this for U-Boot */
	ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, false);

	if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
		printf("Error initialising Ethernet access driver!\n");
		return 0;
	}

	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (!npe_used[i] || !npe_exists[i])
			continue;
		if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error initialising Ethernet port%d!\n", i);
		}
		if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
			printf("Error setting scheduling discipline for port %d.\n", i);
		}
		if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error disabling RX FCS for port %d.\n", i);
		}
		if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error enabling TX FCS for port %d.\n", i);
		}
	}

	return 1;
}

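/*
 * eth_device init hook: determine link speed/duplex (fixed-link or via
 * the PHY), set up the mbuf pools, register RX/TX-done callbacks,
 * program the MAC address and enable the port.
 */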
static int npe_init(struct eth_device *dev, bd_t * bis)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;
	u16 reg_short;
	int speed;
	int duplex;

	debug("%s: 1\n", __FUNCTION__);

#ifdef CONFIG_MII_NPE0_FIXEDLINK
	if (0 == p_npe->eth_id) {
		speed = CONFIG_MII_NPE0_SPEED;
		duplex = CONFIG_MII_NPE0_FULLDUPLEX ? FULL : HALF;
	} else
#endif
#ifdef CONFIG_MII_NPE1_FIXEDLINK
	if (1 == p_npe->eth_id) {
		speed = CONFIG_MII_NPE1_SPEED;
		duplex = CONFIG_MII_NPE1_FULLDUPLEX ? FULL : HALF;
	} else
#endif
	{
		miiphy_read(dev->name, p_npe->phy_no, MII_BMSR, &reg_short);

		/*
		 * Wait if PHY is capable of autonegotiation and
		 * autonegotiation is not complete
		 */
		if ((reg_short & BMSR_ANEGCAPABLE) &&
		    !(reg_short & BMSR_ANEGCOMPLETE)) {
			puts("Waiting for PHY auto negotiation to complete");
			i = 0;
			while (!(reg_short & BMSR_ANEGCOMPLETE)) {
				/*
				 * Timeout reached ?
				 */
				if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
					puts(" TIMEOUT !\n");
					break;
				}

				if ((i++ % 1000) == 0) {
					putc('.');
					miiphy_read(dev->name, p_npe->phy_no,
						    MII_BMSR, &reg_short);
				}
				udelay(1000);	/* 1 ms */
			}
			puts(" done\n");
			/* another 500 ms (results in faster booting) */
			udelay(500000);
		}
		speed = miiphy_speed(dev->name, p_npe->phy_no);
		duplex = miiphy_duplex(dev->name, p_npe->phy_no);
	}

	if (p_npe->print_speed) {
		p_npe->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
	npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
				 CONFIG_SYS_CACHELINE_SIZE - 1) & ~(CONFIG_SYS_CACHELINE_SIZE - 1));

	/* initialize mbuf pool */
	init_rx_mbufs(p_npe);
	init_tx_mbufs(p_npe);

	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register RX callback!\n");
		return -1;
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register TX callback!\n");
		return -1;
	}

	npe_set_mac_address(dev);

	if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		printf("can't enable port!\n");
		return -1;
	}

	p_npe->active = 1;

	return 0;
}

#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
/* Uninitialize CSR library. */
static void npe_csr_unload(void)
{
	ixEthAccUnload();
	ixEthDBUnload();
	ixNpeMhUnload();
	ixQMgrUnload();
}

/* callback which is used by ethAcc to recover RX buffers when stopping */
static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	debug("%s\n", __FUNCTION__);
}

/* callback which is used by ethAcc to recover TX buffers when stopping */
static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	debug("%s\n", __FUNCTION__);
}
#endif

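/* eth_device halt hook: poll for a while so outstanding mbufs are recovered, then mark the port inactive. */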
static void npe_halt(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;

	debug("%s\n", __FUNCTION__);

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(100);
	}

#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering rx callback!\n");
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering tx callback!\n");
	}

	if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		debug("npe_stop: Error disabling NPEB!\n");
	}

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(10000);
	}

	/*
	 * For U-Boot only, we are probably launching Linux or other OS that
	 * needs a clean slate for its NPE library.
	 */
#if 0 /* test-only */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (npe_used[i] && npe_exists[i])
			if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
				printf("Failed to stop and reset NPE B.\n");
	}
#endif

#endif
	p_npe->active = 0;
}


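/* eth_device send hook: copy the frame into a free TX mbuf and submit it to the NPE for transmission. */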
static int npe_send(struct eth_device *dev, void *packet, int len)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	u8 *dest;
	int err;
	IX_OSAL_MBUF *m;

	debug("%s\n", __FUNCTION__);
	m = mbuf_dequeue(&p_npe->txQHead);
	dest = IX_OSAL_MBUF_MDATA(m);
	IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len;
	IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL;

	memcpy(dest, (char *)packet, len);

	if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY))
	    != IX_ETH_ACC_SUCCESS) {
		printf("npe_send: Can't submit frame. err[%d]\n", err);
		mbuf_enqueue(&p_npe->txQHead, m);
		return 0;
	}

#ifdef DEBUG_PRINT_TX_FRAMES
	{
		u8 *ptr = IX_OSAL_MBUF_MDATA(m);
		int i;

		for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) {
			printf("%02x ", *ptr++);
		}
		printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m));
	}
#endif

	npe_poll(p_npe->eth_id);

	return len;
}

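/* eth_device recv hook: poll the NPE and hand any queued frames to the network stack via NetReceive(). */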
static int npe_rx(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;

	debug("%s\n", __FUNCTION__);
	npe_poll(p_npe->eth_id);

	debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read);
	while (p_npe->rx_write != p_npe->rx_read) {
		debug("Reading message #%d\n", p_npe->rx_read);
		NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]);
		p_npe->rx_read++;
		if (p_npe->rx_read == PKTBUFSRX)
			p_npe->rx_read = 0;
	}

	return 0;
}

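/*
 * Probe the NPE Ethernet ports: allocate an eth_device for each port
 * that has a MAC address set in the environment, detect which Ethernet
 * coprocessors exist on this silicon, load the CSR library once and
 * register the devices with the U-Boot network stack.
 */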
int npe_initialize(bd_t * bis)
{
	static int virgin = 0;
	struct eth_device *dev;
	int eth_num = 0;
	struct npe *p_npe = NULL;
	uchar enetaddr[6];

	for (eth_num = 0; eth_num < CONFIG_SYS_NPE_NUMS; eth_num++) {

		/* See if we can actually bring up the interface, otherwise, skip it */
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1) {
			if (!eth_getenv_enetaddr("eth1addr", enetaddr))
				continue;
		} else
#endif
			if (!eth_getenv_enetaddr("ethaddr", enetaddr))
				continue;

		/* Allocate device structure */
		dev = (struct eth_device *)malloc(sizeof(*dev));
		if (dev == NULL) {
			printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
			return -1;
		}
		memset(dev, 0, sizeof(*dev));

		/* Allocate our private use data */
		p_npe = (struct npe *)malloc(sizeof(struct npe));
		if (p_npe == NULL) {
			printf("%s: Cannot allocate private hw data for eth_device %d",
			       __FUNCTION__, eth_num);
			free(dev);
			return -1;
		}
		memset(p_npe, 0, sizeof(struct npe));

		p_npe->eth_id = eth_num;
		memcpy(dev->enetaddr, enetaddr, 6);
#ifdef CONFIG_HAS_ETH1
		if (eth_num == 1)
			p_npe->phy_no = CONFIG_PHY1_ADDR;
		else
#endif
			p_npe->phy_no = CONFIG_PHY_ADDR;

		sprintf(dev->name, "NPE%d", eth_num);
		dev->priv = (void *)p_npe;
		dev->init = npe_init;
		dev->halt = npe_halt;
		dev->send = npe_send;
		dev->recv = npe_rx;

		p_npe->print_speed = 1;

		if (0 == virgin) {
			virgin = 1;

			if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
				switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
				case IX_FEATURE_CTRL_SILICON_TYPE_B0:
				default: /* newer than B0 */
					/*
					 * If it is B0 or newer Silicon, we
					 * only enable port when its
					 * corresponding Eth Coprocessor is
					 * available.
					 */
					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_1] = true;

					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_2] = true;
					break;
				case IX_FEATURE_CTRL_SILICON_TYPE_A0:
					/*
					 * If it is A0 Silicon, we enable both as both Eth Coprocessors
					 * are available.
					 */
					npe_exists[IX_ETH_PORT_1] = true;
					npe_exists[IX_ETH_PORT_2] = true;
					break;
				}
			} else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_1] = true;

				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_2] = true;
			}

			npe_used[IX_ETH_PORT_1] = 1;
			npe_used[IX_ETH_PORT_2] = 1;

			npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
			npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
						 CONFIG_SYS_CACHELINE_SIZE - 1)
						& ~(CONFIG_SYS_CACHELINE_SIZE - 1));

			if (!npe_csr_load())
				return 0;
		}

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
#endif

	} /* end for each supported device */

	return 1;
}