/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>

#define BITMASK(bits)		(BIT(bits) - 1)
#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f
#define NUM_DESCS		(PKTBUFSRX * 2)
#define PKT_MIN			60
#define PKT_MAX			(1500 + 14 + 4 + 4)
#define CLEAR_BIT		1
#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)
#define MIIEN			BIT(15)

/* DMA Registers */
#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)

/*
 * These timeouts are worst-case, purely defensive measures against
 * unexpected controller lockups; in practice they should never be hit.
 */
#define MDIO_TIMEOUT		100 /* msecs */
#define CPDMA_TIMEOUT		100 /* msecs */

struct cpsw_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)

	u32 alive;
	u32 link;
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];

	struct {
		u32 access;
		u32 physel;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
	} user[0];
};
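
/*
 * Note: user[0] is a zero-sized array overlaying the access/physel register
 * pairs that follow the fixed MDIO registers; this driver only ever touches
 * the first pair, &mdio_regs->user[0].
 */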

struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
};

struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
#ifdef CONFIG_AM33XX
	u32 gap_thresh;
#elif defined(CONFIG_TI814X)
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
#endif
	u32 sa_lo;
	u32 sa_hi;
};

struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;
	u32 cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};

#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

#define ALE_TYPE_FREE		0
#define ALE_TYPE_ADDR		1
#define ALE_TYPE_VLAN		2
#define ALE_TYPE_VLAN_ADDR	3

#define ALE_UCAST_PERSISTANT	0
#define ALE_UCAST_UNTOUCHED	1
#define ALE_UCAST_OUI		2
#define ALE_UCAST_TOUCHED	3

#define ALE_MCAST_FWD		0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN	2
#define ALE_MCAST_FWD_2		3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE	1
#define ALE_BLOCKED	2

struct cpsw_slave {
	struct cpsw_slave_regs *regs;
	struct cpsw_sliver_regs *sliver;
	int slave_num;
	u32 mac_control;
	struct cpsw_slave_data *data;
};

struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	u32 sw_buffer;
	u32 sw_len;
};

struct cpdma_chan {
	struct cpdma_desc *head, *tail;
	void *hdp, *cp, *rxfree;
};
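
/*
 * Per-channel register pointers: hdp is the head descriptor pointer the
 * hardware starts fetching from, cp is the completion pointer acknowledged
 * after each processed descriptor, and rxfree (RX only) is written with 1
 * for every buffer handed back to the hardware. The offsets differ between
 * controller versions, so they are resolved at init time in cpsw_init().
 */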

#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

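/*
 * for_active_slave() expands to a single-slave "loop": it points slave at
 * the entry selected by data.active_slave and runs the following statement
 * once. for_each_slave() iterates over every slave port.
 */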
#define for_active_slave(slave, priv) \
	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
			(priv)->data.slaves; slave++)

struct cpsw_priv {
	struct eth_device *dev;
	struct cpsw_platform_data data;
	int host_port;

	struct cpsw_regs *regs;
	void *dma_regs;
	struct cpsw_host_regs *host_port_regs;
	void *ale_regs;

	struct cpdma_desc *descs;
	struct cpdma_desc *desc_free;
	struct cpdma_chan rx_chan, tx_chan;

	struct cpsw_slave *slaves;
	struct phy_device *phydev;
	struct mii_dev *bus;

	u32 phy_mask;
};

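/*
 * ALE table entries are 68 bits wide and are transferred through the
 * ALE_TABLE registers as ALE_ENTRY_WORDS (three) 32-bit words. In the
 * ale_entry[] array used below, word 0 holds the most significant bits of
 * the entry, so the helpers flip the word index (2 - idx) to let callers
 * give field positions as plain bit offsets from bit 0 of the entry.
 */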
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}

DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}

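/*
 * Entry allocation strategy for the add helpers below: reuse an existing
 * entry for the same MAC address if there is one, otherwise take a free
 * slot, and as a last resort recycle an ageable (non-persistent, non-OUI
 * unicast) entry found by cpsw_ale_find_ageable().
 */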
static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}

static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	int data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) | \
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
}

static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;

	phy = priv->phydev;

	if (!phy)
		return;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int link = 0;
	struct cpsw_slave *slave;

	for_active_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);

	return link;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}

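/*
 * Queue a buffer on a CPDMA channel: fill in a descriptor, chain it to the
 * tail of the channel's list and, if the hardware had already reached end
 * of queue (EOQ) on the previous tail, restart it by rewriting the head
 * descriptor pointer. For RX the free-buffer count is bumped as well.
 */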
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}

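/*
 * Reap the descriptor at the head of a channel. Returns -ENOENT when the
 * queue is empty and -EBUSY while the hardware still owns the descriptor
 * (restarting the channel if it stopped with work pending); on success the
 * buffer and length are returned, the completion pointer is acknowledged
 * and the descriptor is recycled onto the free list.
 */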
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

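/*
 * Bring the switch out of reset and set it up for use: reset the ALE and
 * program the host and slave ports, install the unicast/broadcast ALE
 * entries, bring up the active slave, build the descriptor free list,
 * point the per-channel hdp/cp/rxfree registers at the version-specific
 * CPDMA offsets, and pre-submit the RX buffers.
 */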
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection on all ports */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
			   ALE_SECURE);
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

	for_active_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
					* i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
					* i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	priv->data.control(0);
}

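/*
 * Send path: the frame is flushed from the D-cache before it is handed to
 * the DMA, and previously completed TX descriptors are reaped first so a
 * descriptor is available. The receive path below invalidates the buffer
 * after DMA completion, passes it up the stack and immediately resubmits
 * it to the RX channel.
 */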
static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;

	while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
		invalidate_dcache_range((unsigned long)buffer,
					(unsigned long)buffer + PKTSIZE_ALIGN);
		net_process_received_packet(buffer, len);
		cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
	}

	return 0;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void *regs = priv->regs;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
{
	struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			     slave->data->phy_addr,
			     dev,
			     slave->data->phy_if);

	if (!phydev)
		return -1;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct cpsw_slave *slave;
	void *regs = (void *)data->cpsw_base;
	struct eth_device *dev;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->data = *data;
	priv->dev = dev;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves) {
		free(dev);
		free(priv);
		return -ENOMEM;
	}

	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;
	priv->descs = (void *)regs + data->bd_ram_ofs;

	int idx = 0;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	for_active_slave(slave, priv)
		cpsw_phy_init(dev, slave);

	return 1;
}