// SPDX-License-Identifier: GPL-2.0+
/*
 * QE UEC ethernet controller driver
 *
 * based on drivers/qe/uec.c from NXP
 *
 * Copyright (C) 2020 Heiko Schocher <hs@denx.de>
 */

#include <common.h>
#include <dm.h>
#include <errno.h>
#include <memalign.h>
#include <miiphy.h>
#include <asm/io.h>

#include "dm_qe_uec.h"

#define QE_UEC_DRIVER_NAME "ucc_geth"

/* Default UTBIPAR SMI address */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif

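/*
 * MACCFG1 holds the Tx and Rx enable bits. Both the enable and the
 * disable path below use a read-modify-write so that the state of the
 * other direction is preserved when only COMM_DIR_TX or COMM_DIR_RX
 * is requested.
 */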
static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	uec_regs = uec->uec_regs;
	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	uec_regs = uec->uec_regs;
	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

static int uec_restart_tx(struct uec_priv *uec)
{
	struct uec_inf *ui = uec->uec_info;
	u32 cecr_subblock;

	cecr_subblock = ucc_fast_get_qe_cr_subblock(ui->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

	return 0;
}

static int uec_restart_rx(struct uec_priv *uec)
{
	struct uec_inf *ui = uec->uec_info;
	u32 cecr_subblock;

	cecr_subblock = ucc_fast_get_qe_cr_subblock(ui->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

	return 0;
}

static int uec_open(struct uec_priv *uec, comm_dir_e mode)
{
	struct ucc_fast_priv *uccf;

	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* Enable MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
		uec_restart_tx(uec);

	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
		uec_restart_rx(uec);

	return 0;
}

static int uec_set_mac_if_mode(struct uec_priv *uec)
{
	struct uec_inf *uec_info = uec->uec_info;
	phy_interface_t enet_if_mode;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	uec_regs = uec->uec_regs;
	enet_if_mode = uec_info->enet_interface_type;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

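	/*
	 * MACCFG2 selects nibble (MII-style) vs. byte (GMII-style) mode
	 * based on the link speed; UPSMR then selects the serial interface
	 * variant (the RPM/R10M/RMM/TBIM/SGMM bits) from the PHY
	 * connection type.
	 */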
	switch (uec_info->speed) {
	case SPEED_10:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= (UPSMR_RPM | UPSMR_R10M);
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= (UPSMR_R10M | UPSMR_RMM);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_100:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= UPSMR_RMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_1000:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_GMII:
			break;
		case PHY_INTERFACE_MODE_TBI:
			upsmr |= UPSMR_TBIM;
			break;
		case PHY_INTERFACE_MODE_RTBI:
			upsmr |= (UPSMR_RPM | UPSMR_TBIM);
			break;
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			upsmr |= UPSMR_SGMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

static int qe_uec_start(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct phy_device *phydev = priv->phydev;
	struct uec_inf *uec_info = uec->uec_info;
	int err;

	if (!phydev)
		return -ENODEV;

	/* Setup MAC interface mode */
	genphy_update_link(phydev);
	genphy_parse_link(phydev);
	uec_info->speed = phydev->speed;
	uec_set_mac_if_mode(uec);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -EINVAL;
	}

	return (phydev->link ? 0 : -EINVAL);
}

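/*
 * Transmit path: TxBDs live in a ring. Software sets TX_BD_READY and the
 * QE clears it once the frame has gone out, so both loops below poll
 * that bit with a bounded iteration count instead of a timer.
 */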
static int qe_uec_send(struct udevice *dev, void *packet, int length)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct ucc_fast_priv *uccf = uec->uccf;
	struct buffer_descriptor *bd;
	u16 status;
	int i;
	int result = 0;

	bd = uec->tx_bd;

	/* Find an empty TxBD */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init TxBD */
	BD_DATA_SET(bd, packet);
	BD_LENGTH_SET(bd, length);
	status = BD_STATUS(bd);
	status &= BD_WRAP;
	status |= (TX_BD_READY | TX_BD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for buffer to be transmitted */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->tx_bd = bd;
	result = 1;

	return result;
}

/*
 * Receive frame:
 * - wait for the next BD's ready bit to be set
 * - clean up the descriptor
 * - move on and indicate to HW that the cleaned BD is available for Rx
 */
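/*
 * A fresh buffer is memalign()ed for every received frame and handed
 * back through *packetp; qe_uec_free_pkt() releases it once the network
 * stack has consumed the packet.
 */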
static int qe_uec_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct buffer_descriptor *bd;
	u16 status;
	u16 len = 0;
	u8 *data;

	*packetp = memalign(ARCH_DMA_MINALIGN, MAX_RXBUF_LEN);
	if (!*packetp) {
		printf("%s: error allocating packetp\n", __func__);
		return -ENOMEM;
	}

	bd = uec->rx_bd;
	status = BD_STATUS(bd);

	while (!(status & RX_BD_EMPTY)) {
		if (!(status & RX_BD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			memcpy(*packetp, (char *)data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RX_BD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = BD_STATUS(bd);
	}
	uec->rx_bd = bd;

	return len;
}

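/*
 * Graceful stop lets the QE finish any frame in flight before halting.
 * For Tx, completion is signalled by the UCCE_GRA event; for Rx, the
 * microcode acknowledges through the rxgstpack field in the Rx global
 * parameter RAM.
 */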
static int uec_graceful_stop_tx(struct uec_priv *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	uf_regs = uec->uccf->uf_regs;

	/* Clear the grace stop event */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

static int uec_graceful_stop_rx(struct uec_priv *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec->p_rx_glbl_pram) {
		printf("%s: Rx global parameter RAM not initialized\n",
		       __func__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing cmd and checking ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
			ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

	return 0;
}

static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
{
	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
		uec_graceful_stop_tx(uec);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
		uec_graceful_stop_rx(uec);

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

static void qe_uec_stop(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;

	uec_stop(uec, COMM_DIR_RX_AND_TX);
}

static int qe_uec_set_hwaddr(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct uec_priv *uec = priv->uec;
	uec_t *uec_regs = uec->uec_regs;
	uchar *mac = pdata->enetaddr;
	u32 mac_addr1;
	u32 mac_addr2;

	/*
	 * The station address is stored byte-swapped: for a MAC address
	 * of 0x12345678ABCD, write 0xCDAB7856 to MACSTNADDR1 and
	 * 0x34120000 to MACSTNADDR2.
	 */
	mac_addr1 = (mac[5] << 24) | (mac[4] << 16) |
		    (mac[3] << 8) | (mac[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac[1] << 24) | (mac[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

	return 0;
}

static int qe_uec_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	if (packet)
		free(packet);

	return 0;
}

static const struct eth_ops qe_uec_eth_ops = {
	.start = qe_uec_start,
	.send = qe_uec_send,
	.recv = qe_uec_recv,
	.free_pkt = qe_uec_free_pkt,
	.stop = qe_uec_stop,
	.write_hwaddr = qe_uec_set_hwaddr,
};

static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
				   int *threads_num_ret)
{
	int num_threads_numerical;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerical = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerical = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerical = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerical = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerical = 8;
		break;
	default:
		printf("%s: Bad number of threads value.\n", __func__);
		return -EINVAL;
	}

	*threads_num_ret = num_threads_numerical;

	return 0;
}

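/*
 * The global parameter RAM pages, send queue descriptors and per-thread
 * data below all live in QE MURAM: qe_muram_alloc() returns an offset
 * that is programmed into the parameter RAM pointers, and
 * qe_muram_addr() maps it to a CPU-visible address.
 */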
static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
{
	struct uec_inf *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Alloc global Tx parameter RAM page */
	uec->tx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_tx_global_pram),
			       UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
		qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset =
		qe_muram_alloc(sizeof(struct uec_send_queue_qd),
			       UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
		qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Set up the send queue table with the TxBD ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
		 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
		 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
		 end_bd);

	/* Scheduler Base Pointer: we have only one Tx queue, not needed */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer: TxRMON disabled, we don't need it */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);

	/* TQPTR */
	uec->thread_dat_tx_offset =
		qe_muram_alloc(num_threads_tx *
			       sizeof(struct uec_thread_data_tx) +
			       32 * (num_threads_tx == 1),
			       UEC_THREAD_DATA_ALIGNMENT);
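	/*
	 * Note: the "+ 32 * (num_threads_tx == 1)" term reserves 32 extra
	 * bytes of MURAM in the single-thread case.
	 */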

	uec->p_thread_data_tx = (struct uec_thread_data_tx *)
		qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	struct uec_82xx_add_filtering_pram *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_rx_global_pram),
			       UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
		qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));

	/* Init global Rx parameter RAM */
	/*
	 * REMODER: extended feature mode disable, VLAN disable,
	 * lossless flow control disable, receive firmware statistics
	 * disable, extended address parsing mode disable, one Rx queue,
	 * dynamic maximum/minimum frame length disable, IP checksum
	 * check disable, IP address alignment disable
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset =
		qe_muram_alloc(num_threads_rx *
			       sizeof(struct uec_thread_data_rx),
			       UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (struct uec_thread_data_rx *)
		qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset =
		qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
			       sizeof(struct uec_rx_pref_bds),
			       UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
		qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
	       sizeof(struct uec_rx_pref_bds));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++)
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (struct uec_82xx_add_filtering_pram *)
		uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
					int thread_tx, int thread_rx)
{
	struct uec_init_cmd_pram *p_init_enet_param;
	u32 init_enet_param_offset;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	int i;
	int snum;
	u32 off;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset =
		qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (struct uec_init_cmd_pram *)
		qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0,
	       sizeof(struct uec_init_cmd_pram));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					     << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					     << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
					     (u32)uec_info->risc_rx;

	/* Init Rx threads */
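	/*
	 * Each thread entry needs a QE serial number (snum); every entry
	 * except the first Rx one also gets its own thread parameter RAM
	 * in MURAM. An entry packs the snum, the MURAM offset and the
	 * RISC allocation.
	 */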
	for (i = 0; i < (thread_rx + 1); i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s: cannot get snum\n", __func__);
			return -ENOMEM;
		}

		if (i == 0) {
			off = 0;
		} else {
			off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
					     UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
				      (u32)uec_info->risc_tx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s: cannot get snum\n", __func__);
			return -ENOMEM;
		}

		off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
				     UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
		     init_enet_param_offset);

	return 0;
}

static int uec_startup(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	struct ucc_fast_priv *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	u32 length;
	u32 align;
	struct buffer_descriptor *bd;
	u8 *buf;
	int i;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Check if Rx BD ring len is illegal */
	if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
	    uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT) {
		printf("%s: Rx BD ring len must be multiple of 4, and > 8.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if Tx BD ring len is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if (MAX_RXBUF_LEN == 0 || (MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT)) {
		printf("%s: max rx buffer length must be multiple of 128.\n",
		       __func__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __func__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
				    &num_threads_tx))
		return -EINVAL;

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
				    &num_threads_rx))
		return -EINVAL;

	uf_regs = uccf->uf_regs;

	/* The UEC registers follow the UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

	/*
	 * Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL
	 * UEC. This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

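	/*
	 * The BD rings and the Rx buffers are allocated with plain malloc()
	 * plus <align> bytes of slack and then aligned by hand; the
	 * *_offset fields keep the original malloc() pointers.
	 */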
	/* Allocate Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		  UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
	    UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0)
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
					   & ~(align - 1));

	/* Zero all Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0)
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
					   & ~(align - 1));

	/* Zero all Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate Rx buffer */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0)
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
				       & ~(align - 1));

	/* Zero the Rx buffer */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init TxBD ring */
	bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
	uec->tx_bd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TX_BD_WRAP);

	/* Init RxBD ring */
	bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
	uec->rx_bd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RX_BD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s: issue init enet cmd failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

/* Convert a string to a QE clock source enum
 *
 * This function takes a string, typically from a property in the device
 * tree, and returns the corresponding "enum qe_clock" value.
 */
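/*
 * For example, "brg5" yields (QE_BRG1 - 1) + 5, i.e. QE_BRG5, and
 * "clk21" yields QE_CLK21; any unrecognized name falls back to
 * QE_CLK_DUMMY.
 */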
enum qe_clock qe_clock_source(const char *source)
{
	unsigned int i;

	if (strcasecmp(source, "none") == 0)
		return QE_CLK_NONE;

	if (strncasecmp(source, "brg", 3) == 0) {
		i = simple_strtoul(source + 3, NULL, 10);
		if (i >= 1 && i <= 16)
			return (QE_BRG1 - 1) + i;
		else
			return QE_CLK_DUMMY;
	}

	if (strncasecmp(source, "clk", 3) == 0) {
		i = simple_strtoul(source + 3, NULL, 10);
		if (i >= 1 && i <= 24)
			return (QE_CLK1 - 1) + i;
		else
			return QE_CLK_DUMMY;
	}

	return QE_CLK_DUMMY;
}

static void qe_uec_set_eth_type(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct uec_priv *uec = priv->uec;
	struct uec_inf *uec_info = uec->uec_info;
	struct ucc_fast_inf *uf_info = &uec_info->uf_info;

	switch (uec_info->enet_interface_type) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
	case PHY_INTERFACE_MODE_TBI:
	case PHY_INTERFACE_MODE_RTBI:
	case PHY_INTERFACE_MODE_SGMII:
		uf_info->eth_type = GIGA_ETH;
		break;
	default:
		uf_info->eth_type = FAST_ETH;
		break;
	}
}

static int qe_uec_set_uec_info(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct uec_priv *uec = priv->uec;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	const char *s;
	int ret;
	u32 val;

	uec_info = (struct uec_inf *)malloc(sizeof(struct uec_inf));
	if (!uec_info)
		return -ENOMEM;

	uf_info = &uec_info->uf_info;

	ret = dev_read_u32(dev, "cell-index", &val);
	if (ret) {
		ret = dev_read_u32(dev, "device-id", &val);
		if (ret) {
			pr_err("neither cell-index nor device-id found!\n");
			goto out;
		}
	}

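	/* cell-index/device-id count UCCs from 1; ucc_num is zero based */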
	uf_info->ucc_num = val - 1;
	if (uf_info->ucc_num < 0 || uf_info->ucc_num > 7) {
		ret = -ENODEV;
		goto out;
	}

	ret = dev_read_string_index(dev, "rx-clock-name", 0, &s);
	if (!ret) {
		uf_info->rx_clock = qe_clock_source(s);
		if (uf_info->rx_clock < QE_CLK_NONE ||
		    uf_info->rx_clock > QE_CLK24) {
			pr_err("invalid rx-clock-name property\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		ret = dev_read_u32(dev, "rx-clock", &val);
		if (ret) {
			/*
			 * If both rx-clock-name and rx-clock are missing,
			 * we want to tell people to use rx-clock-name.
			 */
			pr_err("missing rx-clock-name property\n");
			goto out;
		}
		if (val < QE_CLK_NONE || val > QE_CLK24) {
			pr_err("invalid rx-clock property\n");
			ret = -EINVAL;
			goto out;
		}
		uf_info->rx_clock = val;
	}

	ret = dev_read_string_index(dev, "tx-clock-name", 0, &s);
	if (!ret) {
		uf_info->tx_clock = qe_clock_source(s);
		if (uf_info->tx_clock < QE_CLK_NONE ||
		    uf_info->tx_clock > QE_CLK24) {
			pr_err("invalid tx-clock-name property\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		ret = dev_read_u32(dev, "tx-clock", &val);
		if (ret) {
			pr_err("missing tx-clock-name property\n");
			goto out;
		}
		if (val < QE_CLK_NONE || val > QE_CLK24) {
			pr_err("invalid tx-clock property\n");
			ret = -EINVAL;
			goto out;
		}
		uf_info->tx_clock = val;
	}

	uec_info->num_threads_tx = UEC_NUM_OF_THREADS_1;
	uec_info->num_threads_rx = UEC_NUM_OF_THREADS_1;
	uec_info->risc_tx = QE_RISC_ALLOCATION_RISC1_AND_RISC2;
	uec_info->risc_rx = QE_RISC_ALLOCATION_RISC1_AND_RISC2;
	uec_info->tx_bd_ring_len = 16;
	uec_info->rx_bd_ring_len = 16;
#if (MAX_QE_RISC == 4)
	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
#endif

	uec_info->enet_interface_type = pdata->phy_interface;

	uec->uec_info = uec_info;
	qe_uec_set_eth_type(dev);

	return 0;
out:
	free(uec_info);
	return ret;
}

static int qe_uec_probe(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct uec_priv *uec;
	int ret;

	/* Allocate the UEC private struct */
	uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
	if (!uec)
		return -ENOMEM;

	memset(uec, 0, sizeof(struct uec_priv));
	priv->uec = uec;
	uec->uec_regs = (uec_t *)pdata->iobase;

	/* setup uec info struct */
	ret = qe_uec_set_uec_info(dev);
	if (ret) {
		free(uec);
		return ret;
	}

	ret = uec_startup(dev);
	if (ret) {
		free(uec->uec_info);
		free(uec);
		return ret;
	}

	priv->phydev = dm_eth_phy_connect(dev);
	return 0;
}

/*
 * Remove the driver from an interface:
 * - free up allocated memory
 */
static int qe_uec_remove(struct udevice *dev)
{
	struct qe_uec_priv *priv = dev_get_priv(dev);

	free(priv->uec);
	return 0;
}

static int qe_uec_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)devfdt_get_addr(dev);

	pdata->phy_interface = -1;
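	/* Resolve the MAC-to-PHY connection type from the device tree */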
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev),
			       "phy-connection-type", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	return 0;
}

static const struct udevice_id qe_uec_ids[] = {
	{ .compatible = QE_UEC_DRIVER_NAME },
	{ }
};

U_BOOT_DRIVER(eth_qe_uec) = {
	.name = QE_UEC_DRIVER_NAME,
	.id = UCLASS_ETH,
	.of_match = qe_uec_ids,
	.ofdata_to_platdata = qe_uec_ofdata_to_platdata,
	.probe = qe_uec_probe,
	.remove = qe_uec_remove,
	.ops = &qe_uec_eth_ops,
	.priv_auto = sizeof(struct qe_uec_priv),
	.platdata_auto = sizeof(struct eth_pdata),
};