// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2006-2011 Freescale Semiconductor, Inc.
 *
 * Dave Liu <daveliu@freescale.com>
 */

#include <common.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <linux/immap_qe.h>
#include "uccf.h"
#include "uec.h"
#include "uec_phy.h"
#include "miiphy.h"
#include <fsl_qe.h>
#include <phy.h>

#if !defined(CONFIG_DM_ETH)
/* Default UTBIPAR SMI address */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif

static struct uec_inf uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),	/* UEC1 */
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),	/* UEC2 */
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),	/* UEC3 */
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),	/* UEC4 */
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),	/* UEC5 */
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),	/* UEC6 */
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),	/* UEC7 */
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),	/* UEC8 */
#endif
};

#define MAXCONTROLLERS	(8)

static struct eth_device *devlist[MAXCONTROLLERS];

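/*
 * Enable the MAC transmitter and/or receiver for the requested
 * direction(s) by setting the corresponding enable bits in MACCFG1.
 */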
static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;
	}

	return 0;
}

static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
{
	uec_t *uec_regs;
	u32 maccfg1;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;
	}

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;
	}

	return 0;
}

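/*
 * Gracefully stop the Tx path: clear the GRA event, issue the
 * QE_GRACEFUL_STOP_TX host command and busy-wait until the QE
 * acknowledges the stop in UCCE.
 */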
static int uec_graceful_stop_tx(struct uec_priv *uec)
{
	ucc_fast_t *uf_regs;
	u32 cecr_subblock;
	u32 ucce;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	uf_regs = uec->uccf->uf_regs;

	/* Clear the grace stop event */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

	return 0;
}

static int uec_graceful_stop_rx(struct uec_priv *uec)
{
	u32 cecr_subblock;
	u8 ack;

	if (!uec) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	if (!uec->p_rx_glbl_pram) {
		printf("%s: Rx global parameter RAM not initialized\n",
		       __func__);
		return -EINVAL;
	}

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing cmd and checking ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

	return 0;
}

static int uec_restart_tx(struct uec_priv *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

	return 0;
}

static int uec_restart_rx(struct uec_priv *uec)
{
	u32 cecr_subblock;

	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

	return 0;
}

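/*
 * Bring the controller up for the given direction(s): enable the MAC
 * and the UCC fast block, and restart the RISC Tx/Rx microcode if it
 * had previously been stopped gracefully.
 */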
static int uec_open(struct uec_priv *uec, comm_dir_e mode)
{
	struct ucc_fast_priv *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}
	uccf = uec->uccf;

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}

	/* Enable MAC */
	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
		uec_restart_tx(uec);
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
		uec_restart_rx(uec);

	return 0;
}

static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
{
	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);
		return -EINVAL;
	}

	/* check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);
		return -EINVAL;
	}
	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
		uec_graceful_stop_tx(uec);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
		uec_graceful_stop_rx(uec);

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

	return 0;
}

static int uec_set_mac_duplex(struct uec_priv *uec, int duplex)
{
	uec_t *uec_regs;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}
	uec_regs = uec->uec_regs;

	if (duplex == DUPLEX_HALF) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 &= ~MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	if (duplex == DUPLEX_FULL) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 |= MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);
	}

	return 0;
}

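/*
 * Program MACCFG2 and UPSMR for the given PHY interface type and speed
 * (MII/RMII/RGMII at 10/100 Mbps, GMII/TBI/RTBI/RGMII/SGMII at 1000 Mbps).
 */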
static int uec_set_mac_if_mode(struct uec_priv *uec,
			       phy_interface_t if_mode, int speed)
{
	phy_interface_t enet_if_mode;
	uec_t *uec_regs;
	u32 upsmr;
	u32 maccfg2;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);

	switch (speed) {
	case SPEED_10:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= (UPSMR_RPM | UPSMR_R10M);
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= (UPSMR_R10M | UPSMR_RMM);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_100:
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
			break;
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= UPSMR_RMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SPEED_1000:
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_GMII:
			break;
		case PHY_INTERFACE_MODE_TBI:
			upsmr |= UPSMR_TBIM;
			break;
		case PHY_INTERFACE_MODE_RTBI:
			upsmr |= (UPSMR_RPM | UPSMR_TBIM);
			break;
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= UPSMR_RPM;
			break;
		case PHY_INTERFACE_MODE_SGMII:
			upsmr |= UPSMR_SGMM;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

	return 0;
}

static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
{
	uint timeout = 0x1000;
	u32 miimcfg = 0;

	miimcfg = in_be32(&uec_mii_regs->miimcfg);
	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
	out_be32(&uec_mii_regs->miimcfg, miimcfg);

	/* Wait until the bus is free */
	while ((in_be32(&uec_mii_regs->miimcfg) & MIIMIND_BUSY) && timeout--)
		;
	if (timeout <= 0) {
		printf("%s: The MII Bus is stuck!", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

static int init_phy(struct eth_device *dev)
{
	struct uec_priv *uec;
	uec_mii_t *umii_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;
	int err;

	uec = (struct uec_priv *)dev->priv;
	umii_regs = uec->uec_mii_regs;

	uec->oldlink = 0;
	uec->oldspeed = 0;
	uec->oldduplex = -1;

	mii_info = malloc(sizeof(*mii_info));
	if (!mii_info) {
		printf("%s: Could not allocate mii_info", dev->name);
		return -ENOMEM;
	}
	memset(mii_info, 0, sizeof(*mii_info));

	if (uec->uec_info->uf_info.eth_type == GIGA_ETH)
		mii_info->speed = SPEED_1000;
	else
		mii_info->speed = SPEED_100;

	mii_info->duplex = DUPLEX_FULL;
	mii_info->pause = 0;
	mii_info->link = 1;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;
	mii_info->dev = dev;

	mii_info->mdio_read = &uec_read_phy_reg;
	mii_info->mdio_write = &uec_write_phy_reg;

	uec->mii_info = mii_info;

	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);

	if (init_mii_management_configuration(umii_regs)) {
		printf("%s: The MII Bus is stuck!", dev->name);
		err = -1;
		goto bus_fail;
	}

	/* get info for this PHY */
	curphy = uec_get_phy_info(uec->mii_info);
	if (!curphy) {
		printf("%s: No PHY found", dev->name);
		err = -1;
		goto no_phy;
	}

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	if (curphy->init) {
		err = curphy->init(uec->mii_info);
		if (err)
			goto phy_init_fail;
	}

	return 0;

phy_init_fail:
no_phy:
bus_fail:
	free(mii_info);
	return err;
}

static void adjust_link(struct eth_device *dev)
{
	struct uec_priv *uec = (struct uec_priv *)dev->priv;
	struct uec_mii_info *mii_info = uec->mii_info;

	if (mii_info->link) {
		/*
		 * Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (mii_info->duplex != uec->oldduplex) {
			if (!(mii_info->duplex)) {
				uec_set_mac_duplex(uec, DUPLEX_HALF);
				printf("%s: Half Duplex\n", dev->name);
			} else {
				uec_set_mac_duplex(uec, DUPLEX_FULL);
				printf("%s: Full Duplex\n", dev->name);
			}
			uec->oldduplex = mii_info->duplex;
		}

		if (mii_info->speed != uec->oldspeed) {
			phy_interface_t mode =
				uec->uec_info->enet_interface_type;
			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
				switch (mii_info->speed) {
				case SPEED_1000:
					break;
				case SPEED_100:
					printf("switching to rgmii 100\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					break;
				case SPEED_10:
					printf("switching to rgmii 10\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					break;
				default:
					printf("%s: Ack, Speed (%d) is illegal\n",
					       dev->name, mii_info->speed);
					break;
				}
			}

			/* change phy */
			change_phy_interface_mode(dev, mode, mii_info->speed);
			/* change the MAC interface mode */
			uec_set_mac_if_mode(uec, mode, mii_info->speed);

			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
			uec->oldspeed = mii_info->speed;
		}

		if (!uec->oldlink) {
			printf("%s: Link is up\n", dev->name);
			uec->oldlink = 1;
		}

	} else { /* if (mii_info->link) */
		if (uec->oldlink) {
			printf("%s: Link is down\n", dev->name);
			uec->oldlink = 0;
			uec->oldspeed = 0;
			uec->oldduplex = -1;
		}
	}
}

static void phy_change(struct eth_device *dev)
{
	struct uec_priv *uec = (struct uec_priv *)dev->priv;

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* QE9 and QE12 need to be set for enabling QE MII management signals */
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Update the link, speed, duplex */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	/*
	 * QE12 is muxed with LBCTL, it needs to be released for enabling
	 * LBCTL signal for LBC usage.
	 */
	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Adjust the interface according to speed */
	adjust_link(dev);
}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)

/*
 * Find a device index from the devlist by name
 *
 * Returns:
 *  The index where the device is located, -1 on error
 */
static int uec_miiphy_find_dev_by_name(const char *devname)
{
	int i;

	for (i = 0; i < MAXCONTROLLERS; i++) {
		if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0)
			break;
	}

	/* If device cannot be found, returns -1 */
	if (i == MAXCONTROLLERS) {
		debug("%s: device %s not found in devlist\n", __func__,
		      devname);
		i = -1;
	}

	return i;
}

/*
 * Read a MII PHY register.
 *
 * Returns:
 *  The register value on success, 0 on error
 */
static int uec_miiphy_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	unsigned short value = 0;
	int devindex = 0;

	if (!bus->name) {
		debug("%s: NULL pointer given\n", __func__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(bus->name);
		if (devindex >= 0)
			value = uec_read_phy_reg(devlist[devindex], addr, reg);
	}
	return value;
}

/*
 * Write a MII PHY register.
 *
 * Returns:
 *  0 on success
 */
static int uec_miiphy_write(struct mii_dev *bus, int addr, int devad, int reg,
			    u16 value)
{
	int devindex = 0;

	if (!bus->name) {
		debug("%s: NULL pointer given\n", __func__);
	} else {
		devindex = uec_miiphy_find_dev_by_name(bus->name);
		if (devindex >= 0)
			uec_write_phy_reg(devlist[devindex], addr, reg, value);
	}
	return 0;
}
#endif

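/*
 * Program the station MAC address into MACSTNADDR1/MACSTNADDR2 in the
 * byte order the hardware expects (see the example below).
 */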
static int uec_set_mac_address(struct uec_priv *uec, u8 *mac_addr)
{
	uec_t *uec_regs;
	u32 mac_addr1;
	u32 mac_addr2;

	if (!uec) {
		printf("%s: uec not initialized\n", __func__);
		return -EINVAL;
	}

	uec_regs = uec->uec_regs;

	/*
	 * if a station address of 0x12345678ABCD, perform a write to
	 * MACSTNADDR1 of 0xCDAB7856,
	 * MACSTNADDR2 of 0x34120000
	 */

	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) |
			(mac_addr[3] << 8) | (mac_addr[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

	return 0;
}

static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
				   int *threads_num_ret)
{
	int num_threads_numerica;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerica = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerica = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerica = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerica = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerica = 8;
		break;
	default:
		printf("%s: Bad number of threads value.",
		       __func__);
		return -EINVAL;
	}

	*threads_num_ret = num_threads_numerica;

	return 0;
}

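/*
 * Allocate and initialize the global Tx parameter RAM page in MURAM:
 * send queue descriptors, TxBD ring pointers and per-thread Tx data.
 */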
static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
{
	struct uec_inf *uec_info;
	u32 end_bd;
	u8 bmrx = 0;
	int i;

	uec_info = uec->uec_info;

	/* Alloc global Tx parameter RAM page */
	uec->tx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_tx_global_pram),
			       UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	/* SQPTR */
	uec->send_q_mem_reg_offset =
		qe_muram_alloc(sizeof(struct uec_send_queue_qd),
			       UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Setup the table with TxBDs ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
					 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
		 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
		 end_bd);

	/* Scheduler Base Pointer, we have only one Tx queue, no need it */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON Base Pointer, TxRMON disable, we don't need it */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	/* IPH_Offset */
	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);

	/* VTAG table */
	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);

	/* TQPTR */
	uec->thread_dat_tx_offset =
		qe_muram_alloc(num_threads_tx *
			       sizeof(struct uec_thread_data_tx) +
			       32 * (num_threads_tx == 1),
			       UEC_THREAD_DATA_ALIGNMENT);

	uec->p_thread_data_tx = (struct uec_thread_data_tx *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);
}

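/*
 * Allocate and initialize the global Rx parameter RAM page in MURAM:
 * frame length limits, RxBD queue table, VLAN and address filtering.
 */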
static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
{
	u8 bmrx = 0;
	int i;
	struct uec_82xx_add_filtering_pram *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_rx_global_pram),
			       UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero Global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));

	/* Init global Rx parameter RAM */
	/*
	 * REMODER, Extended feature mode disable, VLAN disable,
	 * LossLess flow control disable, Receive firmware statistic disable,
	 * Extended address parsing mode disable, one Rx queue,
	 * Dynamic maximum/minimum frame length disable, IP checksum check
	 * disable, IP address alignment disable
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	/* RQPTR */
	uec->thread_dat_rx_offset =
		qe_muram_alloc(num_threads_rx *
			       sizeof(struct uec_thread_data_rx),
			       UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (struct uec_thread_data_rx *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	/* Type_or_Len */
	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer, we don't need it */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR, we don't need it, no interrupt */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	/* MRBLR */
	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	/* RBDQPTR */
	uec->rx_bd_qs_tbl_offset =
		qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
			       sizeof(struct uec_rx_pref_bds),
			       UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	/* Zero it */
	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
	       sizeof(struct uec_rx_pref_bds));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	/* MFLR */
	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	/* MINFLR */
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	/* MAXD1 */
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	/* MAXD2 */
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	/* ECAM_PTR */
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
	/* L2QT */
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	/* L3QT */
	for (i = 0; i < 8; i++)
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);

	/* VLAN_TYPE */
	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	/* TCI */
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (struct uec_82xx_add_filtering_pram *)
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;
}

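/*
 * Build the INIT ENET command parameter page (global parameter RAM
 * pointers plus one entry per Rx/Tx thread) and issue QE_INIT_TX_RX.
 */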
static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
					int thread_tx, int thread_rx)
{
	struct uec_init_cmd_pram *p_init_enet_param;
	u32 init_enet_param_offset;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	int i;
	int snum;
	u32 off;
	u32 entry_val;
	u32 command;
	u32 cecr_subblock;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset =
		qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (struct uec_init_cmd_pram *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero init enet command struct */
	memset((void *)uec->p_init_enet_param, 0,
	       sizeof(struct uec_init_cmd_pram));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					     << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					     << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
					     (u32)uec_info->risc_rx;

	/* Init Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s can not get snum\n", __func__);
			return -ENOMEM;
		}

		if (i == 0) {
			off = 0;
		} else {
			off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
					     UEC_THREAD_RX_PRAM_ALIGNMENT);
		}

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;
	}

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
				      (u32)uec_info->risc_tx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		snum = qe_get_snum();
		if (snum < 0) {
			printf("%s can not get snum\n", __func__);
			return -ENOMEM;
		}

		off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
				     UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;
	}

	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
		     init_enet_param_offset);

	return 0;
}

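/*
 * One-time hardware bring-up: validate the BD ring configuration,
 * initialize the UCC fast block and MAC registers, allocate and link
 * the Tx/Rx BD rings and Rx buffers, set up the Tx/Rx parameter RAM
 * and issue the init enet Tx/Rx command to the QE.
 */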
static int uec_startup(struct uec_priv *uec)
{
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	struct ucc_fast_priv *uccf;
	ucc_fast_t *uf_regs;
	uec_t *uec_regs;
	int num_threads_tx;
	int num_threads_rx;
	u32 utbipar;
	u32 length;
	u32 align;
	struct buffer_descriptor *bd;
	u8 *buf;
	int i;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initialized\n", __func__);
		return -EINVAL;
	}

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Check if Rx BD ring len is illegal */
	if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
	    (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring len must be a multiple of 4, and > 8.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if Tx BD ring len is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
		       __func__);
		return -EINVAL;
	}

	/* Check if MRBLR is illegal */
	if (MAX_RXBUF_LEN == 0 || MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT) {
		printf("%s: max rx buffer length must be a multiple of 128.\n",
		       __func__);
		return -EINVAL;
	}

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	/* Init UCC fast */
	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __func__);
		return -ENOMEM;
	}

	/* Save uccf */
	uec->uccf = uccf;

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
				    &num_threads_tx)) {
		return -EINVAL;
	}

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
				    &num_threads_rx)) {
		return -EINVAL;
	}

	uf_regs = uccf->uf_regs;

	/* UEC register is following UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Setup MAC interface mode */
	uec_set_mac_if_mode(uec, uec_info->enet_interface_type,
			    uec_info->speed);

	/* Setup MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	uec->uec_mii_regs = (uec_mii_t *)CONFIG_MIIM_ADDRESS;
#endif

	/* Setup MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	/* Setup UTBIPAR */
	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

	/* Initialize UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL UEC.
	 * This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

	/* Configure the TBI for SGMII operation */
	if (uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII &&
	    uec->uec_info->speed == SPEED_1000) {
		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_ANA, TBIANA_SETTINGS);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_CR, TBICR_SETTINGS);
	}

	/* Allocate Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
	    UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	}

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
					   & ~(align - 1));
	}

	/* Zero all of Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
					   & ~(align - 1));
	}

	/* Zero all of Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate Rx buffer */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
				       & ~(align - 1));
	}

	/* Zero all of the Rx buffer */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init TxBD ring */
	bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
	uec->tx_bd = bd;

	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_DATA_CLEAR(bd);
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);
		bd++;
	}
	BD_STATUS_SET((--bd), TX_BD_WRAP);

	/* Init RxBD ring */
	bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
	uec->rx_bd = bd;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RX_BD_EMPTY);
		buf += MAX_RXBUF_LEN;
		bd++;
	}
	BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s issue init enet cmd failed\n", __func__);
		return -ENOMEM;
	}

	return 0;
}

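/*
 * eth_device init callback: on the first run initialize the PHY and
 * wait for link, then program the MAC address and open the controller
 * for Rx and Tx.
 */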
static int uec_init(struct eth_device *dev, struct bd_info *bd)
{
	struct uec_priv *uec;
	int err, i;
	struct phy_info *curphy;
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	uec = (struct uec_priv *)dev->priv;

	if (!uec->the_first_run) {
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/*
		 * QE9 and QE12 need to be set for enabling QE MII
		 * management signals
		 */
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		err = init_phy(dev);
		if (err) {
			printf("%s: Cannot initialize PHY, aborting.\n",
			       dev->name);
			return err;
		}

		curphy = uec->mii_info->phyinfo;

		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
			if (err) {
				printf("%s: Can't negotiate PHY\n", dev->name);
				return err;
			}
		}

		/* Give PHYs up to 5 sec to report a link */
		i = 50;
		do {
			err = curphy->read_status(uec->mii_info);
			if (!(((i-- > 0) && !uec->mii_info->link) || err))
				break;
			mdelay(100);
		} while (1);

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/* QE12 needs to be released for enabling LBCTL signal */
		clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		if (err || i <= 0)
			printf("warning: %s: timeout on PHY link\n", dev->name);

		adjust_link(dev);
		uec->the_first_run = 1;
	}

	/* Set up the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n",
		       __func__);
		return -1;
	}
	uec_set_mac_address(uec, dev->enetaddr);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
	if (err) {
		printf("%s: cannot enable UEC device\n", dev->name);
		return -1;
	}

	phy_change(dev);

	return uec->mii_info->link ? 0 : -1;
}

static void uec_halt(struct eth_device *dev)
{
	struct uec_priv *uec = (struct uec_priv *)dev->priv;

	uec_stop(uec, COMM_DIR_RX_AND_TX);
}

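/*
 * eth_device send callback: fill the next TxBD, kick the UCC with
 * transmit-on-demand and poll until the BD is no longer marked ready.
 */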
static int uec_send(struct eth_device *dev, void *buf, int len)
{
	struct uec_priv *uec;
	struct ucc_fast_priv *uccf;
	struct buffer_descriptor *bd;
	u16 status;
	int i;
	int result = 0;

	uec = (struct uec_priv *)dev->priv;
	uccf = uec->uccf;
	bd = uec->tx_bd;

	/* Find an empty TxBD */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx buffer not ready\n", dev->name);
			return result;
		}
	}

	/* Init TxBD */
	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = BD_STATUS(bd);
	status &= BD_WRAP;
	status |= (TX_BD_READY | TX_BD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for buffer to be transmitted */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
		if (i > 0x100000) {
			printf("%s: tx error\n", dev->name);
			return result;
		}
	}

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);
	uec->tx_bd = bd;
	result = 1;

	return result;
}

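/*
 * eth_device recv callback: walk the RxBD ring, hand completed frames
 * to the network stack and recycle each BD as empty.
 */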
static int uec_recv(struct eth_device *dev)
{
	struct uec_priv *uec = dev->priv;
	struct buffer_descriptor *bd;
	u16 status;
	u16 len;
	u8 *data;

	bd = uec->rx_bd;
	status = BD_STATUS(bd);

	while (!(status & RX_BD_EMPTY)) {
		if (!(status & RX_BD_ERROR)) {
			data = BD_DATA(bd);
			len = BD_LENGTH(bd);
			net_process_received_packet(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
		}
		status &= BD_CLEAN;
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RX_BD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = BD_STATUS(bd);
	}
	uec->rx_bd = bd;

	return 1;
}

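/*
 * Allocate and register one UEC eth_device for the given uec_inf,
 * start up the hardware and register its MDIO bus when the MII
 * commands are enabled.
 */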
int uec_initialize(struct bd_info *bis, struct uec_inf *uec_info)
{
	struct eth_device *dev;
	int i;
	struct uec_priv *uec;
	int err;

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* Allocate the UEC private struct */
	uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
	if (!uec)
		return -ENOMEM;

	memset(uec, 0, sizeof(struct uec_priv));

	/* Adjust uec_info */
#if (MAX_QE_RISC == 4)
	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
#endif

	devlist[uec_info->uf_info.ucc_num] = dev;

	uec->uec_info = uec_info;
	uec->dev = dev;

	sprintf(dev->name, "UEC%d", uec_info->uf_info.ucc_num);
	dev->iobase = 0;
	dev->priv = (void *)uec;
	dev->init = uec_init;
	dev->halt = uec_halt;
	dev->send = uec_send;
	dev->recv = uec_recv;

	/* Clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;

	eth_register(dev);

	err = uec_startup(uec);
	if (err) {
		printf("%s: Cannot configure net device, aborting.", dev->name);
		return err;
	}

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
	int retval;
	struct mii_dev *mdiodev = mdio_alloc();

	if (!mdiodev)
		return -ENOMEM;
	strlcpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = uec_miiphy_read;
	mdiodev->write = uec_miiphy_write;

	retval = mdio_register(mdiodev);
	if (retval < 0)
		return retval;
#endif

	return 1;
}

int uec_eth_init(struct bd_info *bis, struct uec_inf *uecs, int num)
{
	int i;

	for (i = 0; i < num; i++)
		uec_initialize(bis, &uecs[i]);

	return 0;
}

int uec_standard_init(struct bd_info *bis)
{
	return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));
}
#endif