// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2009-2012 Freescale Semiconductor, Inc.
 * Copyright 2020 NXP
 *	Dave Liu <daveliu@freescale.com>
 */
#include <common.h>
#include <log.h>
#include <part.h>
#include <asm/io.h>
#include <dm.h>
#include <dm/ofnode.h>
#include <linux/compat.h>
#include <phy_interface.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <fsl_dtsec.h>
#include <fsl_tgec.h>
#include <fsl_memac.h>
#include <linux/delay.h>

#include "fm.h"

#if defined(CONFIG_MII) || (defined(CONFIG_CMD_MII) && !defined(BITBANGMII))

#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			TBIANA_FULL_DUPLEX)

#define TBIANA_SGMII_ACK 0x4001

#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/* Configure the TBI for SGMII operation */
static void dtsec_configure_serdes(struct fm_eth *priv)
{
#ifdef CONFIG_SYS_FMAN_V3
	u32 value;
	struct mii_dev bus;
	bool sgmii_2500 = (priv->enet_if ==
			PHY_INTERFACE_MODE_2500BASEX) ? true : false;
	int i = 0, j;

	bus.priv = priv->pcs_mdio;
	bus.read = memac_mdio_read;
	bus.write = memac_mdio_write;
	bus.reset = memac_mdio_reset;

qsgmii_loop:
	/* SGMII IF mode + AN enable only for 1G SGMII, not for 2.5G */
	if (sgmii_2500)
		value = PHY_SGMII_CR_PHY_RESET |
			PHY_SGMII_IF_SPEED_GIGABIT |
			PHY_SGMII_IF_MODE_SGMII;
	else
		value = PHY_SGMII_IF_MODE_SGMII | PHY_SGMII_IF_MODE_AN;

	for (j = 0; j <= 3; j++)
		debug("dump PCS reg %#x: %#x\n", j,
		      memac_mdio_read(&bus, i, MDIO_DEVAD_NONE, j));

	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x14, value);

	/* Dev ability according to SGMII specification */
	value = PHY_SGMII_DEV_ABILITY_SGMII;
	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x4, value);

	if (sgmii_2500) {
		/* Adjust link timer for 2.5G SGMII,
		 * 1.6 ms in units of 3.2 ns:
		 * 1.6ms / 3.2ns = 5 * 10^5 = 0x7a120.
		 */
		memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x13, 0x0007);
		memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x12, 0xa120);
	} else {
		/* Adjust link timer for SGMII,
		 * 1.6 ms in units of 8 ns:
		 * 1.6ms / 8ns = 2 * 10^5 = 0x30d40.
		 */
		memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x13, 0x0003);
		memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0x12, 0x0d40);
	}

	/* Restart AN */
	value = PHY_SGMII_CR_DEF_VAL | PHY_SGMII_CR_RESET_AN;
	memac_mdio_write(&bus, i, MDIO_DEVAD_NONE, 0, value);

	if ((priv->enet_if == PHY_INTERFACE_MODE_QSGMII) && (i < 3)) {
		i++;
		goto qsgmii_loop;
	}
#else
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			TBI_CR, TBICR_SETTINGS);
#endif
}

static void dtsec_init_phy(struct fm_eth *fm_eth)
{
#ifndef CONFIG_SYS_FMAN_V3
	struct dtsec *regs = (struct dtsec *)CFG_SYS_FSL_FM1_DTSEC1_ADDR;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CFG_SYS_TBIPA_VALUE);
#endif

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII ||
	    fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII ||
	    fm_eth->enet_if == PHY_INTERFACE_MODE_2500BASEX)
		dtsec_configure_serdes(fm_eth);
}
#endif

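/*
 * 16-bit accessors for parameter RAM, queue descriptor and buffer
 * descriptor fields: every access goes through an aligned 32-bit
 * big-endian read (and, for writes, a read-modify-write), with the
 * requested halfword taken from the upper or lower half of that word
 * depending on the address.
 */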
static u16 muram_readw(u16 *addr)
{
	ulong base = (ulong)addr & ~0x3UL;
	u32 val32 = in_be32((void *)base);
	int byte_pos;
	u16 ret;

	byte_pos = (ulong)addr & 0x3UL;
	if (byte_pos)
		ret = (u16)(val32 & 0x0000ffff);
	else
		ret = (u16)((val32 & 0xffff0000) >> 16);

	return ret;
}

static void muram_writew(u16 *addr, u16 val)
{
	ulong base = (ulong)addr & ~0x3UL;
	u32 org32 = in_be32((void *)base);
	u32 val32;
	int byte_pos;

	byte_pos = (ulong)addr & 0x3UL;
	if (byte_pos)
		val32 = (org32 & 0xffff0000) | val;
	else
		val32 = (org32 & 0x0000ffff) | ((u32)val << 16);

	out_be32((void *)base, val32);
}

static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
{
	int timeout = 1000000;

	clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);

	/* wait until the rx port is not busy */
	while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
		;
	if (!timeout)
		printf("%s - timeout\n", __func__);
}

static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
{
	/* set BMI to independent mode, Rx port disable */
	out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
	/* clear FOF in IM case */
	out_be32(&rx_port->fmbm_rim, 0);
	/* Rx frame next engine -RISC */
	out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
	/* Rx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
	setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
	/* enable Rx statistic counters */
	out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
	/* disable Rx performance counters */
	out_be32(&rx_port->fmbm_rpc, 0);
}

static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
{
	int timeout = 1000000;

	clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);

	/* wait until the tx port is not busy */
	while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
		;
	if (!timeout)
		printf("%s - timeout\n", __func__);
}

static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
{
	/* set BMI to independent mode, Tx port disable */
	out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
	/* Tx frame next engine -RISC */
	out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	/* Tx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
	setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
	/* enable Tx statistic counters */
	out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
	/* disable Tx performance counters */
	out_be32(&tx_port->fmbm_tpc, 0);
}

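/*
 * Rx side of the independent-mode (IM) datapath: allocate the port's
 * global parameter RAM page in MURAM, allocate the Rx BD ring and the
 * Rx buffer pool from main memory, attach a buffer to every BD, fill in
 * the Rx queue descriptor and finally tell the BMI Rx port where the
 * parameter page lives (fmbm_rfqid).
 */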
static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *rx_bd_ring_base;
	void *rx_buf_pool;
	u32 bd_ring_base_lo, bd_ring_base_hi;
	u32 buf_lo, buf_hi;
	struct fm_port_bd *rxbd;
	struct fm_port_qd *rxqd;
	struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	if (!pram) {
		printf("%s: No muram for Rx global parameter\n", __func__);
		return -ENOMEM;
	}

	fm_eth->rx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (void *)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	out_be32(&pram->mode, PRAM_MODE_GLOBAL);

	/* init the Rx queue descriptor pointer */
	out_be32(&pram->rxqd_ptr, pram_page_offset + 0x20);

	/* set the max receive buffer length, power of 2 */
	muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);

	/* alloc Rx buffer descriptors from main memory */
	rx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	if (!rx_bd_ring_base)
		return -ENOMEM;

	memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);

	/* alloc Rx buffer from main memory */
	rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	if (!rx_buf_pool) {
		free(rx_bd_ring_base);
		return -ENOMEM;
	}

	memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	debug("%s: rx_buf_pool = %p\n", __func__, rx_buf_pool);

	/* save them to fm_eth */
	fm_eth->rx_bd_ring = rx_bd_ring_base;
	fm_eth->cur_rxbd = rx_bd_ring_base;
	fm_eth->rx_buf = rx_buf_pool;

	/* init Rx BDs ring */
	rxbd = (struct fm_port_bd *)rx_bd_ring_base;
	for (i = 0; i < RX_BD_RING_SIZE; i++) {
		muram_writew(&rxbd->status, RxBD_EMPTY);
		muram_writew(&rxbd->len, 0);
		buf_hi = upper_32_bits(virt_to_phys(rx_buf_pool +
					i * MAX_RXBUF_LEN));
		buf_lo = lower_32_bits(virt_to_phys(rx_buf_pool +
					i * MAX_RXBUF_LEN));
		muram_writew(&rxbd->buf_ptr_hi, (u16)buf_hi);
		out_be32(&rxbd->buf_ptr_lo, buf_lo);
		rxbd++;
	}

	/* set the Rx queue descriptor */
	rxqd = &pram->rxqd;
	muram_writew(&rxqd->gen, 0);
	bd_ring_base_hi = upper_32_bits(virt_to_phys(rx_bd_ring_base));
	bd_ring_base_lo = lower_32_bits(virt_to_phys(rx_bd_ring_base));
	muram_writew(&rxqd->bd_ring_base_hi, (u16)bd_ring_base_hi);
	out_be32(&rxqd->bd_ring_base_lo, bd_ring_base_lo);
	muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	muram_writew(&rxqd->offset_in, 0);
	muram_writew(&rxqd->offset_out, 0);

	/* set IM parameter ram pointer to Rx Frame Queue ID */
	out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);

	return 0;
}

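/*
 * Tx counterpart of the Rx setup above: allocate the Tx parameter RAM
 * page and the Tx BD ring, mark every BD as last-in-frame with no buffer
 * attached (buffers are supplied per packet by fm_eth_send()), and
 * publish the parameter page offset to the BMI Tx port (fmbm_tcfqid).
 */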
static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *tx_bd_ring_base;
	u32 bd_ring_base_lo, bd_ring_base_hi;
	struct fm_port_bd *txbd;
	struct fm_port_qd *txqd;
	struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	if (!pram) {
		printf("%s: No muram for Tx global parameter\n", __func__);
		return -ENOMEM;
	}
	fm_eth->tx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (void *)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	out_be32(&pram->mode, PRAM_MODE_GLOBAL);

	/* init the Tx queue descriptor pointer */
	out_be32(&pram->txqd_ptr, pram_page_offset + 0x40);

	/* alloc Tx buffer descriptors from main memory */
	tx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	if (!tx_bd_ring_base)
		return -ENOMEM;

	memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	/* save it to fm_eth */
	fm_eth->tx_bd_ring = tx_bd_ring_base;
	fm_eth->cur_txbd = tx_bd_ring_base;

	/* init Tx BDs ring */
	txbd = (struct fm_port_bd *)tx_bd_ring_base;
	for (i = 0; i < TX_BD_RING_SIZE; i++) {
		muram_writew(&txbd->status, TxBD_LAST);
		muram_writew(&txbd->len, 0);
		muram_writew(&txbd->buf_ptr_hi, 0);
		out_be32(&txbd->buf_ptr_lo, 0);
		txbd++;
	}

	/* set the Tx queue descriptor */
	txqd = &pram->txqd;
	bd_ring_base_hi = upper_32_bits(virt_to_phys(tx_bd_ring_base));
	bd_ring_base_lo = lower_32_bits(virt_to_phys(tx_bd_ring_base));
	muram_writew(&txqd->bd_ring_base_hi, (u16)bd_ring_base_hi);
	out_be32(&txqd->bd_ring_base_lo, bd_ring_base_lo);
	muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	muram_writew(&txqd->offset_in, 0);
	muram_writew(&txqd->offset_out, 0);

	/* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
	out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);

	return 0;
}

static int fm_eth_init(struct fm_eth *fm_eth)
{
	int ret;

	ret = fm_eth_rx_port_parameter_init(fm_eth);
	if (ret)
		return ret;

	ret = fm_eth_tx_port_parameter_init(fm_eth);
	if (ret)
		return ret;

	return 0;
}

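/*
 * One-time bring-up called from probe: build the Rx/Tx rings and
 * parameter RAM, initialize the MAC block and configure both BMI ports
 * for independent mode. The ports stay disabled here and are only
 * enabled in fm_eth_open().
 */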
static int fm_eth_startup(struct fm_eth *fm_eth)
{
	struct fsl_enet_mac *mac;
	int ret;

	mac = fm_eth->mac;

	/* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */
	ret = fm_eth_init(fm_eth);
	if (ret)
		return ret;
	/* setup the MAC controller */
	mac->init_mac(mac);

	/* For some reason we need to set SPEED_100 */
	if (((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) ||
	     (fm_eth->enet_if == PHY_INTERFACE_MODE_2500BASEX) ||
	     (fm_eth->enet_if == PHY_INTERFACE_MODE_QSGMII)) &&
	    mac->set_if_mode)
		mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100);

	/* init bmi rx port, IM mode and disable */
	bmi_rx_port_init(fm_eth->rx_port);
	/* init bmi tx port, IM mode and disable */
	bmi_tx_port_init(fm_eth->tx_port);

	return 0;
}

static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* graceful stop transmission of frames */
	setbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP);
	sync();
}

static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* re-enable transmission of frames */
	clrbits_be32(&pram->mode, PRAM_MODE_GRACEFUL_STOP);
	sync();
}

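/*
 * .start callback: program the station address (a multicast address is
 * patched into a usable unicast one first), enable the BMI Rx/Tx ports
 * and the MAC, lift the graceful-stop condition, then start the PHY and
 * program the MAC for the negotiated speed. Fails if the link does not
 * come up.
 */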
static int fm_eth_open(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct fm_eth *fm_eth = dev_get_priv(dev);
	unsigned char *enetaddr;
	struct fsl_enet_mac *mac;
#ifdef CONFIG_PHYLIB
	int ret;
#endif

	mac = fm_eth->mac;

	enetaddr = pdata->enetaddr;

	/* setup the MAC address */
	if (enetaddr[0] & 0x01) {
		printf("%s: MacAddress is multicast address\n", __func__);
		enetaddr[0] = 0;
		enetaddr[5] = fm_eth->num;
	}
	mac->set_mac_addr(mac, enetaddr);

	/* enable bmi Rx port */
	setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
	/* enable MAC rx/tx port */
	mac->enable_mac(mac);
	/* enable bmi Tx port */
	setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
	/* re-enable transmission of frame */
	fmc_tx_port_graceful_stop_disable(fm_eth);

#ifdef CONFIG_PHYLIB
	if (fm_eth->phydev) {
		ret = phy_startup(fm_eth->phydev);
		if (ret) {
			printf("%s: Could not initialize\n", dev->name);
			return ret;
		}
	} else {
		return 0;
	}
#else
	fm_eth->phydev->speed = SPEED_1000;
	fm_eth->phydev->link = 1;
	fm_eth->phydev->duplex = DUPLEX_FULL;
#endif

	/* set the MAC-PHY mode */
	mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed);
	debug("MAC IF mode %d, speed %d, link %d\n", fm_eth->enet_if,
	      fm_eth->phydev->speed, fm_eth->phydev->link);

	if (!fm_eth->phydev->link)
		printf("%s: No link.\n", fm_eth->phydev->dev->name);

	return fm_eth->phydev->link ? 0 : -1;
}

static void fm_eth_halt(struct udevice *dev)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = dev_get_priv(dev);
	mac = fm_eth->mac;

	/* graceful stop the transmission of frames */
	fmc_tx_port_graceful_stop_enable(fm_eth);
	/* disable bmi Tx port */
	bmi_tx_port_disable(fm_eth->tx_port);
	/* disable MAC rx/tx port */
	mac->disable_mac(mac);
	/* disable bmi Rx port */
	bmi_rx_port_disable(fm_eth->rx_port);

#ifdef CONFIG_PHYLIB
	if (fm_eth->phydev)
		phy_shutdown(fm_eth->phydev);
#endif
}

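/*
 * .send callback (polled, independent mode): wait for the current TxBD
 * to become free, attach the frame buffer and length, mark the BD
 * READY | LAST and advance txqd.offset_in so the FMan picks the frame
 * up. Completion is detected by polling until READY is cleared again.
 */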
static int fm_eth_send(struct udevice *dev, void *buf, int len)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *txbd, *txbd_base;
	u16 offset_in;
	int i;

	fm_eth = dev_get_priv(dev);
	pram = fm_eth->tx_pram;
	txbd = fm_eth->cur_txbd;

	/* find one empty TxBD */
	for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x1000) {
			printf("%s: Tx buffer not ready, txbd->status = 0x%x\n",
			       dev->name, muram_readw(&txbd->status));
			return 0;
		}
	}
	/* setup TxBD */
	muram_writew(&txbd->buf_ptr_hi, (u16)upper_32_bits(virt_to_phys(buf)));
	out_be32(&txbd->buf_ptr_lo, lower_32_bits(virt_to_phys(buf)));
	muram_writew(&txbd->len, len);
	sync();
	muram_writew(&txbd->status, TxBD_READY | TxBD_LAST);
	sync();

	/* update TxQD, let the RISC send the packet */
	offset_in = muram_readw(&pram->txqd.offset_in);
	offset_in += sizeof(struct fm_port_bd);
	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
		offset_in = 0;
	muram_writew(&pram->txqd.offset_in, offset_in);
	sync();

	/* wait for buffer to be transmitted */
	for (i = 0; muram_readw(&txbd->status) & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x10000) {
			printf("%s: Tx error, txbd->status = 0x%x\n",
			       dev->name, muram_readw(&txbd->status));
			return 0;
		}
	}

	/* advance the TxBD */
	txbd++;
	txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring;
	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
		txbd = txbd_base;
	/* update current txbd */
	fm_eth->cur_txbd = (void *)txbd;

	return 1;
}

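/*
 * Hand one RxBD back to the hardware: mark it empty, advance to the
 * next BD in the ring and move rxqd.offset_out forward so the port can
 * reuse the descriptor. Returns the next BD to look at.
 */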
static struct fm_port_bd *fm_eth_free_one(struct fm_eth *fm_eth,
					  struct fm_port_bd *rxbd)
{
	struct fm_port_global_pram *pram;
	struct fm_port_bd *rxbd_base;
	u16 offset_out;

	pram = fm_eth->rx_pram;

	/* clear the RxBDs */
	muram_writew(&rxbd->status, RxBD_EMPTY);
	muram_writew(&rxbd->len, 0);
	sync();

	/* advance RxBD */
	rxbd++;
	rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring;
	if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
		rxbd = rxbd_base;

	/* update RxQD */
	offset_out = muram_readw(&pram->rxqd.offset_out);
	offset_out += sizeof(struct fm_port_bd);
	if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
		offset_out = 0;
	muram_writew(&pram->rxqd.offset_out, offset_out);
	sync();

	return rxbd;
}

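/*
 * .recv callback: scan BDs starting at cur_rxbd. A good frame is handed
 * to the network stack zero-copy through *packetp and its BD is recycled
 * later via fm_eth_free_pkt(); BDs flagged with an error are logged and
 * freed immediately. Returns the frame length, 0 after an Rx error, or
 * -1 when nothing is pending.
 */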
static int fm_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct fm_eth *fm_eth;
	struct fm_port_bd *rxbd;
	u32 buf_lo, buf_hi;
	u16 status, len;
	int ret = -1;
	u8 *data;

	fm_eth = dev_get_priv(dev);
	rxbd = fm_eth->cur_rxbd;
	status = muram_readw(&rxbd->status);

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			buf_hi = muram_readw(&rxbd->buf_ptr_hi);
			buf_lo = in_be32(&rxbd->buf_ptr_lo);
			data = (u8 *)((ulong)(buf_hi << 16) << 16 | buf_lo);
			len = muram_readw(&rxbd->len);
			*packetp = data;
			return len;
		} else {
			printf("%s: Rx error\n", dev->name);
			ret = 0;
		}

		/* free current bd, advance to next one */
		rxbd = fm_eth_free_one(fm_eth, rxbd);

		/* read next status */
		status = muram_readw(&rxbd->status);
	}
	fm_eth->cur_rxbd = (void *)rxbd;

	return ret;
}

static int fm_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct fm_eth *fm_eth = (struct fm_eth *)dev_get_priv(dev);

	fm_eth->cur_rxbd = fm_eth_free_one(fm_eth, fm_eth->cur_rxbd);

	return 0;
}

static int fm_eth_init_mac(struct fm_eth *fm_eth, void *reg)
{
#ifndef CONFIG_SYS_FMAN_V3
	void *mdio;
#endif

	fm_eth->mac = kzalloc(sizeof(*fm_eth->mac), GFP_KERNEL);
	if (!fm_eth->mac)
		return -ENOMEM;

#ifndef CONFIG_SYS_FMAN_V3
	mdio = fman_mdio(fm_eth->dev->parent, fm_eth->mac_type, fm_eth->num);
	debug("MDIO %d @ %p\n", fm_eth->num, mdio);
#endif

	switch (fm_eth->mac_type) {
#ifdef CONFIG_SYS_FMAN_V3
	case FM_MEMAC:
		init_memac(fm_eth->mac, reg, NULL, MAX_RXBUF_LEN);
		break;
#else
	case FM_DTSEC:
		init_dtsec(fm_eth->mac, reg, mdio, MAX_RXBUF_LEN);
		break;
	case FM_TGEC:
		init_tgec(fm_eth->mac, reg, mdio, MAX_RXBUF_LEN);
		break;
#endif
	}

	return 0;
}

static int init_phy(struct fm_eth *fm_eth)
{
#ifdef CONFIG_PHYLIB
	u32 supported = PHY_GBIT_FEATURES;

	if (fm_eth->type == FM_ETH_10G_E)
		supported = PHY_10G_FEATURES;
	if (fm_eth->enet_if == PHY_INTERFACE_MODE_2500BASEX)
		supported |= SUPPORTED_2500baseX_Full;
#endif

	if (fm_eth->type == FM_ETH_1G_E)
		dtsec_init_phy(fm_eth);

#ifdef CONFIG_PHYLIB
#ifdef CONFIG_DM_MDIO
	fm_eth->phydev = dm_eth_phy_connect(fm_eth->dev);
	if (!fm_eth->phydev)
		return -ENODEV;
#endif
	fm_eth->phydev->advertising &= supported;
	fm_eth->phydev->supported &= supported;

	phy_config(fm_eth->phydev);
#endif
	return 0;
}

static int fm_eth_bind(struct udevice *dev)
{
	char mac_name[11];
	u32 fm, num;

	if (ofnode_read_u32(ofnode_get_parent(dev_ofnode(dev)), "cell-index", &fm)) {
		printf("FMan node property cell-index missing\n");
		return -EINVAL;
	}

	if (dev && dev_read_u32(dev, "cell-index", &num)) {
		printf("FMan MAC node property cell-index missing\n");
		return -EINVAL;
	}

	sprintf(mac_name, "fm%d-mac%d", fm + 1, num + 1);
	device_set_name(dev, mac_name);

	debug("%s - binding %s\n", __func__, mac_name);

	return 0;
}

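/*
 * Look up the internal PCS/TBI MDIO bus for this MAC: follow the
 * pcsphy-handle (or legacy tbi-handle) phandle and return the UCLASS_MDIO
 * device bound to that node's parent, or NULL if it cannot be resolved.
 */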
static struct udevice *fm_get_internal_mdio(struct udevice *dev)
{
	struct ofnode_phandle_args phandle = {.node = ofnode_null()};
	struct udevice *mdiodev;

	if (dev_read_phandle_with_args(dev, "pcsphy-handle", NULL,
				       0, 0, &phandle) ||
	    !ofnode_valid(phandle.node)) {
		if (dev_read_phandle_with_args(dev, "tbi-handle", NULL,
					       0, 0, &phandle) ||
		    !ofnode_valid(phandle.node)) {
			printf("Issue reading pcsphy-handle/tbi-handle for MAC %s\n",
			       dev->name);
			return NULL;
		}
	}

	if (uclass_get_device_by_ofnode(UCLASS_MDIO,
					ofnode_get_parent(phandle.node),
					&mdiodev)) {
		printf("can't find MDIO bus for node %s\n",
		       ofnode_get_name(ofnode_get_parent(phandle.node)));
		return NULL;
	}
	debug("Found internal MDIO bus %p\n", mdiodev);

	return mdiodev;
}

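/*
 * .probe callback: resolve the FMan instance, MMIO base, MAC type and
 * PHY interface mode from the device tree, look up the Rx/Tx ports via
 * the fsl,fman-ports phandles, grab the internal PCS MDIO bus for
 * (Q)SGMII/2500Base-X links, then initialize the MAC, the IM datapath
 * and the PHY.
 */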
static int fm_eth_probe(struct udevice *dev)
{
	struct fm_eth *fm_eth = (struct fm_eth *)dev_get_priv(dev);
	struct ofnode_phandle_args args;
	void *reg;
	int ret, index;

	debug("%s enter for dev %p fm_eth %p - %s\n", __func__, dev, fm_eth,
	      (dev) ? dev->name : "-");

	if (fm_eth->dev) {
		printf("%s already probed, exit\n", (dev) ? dev->name : "-");
		return 0;
	}

	fm_eth->dev = dev;
	fm_eth->fm_index = fman_id(dev->parent);
	reg = (void *)(uintptr_t)dev_read_addr(dev);
	fm_eth->mac_type = dev_get_driver_data(dev);
#ifdef CONFIG_PHYLIB
	fm_eth->enet_if = dev_read_phy_mode(dev);
#else
	fm_eth->enet_if = PHY_INTERFACE_MODE_SGMII;
	printf("%s: warning - unable to determine interface type\n", __func__);
#endif
	switch (fm_eth->mac_type) {
#ifndef CONFIG_SYS_FMAN_V3
	case FM_TGEC:
		fm_eth->type = FM_ETH_10G_E;
		break;
	case FM_DTSEC:
#else
	case FM_MEMAC:
		/* default to 1G, 10G is indicated by port property in dts */
#endif
		fm_eth->type = FM_ETH_1G_E;
		break;
	}

	if (dev_read_u32(dev, "cell-index", &fm_eth->num)) {
		printf("FMan MAC node property cell-index missing\n");
		return -EINVAL;
	}

	if (dev_read_phandle_with_args(dev, "fsl,fman-ports", NULL,
				       0, 0, &args))
		goto ports_ref_failure;
	index = ofnode_read_u32_default(args.node, "cell-index", 0);
	if (index <= 0)
		goto ports_ref_failure;
	fm_eth->rx_port = fman_port(dev->parent, index);

	if (ofnode_read_bool(args.node, "fsl,fman-10g-port"))
		fm_eth->type = FM_ETH_10G_E;

	if (dev_read_phandle_with_args(dev, "fsl,fman-ports", NULL,
				       0, 1, &args))
		goto ports_ref_failure;
	index = ofnode_read_u32_default(args.node, "cell-index", 0);
	if (index <= 0)
		goto ports_ref_failure;
	fm_eth->tx_port = fman_port(dev->parent, index);

	/* set the ethernet max receive length */
	fm_eth->max_rx_len = MAX_RXBUF_LEN;

	switch (fm_eth->enet_if) {
	case PHY_INTERFACE_MODE_QSGMII:
		/* all PCS blocks are accessed on one controller */
		if (fm_eth->num != 0)
			break;
		/* fall through */
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_2500BASEX:
		fm_eth->pcs_mdio = fm_get_internal_mdio(dev);
		break;
	default:
		break;
	}

	/* init global mac structure */
	ret = fm_eth_init_mac(fm_eth, reg);
	if (ret)
		return ret;

	/* start up the FMan independent-mode (IM) datapath */
	ret = fm_eth_startup(fm_eth);

	if (!ret)
		ret = init_phy(fm_eth);

	return ret;

ports_ref_failure:
	printf("Issue reading fsl,fman-ports for MAC %s\n", dev->name);
	return -ENOENT;
}

static int fm_eth_remove(struct udevice *dev)
{
	return 0;
}

static const struct eth_ops fm_eth_ops = {
	.start = fm_eth_open,
	.send = fm_eth_send,
	.recv = fm_eth_recv,
	.free_pkt = fm_eth_free_pkt,
	.stop = fm_eth_halt,
};

static const struct udevice_id fm_eth_ids[] = {
#ifdef CONFIG_SYS_FMAN_V3
	{ .compatible = "fsl,fman-memac", .data = FM_MEMAC },
#else
	{ .compatible = "fsl,fman-dtsec", .data = FM_DTSEC },
	{ .compatible = "fsl,fman-xgec", .data = FM_TGEC },
#endif
	{}
};

U_BOOT_DRIVER(eth_fman) = {
	.name = "eth_fman",
	.id = UCLASS_ETH,
	.of_match = fm_eth_ids,
	.bind = fm_eth_bind,
	.probe = fm_eth_probe,
	.remove = fm_eth_remove,
	.ops = &fm_eth_ops,
	.priv_auto = sizeof(struct fm_eth),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};