// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
 *
 * (C) Copyright 2003
 * Ingo Assmus <ingo.assmus@keymile.com>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 rabeeh@galileo.co.il
 */

#include <common.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_ARCH_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ARCH_ORION5X)
#include <asm/arch/orion5x.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_MVGBE_PORTS
# define CONFIG_MVGBE_PORTS {0, 0}
#endif

#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
#define MVGBE_PGADR_REG 22

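/*
 * Note: MV_PHY_ADR_REQUEST is not a real PHY address. The SMI helpers below
 * treat a read/write with both the PHY address and the register offset set
 * to this value as a request to access the controller's own PHY address
 * register (regs->phyadr) instead of performing an MDIO transaction.
 */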
#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
static int smi_wait_ready(struct mvgbe_device *dmvgbe)
{
	int ret;

	ret = wait_for_bit_le32(&MVGBE_SMI_REG, MVGBE_PHY_SMI_BUSY_MASK, false,
				MVGBE_PHY_SMI_TIMEOUT_MS, false);
	if (ret) {
		printf("Error: SMI busy timeout\n");
		return ret;
	}

	return 0;
}

static int __mvgbe_mdio_read(struct mvgbe_device *dmvgbe, int phy_adr,
			     int devad, int reg_ofs)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;
	u16 data = 0;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns 16bit phy register value, or -EFAULT on error
 */
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
	struct mvgbe_device *dmvgbe = bus->priv;

	return __mvgbe_mdio_read(dmvgbe, phy_adr, devad, reg_ofs);
}

static int __mvgbe_mdio_write(struct mvgbe_device *dmvgbe, int phy_adr,
			      int devad, int reg_ofs, u16 data)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeded, -EFAULT on error
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct mvgbe_device *dmvgbe = bus->priv;

	return __mvgbe_mdio_write(dmvgbe, phy_adr, devad, reg_ofs, data);
}
#endif

/* Stop and check all queues */
static void stop_queue(u32 * qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		}
		while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
		     (((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
		     (param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - This function sets the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to function
 * parameters.
 * This function adds/removes MAC addresses from the port unicast address
 * table.
 *
 * @uc_nibble	Unicast MAC Address last nibble.
 * @option	0 = Add, 1 = remove address.
 *
 * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - This function sets the port Unicast address.
 */
static void port_uc_addr_set(struct mvgbe_device *dmvgbe, u8 *p_addr)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1))
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}

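/*
 * __mvgbe_init - Bring up one ethernet port
 *
 * Sets up the Rx descriptor ring, clears and unmasks the port interrupts,
 * programs the DRAM address decode windows and MAC filter tables, installs
 * the unicast MAC address, applies the port/SDMA configuration and finally
 * enables the port and its Rx queue.
 */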
static int __mvgbe_init(struct mvgbe_device *dmvgbe, u8 *enetaddr,
			const char *name)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(dmvgbe, enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		     (QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

	return 0;
}

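/*
 * __mvgbe_halt - Quiesce the port
 *
 * Disables the address decode windows, stops the Tx and Rx queues, disables
 * the port serial interface and masks all port interrupts.
 */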
static void __mvgbe_halt(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoders */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port is not reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);
}

static int mvgbe_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

	port_uc_addr_set(dev_get_priv(dev), pdata->enetaddr);

	return 0;
}

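/*
 * __mvgbe_send - Transmit one packet through the single Tx descriptor
 *
 * Buffers that are not 8-byte aligned are first copied into the driver's
 * aligned bounce buffer. The descriptor is then handed to the DMA and the
 * function busy-waits until the hardware releases ownership again.
 */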
static int __mvgbe_send(struct mvgbe_device *dmvgbe, void *dataptr,
			int datasize)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
				datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	};
	return 0;
}

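/*
 * __mvgbe_recv - Poll the current Rx descriptor for a received frame
 *
 * Waits (bounded) for the DMA to release the descriptor, validates the
 * frame, returns a pointer into the Rx buffer via @packetp and recycles the
 * descriptor back to the hardware before advancing to the next one.
 */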
static int __mvgbe_recv(struct mvgbe_device *dmvgbe, uchar **packetp)
{
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;
	unsigned char *data;
	int rx_bytes = 0;

	*packetp = NULL;

	/* wait until rx packet available or timeout */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * In case we received a packet without first/last bits on
	 * OR the error summary bit is on,
	 * the packet needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (net_process_received_packet)\n",
			__func__);

		data = (p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET);
		rx_bytes = (int)(p_rxdesc_curr->byte_cnt -
				 RX_BUF_OFFSET);

		*packetp = data;
	}
	/*
	 * free these descriptors and point next in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return rx_bytes;
}

#if defined(CONFIG_PHYLIB)
static struct phy_device *__mvgbe_phy_init(struct udevice *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
{
	struct phy_device *phydev;

	/* Set phy address of the port */
	miiphy_write(dev->name, MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST,
		     phyid);

	/* Make sure the selected PHY page is 0 before connecting */
	miiphy_write(dev->name, phyid, MVGBE_PGADR_REG, 0);

	phydev = phy_connect(bus, phyid, dev, phy_interface);
	if (!phydev) {
		printf("phy_connect failed\n");
		return NULL;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return phydev;
}
#endif /* CONFIG_PHYLIB */

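/*
 * mvgbe_alloc_buffers - Allocate the Rx descriptor ring, Rx buffers, the
 * aligned Tx bounce buffer and the single Tx descriptor, unwinding on error.
 */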
static int mvgbe_alloc_buffers(struct mvgbe_device *dmvgbe)
{
	dmvgbe->p_rxdesc = memalign(PKTALIGN,
				    MV_RXQ_DESC_ALIGNED_SIZE * RINGSZ + 1);
	if (!dmvgbe->p_rxdesc)
		goto error1;

	dmvgbe->p_rxbuf = memalign(PKTALIGN,
				   RINGSZ * PKTSIZE_ALIGN + 1);
	if (!dmvgbe->p_rxbuf)
		goto error2;

	dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
	if (!dmvgbe->p_aligned_txbuf)
		goto error3;

	dmvgbe->p_txdesc = memalign(PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
	if (!dmvgbe->p_txdesc)
		goto error4;

	return 0;

error4:
	free(dmvgbe->p_aligned_txbuf);
error3:
	free(dmvgbe->p_rxbuf);
error2:
	free(dmvgbe->p_rxdesc);
error1:
	return -ENOMEM;
}

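/*
 * A phyaddr above PHY_MAX_ADDR marks a fixed-link port; it is set that way
 * in mvgbe_of_to_plat() when the device tree carries a 'fixed-link' node.
 */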
static int mvgbe_port_is_fixed_link(struct mvgbe_device *dmvgbe)
{
	return dmvgbe->phyaddr > PHY_MAX_ADDR;
}

static int mvgbe_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	int ret;

	ret = __mvgbe_init(dmvgbe, pdata->enetaddr, dev->name);
	if (ret)
		return ret;

	if (!mvgbe_port_is_fixed_link(dmvgbe)) {
		dmvgbe->phydev = __mvgbe_phy_init(dev, dmvgbe->bus,
						  dmvgbe->phy_interface,
						  dmvgbe->phyaddr);
		if (!dmvgbe->phydev)
			return -ENODEV;
	}

	return 0;
}

static int mvgbe_send(struct udevice *dev, void *packet, int length)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_send(dmvgbe, packet, length);
}

static int mvgbe_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_recv(dmvgbe, packetp);
}

static void mvgbe_stop(struct udevice *dev)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	__mvgbe_halt(dmvgbe);
}

static int mvgbe_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	struct mii_dev *bus;
	int ret;

	ret = mvgbe_alloc_buffers(dmvgbe);
	if (ret)
		return ret;

	dmvgbe->regs = (void __iomem *)pdata->iobase;

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = dmvgbe;
	dmvgbe->bus = bus;

	ret = mdio_register(bus);
	if (ret < 0)
		return ret;

	return 0;
}

static const struct eth_ops mvgbe_ops = {
	.start		= mvgbe_start,
	.send		= mvgbe_send,
	.recv		= mvgbe_recv,
	.stop		= mvgbe_stop,
	.write_hwaddr	= mvgbe_write_hwaddr,
};

static int mvgbe_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev_of_offset(dev);
	int fl_node;
	int pnode;
	unsigned long addr;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;

	pnode = fdt_node_offset_by_compatible(blob, node,
					      "marvell,kirkwood-eth-port");

	/* Get phy-mode / phy_interface from DT */
	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		pdata->phy_interface = PHY_INTERFACE_MODE_GMII;

	dmvgbe->phy_interface = pdata->phy_interface;

	/* fetch 'fixed-link' property */
	fl_node = fdt_subnode_offset(blob, pnode, "fixed-link");
	if (fl_node != -FDT_ERR_NOTFOUND) {
		/* set phy_addr to invalid value for fixed link */
		dmvgbe->phyaddr = PHY_MAX_ADDR + 1;
		dmvgbe->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
		dmvgbe->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
	} else {
		/* Now read phyaddr from DT */
		addr = fdtdec_lookup_phandle(blob, pnode, "phy-handle");
		if (addr > 0)
			dmvgbe->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
	}

	return 0;
}

static const struct udevice_id mvgbe_ids[] = {
	{ .compatible = "marvell,kirkwood-eth" },
	{ }
};

U_BOOT_DRIVER(mvgbe) = {
	.name	= "mvgbe",
	.id	= UCLASS_ETH,
	.of_match	= mvgbe_ids,
	.of_to_plat	= mvgbe_of_to_plat,
	.probe	= mvgbe_probe,
	.ops	= &mvgbe_ops,
	.priv_auto	= sizeof(struct mvgbe_device),
	.plat_auto	= sizeof(struct eth_pdata),
};