// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 */

#include <clk.h>
#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <console.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <watchdog.h>
#include <asm/system.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <dm/device_compat.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK	0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK	0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK	0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK	23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK	18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK		0x00008000 /* End of frame. */
#define ZYNQ_GEM_RXBUF_SOF_MASK		0x00004000 /* Start of frame. */
#define ZYNQ_GEM_RXBUF_LEN_MASK		0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK	0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK		0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK		0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK	0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK	0x00008000 /* Last buffer */
#define ZYNQ_GEM_TXBUF_USED_MASK	0x80000000 /* Used by Hw */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK	0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK	0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK	0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK	0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100		0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000	0x00000400 /* 1Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN		0x00000002 /* Full Duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM		0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_SGMII_ENBL	0x08000000 /* SGMII Enable */
#define ZYNQ_GEM_NWCFG_PCS_SEL		0x00000800 /* PCS select */
#ifdef CONFIG_ARM64
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x00100000 /* Div pclk by 64, max 160MHz */
#else
#define ZYNQ_GEM_NWCFG_MDCCLKDIV	0x000c0000 /* Div pclk by 48, max 120MHz */
#endif

#ifdef CONFIG_ARM64
# define ZYNQ_GEM_DBUS_WIDTH	(1 << 21) /* 64 bit bus */
#else
# define ZYNQ_GEM_DBUS_WIDTH	(0 << 21) /* 32 bit bus */
#endif

#define ZYNQ_GEM_NWCFG_INIT		(ZYNQ_GEM_DBUS_WIDTH | \
					ZYNQ_GEM_NWCFG_FDEN | \
					ZYNQ_GEM_NWCFG_FSREM | \
					ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK	0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH		0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 Kb) */
#define ZYNQ_GEM_DMACR_RXSIZE		0x00000300
/* Use full configured addressable space (4 Kb) */
#define ZYNQ_GEM_DMACR_TXSIZE		0x00000400
/* Set with binary 00011000 to use 1536 byte (1 * max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF		0x00180000

#if defined(CONFIG_PHYS_64BIT)
# define ZYNQ_GEM_DMA_BUS_WIDTH		BIT(30) /* 64 bit bus */
#else
# define ZYNQ_GEM_DMA_BUS_WIDTH		(0 << 30) /* 32 bit bus */
#endif

#define ZYNQ_GEM_DMACR_INIT		(ZYNQ_GEM_DMACR_BLENGTH | \
					ZYNQ_GEM_DMACR_RXSIZE | \
					ZYNQ_GEM_DMACR_TXSIZE | \
					ZYNQ_GEM_DMACR_RXBUF | \
					ZYNQ_GEM_DMA_BUS_WIDTH)

#define ZYNQ_GEM_TSR_DONE		0x00000020 /* Tx done mask */

#define ZYNQ_GEM_PCS_CTL_ANEG_ENBL	0x1000

#define ZYNQ_GEM_DCFG_DBG6_DMA_64B	BIT(23)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG	1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *	0x1000: 10Mbps full duplex support
 *	0x0800: 10Mbps half duplex support
 *	0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK	0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK	0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED	0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN		0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10	2500000UL
#define ZYNQ_GEM_FREQUENCY_100	25000000UL
#define ZYNQ_GEM_FREQUENCY_1000	125000000UL

/* Device registers */
struct zynq_gem_regs {
	u32 nwctrl; /* 0x0 - Network Control reg */
	u32 nwcfg; /* 0x4 - Network Config reg */
	u32 nwsr; /* 0x8 - Network Status reg */
	u32 reserved1;
	u32 dmacr; /* 0x10 - DMA Control reg */
	u32 txsr; /* 0x14 - TX Status reg */
	u32 rxqbase; /* 0x18 - RX Q Base address reg */
	u32 txqbase; /* 0x1c - TX Q Base address reg */
	u32 rxsr; /* 0x20 - RX Status reg */
	u32 reserved2[2];
	u32 idr; /* 0x2c - Interrupt Disable reg */
	u32 reserved3;
	u32 phymntnc; /* 0x34 - PHY Maintenance reg */
	u32 reserved4[18];
	u32 hashl; /* 0x80 - Hash Low address reg */
	u32 hashh; /* 0x84 - Hash High address reg */
#define LADDR_LOW	0
#define LADDR_HIGH	1
	u32 laddr[4][LADDR_HIGH + 1]; /* 0x8c - Specific1 addr low/high reg */
	u32 match[4]; /* 0xa8 - Type ID1 Match reg */
	u32 reserved6[18];
#define STAT_SIZE	44
	u32 stat[STAT_SIZE]; /* 0x100 - Octets transmitted Low reg */
	u32 reserved9[20];
	u32 pcscntrl;
	u32 reserved12[36];
	u32 dcfg6; /* 0x294 Design config reg6 */
	u32 reserved7[106];
	u32 transmit_q1_ptr; /* 0x440 - Transmit priority queue 1 */
	u32 reserved8[15];
	u32 receive_q1_ptr; /* 0x480 - Receive priority queue 1 */
	u32 reserved10[17];
	u32 upper_txqbase; /* 0x4C8 - Upper tx_q base addr */
	u32 reserved11[2];
	u32 upper_rxqbase; /* 0x4D4 - Upper rx_q base addr */
};

/* BD descriptors */
struct emac_bd {
	u32 addr; /* Next descriptor pointer */
	u32 status;
#if defined(CONFIG_PHYS_64BIT)
	u32 addr_hi;
	u32 reserved;
#endif
};

/* Reduce the number of RX buffers if memory is limited */
#define RX_BUF 32
/* Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer BDs, so 1MB of BD space is enough.
 */
#define BD_SPACE	0x100000
/* BD separation space */
#define BD_SEPRN_SPACE	(RX_BUF * sizeof(struct emac_bd))

/* Setup the first free TX descriptor */
#define TX_FREE_DESC	2

/* Initialized, rxbd_current, rx_first_buf must be 0 after init */
struct zynq_gem_priv {
	struct emac_bd *tx_bd;
	struct emac_bd *rx_bd;
	char *rxbuffers;
	u32 rxbd_current;
	u32 rx_first_buf;
	int phyaddr;
	int init;
	struct zynq_gem_regs *iobase;
	struct zynq_gem_regs *mdiobase;
	phy_interface_t interface;
	struct phy_device *phydev;
	ofnode phy_of_node;
	struct mii_dev *bus;
	struct clk clk;
	u32 max_speed;
	bool int_pcs;
	bool dma_64bit;
};
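
/*
 * MDIO helpers: phy_setup_op() waits for the management interface to go
 * idle, programs the PHY maintenance register with the operation, PHY
 * address, register number and data, then waits for completion; for read
 * operations the result is read back from the same register.
 */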
static int phy_setup_op(struct zynq_gem_priv *priv, u32 phy_addr, u32 regnum,
			u32 op, u16 *data)
{
	u32 mgtcr;
	struct zynq_gem_regs *regs = priv->mdiobase;
	int err;

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	/* Construct mgtcr mask for the operation */
	mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
		(phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
		(regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

	/* Write mgtcr and wait for completion */
	writel(mgtcr, &regs->phymntnc);

	err = wait_for_bit_le32(&regs->nwsr, ZYNQ_GEM_NWSR_MDIOIDLE_MASK,
				true, 20000, false);
	if (err)
		return err;

	if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
		*data = readl(&regs->phymntnc);

	return 0;
}

static int phyread(struct zynq_gem_priv *priv, u32 phy_addr,
		   u32 regnum, u16 *val)
{
	int ret;

	ret = phy_setup_op(priv, phy_addr, regnum,
			   ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);

	if (!ret)
		debug("%s: phy_addr %d, regnum 0x%x, val 0x%x\n", __func__,
		      phy_addr, regnum, *val);

	return ret;
}

static int phywrite(struct zynq_gem_priv *priv, u32 phy_addr,
		    u32 regnum, u16 data)
{
	debug("%s: phy_addr %d, regnum 0x%x, data 0x%x\n", __func__, phy_addr,
	      regnum, data);

	return phy_setup_op(priv, phy_addr, regnum,
			    ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}

static int zynq_gem_setup_mac(struct udevice *dev)
{
	u32 i, macaddrlow, macaddrhigh;
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	/* Set the MAC bits [31:0] in BOT */
	macaddrlow = pdata->enetaddr[0];
	macaddrlow |= pdata->enetaddr[1] << 8;
	macaddrlow |= pdata->enetaddr[2] << 16;
	macaddrlow |= pdata->enetaddr[3] << 24;

	/* Set MAC bits [47:32] in TOP */
	macaddrhigh = pdata->enetaddr[4];
	macaddrhigh |= pdata->enetaddr[5] << 8;

	for (i = 0; i < 4; i++) {
		writel(0, &regs->laddr[i][LADDR_LOW]);
		writel(0, &regs->laddr[i][LADDR_HIGH]);
		/* Do not use MATCHx register */
		writel(0, &regs->match[i]);
	}

	writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
	writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

	return 0;
}

static int zynq_phy_init(struct udevice *dev)
{
	int ret;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	const u32 supported = SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;

	/* Enable only MDIO bus */
	writel(ZYNQ_GEM_NWCTRL_MDEN_MASK, &regs_mdio->nwctrl);

	priv->phydev = phy_connect(priv->bus, priv->phyaddr, dev,
				   priv->interface);
	if (!priv->phydev)
		return -ENODEV;

	if (priv->max_speed) {
		ret = phy_set_supported(priv->phydev, priv->max_speed);
		if (ret)
			return ret;
	}

	priv->phydev->supported &= supported | ADVERTISED_Pause |
				  ADVERTISED_Asym_Pause;

	priv->phydev->advertising = priv->phydev->supported;
	priv->phydev->node = priv->phy_of_node;

	return phy_config(priv->phydev);
}
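
/*
 * .start callback: on first use set up the RX buffer descriptors and DMA
 * configuration, then bring up the PHY, program the MAC speed and the
 * matching TX clock rate, and finally enable the receiver and transmitter.
 */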
static int zynq_gem_init(struct udevice *dev)
{
	u32 i, nwconfig;
	int ret;
	unsigned long clk_rate = 0;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct zynq_gem_regs *regs_mdio = priv->mdiobase;
	struct emac_bd *dummy_tx_bd = &priv->tx_bd[TX_FREE_DESC];
	struct emac_bd *dummy_rx_bd = &priv->tx_bd[TX_FREE_DESC + 2];

	if (readl(&regs->dcfg6) & ZYNQ_GEM_DCFG_DBG6_DMA_64B)
		priv->dma_64bit = true;
	else
		priv->dma_64bit = false;

#if defined(CONFIG_PHYS_64BIT)
	if (!priv->dma_64bit) {
		printf("ERR: %s: Using 64-bit DMA but HW doesn't support it\n",
		       __func__);
		return -EINVAL;
	}
#else
	if (priv->dma_64bit)
		debug("WARN: %s: Not using 64-bit dma even HW supports it\n",
		      __func__);
#endif

	if (!priv->init) {
		/* Disable all interrupts */
		writel(0xFFFFFFFF, &regs->idr);

		/* Disable the receiver & transmitter */
		writel(0, &regs->nwctrl);
		writel(0, &regs->txsr);
		writel(0, &regs->rxsr);
		writel(0, &regs->phymntnc);

		/* Clear the Hash registers for the mac address
		 * pointed by AddressPtr
		 */
		writel(0x0, &regs->hashl);
		/* Write bits [63:32] in TOP */
		writel(0x0, &regs->hashh);

		/* Clear all counters */
		for (i = 0; i < STAT_SIZE; i++)
			readl(&regs->stat[i]);

		/* Setup RxBD space */
		memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

		for (i = 0; i < RX_BUF; i++) {
			priv->rx_bd[i].status = 0xF0000000;
			priv->rx_bd[i].addr =
					(lower_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#if defined(CONFIG_PHYS_64BIT)
			priv->rx_bd[i].addr_hi =
					(upper_32_bits((ulong)(priv->rxbuffers)
							+ (i * PKTSIZE_ALIGN)));
#endif
		}
		/* WRAP bit to last BD */
		priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
		/* Write RxBDs to IP */
		writel(lower_32_bits((ulong)priv->rx_bd), &regs->rxqbase);
#if defined(CONFIG_PHYS_64BIT)
		writel(upper_32_bits((ulong)priv->rx_bd), &regs->upper_rxqbase);
#endif

		/* Setup for DMA Configuration register */
		writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

		/* Setup for Network Control register, MDIO, Rx and Tx enable */
		setbits_le32(&regs_mdio->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

		/* Disable the second priority queue */
		dummy_tx_bd->addr = 0;
#if defined(CONFIG_PHYS_64BIT)
		dummy_tx_bd->addr_hi = 0;
#endif
		dummy_tx_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
				ZYNQ_GEM_TXBUF_LAST_MASK|
				ZYNQ_GEM_TXBUF_USED_MASK;

		dummy_rx_bd->addr = ZYNQ_GEM_RXBUF_WRAP_MASK |
				ZYNQ_GEM_RXBUF_NEW_MASK;
#if defined(CONFIG_PHYS_64BIT)
		dummy_rx_bd->addr_hi = 0;
#endif
		dummy_rx_bd->status = 0;

		writel((ulong)dummy_tx_bd, &regs->transmit_q1_ptr);
		writel((ulong)dummy_rx_bd, &regs->receive_q1_ptr);

		priv->init++;
	}

	ret = phy_startup(priv->phydev);
	if (ret)
		return ret;

	if (!priv->phydev->link) {
		printf("%s: No link.\n", priv->phydev->dev->name);
		return -1;
	}

	nwconfig = ZYNQ_GEM_NWCFG_INIT;

	/*
	 * Set SGMII enable PCS selection only if internal PCS/PMA
	 * core is used and interface is SGMII.
	 */
	if (priv->interface == PHY_INTERFACE_MODE_SGMII &&
	    priv->int_pcs) {
		nwconfig |= ZYNQ_GEM_NWCFG_SGMII_ENBL |
			    ZYNQ_GEM_NWCFG_PCS_SEL;
#ifdef CONFIG_ARM64
		if (priv->phydev->phy_id != PHY_FIXED_ID)
			writel(readl(&regs->pcscntrl) | ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);
		else
			writel(readl(&regs->pcscntrl) & ~ZYNQ_GEM_PCS_CTL_ANEG_ENBL,
			       &regs->pcscntrl);
#endif
	}

	switch (priv->phydev->speed) {
	case SPEED_1000:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED1000,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_1000;
		break;
	case SPEED_100:
		writel(nwconfig | ZYNQ_GEM_NWCFG_SPEED100,
		       &regs->nwcfg);
		clk_rate = ZYNQ_GEM_FREQUENCY_100;
		break;
	case SPEED_10:
		clk_rate = ZYNQ_GEM_FREQUENCY_10;
		break;
	}

	ret = clk_set_rate(&priv->clk, clk_rate);
	if (IS_ERR_VALUE(ret) && ret != (unsigned long)-ENOSYS) {
		dev_err(dev, "failed to set tx clock rate\n");
		return ret;
	}

	ret = clk_enable(&priv->clk);
	if (ret && ret != -ENOSYS) {
		dev_err(dev, "failed to enable tx clock\n");
		return ret;
	}

	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
					ZYNQ_GEM_NWCTRL_TXEN_MASK);

	return 0;
}
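
/*
 * Transmit a single frame: TX BD 0 points at the packet and TX BD 1 is a
 * dummy descriptor terminating the chain. The frame is flushed from the
 * data cache before transmission is started, then the TX status register
 * is polled for completion.
 */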
static int zynq_gem_send(struct udevice *dev, void *ptr, int len)
{
	dma_addr_t addr;
	u32 size;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;
	struct emac_bd *current_bd = &priv->tx_bd[1];

	/* Setup Tx BD */
	memset(priv->tx_bd, 0, sizeof(struct emac_bd));

	priv->tx_bd->addr = lower_32_bits((ulong)ptr);
#if defined(CONFIG_PHYS_64BIT)
	priv->tx_bd->addr_hi = upper_32_bits((ulong)ptr);
#endif
	priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
			       ZYNQ_GEM_TXBUF_LAST_MASK;
	/* Dummy descriptor to mark it as the last in descriptor chain */
	current_bd->addr = 0x0;
#if defined(CONFIG_PHYS_64BIT)
	current_bd->addr_hi = 0x0;
#endif
	current_bd->status = ZYNQ_GEM_TXBUF_WRAP_MASK |
			     ZYNQ_GEM_TXBUF_LAST_MASK|
			     ZYNQ_GEM_TXBUF_USED_MASK;

	/* setup BD */
	writel(lower_32_bits((ulong)priv->tx_bd), &regs->txqbase);
#if defined(CONFIG_PHYS_64BIT)
	writel(upper_32_bits((ulong)priv->tx_bd), &regs->upper_txqbase);
#endif

	addr = (ulong)ptr;
	addr &= ~(ARCH_DMA_MINALIGN - 1);
	size = roundup(len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);
	barrier();

	/* Start transmit */
	setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

	/* Read TX BD status */
	if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
		printf("TX buffers exhausted in mid frame\n");

	return wait_for_bit_le32(&regs->txsr, ZYNQ_GEM_TSR_DONE,
				 true, 20000, true);
}

/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct udevice *dev, int flags, uchar **packetp)
{
	int frame_len;
	dma_addr_t addr;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];

	if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
		return -1;

	if (!(current_bd->status &
			(ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
		printf("GEM: SOF or EOF not set for last buffer received!\n");
		return -1;
	}

	frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
	if (!frame_len) {
		printf("%s: Zero size packet?\n", __func__);
		return -1;
	}

#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		     | ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	addr &= ~(ARCH_DMA_MINALIGN - 1);

	*packetp = (uchar *)(uintptr_t)addr;

	invalidate_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	return frame_len;
}
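
/*
 * Hand a received buffer back to the controller: clear the descriptor
 * ownership ("new") bit so the hardware can reuse it, flush the packet
 * buffer from the data cache and advance the RX descriptor index.
 */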
static int zynq_gem_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
	struct emac_bd *first_bd;
	dma_addr_t addr;

	if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK) {
		priv->rx_first_buf = priv->rxbd_current;
	} else {
		current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		current_bd->status = 0xF0000000; /* FIXME */
	}

	if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
		first_bd = &priv->rx_bd[priv->rx_first_buf];
		first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
		first_bd->status = 0xF0000000;
	}

	/* Flush the cache for the packet as well */
#if defined(CONFIG_PHYS_64BIT)
	addr = (dma_addr_t)((current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK)
		| ((dma_addr_t)current_bd->addr_hi << 32));
#else
	addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
#endif
	flush_dcache_range(addr, addr + roundup(PKTSIZE_ALIGN,
						ARCH_DMA_MINALIGN));
	barrier();

	if ((++priv->rxbd_current) >= RX_BUF)
		priv->rxbd_current = 0;

	return 0;
}

static void zynq_gem_halt(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct zynq_gem_regs *regs = priv->iobase;

	clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
						ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

__weak int zynq_board_read_rom_ethaddr(unsigned char *ethaddr)
{
	return -ENOSYS;
}

static int zynq_gem_read_rom_mac(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);

	if (!pdata)
		return -ENOSYS;

	return zynq_board_read_rom_ethaddr(pdata->enetaddr);
}

static int zynq_gem_miiphy_read(struct mii_dev *bus, int addr,
				int devad, int reg)
{
	struct zynq_gem_priv *priv = bus->priv;
	int ret;
	u16 val = 0;

	ret = phyread(priv, addr, reg, &val);
	debug("%s 0x%x, 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val, ret);
	return val;
}

static int zynq_gem_miiphy_write(struct mii_dev *bus, int addr, int devad,
				 int reg, u16 value)
{
	struct zynq_gem_priv *priv = bus->priv;

	debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, value);
	return phywrite(priv, addr, reg, value);
}
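
/*
 * Probe: allocate the RX packet buffers and the (uncached) buffer
 * descriptor area, acquire the TX clock, and register the MDIO bus
 * before initializing the PHY.
 */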
static int zynq_gem_probe(struct udevice *dev)
{
	void *bd_space;
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	int ret;

	/* Align rxbuffers to ARCH_DMA_MINALIGN */
	priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
	if (!priv->rxbuffers)
		return -ENOMEM;

	memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);
	ulong addr = (ulong)priv->rxbuffers;
	flush_dcache_range(addr, addr + roundup(RX_BUF * PKTSIZE_ALIGN, ARCH_DMA_MINALIGN));
	barrier();

	/* Align bd_space to MMU_SECTION_SHIFT */
	bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
	if (!bd_space) {
		ret = -ENOMEM;
		goto err1;
	}

	mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
					BD_SPACE, DCACHE_OFF);

	/* Initialize the bd spaces for tx and rx bd's */
	priv->tx_bd = (struct emac_bd *)bd_space;
	priv->rx_bd = (struct emac_bd *)((ulong)bd_space + BD_SEPRN_SPACE);

	ret = clk_get_by_name(dev, "tx_clk", &priv->clk);
	if (ret < 0) {
		dev_err(dev, "failed to get clock\n");
		goto err1;
	}

	priv->bus = mdio_alloc();
	priv->bus->read = zynq_gem_miiphy_read;
	priv->bus->write = zynq_gem_miiphy_write;
	priv->bus->priv = priv;

	ret = mdio_register_seq(priv->bus, dev->seq);
	if (ret)
		goto err2;

	ret = zynq_phy_init(dev);
	if (ret)
		goto err2;

	return ret;

err2:
	free(priv->rxbuffers);
err1:
	free(priv->tx_bd);
	return ret;
}

static int zynq_gem_remove(struct udevice *dev)
{
	struct zynq_gem_priv *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops zynq_gem_ops = {
	.start			= zynq_gem_init,
	.send			= zynq_gem_send,
	.recv			= zynq_gem_recv,
	.free_pkt		= zynq_gem_free_pkt,
	.stop			= zynq_gem_halt,
	.write_hwaddr		= zynq_gem_setup_mac,
	.read_rom_hwaddr	= zynq_gem_read_rom_mac,
};

static int zynq_gem_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct zynq_gem_priv *priv = dev_get_priv(dev);
	struct ofnode_phandle_args phandle_args;
	const char *phy_mode;

	pdata->iobase = (phys_addr_t)dev_read_addr(dev);
	priv->iobase = (struct zynq_gem_regs *)pdata->iobase;
	priv->mdiobase = priv->iobase;
	/* Hardcode for now */
	priv->phyaddr = -1;

	if (!dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0,
					&phandle_args)) {
		debug("phy-handle does exist %s\n", dev->name);
		priv->phyaddr = ofnode_read_u32_default(phandle_args.node,
							"reg", -1);
		priv->phy_of_node = phandle_args.node;
		priv->max_speed = ofnode_read_u32_default(phandle_args.node,
							  "max-speed",
							  SPEED_1000);
	}

	phy_mode = dev_read_prop(dev, "phy-mode", NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}
	priv->interface = pdata->phy_interface;

	priv->int_pcs = dev_read_bool(dev, "is-internal-pcspma");

	printf("\nZYNQ GEM: %lx, mdio bus %lx, phyaddr %d, interface %s\n",
	       (ulong)priv->iobase, (ulong)priv->mdiobase, priv->phyaddr,
	       phy_string_for_interface(priv->interface));

	return 0;
}

static const struct udevice_id zynq_gem_ids[] = {
	{ .compatible = "cdns,versal-gem" },
	{ .compatible = "cdns,zynqmp-gem" },
	{ .compatible = "cdns,zynq-gem" },
	{ .compatible = "cdns,gem" },
	{ }
};

U_BOOT_DRIVER(zynq_gem) = {
	.name			= "zynq_gem",
	.id			= UCLASS_ETH,
	.of_match		= zynq_gem_ids,
	.ofdata_to_platdata	= zynq_gem_ofdata_to_platdata,
	.probe			= zynq_gem_probe,
	.remove			= zynq_gem_remove,
	.ops			= &zynq_gem_ops,
	.priv_auto_alloc_size	= sizeof(struct zynq_gem_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};