// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016, NVIDIA CORPORATION.
 *
 * Portions based on U-Boot's rtl8169.c.
 */

/*
 * This driver supports the Synopsys Designware Ethernet QOS (Quality Of
 * Service) IP block. The IP supports multiple options for bus type, clocking/
 * reset structure, and feature list.
 *
 * The driver is written such that generic core logic is kept separate from
 * configuration-specific logic. Code that interacts with configuration-
 * specific resources is split out into separate functions to avoid polluting
 * common code. If/when this driver is enhanced to support multiple
 * configurations, the core code should be adapted to call all configuration-
 * specific functions through function pointers, with the definition of those
 * function pointers being supplied by struct udevice_id eqos_ids[]'s .data
 * field.
 *
 * The following configurations are currently supported:
 * tegra186:
 *    NVIDIA's Tegra186 chip. This configuration uses an AXI master/DMA bus, an
 *    AHB slave/register bus, contains the DMA, MTL, and MAC sub-blocks, and
 *    supports a single RGMII PHY. This configuration also has SW control over
 *    all clock and reset signals to the HW block.
 */
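
/*
 * As an illustrative sketch of the .data mechanism described above (the
 * actual match table lives further down in this file), each compatible
 * string would carry a pointer to its per-configuration struct eqos_config:
 *
 *	static const struct udevice_id eqos_ids[] = {
 *		{
 *			.compatible = "nvidia,tegra186-eqos",
 *			.data = (ulong)&eqos_tegra186_config
 *		},
 *		{ }
 *	};
 */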

#define LOG_CATEGORY UCLASS_ETH

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <miiphy.h>
#include <net.h>
#include <netdev.h>
#include <phy.h>
#include <reset.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <eth_phy.h>
#ifdef CONFIG_ARCH_IMX8M
#include <asm/arch/clock.h>
#include <asm/mach-imx/sys_proto.h>
#endif
#include <linux/bitops.h>
#include <linux/delay.h>

/* Core registers */

#define EQOS_MAC_REGS_BASE 0x000
struct eqos_mac_regs {
	uint32_t configuration;				/* 0x000 */
	uint32_t unused_004[(0x070 - 0x004) / 4];	/* 0x004 */
	uint32_t q0_tx_flow_ctrl;			/* 0x070 */
	uint32_t unused_070[(0x090 - 0x074) / 4];	/* 0x074 */
	uint32_t rx_flow_ctrl;				/* 0x090 */
	uint32_t unused_094;				/* 0x094 */
	uint32_t txq_prty_map0;				/* 0x098 */
	uint32_t unused_09c;				/* 0x09c */
	uint32_t rxq_ctrl0;				/* 0x0a0 */
	uint32_t unused_0a4;				/* 0x0a4 */
	uint32_t rxq_ctrl2;				/* 0x0a8 */
	uint32_t unused_0ac[(0x0dc - 0x0ac) / 4];	/* 0x0ac */
	uint32_t us_tic_counter;			/* 0x0dc */
	uint32_t unused_0e0[(0x11c - 0x0e0) / 4];	/* 0x0e0 */
	uint32_t hw_feature0;				/* 0x11c */
	uint32_t hw_feature1;				/* 0x120 */
	uint32_t hw_feature2;				/* 0x124 */
	uint32_t unused_128[(0x200 - 0x128) / 4];	/* 0x128 */
	uint32_t mdio_address;				/* 0x200 */
	uint32_t mdio_data;				/* 0x204 */
	uint32_t unused_208[(0x300 - 0x208) / 4];	/* 0x208 */
	uint32_t address0_high;				/* 0x300 */
	uint32_t address0_low;				/* 0x304 */
};

#define EQOS_MAC_CONFIGURATION_GPSLCE			BIT(23)
#define EQOS_MAC_CONFIGURATION_CST			BIT(21)
#define EQOS_MAC_CONFIGURATION_ACS			BIT(20)
#define EQOS_MAC_CONFIGURATION_WD			BIT(19)
#define EQOS_MAC_CONFIGURATION_JD			BIT(17)
#define EQOS_MAC_CONFIGURATION_JE			BIT(16)
#define EQOS_MAC_CONFIGURATION_PS			BIT(15)
#define EQOS_MAC_CONFIGURATION_FES			BIT(14)
#define EQOS_MAC_CONFIGURATION_DM			BIT(13)
#define EQOS_MAC_CONFIGURATION_LM			BIT(12)
#define EQOS_MAC_CONFIGURATION_TE			BIT(1)
#define EQOS_MAC_CONFIGURATION_RE			BIT(0)

#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT		16
#define EQOS_MAC_Q0_TX_FLOW_CTRL_PT_MASK		0xffff
#define EQOS_MAC_Q0_TX_FLOW_CTRL_TFE			BIT(1)

#define EQOS_MAC_RX_FLOW_CTRL_RFE			BIT(0)

#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT		0
#define EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK		0xff

#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT			0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK			3
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_NOT_ENABLED		0
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB		2
#define EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV		1

#define EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT			0
#define EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK			0xff

#define EQOS_MAC_HW_FEATURE0_MMCSEL_SHIFT		8
#define EQOS_MAC_HW_FEATURE0_HDSEL_SHIFT		2
#define EQOS_MAC_HW_FEATURE0_GMIISEL_SHIFT		1
#define EQOS_MAC_HW_FEATURE0_MIISEL_SHIFT		0

#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT		6
#define EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK		0x1f
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT		0
#define EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK		0x1f

#define EQOS_MAC_HW_FEATURE3_ASP_SHIFT			28
#define EQOS_MAC_HW_FEATURE3_ASP_MASK			0x3

#define EQOS_MAC_MDIO_ADDRESS_PA_SHIFT			21
#define EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT			16
#define EQOS_MAC_MDIO_ADDRESS_CR_SHIFT			8
#define EQOS_MAC_MDIO_ADDRESS_CR_20_35			2
#define EQOS_MAC_MDIO_ADDRESS_CR_250_300		5
#define EQOS_MAC_MDIO_ADDRESS_SKAP			BIT(4)
#define EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT			2
#define EQOS_MAC_MDIO_ADDRESS_GOC_READ			3
#define EQOS_MAC_MDIO_ADDRESS_GOC_WRITE			1
#define EQOS_MAC_MDIO_ADDRESS_C45E			BIT(1)
#define EQOS_MAC_MDIO_ADDRESS_GB			BIT(0)

#define EQOS_MAC_MDIO_DATA_GD_MASK			0xffff

#define EQOS_MTL_REGS_BASE 0xd00
struct eqos_mtl_regs {
	uint32_t txq0_operation_mode;			/* 0xd00 */
	uint32_t unused_d04;				/* 0xd04 */
	uint32_t txq0_debug;				/* 0xd08 */
	uint32_t unused_d0c[(0xd18 - 0xd0c) / 4];	/* 0xd0c */
	uint32_t txq0_quantum_weight;			/* 0xd18 */
	uint32_t unused_d1c[(0xd30 - 0xd1c) / 4];	/* 0xd1c */
	uint32_t rxq0_operation_mode;			/* 0xd30 */
	uint32_t unused_d34;				/* 0xd34 */
	uint32_t rxq0_debug;				/* 0xd38 */
};

#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT		16
#define EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK		0x1ff
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_MASK		3
#define EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED	2
#define EQOS_MTL_TXQ0_OPERATION_MODE_TSF		BIT(1)
#define EQOS_MTL_TXQ0_OPERATION_MODE_FTQ		BIT(0)

#define EQOS_MTL_TXQ0_DEBUG_TXQSTS			BIT(4)
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT		1
#define EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK			3

#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT		20
#define EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK		0x3ff
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT		14
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT		8
#define EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK		0x3f
#define EQOS_MTL_RXQ0_OPERATION_MODE_EHFC		BIT(7)
#define EQOS_MTL_RXQ0_OPERATION_MODE_RSF		BIT(5)

#define EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT			16
#define EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK			0x7fff
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT		4
#define EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK			3

#define EQOS_DMA_REGS_BASE 0x1000
struct eqos_dma_regs {
	uint32_t mode;					/* 0x1000 */
	uint32_t sysbus_mode;				/* 0x1004 */
	uint32_t unused_1008[(0x1100 - 0x1008) / 4];	/* 0x1008 */
	uint32_t ch0_control;				/* 0x1100 */
	uint32_t ch0_tx_control;			/* 0x1104 */
	uint32_t ch0_rx_control;			/* 0x1108 */
	uint32_t unused_110c;				/* 0x110c */
	uint32_t ch0_txdesc_list_haddress;		/* 0x1110 */
	uint32_t ch0_txdesc_list_address;		/* 0x1114 */
	uint32_t ch0_rxdesc_list_haddress;		/* 0x1118 */
	uint32_t ch0_rxdesc_list_address;		/* 0x111c */
	uint32_t ch0_txdesc_tail_pointer;		/* 0x1120 */
	uint32_t unused_1124;				/* 0x1124 */
	uint32_t ch0_rxdesc_tail_pointer;		/* 0x1128 */
	uint32_t ch0_txdesc_ring_length;		/* 0x112c */
	uint32_t ch0_rxdesc_ring_length;		/* 0x1130 */
};

#define EQOS_DMA_MODE_SWR				BIT(0)

#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT		16
#define EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_MASK		0xf
#define EQOS_DMA_SYSBUS_MODE_EAME			BIT(11)
#define EQOS_DMA_SYSBUS_MODE_BLEN16			BIT(3)
#define EQOS_DMA_SYSBUS_MODE_BLEN8			BIT(2)
#define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)

#define EQOS_DMA_CH0_CONTROL_DSL_SHIFT			18
#define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)

#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
#define EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK		0x3f
#define EQOS_DMA_CH0_TX_CONTROL_OSP			BIT(4)
#define EQOS_DMA_CH0_TX_CONTROL_ST			BIT(0)

#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT		16
#define EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK		0x3f
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT		1
#define EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK		0x3fff
#define EQOS_DMA_CH0_RX_CONTROL_SR			BIT(0)

/* These registers are Tegra186-specific */
#define EQOS_TEGRA186_REGS_BASE 0x8800
struct eqos_tegra186_regs {
	uint32_t sdmemcomppadctrl;			/* 0x8800 */
	uint32_t auto_cal_config;			/* 0x8804 */
	uint32_t unused_8808;				/* 0x8808 */
	uint32_t auto_cal_status;			/* 0x880c */
};

#define EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD	BIT(31)

#define EQOS_AUTO_CAL_CONFIG_START			BIT(31)
#define EQOS_AUTO_CAL_CONFIG_ENABLE			BIT(29)

#define EQOS_AUTO_CAL_STATUS_ACTIVE			BIT(31)

/* Descriptors */
#define EQOS_DESCRIPTORS_TX	4
#define EQOS_DESCRIPTORS_RX	4
#define EQOS_DESCRIPTORS_NUM	(EQOS_DESCRIPTORS_TX + EQOS_DESCRIPTORS_RX)
#define EQOS_BUFFER_ALIGN	ARCH_DMA_MINALIGN
#define EQOS_MAX_PACKET_SIZE	ALIGN(1568, ARCH_DMA_MINALIGN)
#define EQOS_RX_BUFFER_SIZE	(EQOS_DESCRIPTORS_RX * EQOS_MAX_PACKET_SIZE)

struct eqos_desc {
	u32 des0;
	u32 des1;
	u32 des2;
	u32 des3;
};

#define EQOS_DESC3_OWN		BIT(31)
#define EQOS_DESC3_FD		BIT(29)
#define EQOS_DESC3_LD		BIT(28)
#define EQOS_DESC3_BUF1V	BIT(24)

#define EQOS_AXI_WIDTH_32	4
#define EQOS_AXI_WIDTH_64	8
#define EQOS_AXI_WIDTH_128	16

struct eqos_config {
	bool reg_access_always_ok;
	int mdio_wait;
	int swr_wait;
	int config_mac;
	int config_mac_mdio;
	unsigned int axi_bus_width;
	phy_interface_t (*interface)(struct udevice *dev);
	struct eqos_ops *ops;
};

struct eqos_ops {
	void (*eqos_inval_desc)(void *desc);
	void (*eqos_flush_desc)(void *desc);
	void (*eqos_inval_buffer)(void *buf, size_t size);
	void (*eqos_flush_buffer)(void *buf, size_t size);
	int (*eqos_probe_resources)(struct udevice *dev);
	int (*eqos_remove_resources)(struct udevice *dev);
	int (*eqos_stop_resets)(struct udevice *dev);
	int (*eqos_start_resets)(struct udevice *dev);
	int (*eqos_stop_clks)(struct udevice *dev);
	int (*eqos_start_clks)(struct udevice *dev);
	int (*eqos_calibrate_pads)(struct udevice *dev);
	int (*eqos_disable_calibration)(struct udevice *dev);
	int (*eqos_set_tx_clk_speed)(struct udevice *dev);
	ulong (*eqos_get_tick_clk_rate)(struct udevice *dev);
};

struct eqos_priv {
	struct udevice *dev;
	const struct eqos_config *config;
	fdt_addr_t regs;
	struct eqos_mac_regs *mac_regs;
	struct eqos_mtl_regs *mtl_regs;
	struct eqos_dma_regs *dma_regs;
	struct eqos_tegra186_regs *tegra186_regs;
	struct reset_ctl reset_ctl;
	struct gpio_desc phy_reset_gpio;
	struct clk clk_master_bus;
	struct clk clk_rx;
	struct clk clk_ptp_ref;
	struct clk clk_tx;
	struct clk clk_ck;
	struct clk clk_slave_bus;
	struct mii_dev *mii;
	struct phy_device *phy;
	u32 max_speed;
	void *descs;
	int tx_desc_idx, rx_desc_idx;
	unsigned int desc_size;
	void *tx_dma_buf;
	void *rx_dma_buf;
	void *rx_pkt;
	bool started;
	bool reg_access_ok;
	bool clk_ck_enabled;
};

/*
 * TX and RX descriptors are 16 bytes. This causes problems with the cache
 * maintenance on CPUs where the cache-line size exceeds the size of these
 * descriptors. What will happen is that when the driver receives a packet
 * it will be immediately requeued for the hardware to reuse. The CPU will
 * therefore need to flush the cache-line containing the descriptor, which
 * will cause all other descriptors in the same cache-line to be flushed
 * along with it. If one of those descriptors had been written to by the
 * device those changes (and the associated packet) will be lost.
 *
 * To work around this, we make use of non-cached memory if available. If
 * descriptors are mapped uncached there's no need to manually flush them
 * or invalidate them.
 *
 * Note that this only applies to descriptors. The packet data buffers do
 * not have the same constraints since they are 1536 bytes large, so they
 * are unlikely to share cache-lines.
 */
static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
{
	eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
				(unsigned int)ARCH_DMA_MINALIGN);

	return memalign(eqos->desc_size, num * eqos->desc_size);
}
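
/*
 * Worked example (illustrative, assuming 64-byte cache lines): the 16-byte
 * struct eqos_desc is padded so that desc_size == ALIGN(16, 64) == 64, and
 * the 4 TX + 4 RX descriptors occupy 512 bytes. Each descriptor then owns a
 * whole cache-line, so flushing or invalidating one can never clobber the
 * state of a neighbouring descriptor.
 */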

static void eqos_free_descs(void *descs)
{
	free(descs);
}

static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
				       unsigned int num, bool rx)
{
	return eqos->descs +
		((rx ? EQOS_DESCRIPTORS_TX : 0) + num) * eqos->desc_size;
}
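
/*
 * Illustrative layout of the eqos->descs block: indices 0..3 hold the TX
 * ring and indices 4..7 the RX ring, so eqos_get_desc(eqos, n, true) maps
 * RX slot n to block index EQOS_DESCRIPTORS_TX + n.
 */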

static void eqos_inval_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_desc_generic(void *desc)
{
	unsigned long start = (unsigned long)desc;
	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
				  ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static void eqos_inval_buffer_tegra186(void *buf, size_t size)
{
	unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1);
	unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_inval_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}

static void eqos_flush_buffer_tegra186(void *buf, size_t size)
{
	flush_cache((unsigned long)buf, size);
}

static void eqos_flush_buffer_generic(void *buf, size_t size)
{
	unsigned long start = rounddown((unsigned long)buf, ARCH_DMA_MINALIGN);
	unsigned long end = roundup((unsigned long)buf + size,
				    ARCH_DMA_MINALIGN);

	flush_dcache_range(start, end);
}

static int eqos_mdio_wait_idle(struct eqos_priv *eqos)
{
	return wait_for_bit_le32(&eqos->mac_regs->mdio_address,
				 EQOS_MAC_MDIO_ADDRESS_GB, false,
				 1000000, true);
}

static int eqos_mdio_read(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			  int mdio_reg)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d):\n", __func__, eqos->dev, mdio_addr,
	      mdio_reg);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
		EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
		(eqos->config->config_mac_mdio <<
		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
		(EQOS_MAC_MDIO_ADDRESS_GOC_READ <<
		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
		EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO read didn't complete");
		return ret;
	}

	val = readl(&eqos->mac_regs->mdio_data);
	val &= EQOS_MAC_MDIO_DATA_GD_MASK;

	debug("%s: val=%x\n", __func__, val);

	return val;
}

static int eqos_mdio_write(struct mii_dev *bus, int mdio_addr, int mdio_devad,
			   int mdio_reg, u16 mdio_val)
{
	struct eqos_priv *eqos = bus->priv;
	u32 val;
	int ret;

	debug("%s(dev=%p, addr=%x, reg=%d, val=%x):\n", __func__, eqos->dev,
	      mdio_addr, mdio_reg, mdio_val);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO not idle at entry");
		return ret;
	}

	writel(mdio_val, &eqos->mac_regs->mdio_data);

	val = readl(&eqos->mac_regs->mdio_address);
	val &= EQOS_MAC_MDIO_ADDRESS_SKAP |
		EQOS_MAC_MDIO_ADDRESS_C45E;
	val |= (mdio_addr << EQOS_MAC_MDIO_ADDRESS_PA_SHIFT) |
		(mdio_reg << EQOS_MAC_MDIO_ADDRESS_RDA_SHIFT) |
		(eqos->config->config_mac_mdio <<
		 EQOS_MAC_MDIO_ADDRESS_CR_SHIFT) |
		(EQOS_MAC_MDIO_ADDRESS_GOC_WRITE <<
		 EQOS_MAC_MDIO_ADDRESS_GOC_SHIFT) |
		EQOS_MAC_MDIO_ADDRESS_GB;
	writel(val, &eqos->mac_regs->mdio_address);

	udelay(eqos->config->mdio_wait);

	ret = eqos_mdio_wait_idle(eqos);
	if (ret) {
		pr_err("MDIO write didn't complete");
		return ret;
	}

	return 0;
}

static int eqos_start_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_slave_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_slave_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err_disable_clk_slave_bus;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_ptp_ref);
	if (ret < 0) {
		pr_err("clk_enable(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	ret = clk_set_rate(&eqos->clk_ptp_ref, 125 * 1000 * 1000);
	if (ret < 0) {
		pr_err("clk_set_rate(clk_ptp_ref) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_ptp_ref;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_ptp_ref:
	clk_disable(&eqos->clk_ptp_ref);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err_disable_clk_slave_bus:
	clk_disable(&eqos->clk_slave_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_start_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = clk_enable(&eqos->clk_master_bus);
	if (ret < 0) {
		pr_err("clk_enable(clk_master_bus) failed: %d", ret);
		goto err;
	}

	ret = clk_enable(&eqos->clk_rx);
	if (ret < 0) {
		pr_err("clk_enable(clk_rx) failed: %d", ret);
		goto err_disable_clk_master_bus;
	}

	ret = clk_enable(&eqos->clk_tx);
	if (ret < 0) {
		pr_err("clk_enable(clk_tx) failed: %d", ret);
		goto err_disable_clk_rx;
	}

	if (clk_valid(&eqos->clk_ck) && !eqos->clk_ck_enabled) {
		ret = clk_enable(&eqos->clk_ck);
		if (ret < 0) {
			pr_err("clk_enable(clk_ck) failed: %d", ret);
			goto err_disable_clk_tx;
		}
		eqos->clk_ck_enabled = true;
	}
#endif

	debug("%s: OK\n", __func__);
	return 0;

#ifdef CONFIG_CLK
err_disable_clk_tx:
	clk_disable(&eqos->clk_tx);
err_disable_clk_rx:
	clk_disable(&eqos->clk_rx);
err_disable_clk_master_bus:
	clk_disable(&eqos->clk_master_bus);
err:
	debug("%s: FAILED: %d\n", __func__, ret);
	return ret;
#endif
}

static int eqos_stop_clks_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_ptp_ref);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
	clk_disable(&eqos->clk_slave_bus);
#endif

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_clks_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clk_disable(&eqos->clk_tx);
	clk_disable(&eqos->clk_rx);
	clk_disable(&eqos->clk_master_bus);
#endif

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_start_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 1);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, assert) failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = dm_gpio_set_value(&eqos->phy_reset_gpio, 0);
	if (ret < 0) {
		pr_err("dm_gpio_set_value(phy_reset, deassert) failed: %d", ret);
		return ret;
	}

	ret = reset_assert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_assert() failed: %d", ret);
		return ret;
	}

	udelay(2);

	ret = reset_deassert(&eqos->reset_ctl);
	if (ret < 0) {
		pr_err("reset_deassert() failed: %d", ret);
		return ret;
	}

	debug("%s: OK\n", __func__);
	return 0;
}

static int eqos_stop_resets_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	reset_assert(&eqos->reset_ctl);
	dm_gpio_set_value(&eqos->phy_reset_gpio, 1);

	return 0;
}

static int eqos_calibrate_pads_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	udelay(1);

	setbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_START | EQOS_AUTO_CAL_CONFIG_ENABLE);

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, true, 10, false);
	if (ret) {
		pr_err("calibrate didn't start");
		goto failed;
	}

	ret = wait_for_bit_le32(&eqos->tegra186_regs->auto_cal_status,
				EQOS_AUTO_CAL_STATUS_ACTIVE, false, 10, false);
	if (ret) {
		pr_err("calibrate didn't finish");
		goto failed;
	}

	ret = 0;

failed:
	clrbits_le32(&eqos->tegra186_regs->sdmemcomppadctrl,
		     EQOS_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD);

	debug("%s: returns %d\n", __func__, ret);

	return ret;
}

static int eqos_disable_calibration_tegra186(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->tegra186_regs->auto_cal_config,
		     EQOS_AUTO_CAL_CONFIG_ENABLE);

	return 0;
}

static ulong eqos_get_tick_clk_rate_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_slave_bus);
#else
	return 0;
#endif
}

static ulong eqos_get_tick_clk_rate_stm32(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);

	return clk_get_rate(&eqos->clk_master_bus);
#else
	return 0;
#endif
}

__weak u32 imx_get_eqos_csr_clk(void)
{
	return 100 * 1000000;
}

__weak int imx_eqos_txclk_set_rate(unsigned long rate)
{
	return 0;
}

static ulong eqos_get_tick_clk_rate_imx(struct udevice *dev)
{
	return imx_get_eqos_csr_clk();
}

static int eqos_set_full_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	return 0;
}

static int eqos_set_half_duplex(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration, EQOS_MAC_CONFIGURATION_DM);

	/* WAR: Flush TX queue when switching to half-duplex */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_FTQ);

	return 0;
}

static int eqos_set_gmii_speed(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_100(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_PS | EQOS_MAC_CONFIGURATION_FES);

	return 0;
}

static int eqos_set_mii_speed_10(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);

	debug("%s(dev=%p):\n", __func__, dev);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_FES, EQOS_MAC_CONFIGURATION_PS);

	return 0;
}

static int eqos_set_tx_clk_speed_tegra186(struct udevice *dev)
{
#ifdef CONFIG_CLK
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = clk_set_rate(&eqos->clk_tx, rate);
	if (ret < 0) {
		pr_err("clk_set_rate(tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}
#endif

	return 0;
}

static int eqos_set_tx_clk_speed_imx(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	ulong rate;
	int ret;

	debug("%s(dev=%p):\n", __func__, dev);

	switch (eqos->phy->speed) {
	case SPEED_1000:
		rate = 125 * 1000 * 1000;
		break;
	case SPEED_100:
		rate = 25 * 1000 * 1000;
		break;
	case SPEED_10:
		rate = 2.5 * 1000 * 1000;
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}

	ret = imx_eqos_txclk_set_rate(rate);
	if (ret < 0) {
		pr_err("imx (tx_clk, %lu) failed: %d", rate, ret);
		return ret;
	}

	return 0;
}

static int eqos_adjust_link(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret;
	bool en_calibration;

	debug("%s(dev=%p):\n", __func__, dev);

	if (eqos->phy->duplex)
		ret = eqos_set_full_duplex(dev);
	else
		ret = eqos_set_half_duplex(dev);
	if (ret < 0) {
		pr_err("eqos_set_*_duplex() failed: %d", ret);
		return ret;
	}

	switch (eqos->phy->speed) {
	case SPEED_1000:
		en_calibration = true;
		ret = eqos_set_gmii_speed(dev);
		break;
	case SPEED_100:
		en_calibration = true;
		ret = eqos_set_mii_speed_100(dev);
		break;
	case SPEED_10:
		en_calibration = false;
		ret = eqos_set_mii_speed_10(dev);
		break;
	default:
		pr_err("invalid speed %d", eqos->phy->speed);
		return -EINVAL;
	}
	if (ret < 0) {
		pr_err("eqos_set_*mii_speed*() failed: %d", ret);
		return ret;
	}

	if (en_calibration) {
		ret = eqos->config->ops->eqos_calibrate_pads(dev);
		if (ret < 0) {
			pr_err("eqos_calibrate_pads() failed: %d", ret);
			return ret;
		}
	} else {
		ret = eqos->config->ops->eqos_disable_calibration(dev);
		if (ret < 0) {
			pr_err("eqos_disable_calibration() failed: %d", ret);
			return ret;
		}
	}
	ret = eqos->config->ops->eqos_set_tx_clk_speed(dev);
	if (ret < 0) {
		pr_err("eqos_set_tx_clk_speed() failed: %d", ret);
		return ret;
	}

	return 0;
}

static int eqos_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct eqos_priv *eqos = dev_get_priv(dev);
	uint32_t val;

	/*
	 * This function may be called before start() or after stop(). At that
	 * time, on at least some configurations of the EQoS HW, all clocks to
	 * the EQoS HW block will be stopped, and a reset signal applied. If
	 * any register access is attempted in this state, bus timeouts or CPU
	 * hangs may occur. This check prevents that.
	 *
	 * A simple solution to this problem would be to not implement
	 * write_hwaddr(), since start() always writes the MAC address into HW
	 * anyway. However, it is desirable to implement write_hwaddr() to
	 * support the case of SW that runs subsequent to U-Boot which expects
	 * the MAC address to already be programmed into the EQoS registers,
	 * which must happen irrespective of whether the U-Boot user (or
	 * scripts) actually made use of the EQoS device, and hence
	 * irrespective of whether start() was ever called.
	 *
	 * Note that this requirement by subsequent SW is not valid for
	 * Tegra186, and is likely not valid for any non-PCI instantiation of
	 * the EQoS HW block. This function is implemented solely as
	 * future-proofing with the expectation the driver will eventually be
	 * ported to some system where the expectation above is true.
	 */
	if (!eqos->config->reg_access_always_ok && !eqos->reg_access_ok)
		return 0;

	/* Update the MAC address */
	val = (plat->enetaddr[5] << 8) |
		(plat->enetaddr[4]);
	writel(val, &eqos->mac_regs->address0_high);
	val = (plat->enetaddr[3] << 24) |
		(plat->enetaddr[2] << 16) |
		(plat->enetaddr[1] << 8) |
		(plat->enetaddr[0]);
	writel(val, &eqos->mac_regs->address0_low);

	return 0;
}
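
/*
 * Worked example (illustrative): for enetaddr 00:11:22:33:44:55 the code
 * above programs address0_high = 0x5544 and address0_low = 0x33221100,
 * i.e. the address registers hold the MAC little-endian, byte 0 in the
 * least-significant position of address0_low.
 */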

static int eqos_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

#ifdef CONFIG_ARCH_IMX8M
	imx_get_mac_from_fuse(dev_seq(dev), pdata->enetaddr);
#endif
	return !is_valid_ethaddr(pdata->enetaddr);
}

static int eqos_start(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int ret, i;
	ulong rate;
	u32 val, tx_fifo_sz, rx_fifo_sz, tqs, rqs, pbl;
	ulong last_rx_desc;
	ulong desc_pad;

	debug("%s(dev=%p):\n", __func__, dev);

	eqos->tx_desc_idx = 0;
	eqos->rx_desc_idx = 0;

	ret = eqos->config->ops->eqos_start_clks(dev);
	if (ret < 0) {
		pr_err("eqos_start_clks() failed: %d", ret);
		goto err;
	}

	ret = eqos->config->ops->eqos_start_resets(dev);
	if (ret < 0) {
		pr_err("eqos_start_resets() failed: %d", ret);
		goto err_stop_clks;
	}

	udelay(10);

	eqos->reg_access_ok = true;

	ret = wait_for_bit_le32(&eqos->dma_regs->mode,
				EQOS_DMA_MODE_SWR, false,
				eqos->config->swr_wait, false);
	if (ret) {
		pr_err("EQOS_DMA_MODE_SWR stuck");
		goto err_stop_resets;
	}

	ret = eqos->config->ops->eqos_calibrate_pads(dev);
	if (ret < 0) {
		pr_err("eqos_calibrate_pads() failed: %d", ret);
		goto err_stop_resets;
	}
	rate = eqos->config->ops->eqos_get_tick_clk_rate(dev);

	val = (rate / 1000000) - 1;
	writel(val, &eqos->mac_regs->us_tic_counter);
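	/*
	 * E.g. (illustrative) a 125 MHz tick clock programs
	 * us_tic_counter = 125000000 / 1000000 - 1 = 124, so the MAC's
	 * one-microsecond tick spans 125 clock cycles.
	 */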

	/*
	 * If the PHY was already connected and configured, there is no
	 * need to reconnect/reconfigure it again.
	 */
	if (!eqos->phy) {
		int addr = -1;
#ifdef CONFIG_DM_ETH_PHY
		addr = eth_phy_get_addr(dev);
#endif
#ifdef DWC_NET_PHYADDR
		addr = DWC_NET_PHYADDR;
#endif
		eqos->phy = phy_connect(eqos->mii, addr, dev,
					eqos->config->interface(dev));
		if (!eqos->phy) {
			pr_err("phy_connect() failed");
			goto err_stop_resets;
		}

		if (eqos->max_speed) {
			ret = phy_set_supported(eqos->phy, eqos->max_speed);
			if (ret) {
				pr_err("phy_set_supported() failed: %d", ret);
				goto err_shutdown_phy;
			}
		}

		ret = phy_config(eqos->phy);
		if (ret < 0) {
			pr_err("phy_config() failed: %d", ret);
			goto err_shutdown_phy;
		}
	}

	ret = phy_startup(eqos->phy);
	if (ret < 0) {
		pr_err("phy_startup() failed: %d", ret);
		goto err_shutdown_phy;
	}

	if (!eqos->phy->link) {
		pr_err("No link");
		goto err_shutdown_phy;
	}

	ret = eqos_adjust_link(dev);
	if (ret < 0) {
		pr_err("eqos_adjust_link() failed: %d", ret);
		goto err_shutdown_phy;
	}

	/* Configure MTL */

	/* Enable Store and Forward mode for TX */
	/* Program Tx operating mode */
	setbits_le32(&eqos->mtl_regs->txq0_operation_mode,
		     EQOS_MTL_TXQ0_OPERATION_MODE_TSF |
		     (EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_ENABLED <<
		      EQOS_MTL_TXQ0_OPERATION_MODE_TXQEN_SHIFT));

	/* Transmit Queue weight */
	writel(0x10, &eqos->mtl_regs->txq0_quantum_weight);

	/* Enable Store and Forward mode for RX, since jumbo frames are not used */
	setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
		     EQOS_MTL_RXQ0_OPERATION_MODE_RSF);

	/* Transmit/Receive queue fifo size; use all RAM for 1 queue */
	val = readl(&eqos->mac_regs->hw_feature1);
	tx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_TXFIFOSIZE_MASK;
	rx_fifo_sz = (val >> EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_SHIFT) &
		EQOS_MAC_HW_FEATURE1_RXFIFOSIZE_MASK;

	/*
	 * r/tx_fifo_sz is encoded as log2(n / 128). Undo that by shifting.
	 * r/tqs is encoded as (n / 256) - 1.
	 */
	tqs = (128 << tx_fifo_sz) / 256 - 1;
	rqs = (128 << rx_fifo_sz) / 256 - 1;
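	/*
	 * Worked example (illustrative): a hw_feature1 field value of 7
	 * encodes a FIFO of 128 << 7 == 16384 bytes, which is programmed
	 * as tqs/rqs == 16384 / 256 - 1 == 63.
	 */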

	clrsetbits_le32(&eqos->mtl_regs->txq0_operation_mode,
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_MASK <<
			EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT,
			tqs << EQOS_MTL_TXQ0_OPERATION_MODE_TQS_SHIFT);
	clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_MASK <<
			EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT,
			rqs << EQOS_MTL_RXQ0_OPERATION_MODE_RQS_SHIFT);

	/* Flow control used only if each channel gets 4KB or more FIFO */
	if (rqs >= ((4096 / 256) - 1)) {
		u32 rfd, rfa;

		setbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
			     EQOS_MTL_RXQ0_OPERATION_MODE_EHFC);

		/*
		 * Set Threshold for Activating Flow Control space for min 2
		 * frames ie, (1500 * 1) = 1500 bytes.
		 *
		 * Set Threshold for Deactivating Flow Control for space of
		 * min 1 frame (frame size 1500bytes) in receive fifo
		 */
		if (rqs == ((4096 / 256) - 1)) {
			/*
			 * This violates the above formula because of FIFO size
			 * limit therefore overflow may occur in spite of this.
			 */
			rfd = 0x3;	/* Full-3K */
			rfa = 0x1;	/* Full-1.5K */
		} else if (rqs == ((8192 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0xa;	/* Full-6K */
		} else if (rqs == ((16384 / 256) - 1)) {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x12;	/* Full-10K */
		} else {
			rfd = 0x6;	/* Full-4K */
			rfa = 0x1E;	/* Full-16K */
		}

		clrsetbits_le32(&eqos->mtl_regs->rxq0_operation_mode,
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFD_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(EQOS_MTL_RXQ0_OPERATION_MODE_RFA_MASK <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT),
				(rfd <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFD_SHIFT) |
				(rfa <<
				 EQOS_MTL_RXQ0_OPERATION_MODE_RFA_SHIFT));
	}

	/* Configure MAC */

	clrsetbits_le32(&eqos->mac_regs->rxq_ctrl0,
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_MASK <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT,
			eqos->config->config_mac <<
			EQOS_MAC_RXQ_CTRL0_RXQ0EN_SHIFT);

	/* Multicast and Broadcast Queue Enable */
	setbits_le32(&eqos->mac_regs->unused_0a4,
		     0x00100000);
	/* Enable promiscuous mode */
	setbits_le32(&eqos->mac_regs->unused_004[1],
		     0x1);

	/* Set TX flow control parameters */
	/* Set Pause Time */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     0xffff << EQOS_MAC_Q0_TX_FLOW_CTRL_PT_SHIFT);
	/* Assign priority for TX flow control */
	clrbits_le32(&eqos->mac_regs->txq_prty_map0,
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_MASK <<
		     EQOS_MAC_TXQ_PRTY_MAP0_PSTQ0_SHIFT);
	/* Assign priority for RX flow control */
	clrbits_le32(&eqos->mac_regs->rxq_ctrl2,
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_MASK <<
		     EQOS_MAC_RXQ_CTRL2_PSRQ0_SHIFT);
	/* Enable flow control */
	setbits_le32(&eqos->mac_regs->q0_tx_flow_ctrl,
		     EQOS_MAC_Q0_TX_FLOW_CTRL_TFE);
	setbits_le32(&eqos->mac_regs->rx_flow_ctrl,
		     EQOS_MAC_RX_FLOW_CTRL_RFE);

	clrsetbits_le32(&eqos->mac_regs->configuration,
			EQOS_MAC_CONFIGURATION_GPSLCE |
			EQOS_MAC_CONFIGURATION_WD |
			EQOS_MAC_CONFIGURATION_JD |
			EQOS_MAC_CONFIGURATION_JE,
			EQOS_MAC_CONFIGURATION_CST |
			EQOS_MAC_CONFIGURATION_ACS);

	eqos_write_hwaddr(dev);

	/* Configure DMA */

	/* Enable OSP mode */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_OSP);

	/* RX buffer size. Must be a multiple of bus width */
	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT,
			EQOS_MAX_PACKET_SIZE <<
			EQOS_DMA_CH0_RX_CONTROL_RBSZ_SHIFT);

	desc_pad = (eqos->desc_size - sizeof(struct eqos_desc)) /
		   eqos->config->axi_bus_width;

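	/*
	 * Worked example (illustrative): with 64-byte cache lines
	 * desc_size == 64, so on a 128-bit AXI bus (axi_bus_width == 16)
	 * desc_pad == (64 - 16) / 16 == 3, i.e. the DSL field programmed
	 * below makes the DMA skip 3 bus-words between descriptors.
	 */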
	setbits_le32(&eqos->dma_regs->ch0_control,
		     EQOS_DMA_CH0_CONTROL_PBLX8 |
		     (desc_pad << EQOS_DMA_CH0_CONTROL_DSL_SHIFT));

	/*
	 * Burst length must be < 1/2 FIFO size.
	 * FIFO size in tqs is encoded as (n / 256) - 1.
	 * Each burst is n * 8 (PBLX8) * 16 (AXI width) == 128 bytes.
	 * Half of n * 256 is n * 128, so pbl == tqs, modulo the -1.
	 */
	pbl = tqs + 1;
	if (pbl > 32)
		pbl = 32;
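	/*
	 * E.g. (illustrative) with tqs == 63 (a 16 KiB FIFO) the cap
	 * applies: pbl == 32, so each burst is 32 * 8 * 16 == 4 KiB,
	 * comfortably below half of the FIFO.
	 */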
	clrsetbits_le32(&eqos->dma_regs->ch0_tx_control,
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_MASK <<
			EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT,
			pbl << EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT);

	clrsetbits_le32(&eqos->dma_regs->ch0_rx_control,
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_MASK <<
			EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT,
			8 << EQOS_DMA_CH0_RX_CONTROL_RXPBL_SHIFT);

	/* DMA performance configuration */
	val = (2 << EQOS_DMA_SYSBUS_MODE_RD_OSR_LMT_SHIFT) |
		EQOS_DMA_SYSBUS_MODE_EAME | EQOS_DMA_SYSBUS_MODE_BLEN16 |
		EQOS_DMA_SYSBUS_MODE_BLEN8 | EQOS_DMA_SYSBUS_MODE_BLEN4;
	writel(val, &eqos->dma_regs->sysbus_mode);

	/* Set up descriptors */

	memset(eqos->descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_NUM);

	for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
		struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);

		eqos->config->ops->eqos_flush_desc(tx_desc);
	}

	for (i = 0; i < EQOS_DESCRIPTORS_RX; i++) {
		struct eqos_desc *rx_desc = eqos_get_desc(eqos, i, true);

		rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
					     (i * EQOS_MAX_PACKET_SIZE));
		rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
		mb();
		eqos->config->ops->eqos_flush_desc(rx_desc);
		eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf +
						     (i * EQOS_MAX_PACKET_SIZE),
						     EQOS_MAX_PACKET_SIZE);
	}

	writel(0, &eqos->dma_regs->ch0_txdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, false),
	       &eqos->dma_regs->ch0_txdesc_list_address);
	writel(EQOS_DESCRIPTORS_TX - 1,
	       &eqos->dma_regs->ch0_txdesc_ring_length);

	writel(0, &eqos->dma_regs->ch0_rxdesc_list_haddress);
	writel((ulong)eqos_get_desc(eqos, 0, true),
	       &eqos->dma_regs->ch0_rxdesc_list_address);
	writel(EQOS_DESCRIPTORS_RX - 1,
	       &eqos->dma_regs->ch0_rxdesc_ring_length);

	/* Enable everything */
	setbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);
	setbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);
	setbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* TX tail pointer not written until we need to TX a packet */
	/*
	 * Point RX tail pointer at last descriptor. Ideally, we'd point at the
	 * first descriptor, implying all descriptors were available. However,
	 * that's not distinguishable from none of the descriptors being
	 * available.
	 */
	last_rx_desc = (ulong)eqos_get_desc(eqos, EQOS_DESCRIPTORS_RX - 1, true);
	writel(last_rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);

	eqos->started = true;

	debug("%s: OK\n", __func__);
	return 0;

err_shutdown_phy:
	phy_shutdown(eqos->phy);
err_stop_resets:
	eqos->config->ops->eqos_stop_resets(dev);
err_stop_clks:
	eqos->config->ops->eqos_stop_clks(dev);
err:
	pr_err("FAILED: %d", ret);
	return ret;
}

static void eqos_stop(struct udevice *dev)
{
	struct eqos_priv *eqos = dev_get_priv(dev);
	int i;

	debug("%s(dev=%p):\n", __func__, dev);

	if (!eqos->started)
		return;
	eqos->started = false;
	eqos->reg_access_ok = false;

	/* Disable TX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_tx_control,
		     EQOS_DMA_CH0_TX_CONTROL_ST);

	/* Wait for all TX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->txq0_debug);
		u32 trcsts = (val >> EQOS_MTL_TXQ0_DEBUG_TRCSTS_SHIFT) &
			EQOS_MTL_TXQ0_DEBUG_TRCSTS_MASK;
		u32 txqsts = val & EQOS_MTL_TXQ0_DEBUG_TXQSTS;

		if ((trcsts != 1) && (!txqsts))
			break;
	}

	/* Turn off MAC TX and RX */
	clrbits_le32(&eqos->mac_regs->configuration,
		     EQOS_MAC_CONFIGURATION_TE | EQOS_MAC_CONFIGURATION_RE);

	/* Wait for all RX packets to drain out of MTL */
	for (i = 0; i < 1000000; i++) {
		u32 val = readl(&eqos->mtl_regs->rxq0_debug);
		u32 prxq = (val >> EQOS_MTL_RXQ0_DEBUG_PRXQ_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_PRXQ_MASK;
		u32 rxqsts = (val >> EQOS_MTL_RXQ0_DEBUG_RXQSTS_SHIFT) &
			EQOS_MTL_RXQ0_DEBUG_RXQSTS_MASK;

		if ((!prxq) && (!rxqsts))
			break;
	}

	/* Turn off RX DMA */
	clrbits_le32(&eqos->dma_regs->ch0_rx_control,
		     EQOS_DMA_CH0_RX_CONTROL_SR);

	if (eqos->phy)
		phy_shutdown(eqos->phy);
	eqos->config->ops->eqos_stop_resets(dev);
	eqos->config->ops->eqos_stop_clks(dev);

	debug("%s: OK\n", __func__);
}

Patrick Delaunay6864a5992019-08-01 11:29:02 +02001424static int eqos_send(struct udevice *dev, void *packet, int length)
Stephen Warren50709602016-10-21 14:46:47 -06001425{
1426 struct eqos_priv *eqos = dev_get_priv(dev);
1427 struct eqos_desc *tx_desc;
1428 int i;
1429
1430 debug("%s(dev=%p, packet=%p, length=%d):\n", __func__, dev, packet,
1431 length);
1432
1433 memcpy(eqos->tx_dma_buf, packet, length);
Christophe Roullier6beb7802019-05-17 15:08:44 +02001434 eqos->config->ops->eqos_flush_buffer(eqos->tx_dma_buf, length);
Stephen Warren50709602016-10-21 14:46:47 -06001435
Marek Vasut89077732021-01-07 11:12:16 +01001436 tx_desc = eqos_get_desc(eqos, eqos->tx_desc_idx, false);
Stephen Warren50709602016-10-21 14:46:47 -06001437 eqos->tx_desc_idx++;
1438 eqos->tx_desc_idx %= EQOS_DESCRIPTORS_TX;
1439
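	/*
	 * Normal (read-format) TX descriptor: des0 carries the buffer
	 * address, des2 the buffer length, and des3 the control bits;
	 * FD/LD mark the frame as both starting and ending in this single
	 * descriptor.
	 */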
1440 tx_desc->des0 = (ulong)eqos->tx_dma_buf;
1441 tx_desc->des1 = 0;
1442 tx_desc->des2 = length;
1443 /*
1444 * Make sure that if HW sees the _OWN write below, it will see all the
1445 * writes to the rest of the descriptor too.
1446 */
1447 mb();
1448 tx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_FD | EQOS_DESC3_LD | length;
Christophe Roullier6beb7802019-05-17 15:08:44 +02001449 eqos->config->ops->eqos_flush_desc(tx_desc);
Stephen Warren50709602016-10-21 14:46:47 -06001450
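	/*
	 * The tail pointer is advanced to one past the descriptor just
	 * queued; the write also acts as the doorbell telling the DMA to
	 * fetch it.
	 */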
Marek Vasut89077732021-01-07 11:12:16 +01001451 writel((ulong)eqos_get_desc(eqos, eqos->tx_desc_idx, false),
Marek Vasutf4f1f4d2020-03-23 02:03:50 +01001452 &eqos->dma_regs->ch0_txdesc_tail_pointer);
Stephen Warren50709602016-10-21 14:46:47 -06001453
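	/* Poll for the DMA to clear OWN: roughly a 1s worst-case timeout. */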
1454 for (i = 0; i < 1000000; i++) {
Christophe Roullier6beb7802019-05-17 15:08:44 +02001455 eqos->config->ops->eqos_inval_desc(tx_desc);
Stephen Warren50709602016-10-21 14:46:47 -06001456 if (!(readl(&tx_desc->des3) & EQOS_DESC3_OWN))
1457 return 0;
1458 udelay(1);
1459 }
1460
1461 debug("%s: TX timeout\n", __func__);
1462
1463 return -ETIMEDOUT;
1464}
1465
Patrick Delaunay6864a5992019-08-01 11:29:02 +02001466static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
Stephen Warren50709602016-10-21 14:46:47 -06001467{
1468 struct eqos_priv *eqos = dev_get_priv(dev);
1469 struct eqos_desc *rx_desc;
1470 int length;
1471
1472 debug("%s(dev=%p, flags=%x):\n", __func__, dev, flags);
1473
Marek Vasut89077732021-01-07 11:12:16 +01001474 rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
Marek Vasutc4db8442020-03-23 02:09:21 +01001475 eqos->config->ops->eqos_inval_desc(rx_desc);
Stephen Warren50709602016-10-21 14:46:47 -06001476 if (rx_desc->des3 & EQOS_DESC3_OWN) {
1477 debug("%s: RX packet not available\n", __func__);
1478 return -EAGAIN;
1479 }
1480
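	/*
	 * Each RX descriptor is backed by its own EQOS_MAX_PACKET_SIZE
	 * slot in rx_dma_buf; des3[14:0] of the write-back descriptor
	 * holds the received frame length.
	 */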
1481 *packetp = eqos->rx_dma_buf +
1482 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
1483 length = rx_desc->des3 & 0x7fff;
1484 debug("%s: *packetp=%p, length=%d\n", __func__, *packetp, length);
1485
Christophe Roullier6beb7802019-05-17 15:08:44 +02001486 eqos->config->ops->eqos_inval_buffer(*packetp, length);
Stephen Warren50709602016-10-21 14:46:47 -06001487
1488 return length;
1489}
1490
Patrick Delaunay6864a5992019-08-01 11:29:02 +02001491static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
Stephen Warren50709602016-10-21 14:46:47 -06001492{
1493 struct eqos_priv *eqos = dev_get_priv(dev);
1494 uchar *packet_expected;
1495 struct eqos_desc *rx_desc;
1496
1497 debug("%s(packet=%p, length=%d)\n", __func__, packet, length);
1498
1499 packet_expected = eqos->rx_dma_buf +
1500 (eqos->rx_desc_idx * EQOS_MAX_PACKET_SIZE);
1501 if (packet != packet_expected) {
1502 debug("%s: Unexpected packet (expected %p)\n", __func__,
1503 packet_expected);
1504 return -EINVAL;
1505 }
1506
Fugang Duan37aae5f2020-05-03 22:41:17 +08001507 eqos->config->ops->eqos_inval_buffer(packet, length);
1508
Marek Vasut89077732021-01-07 11:12:16 +01001509 rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
Marek Vasute8e5c2b2020-03-23 02:09:55 +01001510
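	/*
	 * Clear the stale buffer address and flush that change out on its
	 * own, so the DMA should never observe the old address once the
	 * descriptor is re-armed below.
	 */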
Marek Vasut091b6db2020-03-23 02:11:46 +01001511 rx_desc->des0 = 0;
1512 mb();
1513 eqos->config->ops->eqos_flush_desc(rx_desc);
Marek Vasute8e5c2b2020-03-23 02:09:55 +01001514 eqos->config->ops->eqos_inval_buffer(packet, length);
Stephen Warren50709602016-10-21 14:46:47 -06001515 rx_desc->des0 = (u32)(ulong)packet;
1516 rx_desc->des1 = 0;
1517 rx_desc->des2 = 0;
1518 /*
1519 * Make sure that if HW sees the _OWN write below, it will see all the
1520 * writes to the rest of the descriptor too.
1521 */
1522 mb();
Marek Vasutd54c98e2020-03-23 02:02:57 +01001523 rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
Christophe Roullier6beb7802019-05-17 15:08:44 +02001524 eqos->config->ops->eqos_flush_desc(rx_desc);
Stephen Warren50709602016-10-21 14:46:47 -06001525
1526 writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
1527
1528 eqos->rx_desc_idx++;
1529 eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;
1530
1531 return 0;
1532}
1533
1534static int eqos_probe_resources_core(struct udevice *dev)
1535{
1536 struct eqos_priv *eqos = dev_get_priv(dev);
1537 int ret;
1538
1539 debug("%s(dev=%p):\n", __func__, dev);
1540
Marek Vasut89077732021-01-07 11:12:16 +01001541 eqos->descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_NUM);
Stephen Warren50709602016-10-21 14:46:47 -06001542 if (!eqos->descs) {
1543 debug("%s: eqos_alloc_descs() failed\n", __func__);
1544 ret = -ENOMEM;
1545 goto err;
1546 }
Stephen Warren50709602016-10-21 14:46:47 -06001547
1548 eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
1549 if (!eqos->tx_dma_buf) {
1550 debug("%s: memalign(tx_dma_buf) failed\n", __func__);
1551 ret = -ENOMEM;
1552 goto err_free_descs;
1553 }
Christophe Roullier6beb7802019-05-17 15:08:44 +02001554 debug("%s: tx_dma_buf=%p\n", __func__, eqos->tx_dma_buf);
Stephen Warren50709602016-10-21 14:46:47 -06001555
1556 eqos->rx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_RX_BUFFER_SIZE);
1557 if (!eqos->rx_dma_buf) {
1558 debug("%s: memalign(rx_dma_buf) failed\n", __func__);
1559 ret = -ENOMEM;
1560 goto err_free_tx_dma_buf;
1561 }
Christophe Roullier6beb7802019-05-17 15:08:44 +02001562 debug("%s: rx_dma_buf=%p\n", __func__, eqos->rx_dma_buf);
Stephen Warren50709602016-10-21 14:46:47 -06001563
1564 eqos->rx_pkt = malloc(EQOS_MAX_PACKET_SIZE);
1565 if (!eqos->rx_pkt) {
1566 debug("%s: malloc(rx_pkt) failed\n", __func__);
1567 ret = -ENOMEM;
1568 goto err_free_rx_dma_buf;
1569 }
1570 debug("%s: rx_pkt=%p\n", __func__, eqos->rx_pkt);
1571
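	/*
	 * Invalidate the whole RX buffer up front so that no dirty cache
	 * lines can later be evicted on top of data the hardware has
	 * DMA'd in.
	 */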
Marek Vasute8e5c2b2020-03-23 02:09:55 +01001572 eqos->config->ops->eqos_inval_buffer(eqos->rx_dma_buf,
1573 EQOS_MAX_PACKET_SIZE * EQOS_DESCRIPTORS_RX);
1574
Stephen Warren50709602016-10-21 14:46:47 -06001575 debug("%s: OK\n", __func__);
1576 return 0;
1577
1578err_free_rx_dma_buf:
1579 free(eqos->rx_dma_buf);
1580err_free_tx_dma_buf:
1581 free(eqos->tx_dma_buf);
1582err_free_descs:
1583 eqos_free_descs(eqos->descs);
1584err:
1585
1586 debug("%s: returns %d\n", __func__, ret);
1587 return ret;
1588}
1589
1590static int eqos_remove_resources_core(struct udevice *dev)
1591{
1592 struct eqos_priv *eqos = dev_get_priv(dev);
1593
1594 debug("%s(dev=%p):\n", __func__, dev);
1595
1596 free(eqos->rx_pkt);
1597 free(eqos->rx_dma_buf);
1598 free(eqos->tx_dma_buf);
1599 eqos_free_descs(eqos->descs);
1600
1601 debug("%s: OK\n", __func__);
1602 return 0;
1603}
1604
1605static int eqos_probe_resources_tegra186(struct udevice *dev)
1606{
1607 struct eqos_priv *eqos = dev_get_priv(dev);
1608 int ret;
1609
1610 debug("%s(dev=%p):\n", __func__, dev);
1611
1612 ret = reset_get_by_name(dev, "eqos", &eqos->reset_ctl);
1613 if (ret) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001614 pr_err("reset_get_by_name(rst) failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001615 return ret;
1616 }
1617
1618 ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
1619 &eqos->phy_reset_gpio,
1620 GPIOD_IS_OUT | GPIOD_IS_OUT_ACTIVE);
1621 if (ret) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001622 pr_err("gpio_request_by_name(phy reset) failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001623 goto err_free_reset_eqos;
1624 }
1625
1626 ret = clk_get_by_name(dev, "slave_bus", &eqos->clk_slave_bus);
1627 if (ret) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001628 pr_err("clk_get_by_name(slave_bus) failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001629 goto err_free_gpio_phy_reset;
1630 }
1631
1632 ret = clk_get_by_name(dev, "master_bus", &eqos->clk_master_bus);
1633 if (ret) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001634 pr_err("clk_get_by_name(master_bus) failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001635 goto err_free_clk_slave_bus;
1636 }
1637
1638 ret = clk_get_by_name(dev, "rx", &eqos->clk_rx);
1639 if (ret) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001640 pr_err("clk_get_by_name(rx) failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001641 goto err_free_clk_master_bus;
1642 }
1643
1644 ret = clk_get_by_name(dev, "ptp_ref", &eqos->clk_ptp_ref);
1645 if (ret) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001646 pr_err("clk_get_by_name(ptp_ref) failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001647 goto err_free_clk_rx;
1649 }
1650
1651 ret = clk_get_by_name(dev, "tx", &eqos->clk_tx);
1652 if (ret) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001653 pr_err("clk_get_by_name(tx) failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001654 goto err_free_clk_ptp_ref;
1655 }
1656
1657 debug("%s: OK\n", __func__);
1658 return 0;
1659
1660err_free_clk_ptp_ref:
1661 clk_free(&eqos->clk_ptp_ref);
1662err_free_clk_rx:
1663 clk_free(&eqos->clk_rx);
1664err_free_clk_master_bus:
1665 clk_free(&eqos->clk_master_bus);
1666err_free_clk_slave_bus:
1667 clk_free(&eqos->clk_slave_bus);
1668err_free_gpio_phy_reset:
1669 dm_gpio_free(dev, &eqos->phy_reset_gpio);
1670err_free_reset_eqos:
1671 reset_free(&eqos->reset_ctl);
1672
1673 debug("%s: returns %d\n", __func__, ret);
1674 return ret;
1675}
1676
Christophe Roullier6beb7802019-05-17 15:08:44 +02001677/* Board-specific Ethernet interface initialization. */
Patrick Delaunaybff66f92019-08-01 11:29:03 +02001678__weak int board_interface_eth_init(struct udevice *dev,
1679 phy_interface_t interface_type)
Christophe Roullier6beb7802019-05-17 15:08:44 +02001680{
1681 return 0;
1682}
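/*
 * A minimal, hypothetical board-side override (the board file and the RMII
 * choice below are assumptions for illustration, not part of this driver):
 *
 *	int board_interface_eth_init(struct udevice *dev,
 *				     phy_interface_t interface_type)
 *	{
 *		if (interface_type != PHY_INTERFACE_MODE_RMII)
 *			return -EINVAL;
 *		... program board-level glue (pinmux, syscfg) here ...
 *		return 0;
 *	}
 */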
1683
1684static int eqos_probe_resources_stm32(struct udevice *dev)
1685{
1686 struct eqos_priv *eqos = dev_get_priv(dev);
1687 int ret;
1688 phy_interface_t interface;
Christophe Roullier6beb7802019-05-17 15:08:44 +02001689
1690 debug("%s(dev=%p):\n", __func__, dev);
1691
1692 interface = eqos->config->interface(dev);
1693
1694 if (interface == PHY_INTERFACE_MODE_NONE) {
1695 pr_err("Invalid PHY interface\n");
1696 return -EINVAL;
1697 }
1698
Patrick Delaunaybff66f92019-08-01 11:29:03 +02001699 ret = board_interface_eth_init(dev, interface);
Christophe Roullier6beb7802019-05-17 15:08:44 +02001700 if (ret)
1701 return -EINVAL;
1702
Patrick Delaunay5c8db372020-03-18 10:50:16 +01001703 eqos->max_speed = dev_read_u32_default(dev, "max-speed", 0);
1704
Christophe Roullier6beb7802019-05-17 15:08:44 +02001705 ret = clk_get_by_name(dev, "stmmaceth", &eqos->clk_master_bus);
1706 if (ret) {
1707 pr_err("clk_get_by_name(master_bus) failed: %d", ret);
1708 goto err_probe;
1709 }
1710
1711 ret = clk_get_by_name(dev, "mac-clk-rx", &eqos->clk_rx);
1712 if (ret) {
1713 pr_err("clk_get_by_name(rx) failed: %d", ret);
1714 goto err_free_clk_master_bus;
1715 }
1716
1717 ret = clk_get_by_name(dev, "mac-clk-tx", &eqos->clk_tx);
1718 if (ret) {
1719 pr_err("clk_get_by_name(tx) failed: %d", ret);
1720 goto err_free_clk_rx;
1721 }
1722
1723 /* Get ETH_CLK clocks (optional) */
1724 ret = clk_get_by_name(dev, "eth-ck", &eqos->clk_ck);
1725 if (ret)
1726		pr_warn("No PHY clock provided: %d", ret);
1727
1728 debug("%s: OK\n", __func__);
1729 return 0;
1730
1731err_free_clk_rx:
1732 clk_free(&eqos->clk_rx);
1733err_free_clk_master_bus:
1734 clk_free(&eqos->clk_master_bus);
1735err_probe:
1736
1737 debug("%s: returns %d\n", __func__, ret);
1738 return ret;
1739}
1740
1741static phy_interface_t eqos_get_interface_stm32(struct udevice *dev)
1742{
1743 const char *phy_mode;
1744 phy_interface_t interface = PHY_INTERFACE_MODE_NONE;
1745
1746 debug("%s(dev=%p):\n", __func__, dev);
1747
Patrick Delaunay9e6ed382020-09-09 18:30:06 +02001748 phy_mode = dev_read_prop(dev, "phy-mode", NULL);
Christophe Roullier6beb7802019-05-17 15:08:44 +02001749 if (phy_mode)
1750 interface = phy_get_interface_by_name(phy_mode);
1751
1752 return interface;
1753}
1754
1755static phy_interface_t eqos_get_interface_tegra186(struct udevice *dev)
1756{
1757 return PHY_INTERFACE_MODE_MII;
1758}
1759
Fugang Duan37aae5f2020-05-03 22:41:17 +08001760static int eqos_probe_resources_imx(struct udevice *dev)
1761{
1762 struct eqos_priv *eqos = dev_get_priv(dev);
1763 phy_interface_t interface;
1764
1765 debug("%s(dev=%p):\n", __func__, dev);
1766
1767 interface = eqos->config->interface(dev);
1768
1769 if (interface == PHY_INTERFACE_MODE_NONE) {
1770 pr_err("Invalid PHY interface\n");
1771 return -EINVAL;
1772 }
1773
1774 debug("%s: OK\n", __func__);
1775 return 0;
1776}
1777
1778static phy_interface_t eqos_get_interface_imx(struct udevice *dev)
1779{
Fugang Duandd455e62020-05-03 22:41:18 +08001780 const char *phy_mode;
1781 phy_interface_t interface = PHY_INTERFACE_MODE_NONE;
1782
1783 debug("%s(dev=%p):\n", __func__, dev);
1784
Patrick Delaunay9e6ed382020-09-09 18:30:06 +02001785 phy_mode = dev_read_prop(dev, "phy-mode", NULL);
Fugang Duandd455e62020-05-03 22:41:18 +08001786 if (phy_mode)
1787 interface = phy_get_interface_by_name(phy_mode);
1788
1789 return interface;
Fugang Duan37aae5f2020-05-03 22:41:17 +08001790}
1791
Stephen Warren50709602016-10-21 14:46:47 -06001792static int eqos_remove_resources_tegra186(struct udevice *dev)
1793{
1794 struct eqos_priv *eqos = dev_get_priv(dev);
1795
1796 debug("%s(dev=%p):\n", __func__, dev);
1797
Fugang Duan37aae5f2020-05-03 22:41:17 +08001798#ifdef CONFIG_CLK
Stephen Warren50709602016-10-21 14:46:47 -06001799 clk_free(&eqos->clk_tx);
1800 clk_free(&eqos->clk_ptp_ref);
1801 clk_free(&eqos->clk_rx);
1802 clk_free(&eqos->clk_slave_bus);
1803 clk_free(&eqos->clk_master_bus);
Fugang Duan37aae5f2020-05-03 22:41:17 +08001804#endif
Stephen Warren50709602016-10-21 14:46:47 -06001805 dm_gpio_free(dev, &eqos->phy_reset_gpio);
1806 reset_free(&eqos->reset_ctl);
1807
1808 debug("%s: OK\n", __func__);
1809 return 0;
1810}
1811
Christophe Roullier6beb7802019-05-17 15:08:44 +02001812static int eqos_remove_resources_stm32(struct udevice *dev)
1813{
Fugang Duan37aae5f2020-05-03 22:41:17 +08001814#ifdef CONFIG_CLK
Christophe Roullier6beb7802019-05-17 15:08:44 +02001815 struct eqos_priv *eqos = dev_get_priv(dev);
1816
1817 debug("%s(dev=%p):\n", __func__, dev);
1818
1819 clk_free(&eqos->clk_tx);
1820 clk_free(&eqos->clk_rx);
1821 clk_free(&eqos->clk_master_bus);
1822 if (clk_valid(&eqos->clk_ck))
1823 clk_free(&eqos->clk_ck);
Fugang Duan37aae5f2020-05-03 22:41:17 +08001824#endif
Christophe Roullier6beb7802019-05-17 15:08:44 +02001825
Christophe Roullier104dab52020-03-18 10:50:15 +01001826 if (dm_gpio_is_valid(&eqos->phy_reset_gpio))
1827 dm_gpio_free(dev, &eqos->phy_reset_gpio);
1828
Christophe Roullier6beb7802019-05-17 15:08:44 +02001829 debug("%s: OK\n", __func__);
1830 return 0;
1831}
1832
Stephen Warren50709602016-10-21 14:46:47 -06001833static int eqos_probe(struct udevice *dev)
1834{
1835 struct eqos_priv *eqos = dev_get_priv(dev);
1836 int ret;
1837
1838 debug("%s(dev=%p):\n", __func__, dev);
1839
1840 eqos->dev = dev;
1841 eqos->config = (void *)dev_get_driver_data(dev);
1842
Masahiro Yamadaa89b4de2020-07-17 14:36:48 +09001843 eqos->regs = dev_read_addr(dev);
Stephen Warren50709602016-10-21 14:46:47 -06001844 if (eqos->regs == FDT_ADDR_T_NONE) {
Masahiro Yamadaa89b4de2020-07-17 14:36:48 +09001845 pr_err("dev_read_addr() failed");
Stephen Warren50709602016-10-21 14:46:47 -06001846 return -ENODEV;
1847 }
1848 eqos->mac_regs = (void *)(eqos->regs + EQOS_MAC_REGS_BASE);
1849 eqos->mtl_regs = (void *)(eqos->regs + EQOS_MTL_REGS_BASE);
1850 eqos->dma_regs = (void *)(eqos->regs + EQOS_DMA_REGS_BASE);
1851 eqos->tegra186_regs = (void *)(eqos->regs + EQOS_TEGRA186_REGS_BASE);
1852
1853 ret = eqos_probe_resources_core(dev);
1854 if (ret < 0) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001855 pr_err("eqos_probe_resources_core() failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001856 return ret;
1857 }
1858
Christophe Roullier6beb7802019-05-17 15:08:44 +02001859 ret = eqos->config->ops->eqos_probe_resources(dev);
Stephen Warren50709602016-10-21 14:46:47 -06001860 if (ret < 0) {
Christophe Roullier6beb7802019-05-17 15:08:44 +02001861 pr_err("eqos_probe_resources() failed: %d", ret);
Stephen Warren50709602016-10-21 14:46:47 -06001862 goto err_remove_resources_core;
1863 }
1864
Ye Liad122b72020-05-03 22:41:15 +08001865#ifdef CONFIG_DM_ETH_PHY
1866 eqos->mii = eth_phy_get_mdio_bus(dev);
1867#endif
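	/*
	 * With CONFIG_DM_ETH_PHY the MDIO bus may already be provided
	 * (e.g. shared with another controller); otherwise, or if the
	 * lookup fails, register a private bus below.
	 */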
Stephen Warren50709602016-10-21 14:46:47 -06001868 if (!eqos->mii) {
Ye Liad122b72020-05-03 22:41:15 +08001869 eqos->mii = mdio_alloc();
1870 if (!eqos->mii) {
1871 pr_err("mdio_alloc() failed");
1872 ret = -ENOMEM;
1873			goto err_remove_resources;
1874 }
1875 eqos->mii->read = eqos_mdio_read;
1876 eqos->mii->write = eqos_mdio_write;
1877 eqos->mii->priv = eqos;
1878 strcpy(eqos->mii->name, dev->name);
Stephen Warren50709602016-10-21 14:46:47 -06001879
Ye Liad122b72020-05-03 22:41:15 +08001880 ret = mdio_register(eqos->mii);
1881 if (ret < 0) {
1882 pr_err("mdio_register() failed: %d", ret);
1883 goto err_free_mdio;
1884 }
Stephen Warren50709602016-10-21 14:46:47 -06001885 }
1886
Ye Liad122b72020-05-03 22:41:15 +08001887#ifdef CONFIG_DM_ETH_PHY
1888 eth_phy_set_mdio_bus(dev, eqos->mii);
1889#endif
1890
Stephen Warren50709602016-10-21 14:46:47 -06001891 debug("%s: OK\n", __func__);
1892 return 0;
1893
1894err_free_mdio:
1895 mdio_free(eqos->mii);
1896err_remove_resources:
Christophe Roullier6beb7802019-05-17 15:08:44 +02001897 eqos->config->ops->eqos_remove_resources(dev);
Stephen Warren50709602016-10-21 14:46:47 -06001898err_remove_resources_core:
1899 eqos_remove_resources_core(dev);
1900
1901 debug("%s: returns %d\n", __func__, ret);
1902 return ret;
1903}
1904
1905static int eqos_remove(struct udevice *dev)
1906{
1907 struct eqos_priv *eqos = dev_get_priv(dev);
1908
1909 debug("%s(dev=%p):\n", __func__, dev);
1910
1911 mdio_unregister(eqos->mii);
1912 mdio_free(eqos->mii);
Christophe Roullier6beb7802019-05-17 15:08:44 +02001913 eqos->config->ops->eqos_remove_resources(dev);
1914
Stephen Warren50709602016-10-21 14:46:47 -06001915	eqos_remove_resources_core(dev);
1916
1917 debug("%s: OK\n", __func__);
1918 return 0;
1919}
1920
Patrick Delaunay1bc6ce72021-07-20 20:09:56 +02001921static int eqos_null_ops(struct udevice *dev)
1922{
1923 return 0;
1924}
1925
Stephen Warren50709602016-10-21 14:46:47 -06001926static const struct eth_ops eqos_ops = {
1927 .start = eqos_start,
1928 .stop = eqos_stop,
1929 .send = eqos_send,
1930 .recv = eqos_recv,
1931 .free_pkt = eqos_free_pkt,
1932 .write_hwaddr = eqos_write_hwaddr,
Ye Li3fb1a0e2020-05-03 22:41:20 +08001933 .read_rom_hwaddr = eqos_read_rom_hwaddr,
Stephen Warren50709602016-10-21 14:46:47 -06001934};
1935
Christophe Roullier6beb7802019-05-17 15:08:44 +02001936static struct eqos_ops eqos_tegra186_ops = {
Marek Vasut89077732021-01-07 11:12:16 +01001937 .eqos_inval_desc = eqos_inval_desc_generic,
1938 .eqos_flush_desc = eqos_flush_desc_generic,
Christophe Roullier6beb7802019-05-17 15:08:44 +02001939 .eqos_inval_buffer = eqos_inval_buffer_tegra186,
1940 .eqos_flush_buffer = eqos_flush_buffer_tegra186,
1941 .eqos_probe_resources = eqos_probe_resources_tegra186,
1942 .eqos_remove_resources = eqos_remove_resources_tegra186,
1943 .eqos_stop_resets = eqos_stop_resets_tegra186,
1944 .eqos_start_resets = eqos_start_resets_tegra186,
1945 .eqos_stop_clks = eqos_stop_clks_tegra186,
1946 .eqos_start_clks = eqos_start_clks_tegra186,
1947 .eqos_calibrate_pads = eqos_calibrate_pads_tegra186,
1948 .eqos_disable_calibration = eqos_disable_calibration_tegra186,
1949 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_tegra186,
1950 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_tegra186
1951};
1952
Patrick Delaunay68083902020-06-08 11:27:19 +02001953static const struct eqos_config __maybe_unused eqos_tegra186_config = {
Stephen Warren50709602016-10-21 14:46:47 -06001954 .reg_access_always_ok = false,
Christophe Roullier6beb7802019-05-17 15:08:44 +02001955 .mdio_wait = 10,
1956 .swr_wait = 10,
1957 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
1958 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_20_35,
Marek Vasut89077732021-01-07 11:12:16 +01001959 .axi_bus_width = EQOS_AXI_WIDTH_128,
Christophe Roullier6beb7802019-05-17 15:08:44 +02001960 .interface = eqos_get_interface_tegra186,
1961 .ops = &eqos_tegra186_ops
1962};
1963
1964static struct eqos_ops eqos_stm32_ops = {
Fugang Duan37aae5f2020-05-03 22:41:17 +08001965 .eqos_inval_desc = eqos_inval_desc_generic,
1966 .eqos_flush_desc = eqos_flush_desc_generic,
1967 .eqos_inval_buffer = eqos_inval_buffer_generic,
1968 .eqos_flush_buffer = eqos_flush_buffer_generic,
Christophe Roullier6beb7802019-05-17 15:08:44 +02001969 .eqos_probe_resources = eqos_probe_resources_stm32,
1970 .eqos_remove_resources = eqos_remove_resources_stm32,
Patrick Delaunay1bc6ce72021-07-20 20:09:56 +02001971 .eqos_stop_resets = eqos_null_ops,
1972 .eqos_start_resets = eqos_null_ops,
Christophe Roullier6beb7802019-05-17 15:08:44 +02001973 .eqos_stop_clks = eqos_stop_clks_stm32,
1974 .eqos_start_clks = eqos_start_clks_stm32,
Patrick Delaunay1bc6ce72021-07-20 20:09:56 +02001975 .eqos_calibrate_pads = eqos_null_ops,
1976 .eqos_disable_calibration = eqos_null_ops,
1977 .eqos_set_tx_clk_speed = eqos_null_ops,
Christophe Roullier6beb7802019-05-17 15:08:44 +02001978 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_stm32
1979};
1980
Patrick Delaunay68083902020-06-08 11:27:19 +02001981static const struct eqos_config __maybe_unused eqos_stm32_config = {
Christophe Roullier6beb7802019-05-17 15:08:44 +02001982 .reg_access_always_ok = false,
1983 .mdio_wait = 10000,
1984 .swr_wait = 50,
1985 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_AV,
1986 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
Marek Vasut89077732021-01-07 11:12:16 +01001987 .axi_bus_width = EQOS_AXI_WIDTH_64,
Christophe Roullier6beb7802019-05-17 15:08:44 +02001988 .interface = eqos_get_interface_stm32,
1989 .ops = &eqos_stm32_ops
Stephen Warren50709602016-10-21 14:46:47 -06001990};
1991
Fugang Duan37aae5f2020-05-03 22:41:17 +08001992static struct eqos_ops eqos_imx_ops = {
1993 .eqos_inval_desc = eqos_inval_desc_generic,
1994 .eqos_flush_desc = eqos_flush_desc_generic,
1995 .eqos_inval_buffer = eqos_inval_buffer_generic,
1996 .eqos_flush_buffer = eqos_flush_buffer_generic,
1997 .eqos_probe_resources = eqos_probe_resources_imx,
Patrick Delaunay1bc6ce72021-07-20 20:09:56 +02001998 .eqos_remove_resources = eqos_null_ops,
1999 .eqos_stop_resets = eqos_null_ops,
2000 .eqos_start_resets = eqos_null_ops,
2001 .eqos_stop_clks = eqos_null_ops,
2002 .eqos_start_clks = eqos_null_ops,
2003 .eqos_calibrate_pads = eqos_null_ops,
2004 .eqos_disable_calibration = eqos_null_ops,
Fugang Duan37aae5f2020-05-03 22:41:17 +08002005 .eqos_set_tx_clk_speed = eqos_set_tx_clk_speed_imx,
2006 .eqos_get_tick_clk_rate = eqos_get_tick_clk_rate_imx
2007};
2008
Patrick Delaunay68083902020-06-08 11:27:19 +02002009struct eqos_config __maybe_unused eqos_imx_config = {
Fugang Duan37aae5f2020-05-03 22:41:17 +08002010 .reg_access_always_ok = false,
Ye Lif369e692020-12-28 20:15:10 +08002011 .mdio_wait = 10,
Fugang Duan37aae5f2020-05-03 22:41:17 +08002012 .swr_wait = 50,
2013 .config_mac = EQOS_MAC_RXQ_CTRL0_RXQ0EN_ENABLED_DCB,
2014 .config_mac_mdio = EQOS_MAC_MDIO_ADDRESS_CR_250_300,
Marek Vasut89077732021-01-07 11:12:16 +01002015 .axi_bus_width = EQOS_AXI_WIDTH_64,
Fugang Duan37aae5f2020-05-03 22:41:17 +08002016 .interface = eqos_get_interface_imx,
2017 .ops = &eqos_imx_ops
2018};
2019
Stephen Warren50709602016-10-21 14:46:47 -06002020static const struct udevice_id eqos_ids[] = {
Patrick Delaunay68083902020-06-08 11:27:19 +02002021#if IS_ENABLED(CONFIG_DWC_ETH_QOS_TEGRA186)
Stephen Warren50709602016-10-21 14:46:47 -06002022 {
2023 .compatible = "nvidia,tegra186-eqos",
2024 .data = (ulong)&eqos_tegra186_config
2025 },
Patrick Delaunay68083902020-06-08 11:27:19 +02002026#endif
2027#if IS_ENABLED(CONFIG_DWC_ETH_QOS_STM32)
Christophe Roullier6beb7802019-05-17 15:08:44 +02002028 {
Patrick Delaunaya0466f62020-05-14 15:00:23 +02002029 .compatible = "st,stm32mp1-dwmac",
Christophe Roullier6beb7802019-05-17 15:08:44 +02002030 .data = (ulong)&eqos_stm32_config
2031 },
Patrick Delaunay68083902020-06-08 11:27:19 +02002032#endif
2033#if IS_ENABLED(CONFIG_DWC_ETH_QOS_IMX)
Fugang Duan37aae5f2020-05-03 22:41:17 +08002034 {
2035 .compatible = "fsl,imx-eqos",
2036 .data = (ulong)&eqos_imx_config
2037 },
Patrick Delaunay68083902020-06-08 11:27:19 +02002038#endif
Christophe Roullier6beb7802019-05-17 15:08:44 +02002039
Stephen Warren50709602016-10-21 14:46:47 -06002040 { }
2041};
2042
2043U_BOOT_DRIVER(eth_eqos) = {
2044 .name = "eth_eqos",
2045 .id = UCLASS_ETH,
Fugang Duan37aae5f2020-05-03 22:41:17 +08002046 .of_match = of_match_ptr(eqos_ids),
Stephen Warren50709602016-10-21 14:46:47 -06002047 .probe = eqos_probe,
2048 .remove = eqos_remove,
2049 .ops = &eqos_ops,
Simon Glass8a2b47f2020-12-03 16:55:17 -07002050 .priv_auto = sizeof(struct eqos_priv),
Simon Glass71fa5b42020-12-03 16:55:18 -07002051 .plat_auto = sizeof(struct eth_pdata),
Stephen Warren50709602016-10-21 14:46:47 -06002052};