blob: 8f162ca58fbb53cb6c6210e4f955681d41058872 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09002/*
Robert P. J. Day8c60f922016-05-04 04:47:31 -04003 * sh_eth.c - Driver for Renesas ethernet controller.
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09004 *
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +09005 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +09006 * Copyright (c) 2008, 2011, 2014 2014 Nobuhiro Iwamatsu
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09007 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +09008 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09009 */
10
11#include <config.h>
12#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -070013#include <cpu_func.h>
Simon Glass0af6e2d2019-08-01 09:46:52 -060014#include <env.h>
Simon Glass0f2af882020-05-10 11:40:05 -060015#include <log.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090016#include <malloc.h>
17#include <net.h>
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +090018#include <netdev.h>
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +090019#include <miiphy.h>
Simon Glass274e0b02020-05-10 11:39:56 -060020#include <asm/cache.h>
Simon Glassdbd79542020-05-10 11:40:11 -060021#include <linux/delay.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090022#include <linux/errno.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060023#include <asm/global_data.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090024#include <asm/io.h>
25
Marek Vasut020d3942018-01-19 18:57:17 +010026#include <clk.h>
27#include <dm.h>
28#include <linux/mii.h>
29#include <asm/gpio.h>
Marek Vasut020d3942018-01-19 18:57:17 +010030
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090031#include "sh_eth.h"
32
Tom Rini9996ab82022-12-04 10:13:52 -050033#ifndef CFG_SH_ETHER_USE_PORT
34# error "Please define CFG_SH_ETHER_USE_PORT"
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090035#endif
Tom Rini45ec5fd2022-12-04 10:13:50 -050036#ifndef CFG_SH_ETHER_PHY_ADDR
37# error "Please define CFG_SH_ETHER_PHY_ADDR"
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090038#endif
Nobuhiro Iwamatsu6bff09d2013-08-22 13:22:01 +090039
Tom Rini872054f2022-12-04 10:13:49 -050040#if defined(CFG_SH_ETHER_CACHE_WRITEBACK) && \
Trevor Woerner43ec7e02019-05-03 09:41:00 -040041 !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090042#define flush_cache_wback(addr, len) \
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020043 flush_dcache_range((unsigned long)addr, \
Tom Rinidd2eba02022-12-04 10:13:47 -050044 (unsigned long)(addr + ALIGN(len, CFG_SH_ETHER_ALIGNE_SIZE)))
Yoshihiro Shimoda281aa052011-01-27 10:06:08 +090045#else
46#define flush_cache_wback(...)
47#endif
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090048
#if defined(CFG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
/*
 * Invalidate the data cache over [addr, addr + len), expanding both
 * ends outward to CFG_SH_ETHER_ALIGNE_SIZE boundaries.
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies; arguments are
 * parenthesized to avoid precedence surprises.
 */
#define invalidate_cache(addr, len)					\
	do {								\
		unsigned long line_size = CFG_SH_ETHER_ALIGNE_SIZE;	\
		unsigned long start, end;				\
									\
		start = (unsigned long)(addr);				\
		end = start + (len);					\
		start &= ~(line_size - 1);				\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
									\
		invalidate_dcache_range(start, end);			\
	} while (0)
#else
#define invalidate_cache(...)
#endif
65
Nobuhiro Iwamatsu71f507c2012-01-11 10:23:51 +090066#define TIMEOUT_CNT 1000
67
/*
 * Queue one frame on the current TX descriptor and busy-wait until the
 * controller has sent it.
 *
 * @eth:    driver state; the active port's descriptor ring is used
 * @packet: frame data; must be 4-byte aligned (checked below)
 * @len:    frame length in bytes; must fit in 16 bits
 *
 * Returns 0 on success, -EINVAL for a NULL/oversized packet, -EFAULT
 * for a misaligned packet, or -ETIMEDOUT if TD_TACT does not clear
 * within TIMEOUT_CNT polls of ~100us each.
 */
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
		       , __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	/* Write the frame data back to memory before the DMA reads it */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	/* Frame length lives in the upper 16 bits of td1 */
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	/* Make the updated descriptor visible to the controller */
	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		/* Re-read the descriptor: the DMA clears TD_TACT when done */
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Advance to the next descriptor, wrapping at the end of the ring */
	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}
124
Marek Vasut48de90d2018-01-21 15:39:50 +0100125static int sh_eth_recv_start(struct sh_eth_dev *eth)
Marek Vasut044eb2d2018-01-21 14:27:51 +0100126{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100127 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900128
129 /* Check if the rx descriptor is ready */
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +0900130 invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
Marek Vasut48de90d2018-01-21 15:39:50 +0100131 if (port_info->rx_desc_cur->rd0 & RD_RACT)
132 return -EINVAL;
133
134 /* Check for errors */
135 if (port_info->rx_desc_cur->rd0 & RD_RFE)
136 return -EINVAL;
137
Marek Vasut2526b792018-02-17 00:47:38 +0100138 return port_info->rx_desc_cur->rd1 & 0xffff;
Marek Vasut48de90d2018-01-21 15:39:50 +0100139}
140
141static void sh_eth_recv_finish(struct sh_eth_dev *eth)
142{
143 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900144
Marek Vasut48de90d2018-01-21 15:39:50 +0100145 /* Make current descriptor available again */
146 if (port_info->rx_desc_cur->rd0 & RD_RDLE)
147 port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
148 else
149 port_info->rx_desc_cur->rd0 = RD_RACT;
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +0900150
Marek Vasut48de90d2018-01-21 15:39:50 +0100151 flush_cache_wback(port_info->rx_desc_cur,
152 sizeof(struct rx_desc_s));
153
154 /* Point to the next descriptor */
155 port_info->rx_desc_cur++;
156 if (port_info->rx_desc_cur >=
157 port_info->rx_desc_base + NUM_RX_DESC)
158 port_info->rx_desc_cur = port_info->rx_desc_base;
159}
160
/*
 * Software-reset the controller's E-DMAC.
 *
 * GETHER/RZ variants: enable the DMAC, request the reset via EDMR_SRST
 * and poll (up to TIMEOUT_CNT * 1ms) for the bit to self-clear;
 * returns 0 on success or -EIO on timeout.
 *
 * Other variants: assert EDMR_SRST manually, wait 3 ms, deassert;
 * always returns 0.
 */
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		/* Hardware clears EDMR_SRST when the reset has finished */
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	/* Assert the reset bit, hold it for 3 ms, then release it */
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}
193
/*
 * Allocate and initialise the TX descriptor ring and point the
 * controller at it.
 *
 * The ring is allocated with memalign(), written back to memory, then
 * accessed through its P2 (non-cacheable) alias. All descriptors are
 * cleared and the last one is tagged with TD_TDLE (end of list).
 *
 * Returns 0 on success, -ENOMEM if the allocation fails.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Push any dirty cache lines out before switching to the P2 alias */
	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);/* Last descriptor bit */
#endif

err:
	return ret;
}
246
/*
 * Allocate and initialise the RX descriptor ring and its data buffers,
 * then point the controller at the ring.
 *
 * Descriptors are allocated with memalign() and accessed through the
 * P2 (non-cacheable) alias; each descriptor is armed with RD_RACT and
 * given a MAX_BUF_SIZE buffer. The last descriptor is tagged RD_RDLE.
 *
 * Returns 0 on success, -ENOMEM if either allocation fails (the
 * descriptor ring is freed again if the buffer allocation fails).
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Push any dirty cache lines out before switching to the P2 alias */
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		/* Buffer size lives in the upper 16 bits of rd1 */
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
319
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900320static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900321{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100322 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900323
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900324 if (port_info->tx_desc_alloc) {
325 free(port_info->tx_desc_alloc);
326 port_info->tx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900327 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900328}
329
330static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
331{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100332 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900333
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900334 if (port_info->rx_desc_alloc) {
335 free(port_info->rx_desc_alloc);
336 port_info->rx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900337 }
338
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900339 if (port_info->rx_buf_alloc) {
340 free(port_info->rx_buf_alloc);
341 port_info->rx_buf_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900342 }
343}
344
/*
 * Set up both descriptor rings. If the RX ring cannot be set up, the
 * already-allocated TX ring is freed again so no memory leaks.
 *
 * Returns 0 on success or the negative error from the failing step.
 */
static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		return ret;

	ret = sh_eth_rx_desc_init(eth);
	if (ret) {
		/* Undo the TX ring allocation on RX failure */
		sh_eth_tx_desc_free(eth);
		return ret;
	}

	return 0;
}
364
Marek Vasutccdfc5e2018-01-21 14:55:44 +0100365static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
366 unsigned char *mac)
367{
368 u32 val;
369
370 val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
371 sh_eth_write(port_info, val, MAHR);
372
373 val = (mac[4] << 8) | mac[5];
374 sh_eth_write(port_info, val, MALR);
375}
376
/*
 * Program the E-DMAC and E-MAC registers to a known baseline and set
 * the station MAC address. Speed/duplex are programmed separately in
 * sh_eth_phy_regs_config() after PHY negotiation completes.
 */
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	/* Disable all E-DMAC status interrupt sources (driver polls) */
	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	/* Disable E-MAC status interrupt sources as well */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	/* Set bit 0 of RMIIMR on R-Car parts */
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900423
/*
 * Propagate the negotiated PHY speed and duplex into the MAC.
 *
 * The speed is written to the variant-specific register (GECMR on
 * GETHER, RTRATE on SH7757/SH7752, or the ECMR_RTM bit on R-Car
 * Gen2/V3H); then ECMR is written to enable RX/TX with the proper
 * duplex setting.
 *
 * Returns 0 (there are currently no failure paths).
 */
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	/* val collects speed bits that must be OR-ed into ECMR below */
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	/* 1000Base is only available on the GETHER variant */
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}
471
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900472static void sh_eth_start(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900473{
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900474 struct sh_eth_info *port_info = &eth->port_info[eth->port];
475
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900476 /*
477 * Enable the e-dmac receiver only. The transmitter will be enabled when
478 * we have something to transmit
479 */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900480 sh_eth_write(port_info, EDRRR_R, EDRRR);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900481}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900482
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900483static void sh_eth_stop(struct sh_eth_dev *eth)
484{
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900485 struct sh_eth_info *port_info = &eth->port_info[eth->port];
486
487 sh_eth_write(port_info, ~EDRRR_R, EDRRR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900488}
489
/*
 * Bring the controller to an initialised state: reset the E-DMAC, set
 * up both descriptor rings and program the baseline MAC registers
 * (including the station address @mac).
 *
 * Returns 0 on success or the negative error of the failing step.
 */
static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900506
Marek Vasutc13be6a2018-01-21 15:10:21 +0100507static int sh_eth_start_common(struct sh_eth_dev *eth)
508{
509 struct sh_eth_info *port_info = &eth->port_info[eth->port];
510 int ret;
511
512 ret = phy_startup(port_info->phydev);
513 if (ret) {
514 printf(SHETHER_NAME ": phy startup failure\n");
515 return ret;
516 }
517
518 ret = sh_eth_phy_regs_config(eth);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900519 if (ret)
Marek Vasutc13be6a2018-01-21 15:10:21 +0100520 return ret;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900521
522 sh_eth_start(eth);
523
Marek Vasutc13be6a2018-01-21 15:10:21 +0100524 return 0;
525}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900526
/* Per-device private state for the driver-model sh_ether driver. */
struct sh_ether_priv {
	struct sh_eth_dev shdev;	/* core driver state (rings, port info) */

	struct mii_dev *bus;		/* MDIO bus searched in sh_eth_phy_config() */
	phys_addr_t iobase;		/* register base, copied from platform data */
	struct clk clk;			/* module clock (used when CLK is enabled) */
	struct gpio_desc reset_gpio;	/* reset GPIO; presumably drives the PHY reset — used outside this view */
};
535
536static int sh_ether_send(struct udevice *dev, void *packet, int len)
537{
538 struct sh_ether_priv *priv = dev_get_priv(dev);
539 struct sh_eth_dev *eth = &priv->shdev;
540
541 return sh_eth_send_common(eth, packet, len);
542}
543
544static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
545{
546 struct sh_ether_priv *priv = dev_get_priv(dev);
547 struct sh_eth_dev *eth = &priv->shdev;
548 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200549 uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
Marek Vasut020d3942018-01-19 18:57:17 +0100550 int len;
551
552 len = sh_eth_recv_start(eth);
553 if (len > 0) {
554 invalidate_cache(packet, len);
555 *packetp = packet;
556
557 return len;
558 } else {
559 len = 0;
560
561 /* Restart the receiver if disabled */
562 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
563 sh_eth_write(port_info, EDRRR_R, EDRRR);
564
565 return -EAGAIN;
566 }
567}
568
569static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
570{
571 struct sh_ether_priv *priv = dev_get_priv(dev);
572 struct sh_eth_dev *eth = &priv->shdev;
573 struct sh_eth_info *port_info = &eth->port_info[eth->port];
574
575 sh_eth_recv_finish(eth);
576
577 /* Restart the receiver if disabled */
578 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
579 sh_eth_write(port_info, EDRRR_R, EDRRR);
580
581 return 0;
582}
583
584static int sh_ether_write_hwaddr(struct udevice *dev)
585{
586 struct sh_ether_priv *priv = dev_get_priv(dev);
587 struct sh_eth_dev *eth = &priv->shdev;
588 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Simon Glassfa20e932020-12-03 16:55:20 -0700589 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100590
591 sh_eth_write_hwaddr(port_info, pdata->enetaddr);
592
593 return 0;
594}
595
596static int sh_eth_phy_config(struct udevice *dev)
597{
598 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700599 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100600 struct sh_eth_dev *eth = &priv->shdev;
Marek Vasut7a309cf2018-02-17 00:46:26 +0100601 int ret = 0;
602 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100603 struct phy_device *phydev;
604 int mask = 0xffffffff;
605
Marek BehĂșn3927efb2022-04-07 00:33:08 +0200606 phydev = phy_find_by_mask(priv->bus, mask);
Marek Vasut020d3942018-01-19 18:57:17 +0100607 if (!phydev)
608 return -ENODEV;
609
Marek BehĂșn3927efb2022-04-07 00:33:08 +0200610 phy_connect_dev(phydev, dev, pdata->phy_interface);
Marek Vasut020d3942018-01-19 18:57:17 +0100611
612 port_info->phydev = phydev;
613 phy_config(phydev);
614
615 return ret;
616}
617
618static int sh_ether_start(struct udevice *dev)
619{
620 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700621 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100622 struct sh_eth_dev *eth = &priv->shdev;
623 int ret;
624
Marek Vasut020d3942018-01-19 18:57:17 +0100625 ret = sh_eth_init_common(eth, pdata->enetaddr);
626 if (ret)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100627 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100628
629 ret = sh_eth_start_common(eth);
630 if (ret)
631 goto err_start;
632
633 return 0;
634
635err_start:
636 sh_eth_tx_desc_free(eth);
637 sh_eth_rx_desc_free(eth);
Marek Vasut020d3942018-01-19 18:57:17 +0100638 return ret;
639}
640
641static void sh_ether_stop(struct udevice *dev)
642{
643 struct sh_ether_priv *priv = dev_get_priv(dev);
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100644 struct sh_eth_dev *eth = &priv->shdev;
645 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100646
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100647 phy_shutdown(port_info->phydev);
Marek Vasut020d3942018-01-19 18:57:17 +0100648 sh_eth_stop(&priv->shdev);
Marek Vasut020d3942018-01-19 18:57:17 +0100649}
650
651static int sh_ether_probe(struct udevice *udev)
652{
Simon Glassfa20e932020-12-03 16:55:20 -0700653 struct eth_pdata *pdata = dev_get_plat(udev);
Marek Vasut020d3942018-01-19 18:57:17 +0100654 struct sh_ether_priv *priv = dev_get_priv(udev);
655 struct sh_eth_dev *eth = &priv->shdev;
Marek Vasut27e06332018-06-18 04:03:01 +0200656 struct ofnode_phandle_args phandle_args;
Marek Vasut020d3942018-01-19 18:57:17 +0100657 struct mii_dev *mdiodev;
Marek Vasut020d3942018-01-19 18:57:17 +0100658 int ret;
659
Marek Vasut63ab72c2018-02-17 00:57:49 +0100660 priv->iobase = pdata->iobase;
Marek Vasut020d3942018-01-19 18:57:17 +0100661
Marek Vasut77f69f82019-05-02 00:03:26 +0200662#if CONFIG_IS_ENABLED(CLK)
Marek Vasut020d3942018-01-19 18:57:17 +0100663 ret = clk_get_by_index(udev, 0, &priv->clk);
664 if (ret < 0)
Marek Vasut63ab72c2018-02-17 00:57:49 +0100665 return ret;
Marek Vasut77f69f82019-05-02 00:03:26 +0200666#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100667
Marek Vasut27e06332018-06-18 04:03:01 +0200668 ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args);
669 if (!ret) {
670 gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
671 &priv->reset_gpio, GPIOD_IS_OUT);
672 }
673
674 if (!dm_gpio_is_valid(&priv->reset_gpio)) {
675 gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
676 GPIOD_IS_OUT);
677 }
Marek Vasut020d3942018-01-19 18:57:17 +0100678
679 mdiodev = mdio_alloc();
680 if (!mdiodev) {
681 ret = -ENOMEM;
Marek Vasut63ab72c2018-02-17 00:57:49 +0100682 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100683 }
684
685 mdiodev->read = bb_miiphy_read;
686 mdiodev->write = bb_miiphy_write;
687 bb_miiphy_buses[0].priv = eth;
688 snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
689
690 ret = mdio_register(mdiodev);
691 if (ret < 0)
692 goto err_mdio_register;
693
694 priv->bus = miiphy_get_dev_by_name(udev->name);
695
Tom Rini9996ab82022-12-04 10:13:52 -0500696 eth->port = CFG_SH_ETHER_USE_PORT;
Tom Rini45ec5fd2022-12-04 10:13:50 -0500697 eth->port_info[eth->port].phy_addr = CFG_SH_ETHER_PHY_ADDR;
Marek Vasut020d3942018-01-19 18:57:17 +0100698 eth->port_info[eth->port].iobase =
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200699 (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);
Marek Vasut020d3942018-01-19 18:57:17 +0100700
Marek Vasut77f69f82019-05-02 00:03:26 +0200701#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100702 ret = clk_enable(&priv->clk);
703 if (ret)
704 goto err_mdio_register;
Marek Vasut77f69f82019-05-02 00:03:26 +0200705#endif
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100706
Marek Vasutd53dd502020-04-04 15:01:22 +0200707 ret = sh_eth_init_common(eth, pdata->enetaddr);
708 if (ret)
709 goto err_phy_config;
710
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100711 ret = sh_eth_phy_config(udev);
712 if (ret) {
713 printf(SHETHER_NAME ": phy config timeout\n");
714 goto err_phy_config;
715 }
716
Marek Vasut020d3942018-01-19 18:57:17 +0100717 return 0;
718
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100719err_phy_config:
Marek Vasut77f69f82019-05-02 00:03:26 +0200720#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100721 clk_disable(&priv->clk);
Marek Vasut77f69f82019-05-02 00:03:26 +0200722#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100723err_mdio_register:
724 mdio_free(mdiodev);
Marek Vasut020d3942018-01-19 18:57:17 +0100725 return ret;
726}
727
728static int sh_ether_remove(struct udevice *udev)
729{
730 struct sh_ether_priv *priv = dev_get_priv(udev);
731 struct sh_eth_dev *eth = &priv->shdev;
732 struct sh_eth_info *port_info = &eth->port_info[eth->port];
733
Marek Vasut77f69f82019-05-02 00:03:26 +0200734#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100735 clk_disable(&priv->clk);
Marek Vasut77f69f82019-05-02 00:03:26 +0200736#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100737 free(port_info->phydev);
738 mdio_unregister(priv->bus);
739 mdio_free(priv->bus);
740
741 if (dm_gpio_is_valid(&priv->reset_gpio))
742 dm_gpio_free(udev, &priv->reset_gpio);
743
Marek Vasut020d3942018-01-19 18:57:17 +0100744 return 0;
745}
746
747static const struct eth_ops sh_ether_ops = {
748 .start = sh_ether_start,
749 .send = sh_ether_send,
750 .recv = sh_ether_recv,
751 .free_pkt = sh_ether_free_pkt,
752 .stop = sh_ether_stop,
753 .write_hwaddr = sh_ether_write_hwaddr,
754};
755
Simon Glassaad29ae2020-12-03 16:55:21 -0700756int sh_ether_of_to_plat(struct udevice *dev)
Marek Vasut020d3942018-01-19 18:57:17 +0100757{
Simon Glassfa20e932020-12-03 16:55:20 -0700758 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100759 const fdt32_t *cell;
Marek Vasut020d3942018-01-19 18:57:17 +0100760
Masahiro Yamadaa89b4de2020-07-17 14:36:48 +0900761 pdata->iobase = dev_read_addr(dev);
Marek BehĂșnbc194772022-04-07 00:33:01 +0200762
763 pdata->phy_interface = dev_read_phy_mode(dev);
Marek BehĂșn48631e42022-04-07 00:33:03 +0200764 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
Marek Vasut020d3942018-01-19 18:57:17 +0100765 return -EINVAL;
Marek Vasut020d3942018-01-19 18:57:17 +0100766
767 pdata->max_speed = 1000;
768 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
769 if (cell)
770 pdata->max_speed = fdt32_to_cpu(*cell);
771
772 sprintf(bb_miiphy_buses[0].name, dev->name);
773
Marek BehĂșnbc194772022-04-07 00:33:01 +0200774 return 0;
Marek Vasut020d3942018-01-19 18:57:17 +0100775}
776
777static const struct udevice_id sh_ether_ids[] = {
Marek Vasut77f69f82019-05-02 00:03:26 +0200778 { .compatible = "renesas,ether-r7s72100" },
Marek Vasut337ab3b2018-04-12 15:23:46 +0200779 { .compatible = "renesas,ether-r8a7790" },
Marek Vasut020d3942018-01-19 18:57:17 +0100780 { .compatible = "renesas,ether-r8a7791" },
Marek Vasut337ab3b2018-04-12 15:23:46 +0200781 { .compatible = "renesas,ether-r8a7793" },
782 { .compatible = "renesas,ether-r8a7794" },
Marek Vasut31124502019-07-31 12:58:06 +0200783 { .compatible = "renesas,gether-r8a77980" },
Marek Vasut020d3942018-01-19 18:57:17 +0100784 { }
785};
786
787U_BOOT_DRIVER(eth_sh_ether) = {
788 .name = "sh_ether",
789 .id = UCLASS_ETH,
790 .of_match = sh_ether_ids,
Simon Glassaad29ae2020-12-03 16:55:21 -0700791 .of_to_plat = sh_ether_of_to_plat,
Marek Vasut020d3942018-01-19 18:57:17 +0100792 .probe = sh_ether_probe,
793 .remove = sh_ether_remove,
794 .ops = &sh_ether_ops,
Simon Glass8a2b47f2020-12-03 16:55:17 -0700795 .priv_auto = sizeof(struct sh_ether_priv),
Simon Glass71fa5b42020-12-03 16:55:18 -0700796 .plat_auto = sizeof(struct eth_pdata),
Marek Vasut020d3942018-01-19 18:57:17 +0100797 .flags = DM_FLAG_ALLOC_PRIV_DMA,
798};
Marek Vasut020d3942018-01-19 18:57:17 +0100799
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900800/******* for bb_miiphy *******/
/* Bit-bang MDIO hook: no controller-level initialization is required */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
805
806static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
807{
808 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900809 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900810
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900811 sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900812
813 return 0;
814}
815
816static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
817{
818 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900819 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900820
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900821 sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900822
823 return 0;
824}
825
826static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
827{
828 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900829 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900830
831 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900832 sh_eth_write(port_info,
833 sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900834 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900835 sh_eth_write(port_info,
836 sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900837
838 return 0;
839}
840
841static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
842{
843 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900844 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900845
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900846 *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900847
848 return 0;
849}
850
851static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
852{
853 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900854 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900855
856 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900857 sh_eth_write(port_info,
858 sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900859 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900860 sh_eth_write(port_info,
861 sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900862
863 return 0;
864}
865
/* Half-period delay between MDC edges for the bit-banged bus */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
872
873struct bb_miiphy_bus bb_miiphy_buses[] = {
874 {
875 .name = "sh_eth",
876 .init = sh_eth_bb_init,
877 .mdio_active = sh_eth_bb_mdio_active,
878 .mdio_tristate = sh_eth_bb_mdio_tristate,
879 .set_mdio = sh_eth_bb_set_mdio,
880 .get_mdio = sh_eth_bb_get_mdio,
881 .set_mdc = sh_eth_bb_set_mdc,
882 .delay = sh_eth_bb_delay,
883 }
884};
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900885
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900886int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);