blob: f1ce994cfd53b2f1affc9f248f8c3344fc9a20e8 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09002/*
Robert P. J. Day8c60f922016-05-04 04:47:31 -04003 * sh_eth.c - Driver for Renesas ethernet controller.
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09004 *
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +09005 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09007 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +09008 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09009 */
10
11#include <config.h>
Simon Glass63334482019-11-14 12:57:39 -070012#include <cpu_func.h>
Simon Glass0af6e2d2019-08-01 09:46:52 -060013#include <env.h>
Simon Glass0f2af882020-05-10 11:40:05 -060014#include <log.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090015#include <malloc.h>
16#include <net.h>
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +090017#include <netdev.h>
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +090018#include <miiphy.h>
Simon Glass274e0b02020-05-10 11:39:56 -060019#include <asm/cache.h>
Simon Glassdbd79542020-05-10 11:40:11 -060020#include <linux/delay.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090021#include <linux/errno.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060022#include <asm/global_data.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090023#include <asm/io.h>
24
Marek Vasut020d3942018-01-19 18:57:17 +010025#include <clk.h>
26#include <dm.h>
27#include <linux/mii.h>
28#include <asm/gpio.h>
Marek Vasut020d3942018-01-19 18:57:17 +010029
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090030#include "sh_eth.h"
31
Tom Rini9996ab82022-12-04 10:13:52 -050032#ifndef CFG_SH_ETHER_USE_PORT
33# error "Please define CFG_SH_ETHER_USE_PORT"
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090034#endif
Tom Rini45ec5fd2022-12-04 10:13:50 -050035#ifndef CFG_SH_ETHER_PHY_ADDR
36# error "Please define CFG_SH_ETHER_PHY_ADDR"
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090037#endif
Nobuhiro Iwamatsu6bff09d2013-08-22 13:22:01 +090038
Tom Rini872054f2022-12-04 10:13:49 -050039#if defined(CFG_SH_ETHER_CACHE_WRITEBACK) && \
Trevor Woerner43ec7e02019-05-03 09:41:00 -040040 !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090041#define flush_cache_wback(addr, len) \
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020042 flush_dcache_range((unsigned long)addr, \
Tom Rinidd2eba02022-12-04 10:13:47 -050043 (unsigned long)(addr + ALIGN(len, CFG_SH_ETHER_ALIGNE_SIZE)))
Yoshihiro Shimoda281aa052011-01-27 10:06:08 +090044#else
45#define flush_cache_wback(...)
46#endif
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090047
Tom Rinia44cb162022-12-04 10:13:48 -050048#if defined(CFG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090049#define invalidate_cache(addr, len) \
50 { \
Tom Rinidd2eba02022-12-04 10:13:47 -050051 unsigned long line_size = CFG_SH_ETHER_ALIGNE_SIZE; \
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020052 unsigned long start, end; \
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090053 \
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020054 start = (unsigned long)addr; \
55 end = start + len; \
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090056 start &= ~(line_size - 1); \
57 end = ((end + line_size - 1) & ~(line_size - 1)); \
58 \
59 invalidate_dcache_range(start, end); \
60 }
61#else
62#define invalidate_cache(...)
63#endif
64
Nobuhiro Iwamatsu71f507c2012-01-11 10:23:51 +090065#define TIMEOUT_CNT 1000
66
/*
 * Queue one frame on the current TX descriptor and busy-wait until the
 * controller has sent it.
 *
 * @eth:    driver state; eth->port selects the active port_info
 * @packet: frame buffer; must be 4-byte aligned
 * @len:    frame length in bytes; must fit in 16 bits (td1 high half)
 * Return: 0 on success, -EINVAL/-EFAULT on bad arguments, -ETIMEDOUT
 *         if the controller never clears TD_TACT.
 */
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
		       , __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor: write buffer back to RAM before DMA reads it */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/*
	 * Wait until packet is transmitted: the controller clears TD_TACT
	 * when it is done with the descriptor. Re-invalidate each poll so
	 * we see the DMA engine's update, not a stale cache line.
	 */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	/* timeout reaches -1 only when the loop exhausted all retries */
	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Advance to the next descriptor, wrapping at the end of the ring */
	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}
123
/*
 * Check whether the current RX descriptor holds a received frame.
 *
 * Return: frame length in bytes when a good frame is ready, 0 when the
 * descriptor reports a receive error (RD_RFE set), or -EAGAIN when
 * nothing has been received yet (RD_RACT still owned by hardware).
 */
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EAGAIN;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return 0;

	/* Low half-word of rd1 is the received frame length */
	return port_info->rx_desc_cur->rd1 & 0xffff;
}
139
/*
 * Hand the current RX descriptor back to the controller and advance
 * the ring pointer. Called after the consumer has copied the frame out
 * of the buffer referenced by rd2.
 */
static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Drop cache lines covering the just-consumed buffer before reuse */
	invalidate_cache(ADDR_TO_P2(port_info->rx_desc_cur->rd2), MAX_BUF_SIZE);

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor, wrapping at the end of the ring */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}
161
/*
 * Software-reset the E-DMAC.
 *
 * On GETHER/RZ parts EDMR_SRST is cleared by hardware when the reset
 * completes, so the bit is polled; on other parts the bit is simply
 * asserted, held for 3 ms, then deasserted.
 * Return: 0 on success, -EIO if the GETHER/RZ reset never completes.
 */
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	/* Reset is not self-completing here: assert, wait, then deassert */
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}
194
/*
 * Allocate and initialize the TX descriptor ring, then point the
 * controller at it via TDLAR (and the GETHER/RZ FIFO registers).
 * Return: 0 on success, -ENOMEM if the ring cannot be allocated.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);
	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);/* Last descriptor bit */
#endif

err:
	return ret;
}
246
/*
 * Allocate and initialize the RX descriptor ring and its data buffers,
 * then point the controller at the ring via RDLAR (and the GETHER/RZ
 * FIFO registers). Each descriptor owns one MAX_BUF_SIZE buffer.
 * Return: 0 on success, -ENOMEM on allocation failure (descriptor
 * memory is released again if only the buffer allocation fails).
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors: hardware-owned, one buffer each */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	invalidate_cache(port_info->rx_buf_alloc, NUM_RX_DESC * MAX_BUF_SIZE);
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
320
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900321static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900322{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100323 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900324
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900325 if (port_info->tx_desc_alloc) {
326 free(port_info->tx_desc_alloc);
327 port_info->tx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900328 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900329}
330
331static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
332{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100333 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900334
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900335 if (port_info->rx_desc_alloc) {
336 free(port_info->rx_desc_alloc);
337 port_info->rx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900338 }
339
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900340 if (port_info->rx_buf_alloc) {
341 free(port_info->rx_buf_alloc);
342 port_info->rx_buf_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900343 }
344}
345
/*
 * Build both descriptor rings. If the RX ring cannot be set up, the
 * already-built TX ring is torn down again before returning.
 * Return: 0 on success, negative error code otherwise.
 */
static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		return ret;

	ret = sh_eth_rx_desc_init(eth);
	if (ret) {
		sh_eth_tx_desc_free(eth);
		return ret;
	}

	return 0;
}
365
Marek Vasutccdfc5e2018-01-21 14:55:44 +0100366static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
367 unsigned char *mac)
368{
369 u32 val;
370
371 val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
372 sh_eth_write(port_info, val, MAHR);
373
374 val = (mac[4] << 8) | mac[5];
375 sh_eth_write(port_info, val, MALR);
376}
377
/*
 * Program the E-DMAC and E-MAC registers that do not depend on the
 * negotiated PHY link state, and set the station MAC address.
 * Speed/duplex-dependent setup is done later in sh_eth_phy_regs_config().
 */
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	edmr |= EDMR_NBST;	/* extra EDMR bit required on R8A77980 */
#endif
	sh_eth_write(port_info, edmr, EDMR);

	/* Interrupt sources are not used by this polling driver */
	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

	/* SoC-specific MII/RMII mode selection */
#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900424
/*
 * Configure the speed- and duplex-dependent MAC registers from the
 * negotiated PHY state, then enable the receiver and transmitter in
 * ECMR. Prints the resulting link mode (e.g. "100Base/Full").
 * Return: always 0 (kept as int for call-site symmetry).
 */
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;	/* extra ECMR bits (ECMR_RTM on R-Car at 100 Mbit) */

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	/* Gigabit is only available on GETHER-type controllers */
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}
472
/*
 * Enable reception. The transmitter stays off; it is enabled on demand
 * in sh_eth_send_common().
 */
static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled when
	 * we have something to transmit
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900483
/* Disable the receiver. */
static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * NOTE(review): this writes ~EDRRR_R (every bit except R set) rather
	 * than plain 0. Only the R bit is referenced elsewhere in this
	 * driver; the effect of the remaining bits should be confirmed
	 * against the E-DMAC register description.
	 */
	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}
490
/*
 * One-time controller setup: software reset, descriptor rings, and
 * link-independent MAC/DMA register configuration.
 * Return: 0 on success, negative error code from reset/ring setup.
 */
static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int err;

	err = sh_eth_reset(eth);
	if (err)
		return err;

	err = sh_eth_desc_init(eth);
	if (err)
		return err;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900507
Marek Vasutc13be6a2018-01-21 15:10:21 +0100508static int sh_eth_start_common(struct sh_eth_dev *eth)
509{
510 struct sh_eth_info *port_info = &eth->port_info[eth->port];
511 int ret;
512
513 ret = phy_startup(port_info->phydev);
514 if (ret) {
515 printf(SHETHER_NAME ": phy startup failure\n");
516 return ret;
517 }
518
519 ret = sh_eth_phy_regs_config(eth);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900520 if (ret)
Marek Vasutc13be6a2018-01-21 15:10:21 +0100521 return ret;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900522
523 sh_eth_start(eth);
524
Marek Vasutc13be6a2018-01-21 15:10:21 +0100525 return 0;
526}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900527
/* Per-udevice private state for the driver-model ethernet wrapper. */
struct sh_ether_priv {
	struct sh_eth_dev shdev;	/* common controller/port state */

	struct mii_dev *bus;		/* MDIO bus used for PHY access */
	phys_addr_t iobase;		/* register base from platform data */
	struct clk clk;			/* controller clock handle */
};
535
536static int sh_ether_send(struct udevice *dev, void *packet, int len)
537{
538 struct sh_ether_priv *priv = dev_get_priv(dev);
539 struct sh_eth_dev *eth = &priv->shdev;
540
541 return sh_eth_send_common(eth, packet, len);
542}
543
/*
 * DM recv hook: if a frame is pending, hand back a pointer directly
 * into the current RX buffer (zero-copy; recycled later by
 * sh_ether_free_pkt()).
 * Return: frame length, 0 on a receive error, or -EAGAIN when nothing
 * is pending — in the latter cases the receiver is re-enabled if the
 * hardware had stopped it.
 */
static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		/* Discard stale cache lines before the caller reads the frame */
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}
566
/*
 * DM free_pkt hook: recycle the RX descriptor consumed by the previous
 * sh_ether_recv() and make sure the receiver is still running.
 * Return: always 0.
 */
static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_recv_finish(eth);

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return 0;
}
581
582static int sh_ether_write_hwaddr(struct udevice *dev)
583{
584 struct sh_ether_priv *priv = dev_get_priv(dev);
585 struct sh_eth_dev *eth = &priv->shdev;
586 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Simon Glassfa20e932020-12-03 16:55:20 -0700587 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100588
589 sh_eth_write_hwaddr(port_info, pdata->enetaddr);
590
591 return 0;
592}
593
594static int sh_eth_phy_config(struct udevice *dev)
595{
596 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700597 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100598 struct sh_eth_dev *eth = &priv->shdev;
Marek Vasut7a309cf2018-02-17 00:46:26 +0100599 int ret = 0;
600 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100601 struct phy_device *phydev;
Marek Vasut020d3942018-01-19 18:57:17 +0100602
Marek Vasut3b89e532023-05-31 00:51:23 +0200603 phydev = phy_connect(priv->bus, -1, dev, pdata->phy_interface);
Marek Vasut020d3942018-01-19 18:57:17 +0100604 if (!phydev)
605 return -ENODEV;
606
Marek Vasut020d3942018-01-19 18:57:17 +0100607 port_info->phydev = phydev;
608 phy_config(phydev);
609
610 return ret;
611}
612
613static int sh_ether_start(struct udevice *dev)
614{
615 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700616 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100617 struct sh_eth_dev *eth = &priv->shdev;
618 int ret;
619
Marek Vasut020d3942018-01-19 18:57:17 +0100620 ret = sh_eth_init_common(eth, pdata->enetaddr);
621 if (ret)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100622 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100623
624 ret = sh_eth_start_common(eth);
625 if (ret)
626 goto err_start;
627
628 return 0;
629
630err_start:
631 sh_eth_tx_desc_free(eth);
632 sh_eth_rx_desc_free(eth);
Marek Vasut020d3942018-01-19 18:57:17 +0100633 return ret;
634}
635
636static void sh_ether_stop(struct udevice *dev)
637{
638 struct sh_ether_priv *priv = dev_get_priv(dev);
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100639 struct sh_eth_dev *eth = &priv->shdev;
640 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100641
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100642 phy_shutdown(port_info->phydev);
Marek Vasut020d3942018-01-19 18:57:17 +0100643 sh_eth_stop(&priv->shdev);
Marek Vasut020d3942018-01-19 18:57:17 +0100644}
645
646static int sh_ether_probe(struct udevice *udev)
647{
Simon Glassfa20e932020-12-03 16:55:20 -0700648 struct eth_pdata *pdata = dev_get_plat(udev);
Marek Vasut020d3942018-01-19 18:57:17 +0100649 struct sh_ether_priv *priv = dev_get_priv(udev);
650 struct sh_eth_dev *eth = &priv->shdev;
651 struct mii_dev *mdiodev;
Marek Vasut020d3942018-01-19 18:57:17 +0100652 int ret;
653
Marek Vasut63ab72c2018-02-17 00:57:49 +0100654 priv->iobase = pdata->iobase;
Marek Vasut020d3942018-01-19 18:57:17 +0100655
Marek Vasut77f69f82019-05-02 00:03:26 +0200656#if CONFIG_IS_ENABLED(CLK)
Marek Vasut020d3942018-01-19 18:57:17 +0100657 ret = clk_get_by_index(udev, 0, &priv->clk);
658 if (ret < 0)
Marek Vasut63ab72c2018-02-17 00:57:49 +0100659 return ret;
Marek Vasut77f69f82019-05-02 00:03:26 +0200660#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100661 mdiodev = mdio_alloc();
662 if (!mdiodev) {
663 ret = -ENOMEM;
Marek Vasut63ab72c2018-02-17 00:57:49 +0100664 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100665 }
666
667 mdiodev->read = bb_miiphy_read;
668 mdiodev->write = bb_miiphy_write;
669 bb_miiphy_buses[0].priv = eth;
670 snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
671
672 ret = mdio_register(mdiodev);
673 if (ret < 0)
674 goto err_mdio_register;
675
676 priv->bus = miiphy_get_dev_by_name(udev->name);
677
Tom Rini9996ab82022-12-04 10:13:52 -0500678 eth->port = CFG_SH_ETHER_USE_PORT;
Tom Rini45ec5fd2022-12-04 10:13:50 -0500679 eth->port_info[eth->port].phy_addr = CFG_SH_ETHER_PHY_ADDR;
Marek Vasut020d3942018-01-19 18:57:17 +0100680 eth->port_info[eth->port].iobase =
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200681 (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);
Marek Vasut020d3942018-01-19 18:57:17 +0100682
Marek Vasut77f69f82019-05-02 00:03:26 +0200683#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100684 ret = clk_enable(&priv->clk);
685 if (ret)
686 goto err_mdio_register;
Marek Vasut77f69f82019-05-02 00:03:26 +0200687#endif
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100688
Marek Vasutd53dd502020-04-04 15:01:22 +0200689 ret = sh_eth_init_common(eth, pdata->enetaddr);
690 if (ret)
691 goto err_phy_config;
692
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100693 ret = sh_eth_phy_config(udev);
694 if (ret) {
695 printf(SHETHER_NAME ": phy config timeout\n");
696 goto err_phy_config;
697 }
698
Marek Vasut020d3942018-01-19 18:57:17 +0100699 return 0;
700
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100701err_phy_config:
Marek Vasut77f69f82019-05-02 00:03:26 +0200702#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100703 clk_disable(&priv->clk);
Marek Vasut77f69f82019-05-02 00:03:26 +0200704#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100705err_mdio_register:
706 mdio_free(mdiodev);
Marek Vasut020d3942018-01-19 18:57:17 +0100707 return ret;
708}
709
710static int sh_ether_remove(struct udevice *udev)
711{
712 struct sh_ether_priv *priv = dev_get_priv(udev);
713 struct sh_eth_dev *eth = &priv->shdev;
714 struct sh_eth_info *port_info = &eth->port_info[eth->port];
715
Marek Vasut77f69f82019-05-02 00:03:26 +0200716#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100717 clk_disable(&priv->clk);
Marek Vasut77f69f82019-05-02 00:03:26 +0200718#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100719 free(port_info->phydev);
720 mdio_unregister(priv->bus);
721 mdio_free(priv->bus);
722
Marek Vasut020d3942018-01-19 18:57:17 +0100723 return 0;
724}
725
/* Driver-model ethernet operations implemented by this driver */
static const struct eth_ops sh_ether_ops = {
	.start			= sh_ether_start,
	.send			= sh_ether_send,
	.recv			= sh_ether_recv,
	.free_pkt		= sh_ether_free_pkt,
	.stop			= sh_ether_stop,
	.write_hwaddr		= sh_ether_write_hwaddr,
};
734
Simon Glassaad29ae2020-12-03 16:55:21 -0700735int sh_ether_of_to_plat(struct udevice *dev)
Marek Vasut020d3942018-01-19 18:57:17 +0100736{
Simon Glassfa20e932020-12-03 16:55:20 -0700737 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100738 const fdt32_t *cell;
Marek Vasut020d3942018-01-19 18:57:17 +0100739
Masahiro Yamadaa89b4de2020-07-17 14:36:48 +0900740 pdata->iobase = dev_read_addr(dev);
Marek BehĂșnbc194772022-04-07 00:33:01 +0200741
742 pdata->phy_interface = dev_read_phy_mode(dev);
Marek BehĂșn48631e42022-04-07 00:33:03 +0200743 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
Marek Vasut020d3942018-01-19 18:57:17 +0100744 return -EINVAL;
Marek Vasut020d3942018-01-19 18:57:17 +0100745
746 pdata->max_speed = 1000;
747 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
748 if (cell)
749 pdata->max_speed = fdt32_to_cpu(*cell);
750
751 sprintf(bb_miiphy_buses[0].name, dev->name);
752
Marek BehĂșnbc194772022-04-07 00:33:01 +0200753 return 0;
Marek Vasut020d3942018-01-19 18:57:17 +0100754}
755
/* Matched SoCs — presumably RZ/A1 (r7s72100) and R-Car Gen2/Gen3
 * (r8a779x) Ether/GEther blocks; confirm against the DT bindings.
 */
static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }
};
765
/*
 * Driver registration. DM_FLAG_ALLOC_PRIV_DMA makes the private data
 * (which holds the DMA descriptor state) DMA-capable.
 */
U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.of_to_plat = sh_ether_of_to_plat,
	.probe	= sh_ether_probe,
	.remove	= sh_ether_remove,
	.ops	= &sh_ether_ops,
	.priv_auto	= sizeof(struct sh_ether_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags	= DM_FLAG_ALLOC_PRIV_DMA,
};
Marek Vasut020d3942018-01-19 18:57:17 +0100778
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900779/******* for bb_miiphy *******/
/* bb_miiphy init hook: no extra setup is needed for this bus */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
784
785static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
786{
787 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900788 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900789
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900790 sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900791
792 return 0;
793}
794
795static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
796{
797 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900798 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900799
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900800 sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900801
802 return 0;
803}
804
805static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
806{
807 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900808 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900809
810 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900811 sh_eth_write(port_info,
812 sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900813 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900814 sh_eth_write(port_info,
815 sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900816
817 return 0;
818}
819
820static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
821{
822 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900823 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900824
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900825 *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900826
827 return 0;
828}
829
830static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
831{
832 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900833 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900834
835 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900836 sh_eth_write(port_info,
837 sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900838 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900839 sh_eth_write(port_info,
840 sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900841
842 return 0;
843}
844
/* Half-period delay between MDC transitions for the bit-banged bus */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
851
/*
 * Single bit-banged MDIO bus instance. The .priv pointer is filled in
 * with the sh_eth_dev during probe, and .name is overwritten with the
 * device name when platform data is parsed.
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900864
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900865int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);