blob: 3143a5813a6db4ea5c179922b1514de51771dab5 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09002/*
Robert P. J. Day8c60f922016-05-04 04:47:31 -04003 * sh_eth.c - Driver for Renesas ethernet controller.
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09004 *
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +09005 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09007 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +09008 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09009 */
10
11#include <config.h>
12#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -070013#include <cpu_func.h>
Simon Glass0af6e2d2019-08-01 09:46:52 -060014#include <env.h>
Simon Glass0f2af882020-05-10 11:40:05 -060015#include <log.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090016#include <malloc.h>
17#include <net.h>
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +090018#include <netdev.h>
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +090019#include <miiphy.h>
Simon Glass274e0b02020-05-10 11:39:56 -060020#include <asm/cache.h>
Simon Glassdbd79542020-05-10 11:40:11 -060021#include <linux/delay.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090022#include <linux/errno.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060023#include <asm/global_data.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090024#include <asm/io.h>
25
Marek Vasut020d3942018-01-19 18:57:17 +010026#ifdef CONFIG_DM_ETH
27#include <clk.h>
28#include <dm.h>
29#include <linux/mii.h>
30#include <asm/gpio.h>
31#endif
32
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090033#include "sh_eth.h"
34
/* Board configs must select the E-MAC port and the PHY address to use */
#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

/*
 * Write back dirty cache lines covering [addr, addr + len) so the DMA
 * engine sees data the CPU has written.  The end address is rounded up
 * to the configured cache-line alignment.  Compiles to nothing when the
 * data cache is off or write-back support is not configured.
 */
#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
		flush_dcache_range((unsigned long)addr, \
		(unsigned long)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

/*
 * Invalidate cache lines covering [addr, addr + len) so the CPU re-reads
 * data the DMA engine has written.  Start/end are aligned down/up to the
 * cache-line size, as invalidation works on whole lines.  No-op unless
 * invalidate support is configured on ARM.
 */
#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)	\
	{	\
		unsigned long line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		unsigned long start, end;	\
		\
		start = (unsigned long)addr;	\
		end = start + len;	\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
		\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

/* Generic poll-loop iteration budget used by reset and transmit waits */
#define TIMEOUT_CNT 1000
69
Marek Vasut044eb2d2018-01-21 14:27:51 +010070static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090071{
Marek Vasut7a309cf2018-02-17 00:46:26 +010072 int ret = 0, timeout;
73 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090074
75 if (!packet || len > 0xffff) {
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +090076 printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
77 ret = -EINVAL;
78 goto err;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090079 }
80
81 /* packet must be a 4 byte boundary */
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020082 if ((uintptr_t)packet & 3) {
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +090083 printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +090084 , __func__);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +090085 ret = -EFAULT;
86 goto err;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090087 }
88
89 /* Update tx descriptor */
Yoshihiro Shimoda281aa052011-01-27 10:06:08 +090090 flush_cache_wback(packet, len);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090091 port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
92 port_info->tx_desc_cur->td1 = len << 16;
93 /* Must preserve the end of descriptor list indication */
94 if (port_info->tx_desc_cur->td0 & TD_TDLE)
95 port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
96 else
97 port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;
98
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +090099 flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));
100
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900101 /* Restart the transmitter if disabled */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900102 if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
103 sh_eth_write(port_info, EDTRR_TRNS, EDTRR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900104
105 /* Wait until packet is transmitted */
Nobuhiro Iwamatsu71f507c2012-01-11 10:23:51 +0900106 timeout = TIMEOUT_CNT;
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +0900107 do {
108 invalidate_cache(port_info->tx_desc_cur,
109 sizeof(struct tx_desc_s));
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900110 udelay(100);
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +0900111 } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900112
113 if (timeout < 0) {
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900114 printf(SHETHER_NAME ": transmit timeout\n");
115 ret = -ETIMEDOUT;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900116 goto err;
117 }
118
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900119 port_info->tx_desc_cur++;
120 if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
121 port_info->tx_desc_cur = port_info->tx_desc_base;
122
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900123err:
124 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900125}
126
Marek Vasut48de90d2018-01-21 15:39:50 +0100127static int sh_eth_recv_start(struct sh_eth_dev *eth)
Marek Vasut044eb2d2018-01-21 14:27:51 +0100128{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100129 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900130
131 /* Check if the rx descriptor is ready */
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +0900132 invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
Marek Vasut48de90d2018-01-21 15:39:50 +0100133 if (port_info->rx_desc_cur->rd0 & RD_RACT)
134 return -EINVAL;
135
136 /* Check for errors */
137 if (port_info->rx_desc_cur->rd0 & RD_RFE)
138 return -EINVAL;
139
Marek Vasut2526b792018-02-17 00:47:38 +0100140 return port_info->rx_desc_cur->rd1 & 0xffff;
Marek Vasut48de90d2018-01-21 15:39:50 +0100141}
142
143static void sh_eth_recv_finish(struct sh_eth_dev *eth)
144{
145 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900146
Marek Vasut48de90d2018-01-21 15:39:50 +0100147 /* Make current descriptor available again */
148 if (port_info->rx_desc_cur->rd0 & RD_RDLE)
149 port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
150 else
151 port_info->rx_desc_cur->rd0 = RD_RACT;
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +0900152
Marek Vasut48de90d2018-01-21 15:39:50 +0100153 flush_cache_wback(port_info->rx_desc_cur,
154 sizeof(struct rx_desc_s));
155
156 /* Point to the next descriptor */
157 port_info->rx_desc_cur++;
158 if (port_info->rx_desc_cur >=
159 port_info->rx_desc_base + NUM_RX_DESC)
160 port_info->rx_desc_cur = port_info->rx_desc_base;
161}
162
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900163static int sh_eth_reset(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900164{
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900165 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu46288f42014-01-23 07:52:18 +0900166#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900167 int ret = 0, i;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900168
169 /* Start e-dmac transmitter and receiver */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900170 sh_eth_write(port_info, EDSR_ENALL, EDSR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900171
172 /* Perform a software reset and wait for it to complete */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900173 sh_eth_write(port_info, EDMR_SRST, EDMR);
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900174 for (i = 0; i < TIMEOUT_CNT; i++) {
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900175 if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900176 break;
177 udelay(1000);
178 }
179
Nobuhiro Iwamatsu71f507c2012-01-11 10:23:51 +0900180 if (i == TIMEOUT_CNT) {
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900181 printf(SHETHER_NAME ": Software reset timeout\n");
182 ret = -EIO;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900183 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900184
185 return ret;
Yoshihiro Shimoda34cca922011-01-18 17:53:45 +0900186#else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900187 sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
Marek Vasut42a33402018-02-17 00:57:49 +0100188 mdelay(3);
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900189 sh_eth_write(port_info,
190 sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);
Yoshihiro Shimoda34cca922011-01-18 17:53:45 +0900191
192 return 0;
193#endif
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900194}
195
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900196static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900197{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100198 int i, ret = 0;
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900199 u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
Marek Vasut7a309cf2018-02-17 00:46:26 +0100200 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900201 struct tx_desc_s *cur_tx_desc;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900202
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900203 /*
Nobuhiro Iwamatsuc24b3eb2014-11-04 09:15:46 +0900204 * Allocate rx descriptors. They must be aligned to size of struct
205 * tx_desc_s.
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900206 */
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900207 port_info->tx_desc_alloc =
208 memalign(sizeof(struct tx_desc_s), alloc_desc_size);
209 if (!port_info->tx_desc_alloc) {
210 printf(SHETHER_NAME ": memalign failed\n");
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900211 ret = -ENOMEM;
212 goto err;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900213 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900214
Nobuhiro Iwamatsu425a3a52017-12-01 13:56:08 +0900215 flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900216
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900217 /* Make sure we use a P2 address (non-cacheable) */
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900218 port_info->tx_desc_base =
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200219 (struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900220 port_info->tx_desc_cur = port_info->tx_desc_base;
221
222 /* Initialize all descriptors */
223 for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
224 cur_tx_desc++, i++) {
225 cur_tx_desc->td0 = 0x00;
226 cur_tx_desc->td1 = 0x00;
227 cur_tx_desc->td2 = 0x00;
228 }
229
230 /* Mark the end of the descriptors */
231 cur_tx_desc--;
232 cur_tx_desc->td0 |= TD_TDLE;
233
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900234 /*
235 * Point the controller to the tx descriptor list. Must use physical
236 * addresses
237 */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900238 sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
Nobuhiro Iwamatsu46288f42014-01-23 07:52:18 +0900239#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900240 sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
241 sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
242 sh_eth_write(port_info, 0x01, TDFFR);/* Last discriptor bit */
Yoshihiro Shimoda34cca922011-01-18 17:53:45 +0900243#endif
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900244
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900245err:
246 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900247}
248
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900249static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900250{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100251 int i, ret = 0;
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900252 u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
Marek Vasut7a309cf2018-02-17 00:46:26 +0100253 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900254 struct rx_desc_s *cur_rx_desc;
255 u8 *rx_buf;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900256
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900257 /*
Nobuhiro Iwamatsuc24b3eb2014-11-04 09:15:46 +0900258 * Allocate rx descriptors. They must be aligned to size of struct
259 * rx_desc_s.
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900260 */
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900261 port_info->rx_desc_alloc =
262 memalign(sizeof(struct rx_desc_s), alloc_desc_size);
263 if (!port_info->rx_desc_alloc) {
264 printf(SHETHER_NAME ": memalign failed\n");
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900265 ret = -ENOMEM;
266 goto err;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900267 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900268
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900269 flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);
270
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900271 /* Make sure we use a P2 address (non-cacheable) */
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900272 port_info->rx_desc_base =
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200273 (struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900274
275 port_info->rx_desc_cur = port_info->rx_desc_base;
276
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900277 /*
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900278 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
279 * aligned and in P2 area.
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900280 */
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900281 port_info->rx_buf_alloc =
282 memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
283 if (!port_info->rx_buf_alloc) {
284 printf(SHETHER_NAME ": alloc failed\n");
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900285 ret = -ENOMEM;
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900286 goto err_buf_alloc;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900287 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900288
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200289 port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900290
291 /* Initialize all descriptors */
292 for (cur_rx_desc = port_info->rx_desc_base,
293 rx_buf = port_info->rx_buf_base, i = 0;
294 i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
295 cur_rx_desc->rd0 = RD_RACT;
296 cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900297 cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900298 }
299
300 /* Mark the end of the descriptors */
301 cur_rx_desc--;
302 cur_rx_desc->rd0 |= RD_RDLE;
303
304 /* Point the controller to the rx descriptor list */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900305 sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
Nobuhiro Iwamatsu46288f42014-01-23 07:52:18 +0900306#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900307 sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
308 sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
309 sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
Yoshihiro Shimoda34cca922011-01-18 17:53:45 +0900310#endif
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900311
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900312 return ret;
313
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900314err_buf_alloc:
315 free(port_info->rx_desc_alloc);
316 port_info->rx_desc_alloc = NULL;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900317
318err:
319 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900320}
321
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900322static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900323{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100324 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900325
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900326 if (port_info->tx_desc_alloc) {
327 free(port_info->tx_desc_alloc);
328 port_info->tx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900329 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900330}
331
332static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
333{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100334 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900335
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900336 if (port_info->rx_desc_alloc) {
337 free(port_info->rx_desc_alloc);
338 port_info->rx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900339 }
340
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900341 if (port_info->rx_buf_alloc) {
342 free(port_info->rx_buf_alloc);
343 port_info->rx_buf_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900344 }
345}
346
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900347static int sh_eth_desc_init(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900348{
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900349 int ret = 0;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900350
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900351 ret = sh_eth_tx_desc_init(eth);
352 if (ret)
353 goto err_tx_init;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900354
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900355 ret = sh_eth_rx_desc_init(eth);
356 if (ret)
357 goto err_rx_init;
358
359 return ret;
360err_rx_init:
361 sh_eth_tx_desc_free(eth);
362
363err_tx_init:
364 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900365}
366
Marek Vasutccdfc5e2018-01-21 14:55:44 +0100367static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
368 unsigned char *mac)
369{
370 u32 val;
371
372 val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
373 sh_eth_write(port_info, val, MAHR);
374
375 val = (mac[4] << 8) | mac[5];
376 sh_eth_write(port_info, val, MALR);
377}
378
Marek Vasutc13be6a2018-01-21 15:10:21 +0100379static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900380{
Marek Vasutc13be6a2018-01-21 15:10:21 +0100381 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut31124502019-07-31 12:58:06 +0200382 unsigned long edmr;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900383
384 /* Configure e-dmac registers */
Marek Vasut31124502019-07-31 12:58:06 +0200385 edmr = sh_eth_read(port_info, EDMR);
386 edmr &= ~EMDR_DESC_R;
387 edmr |= EMDR_DESC | EDMR_EL;
388#if defined(CONFIG_R8A77980)
389 edmr |= EDMR_NBST;
390#endif
391 sh_eth_write(port_info, edmr, EDMR);
Nobuhiro Iwamatsu7a2142c2013-08-22 13:22:02 +0900392
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900393 sh_eth_write(port_info, 0, EESIPR);
394 sh_eth_write(port_info, 0, TRSCER);
395 sh_eth_write(port_info, 0, TFTR);
396 sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
397 sh_eth_write(port_info, RMCR_RST, RMCR);
Nobuhiro Iwamatsu46288f42014-01-23 07:52:18 +0900398#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900399 sh_eth_write(port_info, 0, RPADIR);
Yoshihiro Shimoda34cca922011-01-18 17:53:45 +0900400#endif
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900401 sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900402
403 /* Configure e-mac registers */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900404 sh_eth_write(port_info, 0, ECSIPR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900405
406 /* Set Mac address */
Marek Vasutc13be6a2018-01-21 15:10:21 +0100407 sh_eth_write_hwaddr(port_info, mac);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900408
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900409 sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
Yoshihiro Shimoda9d553032012-06-26 16:38:06 +0000410#if defined(SH_ETH_TYPE_GETHER)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900411 sh_eth_write(port_info, 0, PIPR);
Nobuhiro Iwamatsu46288f42014-01-23 07:52:18 +0900412#endif
413#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900414 sh_eth_write(port_info, APR_AP, APR);
415 sh_eth_write(port_info, MPR_MP, MPR);
416 sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
Yoshihiro Shimoda34cca922011-01-18 17:53:45 +0900417#endif
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +0900418
Nobuhiro Iwamatsu4ad2c2a2012-08-02 22:08:40 +0000419#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900420 sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
Marek Vasut31124502019-07-31 12:58:06 +0200421#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900422 sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
Nobuhiro Iwamatsu475f40d2012-05-15 15:49:39 +0000423#endif
Marek Vasutc13be6a2018-01-21 15:10:21 +0100424}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900425
Marek Vasutc13be6a2018-01-21 15:10:21 +0100426static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
427{
428 struct sh_eth_info *port_info = &eth->port_info[eth->port];
429 struct phy_device *phy = port_info->phydev;
430 int ret = 0;
431 u32 val = 0;
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +0900432
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900433 /* Set the transfer speed */
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900434 if (phy->speed == 100) {
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900435 printf(SHETHER_NAME ": 100Base/");
Yoshihiro Shimoda9d553032012-06-26 16:38:06 +0000436#if defined(SH_ETH_TYPE_GETHER)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900437 sh_eth_write(port_info, GECMR_100B, GECMR);
Yoshihiro Shimodad27e8c92012-11-04 15:54:30 +0000438#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900439 sh_eth_write(port_info, 1, RTRATE);
Marek Vasut31124502019-07-31 12:58:06 +0200440#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +0900441 val = ECMR_RTM;
442#endif
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900443 } else if (phy->speed == 10) {
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900444 printf(SHETHER_NAME ": 10Base/");
Yoshihiro Shimoda9d553032012-06-26 16:38:06 +0000445#if defined(SH_ETH_TYPE_GETHER)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900446 sh_eth_write(port_info, GECMR_10B, GECMR);
Yoshihiro Shimodad27e8c92012-11-04 15:54:30 +0000447#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900448 sh_eth_write(port_info, 0, RTRATE);
Yoshihiro Shimoda34cca922011-01-18 17:53:45 +0900449#endif
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +0900450 }
Yoshihiro Shimoda9d553032012-06-26 16:38:06 +0000451#if defined(SH_ETH_TYPE_GETHER)
Nobuhiro Iwamatsu475f40d2012-05-15 15:49:39 +0000452 else if (phy->speed == 1000) {
453 printf(SHETHER_NAME ": 1000Base/");
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900454 sh_eth_write(port_info, GECMR_1000B, GECMR);
Nobuhiro Iwamatsu475f40d2012-05-15 15:49:39 +0000455 }
456#endif
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900457
458 /* Check if full duplex mode is supported by the phy */
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900459 if (phy->duplex) {
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900460 printf("Full\n");
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900461 sh_eth_write(port_info,
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900462 val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
Yoshihiro Shimoda4c4aa6c2012-06-26 16:38:09 +0000463 ECMR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900464 } else {
465 printf("Half\n");
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900466 sh_eth_write(port_info,
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900467 val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
468 ECMR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900469 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900470
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900471 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900472}
473
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900474static void sh_eth_start(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900475{
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900476 struct sh_eth_info *port_info = &eth->port_info[eth->port];
477
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900478 /*
479 * Enable the e-dmac receiver only. The transmitter will be enabled when
480 * we have something to transmit
481 */
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900482 sh_eth_write(port_info, EDRRR_R, EDRRR);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900483}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900484
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900485static void sh_eth_stop(struct sh_eth_dev *eth)
486{
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900487 struct sh_eth_info *port_info = &eth->port_info[eth->port];
488
489 sh_eth_write(port_info, ~EDRRR_R, EDRRR);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900490}
491
Marek Vasutc13be6a2018-01-21 15:10:21 +0100492static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900493{
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900494 int ret = 0;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900495
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900496 ret = sh_eth_reset(eth);
497 if (ret)
Marek Vasutc13be6a2018-01-21 15:10:21 +0100498 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900499
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900500 ret = sh_eth_desc_init(eth);
501 if (ret)
Marek Vasutc13be6a2018-01-21 15:10:21 +0100502 return ret;
503
504 sh_eth_mac_regs_config(eth, mac);
505
506 return 0;
507}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900508
Marek Vasutc13be6a2018-01-21 15:10:21 +0100509static int sh_eth_start_common(struct sh_eth_dev *eth)
510{
511 struct sh_eth_info *port_info = &eth->port_info[eth->port];
512 int ret;
513
514 ret = phy_startup(port_info->phydev);
515 if (ret) {
516 printf(SHETHER_NAME ": phy startup failure\n");
517 return ret;
518 }
519
520 ret = sh_eth_phy_regs_config(eth);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900521 if (ret)
Marek Vasutc13be6a2018-01-21 15:10:21 +0100522 return ret;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900523
524 sh_eth_start(eth);
525
Marek Vasutc13be6a2018-01-21 15:10:21 +0100526 return 0;
527}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900528
Marek Vasut020d3942018-01-19 18:57:17 +0100529#ifndef CONFIG_DM_ETH
Marek Vasut7ba52622018-01-21 15:31:48 +0100530static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
531{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100532 int ret = 0;
533 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut7ba52622018-01-21 15:31:48 +0100534 struct eth_device *dev = port_info->dev;
535 struct phy_device *phydev;
536
537 phydev = phy_connect(
538 miiphy_get_dev_by_name(dev->name),
539 port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
540 port_info->phydev = phydev;
541 phy_config(phydev);
542
543 return ret;
544}
545
546static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
547{
548 struct sh_eth_dev *eth = dev->priv;
549
550 return sh_eth_send_common(eth, packet, len);
551}
552
553static int sh_eth_recv_common(struct sh_eth_dev *eth)
554{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100555 int len = 0;
556 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut7ba52622018-01-21 15:31:48 +0100557 uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
558
559 len = sh_eth_recv_start(eth);
560 if (len > 0) {
561 invalidate_cache(packet, len);
562 net_process_received_packet(packet, len);
563 sh_eth_recv_finish(eth);
564 } else
565 len = 0;
566
567 /* Restart the receiver if disabled */
568 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
569 sh_eth_write(port_info, EDRRR_R, EDRRR);
570
571 return len;
572}
573
574static int sh_eth_recv_legacy(struct eth_device *dev)
575{
576 struct sh_eth_dev *eth = dev->priv;
577
578 return sh_eth_recv_common(eth);
579}
580
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +0900581static int sh_eth_init_legacy(struct eth_device *dev, struct bd_info *bd)
Marek Vasutc13be6a2018-01-21 15:10:21 +0100582{
583 struct sh_eth_dev *eth = dev->priv;
584 int ret;
585
586 ret = sh_eth_init_common(eth, dev->enetaddr);
587 if (ret)
588 return ret;
589
590 ret = sh_eth_phy_config_legacy(eth);
591 if (ret) {
592 printf(SHETHER_NAME ": phy config timeout\n");
593 goto err_start;
594 }
595
596 ret = sh_eth_start_common(eth);
597 if (ret)
598 goto err_start;
599
600 return 0;
601
602err_start:
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900603 sh_eth_tx_desc_free(eth);
604 sh_eth_rx_desc_free(eth);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900605 return ret;
606}
607
Marek Vasutc13be6a2018-01-21 15:10:21 +0100608void sh_eth_halt_legacy(struct eth_device *dev)
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900609{
610 struct sh_eth_dev *eth = dev->priv;
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900611
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900612 sh_eth_stop(eth);
613}
614
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +0900615int sh_eth_initialize(struct bd_info *bd)
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900616{
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900617 int ret = 0;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900618 struct sh_eth_dev *eth = NULL;
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900619 struct eth_device *dev = NULL;
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900620 struct mii_dev *mdiodev;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900621
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900622 eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900623 if (!eth) {
624 printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
625 ret = -ENOMEM;
626 goto err;
627 }
628
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900629 dev = (struct eth_device *)malloc(sizeof(struct eth_device));
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900630 if (!dev) {
631 printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
632 ret = -ENOMEM;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900633 goto err;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900634 }
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900635 memset(dev, 0, sizeof(struct eth_device));
636 memset(eth, 0, sizeof(struct sh_eth_dev));
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900637
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900638 eth->port = CONFIG_SH_ETHER_USE_PORT;
639 eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900640 eth->port_info[eth->port].iobase =
641 (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900642
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900643 dev->priv = (void *)eth;
644 dev->iobase = 0;
Marek Vasutc13be6a2018-01-21 15:10:21 +0100645 dev->init = sh_eth_init_legacy;
646 dev->halt = sh_eth_halt_legacy;
Marek Vasut044eb2d2018-01-21 14:27:51 +0100647 dev->send = sh_eth_send_legacy;
648 dev->recv = sh_eth_recv_legacy;
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900649 eth->port_info[eth->port].dev = dev;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900650
Ben Whitten34fd6c92015-12-30 13:05:58 +0000651 strcpy(dev->name, SHETHER_NAME);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900652
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900653 /* Register Device to EtherNet subsystem */
654 eth_register(dev);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900655
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900656 bb_miiphy_buses[0].priv = eth;
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900657 mdiodev = mdio_alloc();
Joe Hershberger1fbcbed2016-08-08 11:28:38 -0500658 if (!mdiodev)
659 return -ENOMEM;
660 strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
661 mdiodev->read = bb_miiphy_read;
662 mdiodev->write = bb_miiphy_write;
663
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900664 ret = mdio_register(mdiodev);
665 if (ret < 0)
666 return ret;
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900667
Simon Glass399a9ce2017-08-03 12:22:14 -0600668 if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
Mike Frysingera86bf132009-02-11 19:14:09 -0500669 puts("Please set MAC address\n");
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900670
671 return ret;
672
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900673err:
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900674 if (dev)
675 free(dev);
676
677 if (eth)
678 free(eth);
679
680 printf(SHETHER_NAME ": Failed\n");
681 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900682}
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900683
Marek Vasut020d3942018-01-19 18:57:17 +0100684#else /* CONFIG_DM_ETH */
685
/* Per-device private state for the DM_ETH probe path */
struct sh_ether_priv {
	struct sh_eth_dev shdev;	/* controller state shared with the common code */

	struct mii_dev *bus;		/* MDIO bus registered in sh_ether_probe() */
	phys_addr_t iobase;		/* register base, copied from platdata */
	struct clk clk;			/* peripheral clock (used when CLK is enabled) */
	struct gpio_desc reset_gpio;	/* optional PHY reset line from DT */
};
694
695static int sh_ether_send(struct udevice *dev, void *packet, int len)
696{
697 struct sh_ether_priv *priv = dev_get_priv(dev);
698 struct sh_eth_dev *eth = &priv->shdev;
699
700 return sh_eth_send_common(eth, packet, len);
701}
702
703static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
704{
705 struct sh_ether_priv *priv = dev_get_priv(dev);
706 struct sh_eth_dev *eth = &priv->shdev;
707 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200708 uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
Marek Vasut020d3942018-01-19 18:57:17 +0100709 int len;
710
711 len = sh_eth_recv_start(eth);
712 if (len > 0) {
713 invalidate_cache(packet, len);
714 *packetp = packet;
715
716 return len;
717 } else {
718 len = 0;
719
720 /* Restart the receiver if disabled */
721 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
722 sh_eth_write(port_info, EDRRR_R, EDRRR);
723
724 return -EAGAIN;
725 }
726}
727
728static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
729{
730 struct sh_ether_priv *priv = dev_get_priv(dev);
731 struct sh_eth_dev *eth = &priv->shdev;
732 struct sh_eth_info *port_info = &eth->port_info[eth->port];
733
734 sh_eth_recv_finish(eth);
735
736 /* Restart the receiver if disabled */
737 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
738 sh_eth_write(port_info, EDRRR_R, EDRRR);
739
740 return 0;
741}
742
743static int sh_ether_write_hwaddr(struct udevice *dev)
744{
745 struct sh_ether_priv *priv = dev_get_priv(dev);
746 struct sh_eth_dev *eth = &priv->shdev;
747 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Simon Glassfa20e932020-12-03 16:55:20 -0700748 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100749
750 sh_eth_write_hwaddr(port_info, pdata->enetaddr);
751
752 return 0;
753}
754
755static int sh_eth_phy_config(struct udevice *dev)
756{
757 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700758 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100759 struct sh_eth_dev *eth = &priv->shdev;
Marek Vasut7a309cf2018-02-17 00:46:26 +0100760 int ret = 0;
761 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100762 struct phy_device *phydev;
763 int mask = 0xffffffff;
764
765 phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
766 if (!phydev)
767 return -ENODEV;
768
769 phy_connect_dev(phydev, dev);
770
771 port_info->phydev = phydev;
772 phy_config(phydev);
773
774 return ret;
775}
776
777static int sh_ether_start(struct udevice *dev)
778{
779 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700780 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100781 struct sh_eth_dev *eth = &priv->shdev;
782 int ret;
783
Marek Vasut020d3942018-01-19 18:57:17 +0100784 ret = sh_eth_init_common(eth, pdata->enetaddr);
785 if (ret)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100786 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100787
788 ret = sh_eth_start_common(eth);
789 if (ret)
790 goto err_start;
791
792 return 0;
793
794err_start:
795 sh_eth_tx_desc_free(eth);
796 sh_eth_rx_desc_free(eth);
Marek Vasut020d3942018-01-19 18:57:17 +0100797 return ret;
798}
799
800static void sh_ether_stop(struct udevice *dev)
801{
802 struct sh_ether_priv *priv = dev_get_priv(dev);
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100803 struct sh_eth_dev *eth = &priv->shdev;
804 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100805
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100806 phy_shutdown(port_info->phydev);
Marek Vasut020d3942018-01-19 18:57:17 +0100807 sh_eth_stop(&priv->shdev);
Marek Vasut020d3942018-01-19 18:57:17 +0100808}
809
810static int sh_ether_probe(struct udevice *udev)
811{
Simon Glassfa20e932020-12-03 16:55:20 -0700812 struct eth_pdata *pdata = dev_get_plat(udev);
Marek Vasut020d3942018-01-19 18:57:17 +0100813 struct sh_ether_priv *priv = dev_get_priv(udev);
814 struct sh_eth_dev *eth = &priv->shdev;
Marek Vasut27e06332018-06-18 04:03:01 +0200815 struct ofnode_phandle_args phandle_args;
Marek Vasut020d3942018-01-19 18:57:17 +0100816 struct mii_dev *mdiodev;
Marek Vasut020d3942018-01-19 18:57:17 +0100817 int ret;
818
Marek Vasut63ab72c2018-02-17 00:57:49 +0100819 priv->iobase = pdata->iobase;
Marek Vasut020d3942018-01-19 18:57:17 +0100820
Marek Vasut77f69f82019-05-02 00:03:26 +0200821#if CONFIG_IS_ENABLED(CLK)
Marek Vasut020d3942018-01-19 18:57:17 +0100822 ret = clk_get_by_index(udev, 0, &priv->clk);
823 if (ret < 0)
Marek Vasut63ab72c2018-02-17 00:57:49 +0100824 return ret;
Marek Vasut77f69f82019-05-02 00:03:26 +0200825#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100826
Marek Vasut27e06332018-06-18 04:03:01 +0200827 ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args);
828 if (!ret) {
829 gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
830 &priv->reset_gpio, GPIOD_IS_OUT);
831 }
832
833 if (!dm_gpio_is_valid(&priv->reset_gpio)) {
834 gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
835 GPIOD_IS_OUT);
836 }
Marek Vasut020d3942018-01-19 18:57:17 +0100837
838 mdiodev = mdio_alloc();
839 if (!mdiodev) {
840 ret = -ENOMEM;
Marek Vasut63ab72c2018-02-17 00:57:49 +0100841 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100842 }
843
844 mdiodev->read = bb_miiphy_read;
845 mdiodev->write = bb_miiphy_write;
846 bb_miiphy_buses[0].priv = eth;
847 snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
848
849 ret = mdio_register(mdiodev);
850 if (ret < 0)
851 goto err_mdio_register;
852
853 priv->bus = miiphy_get_dev_by_name(udev->name);
854
855 eth->port = CONFIG_SH_ETHER_USE_PORT;
856 eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
857 eth->port_info[eth->port].iobase =
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200858 (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);
Marek Vasut020d3942018-01-19 18:57:17 +0100859
Marek Vasut77f69f82019-05-02 00:03:26 +0200860#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100861 ret = clk_enable(&priv->clk);
862 if (ret)
863 goto err_mdio_register;
Marek Vasut77f69f82019-05-02 00:03:26 +0200864#endif
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100865
Marek Vasutd53dd502020-04-04 15:01:22 +0200866 ret = sh_eth_init_common(eth, pdata->enetaddr);
867 if (ret)
868 goto err_phy_config;
869
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100870 ret = sh_eth_phy_config(udev);
871 if (ret) {
872 printf(SHETHER_NAME ": phy config timeout\n");
873 goto err_phy_config;
874 }
875
Marek Vasut020d3942018-01-19 18:57:17 +0100876 return 0;
877
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100878err_phy_config:
Marek Vasut77f69f82019-05-02 00:03:26 +0200879#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100880 clk_disable(&priv->clk);
Marek Vasut77f69f82019-05-02 00:03:26 +0200881#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100882err_mdio_register:
883 mdio_free(mdiodev);
Marek Vasut020d3942018-01-19 18:57:17 +0100884 return ret;
885}
886
887static int sh_ether_remove(struct udevice *udev)
888{
889 struct sh_ether_priv *priv = dev_get_priv(udev);
890 struct sh_eth_dev *eth = &priv->shdev;
891 struct sh_eth_info *port_info = &eth->port_info[eth->port];
892
Marek Vasut77f69f82019-05-02 00:03:26 +0200893#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100894 clk_disable(&priv->clk);
Marek Vasut77f69f82019-05-02 00:03:26 +0200895#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100896 free(port_info->phydev);
897 mdio_unregister(priv->bus);
898 mdio_free(priv->bus);
899
900 if (dm_gpio_is_valid(&priv->reset_gpio))
901 dm_gpio_free(udev, &priv->reset_gpio);
902
Marek Vasut020d3942018-01-19 18:57:17 +0100903 return 0;
904}
905
/* DM_ETH operations: thin wrappers around the common sh_eth helpers */
static const struct eth_ops sh_ether_ops = {
	.start = sh_ether_start,
	.send = sh_ether_send,
	.recv = sh_ether_recv,
	.free_pkt = sh_ether_free_pkt,
	.stop = sh_ether_stop,
	.write_hwaddr = sh_ether_write_hwaddr,
};
914
Simon Glassaad29ae2020-12-03 16:55:21 -0700915int sh_ether_of_to_plat(struct udevice *dev)
Marek Vasut020d3942018-01-19 18:57:17 +0100916{
Simon Glassfa20e932020-12-03 16:55:20 -0700917 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100918 const char *phy_mode;
919 const fdt32_t *cell;
920 int ret = 0;
921
Masahiro Yamadaa89b4de2020-07-17 14:36:48 +0900922 pdata->iobase = dev_read_addr(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100923 pdata->phy_interface = -1;
924 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
925 NULL);
926 if (phy_mode)
927 pdata->phy_interface = phy_get_interface_by_name(phy_mode);
928 if (pdata->phy_interface == -1) {
929 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
930 return -EINVAL;
931 }
932
933 pdata->max_speed = 1000;
934 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
935 if (cell)
936 pdata->max_speed = fdt32_to_cpu(*cell);
937
938 sprintf(bb_miiphy_buses[0].name, dev->name);
939
940 return ret;
941}
942
/* Device-tree compatibles handled by this driver */
static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }
};
952
/*
 * Driver-model registration.  DM_FLAG_ALLOC_PRIV_DMA makes the core
 * allocate the private data DMA-aligned (it embeds descriptor state).
 */
U_BOOT_DRIVER(eth_sh_ether) = {
	.name = "sh_ether",
	.id = UCLASS_ETH,
	.of_match = sh_ether_ids,
	.of_to_plat = sh_ether_of_to_plat,
	.probe = sh_ether_probe,
	.remove = sh_ether_remove,
	.ops = &sh_ether_ops,
	.priv_auto = sizeof(struct sh_ether_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
965#endif
966
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900967/******* for bb_miiphy *******/
/* No controller-specific setup is needed for the bitbang MDIO bus */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
972
973static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
974{
975 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900976 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900977
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900978 sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900979
980 return 0;
981}
982
983static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
984{
985 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900986 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900987
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900988 sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900989
990 return 0;
991}
992
993static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
994{
995 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900996 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900997
998 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900999 sh_eth_write(port_info,
1000 sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001001 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001002 sh_eth_write(port_info,
1003 sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001004
1005 return 0;
1006}
1007
1008static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
1009{
1010 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001011 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001012
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001013 *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001014
1015 return 0;
1016}
1017
1018static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
1019{
1020 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001021 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001022
1023 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001024 sh_eth_write(port_info,
1025 sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001026 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001027 sh_eth_write(port_info,
1028 sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001029
1030 return 0;
1031}
1032
/* Half-period delay between MDC edges of the bitbanged MDIO clock */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
1039
/*
 * Single bitbanged MDIO bus for this driver.  .priv is filled in with
 * the sh_eth_dev pointer at initialize/probe time; the DM path also
 * overwrites .name with the device name.
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name = "sh_eth",
		.init = sh_eth_bb_init,
		.mdio_active = sh_eth_bb_mdio_active,
		.mdio_tristate = sh_eth_bb_mdio_tristate,
		.set_mdio = sh_eth_bb_set_mdio,
		.get_mdio = sh_eth_bb_get_mdio,
		.set_mdc = sh_eth_bb_set_mdc,
		.delay = sh_eth_bb_delay,
	}
};
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +09001052
/* Number of bitbang MDIO buses exported by this driver */
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);