blob: 1666a8cdb21249666a570392c00881fefee17298 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09002/*
Robert P. J. Day8c60f922016-05-04 04:47:31 -04003 * sh_eth.c - Driver for Renesas ethernet controller.
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09004 *
Nobuhiro Iwamatsu9dfac0a2011-11-14 16:56:59 +09005 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +09006 * Copyright (c) 2008, 2011, 2014 2014 Nobuhiro Iwamatsu
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09007 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +09008 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +09009 */
10
11#include <config.h>
12#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -070013#include <cpu_func.h>
Simon Glass0af6e2d2019-08-01 09:46:52 -060014#include <env.h>
Simon Glass0f2af882020-05-10 11:40:05 -060015#include <log.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090016#include <malloc.h>
17#include <net.h>
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +090018#include <netdev.h>
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +090019#include <miiphy.h>
Simon Glass274e0b02020-05-10 11:39:56 -060020#include <asm/cache.h>
Simon Glassdbd79542020-05-10 11:40:11 -060021#include <linux/delay.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090022#include <linux/errno.h>
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090023#include <asm/io.h>
24
Marek Vasut020d3942018-01-19 18:57:17 +010025#ifdef CONFIG_DM_ETH
26#include <clk.h>
27#include <dm.h>
28#include <linux/mii.h>
29#include <asm/gpio.h>
30#endif
31
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090032#include "sh_eth.h"
33
34#ifndef CONFIG_SH_ETHER_USE_PORT
35# error "Please define CONFIG_SH_ETHER_USE_PORT"
36#endif
37#ifndef CONFIG_SH_ETHER_PHY_ADDR
38# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
39#endif
Nobuhiro Iwamatsu6bff09d2013-08-22 13:22:01 +090040
Trevor Woerner43ec7e02019-05-03 09:41:00 -040041#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && \
42 !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090043#define flush_cache_wback(addr, len) \
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020044 flush_dcache_range((unsigned long)addr, \
45 (unsigned long)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
Yoshihiro Shimoda281aa052011-01-27 10:06:08 +090046#else
47#define flush_cache_wback(...)
48#endif
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +090049
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090050#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
51#define invalidate_cache(addr, len) \
52 { \
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020053 unsigned long line_size = CONFIG_SH_ETHER_ALIGNE_SIZE; \
54 unsigned long start, end; \
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090055 \
Marek Vasut9aa1d5b2019-07-31 14:48:17 +020056 start = (unsigned long)addr; \
57 end = start + len; \
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +090058 start &= ~(line_size - 1); \
59 end = ((end + line_size - 1) & ~(line_size - 1)); \
60 \
61 invalidate_dcache_range(start, end); \
62 }
63#else
64#define invalidate_cache(...)
65#endif
66
Nobuhiro Iwamatsu71f507c2012-01-11 10:23:51 +090067#define TIMEOUT_CNT 1000
68
/*
 * Queue one frame on the current tx descriptor and busy-wait until the
 * controller has sent it.
 *
 * @eth:    driver state; the active port's descriptor ring is used
 * @packet: frame to transmit; must be 4-byte aligned (DMA requirement)
 * @len:    frame length in bytes; must fit in 16 bits
 *
 * Returns 0 on success, -EINVAL for a NULL/oversized packet, -EFAULT
 * for a misaligned packet, -ETIMEDOUT if TD_TACT never clears within
 * TIMEOUT_CNT polls.
 */
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
				, __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);	/* data must reach RAM before DMA */
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;	/* length in upper 16 bits of td1 */
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	/* Push the updated descriptor out of the cache before the DMA reads it */
	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted (hardware clears TD_TACT) */
	timeout = TIMEOUT_CNT;
	do {
		/* re-read the descriptor from RAM, not from a stale cache line */
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Advance to the next descriptor, wrapping at the end of the ring */
	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}
125
Marek Vasut48de90d2018-01-21 15:39:50 +0100126static int sh_eth_recv_start(struct sh_eth_dev *eth)
Marek Vasut044eb2d2018-01-21 14:27:51 +0100127{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100128 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900129
130 /* Check if the rx descriptor is ready */
Nobuhiro Iwamatsuee74c702013-08-22 13:22:03 +0900131 invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
Marek Vasut48de90d2018-01-21 15:39:50 +0100132 if (port_info->rx_desc_cur->rd0 & RD_RACT)
133 return -EINVAL;
134
135 /* Check for errors */
136 if (port_info->rx_desc_cur->rd0 & RD_RFE)
137 return -EINVAL;
138
Marek Vasut2526b792018-02-17 00:47:38 +0100139 return port_info->rx_desc_cur->rd1 & 0xffff;
Marek Vasut48de90d2018-01-21 15:39:50 +0100140}
141
142static void sh_eth_recv_finish(struct sh_eth_dev *eth)
143{
144 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900145
Marek Vasut48de90d2018-01-21 15:39:50 +0100146 /* Make current descriptor available again */
147 if (port_info->rx_desc_cur->rd0 & RD_RDLE)
148 port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
149 else
150 port_info->rx_desc_cur->rd0 = RD_RACT;
Nobuhiro Iwamatsu5ba66ad2014-11-04 09:15:48 +0900151
Marek Vasut48de90d2018-01-21 15:39:50 +0100152 flush_cache_wback(port_info->rx_desc_cur,
153 sizeof(struct rx_desc_s));
154
155 /* Point to the next descriptor */
156 port_info->rx_desc_cur++;
157 if (port_info->rx_desc_cur >=
158 port_info->rx_desc_base + NUM_RX_DESC)
159 port_info->rx_desc_cur = port_info->rx_desc_base;
160}
161
/*
 * Software-reset the controller.
 *
 * GETHER/RZ controllers have a self-clearing EDMR_SRST bit that is
 * polled until it drops; other variants need the bit asserted, held,
 * and de-asserted manually.
 *
 * Returns 0 on success, -EIO if the GETHER/RZ reset bit never clears.
 */
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	/* Assert SRST, hold it for 3ms, then release it again */
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}
194
/*
 * Allocate and zero the tx descriptor ring, mark its last entry with
 * TD_TDLE, and point the controller's tx list registers at it.
 *
 * Returns 0 on success, -ENOMEM if the ring cannot be allocated.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Write the (still uninitialized) ring back to RAM before using
	 * the uncached alias below */
	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);/* Last descriptor bit */
#endif

err:
	return ret;
}
247
/*
 * Allocate and initialize the rx descriptor ring plus one MAX_BUF_SIZE
 * data buffer per descriptor, then point the controller's rx list
 * registers at the ring.
 *
 * Returns 0 on success, -ENOMEM on allocation failure; if the buffer
 * allocation fails, the already-allocated descriptor ring is freed.
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Push the ring to RAM before accessing it via the uncached alias */
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors: give each a buffer and hand it to
	 * the controller (RD_RACT) */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;	/* buffer size in upper 16 bits */
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
320
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900321static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900322{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100323 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900324
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900325 if (port_info->tx_desc_alloc) {
326 free(port_info->tx_desc_alloc);
327 port_info->tx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900328 }
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900329}
330
331static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
332{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100333 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900334
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900335 if (port_info->rx_desc_alloc) {
336 free(port_info->rx_desc_alloc);
337 port_info->rx_desc_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900338 }
339
Nobuhiro Iwamatsu1c822112014-11-04 09:15:47 +0900340 if (port_info->rx_buf_alloc) {
341 free(port_info->rx_buf_alloc);
342 port_info->rx_buf_alloc = NULL;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900343 }
344}
345
/*
 * Set up both descriptor rings.  On rx-ring failure the already
 * initialized tx ring is torn down again so nothing leaks.
 *
 * Returns 0 on success or the first negative error encountered.
 */
static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		return ret;

	ret = sh_eth_rx_desc_init(eth);
	if (ret) {
		/* Undo the tx side so the caller sees a clean state */
		sh_eth_tx_desc_free(eth);
		return ret;
	}

	return 0;
}
365
Marek Vasutccdfc5e2018-01-21 14:55:44 +0100366static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
367 unsigned char *mac)
368{
369 u32 val;
370
371 val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
372 sh_eth_write(port_info, val, MAHR);
373
374 val = (mac[4] << 8) | mac[5];
375 sh_eth_write(port_info, val, MALR);
376}
377
/*
 * Program the e-dmac and e-mac registers with the driver's fixed
 * configuration and install the MAC address.  Per-SoC quirks are
 * selected with the #ifdef blocks below.
 */
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	/* R8A77980 additionally sets the NBST bit in EDMR */
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	/* Zero the interrupt/status-related registers — this driver polls */
	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	/* Set bit 0 of RMIIMR; presumably selects RMII mode — see datasheet */
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900424
/*
 * Propagate the PHY's negotiated speed and duplex into the MAC
 * registers and enable the receiver/transmitter via ECMR.
 *
 * Always returns 0 (kept int for interface symmetry with callers).
 */
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;	/* extra ECMR bits, set per-SoC in the speed blocks */

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		/* R-Car: 100Mbit is signalled through the ECMR RTM bit */
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	/* Gigabit exists only on GETHER-type controllers */
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}
472
/* Start reception; transmission is enabled on demand in the send path. */
static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled when
	 * we have something to transmit
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900483
/* Stop the e-dmac receiver. */
static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * NOTE(review): writes the complement mask directly instead of a
	 * read-modify-write; presumably only the R bit of EDRRR is
	 * writable on this hardware — confirm against the datasheet.
	 */
	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}
490
/*
 * Common bring-up shared by legacy and DM paths: reset the controller,
 * build the descriptor rings, then program the MAC registers.
 *
 * Returns 0 on success or the first negative error encountered.
 */
static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret;

	ret = sh_eth_reset(eth);
	if (!ret)
		ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900507
Marek Vasutc13be6a2018-01-21 15:10:21 +0100508static int sh_eth_start_common(struct sh_eth_dev *eth)
509{
510 struct sh_eth_info *port_info = &eth->port_info[eth->port];
511 int ret;
512
513 ret = phy_startup(port_info->phydev);
514 if (ret) {
515 printf(SHETHER_NAME ": phy startup failure\n");
516 return ret;
517 }
518
519 ret = sh_eth_phy_regs_config(eth);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900520 if (ret)
Marek Vasutc13be6a2018-01-21 15:10:21 +0100521 return ret;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900522
523 sh_eth_start(eth);
524
Marek Vasutc13be6a2018-01-21 15:10:21 +0100525 return 0;
526}
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900527
Marek Vasut020d3942018-01-19 18:57:17 +0100528#ifndef CONFIG_DM_ETH
Marek Vasut7ba52622018-01-21 15:31:48 +0100529static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
530{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100531 int ret = 0;
532 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut7ba52622018-01-21 15:31:48 +0100533 struct eth_device *dev = port_info->dev;
534 struct phy_device *phydev;
535
536 phydev = phy_connect(
537 miiphy_get_dev_by_name(dev->name),
538 port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
539 port_info->phydev = phydev;
540 phy_config(phydev);
541
542 return ret;
543}
544
545static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
546{
547 struct sh_eth_dev *eth = dev->priv;
548
549 return sh_eth_send_common(eth, packet, len);
550}
551
552static int sh_eth_recv_common(struct sh_eth_dev *eth)
553{
Marek Vasut7a309cf2018-02-17 00:46:26 +0100554 int len = 0;
555 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut7ba52622018-01-21 15:31:48 +0100556 uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
557
558 len = sh_eth_recv_start(eth);
559 if (len > 0) {
560 invalidate_cache(packet, len);
561 net_process_received_packet(packet, len);
562 sh_eth_recv_finish(eth);
563 } else
564 len = 0;
565
566 /* Restart the receiver if disabled */
567 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
568 sh_eth_write(port_info, EDRRR_R, EDRRR);
569
570 return len;
571}
572
573static int sh_eth_recv_legacy(struct eth_device *dev)
574{
575 struct sh_eth_dev *eth = dev->priv;
576
577 return sh_eth_recv_common(eth);
578}
579
/*
 * Legacy (non-DM) init hook: reset and configure the controller,
 * attach the PHY, and start the receiver.
 *
 * Returns 0 on success; on any failure after the rings were built,
 * both descriptor rings are freed before the error is returned.
 */
static int sh_eth_init_legacy(struct eth_device *dev, struct bd_info *bd)
{
	struct sh_eth_dev *eth = dev->priv;
	int ret;

	ret = sh_eth_init_common(eth, dev->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_phy_config_legacy(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_start;
	}

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}
606
Marek Vasutc13be6a2018-01-21 15:10:21 +0100607void sh_eth_halt_legacy(struct eth_device *dev)
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900608{
609 struct sh_eth_dev *eth = dev->priv;
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900610
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900611 sh_eth_stop(eth);
612}
613
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +0900614int sh_eth_initialize(struct bd_info *bd)
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900615{
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900616 int ret = 0;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900617 struct sh_eth_dev *eth = NULL;
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900618 struct eth_device *dev = NULL;
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900619 struct mii_dev *mdiodev;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900620
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900621 eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900622 if (!eth) {
623 printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
624 ret = -ENOMEM;
625 goto err;
626 }
627
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900628 dev = (struct eth_device *)malloc(sizeof(struct eth_device));
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900629 if (!dev) {
630 printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
631 ret = -ENOMEM;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900632 goto err;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900633 }
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900634 memset(dev, 0, sizeof(struct eth_device));
635 memset(eth, 0, sizeof(struct sh_eth_dev));
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900636
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900637 eth->port = CONFIG_SH_ETHER_USE_PORT;
638 eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900639 eth->port_info[eth->port].iobase =
640 (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900641
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900642 dev->priv = (void *)eth;
643 dev->iobase = 0;
Marek Vasutc13be6a2018-01-21 15:10:21 +0100644 dev->init = sh_eth_init_legacy;
645 dev->halt = sh_eth_halt_legacy;
Marek Vasut044eb2d2018-01-21 14:27:51 +0100646 dev->send = sh_eth_send_legacy;
647 dev->recv = sh_eth_recv_legacy;
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900648 eth->port_info[eth->port].dev = dev;
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900649
Ben Whitten34fd6c92015-12-30 13:05:58 +0000650 strcpy(dev->name, SHETHER_NAME);
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900651
Nobuhiro Iwamatsu31e84df2014-01-23 07:52:19 +0900652 /* Register Device to EtherNet subsystem */
653 eth_register(dev);
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900654
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900655 bb_miiphy_buses[0].priv = eth;
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900656 mdiodev = mdio_alloc();
Joe Hershberger1fbcbed2016-08-08 11:28:38 -0500657 if (!mdiodev)
658 return -ENOMEM;
659 strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
660 mdiodev->read = bb_miiphy_read;
661 mdiodev->write = bb_miiphy_write;
662
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +0900663 ret = mdio_register(mdiodev);
664 if (ret < 0)
665 return ret;
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900666
Simon Glass399a9ce2017-08-03 12:22:14 -0600667 if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
Mike Frysingera86bf132009-02-11 19:14:09 -0500668 puts("Please set MAC address\n");
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900669
670 return ret;
671
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900672err:
Nobuhiro Iwamatsud8f5d502008-11-21 12:04:18 +0900673 if (dev)
674 free(dev);
675
676 if (eth)
677 free(eth);
678
679 printf(SHETHER_NAME ": Failed\n");
680 return ret;
Nobuhiro Iwamatsu240b7232008-06-11 21:05:00 +0900681}
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900682
Marek Vasut020d3942018-01-19 18:57:17 +0100683#else /* CONFIG_DM_ETH */
684
/* Per-device driver state for the DM_ETH version of the driver. */
struct sh_ether_priv {
	struct sh_eth_dev shdev;	/* controller state shared with common code */

	struct mii_dev *bus;		/* MDIO bus registered in probe() */
	phys_addr_t iobase;		/* register base copied from platform data */
	struct clk clk;			/* functional clock (used only with CONFIG_CLK) */
	struct gpio_desc reset_gpio;	/* optional PHY reset line from the DT */
};
693
694static int sh_ether_send(struct udevice *dev, void *packet, int len)
695{
696 struct sh_ether_priv *priv = dev_get_priv(dev);
697 struct sh_eth_dev *eth = &priv->shdev;
698
699 return sh_eth_send_common(eth, packet, len);
700}
701
702static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
703{
704 struct sh_ether_priv *priv = dev_get_priv(dev);
705 struct sh_eth_dev *eth = &priv->shdev;
706 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200707 uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
Marek Vasut020d3942018-01-19 18:57:17 +0100708 int len;
709
710 len = sh_eth_recv_start(eth);
711 if (len > 0) {
712 invalidate_cache(packet, len);
713 *packetp = packet;
714
715 return len;
716 } else {
717 len = 0;
718
719 /* Restart the receiver if disabled */
720 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
721 sh_eth_write(port_info, EDRRR_R, EDRRR);
722
723 return -EAGAIN;
724 }
725}
726
727static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
728{
729 struct sh_ether_priv *priv = dev_get_priv(dev);
730 struct sh_eth_dev *eth = &priv->shdev;
731 struct sh_eth_info *port_info = &eth->port_info[eth->port];
732
733 sh_eth_recv_finish(eth);
734
735 /* Restart the receiver if disabled */
736 if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
737 sh_eth_write(port_info, EDRRR_R, EDRRR);
738
739 return 0;
740}
741
742static int sh_ether_write_hwaddr(struct udevice *dev)
743{
744 struct sh_ether_priv *priv = dev_get_priv(dev);
745 struct sh_eth_dev *eth = &priv->shdev;
746 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Simon Glassfa20e932020-12-03 16:55:20 -0700747 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100748
749 sh_eth_write_hwaddr(port_info, pdata->enetaddr);
750
751 return 0;
752}
753
754static int sh_eth_phy_config(struct udevice *dev)
755{
756 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700757 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100758 struct sh_eth_dev *eth = &priv->shdev;
Marek Vasut7a309cf2018-02-17 00:46:26 +0100759 int ret = 0;
760 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100761 struct phy_device *phydev;
762 int mask = 0xffffffff;
763
764 phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
765 if (!phydev)
766 return -ENODEV;
767
768 phy_connect_dev(phydev, dev);
769
770 port_info->phydev = phydev;
771 phy_config(phydev);
772
773 return ret;
774}
775
776static int sh_ether_start(struct udevice *dev)
777{
778 struct sh_ether_priv *priv = dev_get_priv(dev);
Simon Glassfa20e932020-12-03 16:55:20 -0700779 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100780 struct sh_eth_dev *eth = &priv->shdev;
781 int ret;
782
Marek Vasut020d3942018-01-19 18:57:17 +0100783 ret = sh_eth_init_common(eth, pdata->enetaddr);
784 if (ret)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100785 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100786
787 ret = sh_eth_start_common(eth);
788 if (ret)
789 goto err_start;
790
791 return 0;
792
793err_start:
794 sh_eth_tx_desc_free(eth);
795 sh_eth_rx_desc_free(eth);
Marek Vasut020d3942018-01-19 18:57:17 +0100796 return ret;
797}
798
799static void sh_ether_stop(struct udevice *dev)
800{
801 struct sh_ether_priv *priv = dev_get_priv(dev);
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100802 struct sh_eth_dev *eth = &priv->shdev;
803 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Marek Vasut020d3942018-01-19 18:57:17 +0100804
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100805 phy_shutdown(port_info->phydev);
Marek Vasut020d3942018-01-19 18:57:17 +0100806 sh_eth_stop(&priv->shdev);
Marek Vasut020d3942018-01-19 18:57:17 +0100807}
808
809static int sh_ether_probe(struct udevice *udev)
810{
Simon Glassfa20e932020-12-03 16:55:20 -0700811 struct eth_pdata *pdata = dev_get_plat(udev);
Marek Vasut020d3942018-01-19 18:57:17 +0100812 struct sh_ether_priv *priv = dev_get_priv(udev);
813 struct sh_eth_dev *eth = &priv->shdev;
Marek Vasut27e06332018-06-18 04:03:01 +0200814 struct ofnode_phandle_args phandle_args;
Marek Vasut020d3942018-01-19 18:57:17 +0100815 struct mii_dev *mdiodev;
Marek Vasut020d3942018-01-19 18:57:17 +0100816 int ret;
817
Marek Vasut63ab72c2018-02-17 00:57:49 +0100818 priv->iobase = pdata->iobase;
Marek Vasut020d3942018-01-19 18:57:17 +0100819
Marek Vasut77f69f82019-05-02 00:03:26 +0200820#if CONFIG_IS_ENABLED(CLK)
Marek Vasut020d3942018-01-19 18:57:17 +0100821 ret = clk_get_by_index(udev, 0, &priv->clk);
822 if (ret < 0)
Marek Vasut63ab72c2018-02-17 00:57:49 +0100823 return ret;
Marek Vasut77f69f82019-05-02 00:03:26 +0200824#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100825
Marek Vasut27e06332018-06-18 04:03:01 +0200826 ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args);
827 if (!ret) {
828 gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
829 &priv->reset_gpio, GPIOD_IS_OUT);
830 }
831
832 if (!dm_gpio_is_valid(&priv->reset_gpio)) {
833 gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
834 GPIOD_IS_OUT);
835 }
Marek Vasut020d3942018-01-19 18:57:17 +0100836
837 mdiodev = mdio_alloc();
838 if (!mdiodev) {
839 ret = -ENOMEM;
Marek Vasut63ab72c2018-02-17 00:57:49 +0100840 return ret;
Marek Vasut020d3942018-01-19 18:57:17 +0100841 }
842
843 mdiodev->read = bb_miiphy_read;
844 mdiodev->write = bb_miiphy_write;
845 bb_miiphy_buses[0].priv = eth;
846 snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
847
848 ret = mdio_register(mdiodev);
849 if (ret < 0)
850 goto err_mdio_register;
851
852 priv->bus = miiphy_get_dev_by_name(udev->name);
853
854 eth->port = CONFIG_SH_ETHER_USE_PORT;
855 eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
856 eth->port_info[eth->port].iobase =
Marek Vasut9aa1d5b2019-07-31 14:48:17 +0200857 (void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);
Marek Vasut020d3942018-01-19 18:57:17 +0100858
Marek Vasut77f69f82019-05-02 00:03:26 +0200859#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100860 ret = clk_enable(&priv->clk);
861 if (ret)
862 goto err_mdio_register;
Marek Vasut77f69f82019-05-02 00:03:26 +0200863#endif
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100864
Marek Vasutd53dd502020-04-04 15:01:22 +0200865 ret = sh_eth_init_common(eth, pdata->enetaddr);
866 if (ret)
867 goto err_phy_config;
868
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100869 ret = sh_eth_phy_config(udev);
870 if (ret) {
871 printf(SHETHER_NAME ": phy config timeout\n");
872 goto err_phy_config;
873 }
874
Marek Vasut020d3942018-01-19 18:57:17 +0100875 return 0;
876
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100877err_phy_config:
Marek Vasut77f69f82019-05-02 00:03:26 +0200878#if CONFIG_IS_ENABLED(CLK)
Marek Vasutf6cf4ba2019-03-30 07:22:09 +0100879 clk_disable(&priv->clk);
Marek Vasut77f69f82019-05-02 00:03:26 +0200880#endif
Marek Vasut020d3942018-01-19 18:57:17 +0100881err_mdio_register:
882 mdio_free(mdiodev);
Marek Vasut020d3942018-01-19 18:57:17 +0100883 return ret;
884}
885
/*
 * Remove: release everything probe() acquired — clock, PHY device,
 * MDIO bus and the optional reset GPIO.
 */
static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
	/* NOTE(review): phydev came from phy_find_by_mask(); freeing it
	 * directly here assumes no phy_free()-style helper is required —
	 * confirm against the phy framework.
	 */
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	if (dm_gpio_is_valid(&priv->reset_gpio))
		dm_gpio_free(udev, &priv->reset_gpio);

	return 0;
}
904
/* DM_ETH operations table; each hook wraps the shared sh_eth_* helpers. */
static const struct eth_ops sh_ether_ops = {
	.start			= sh_ether_start,
	.send			= sh_ether_send,
	.recv			= sh_ether_recv,
	.free_pkt		= sh_ether_free_pkt,
	.stop			= sh_ether_stop,
	.write_hwaddr		= sh_ether_write_hwaddr,
};
913
Simon Glassaad29ae2020-12-03 16:55:21 -0700914int sh_ether_of_to_plat(struct udevice *dev)
Marek Vasut020d3942018-01-19 18:57:17 +0100915{
Simon Glassfa20e932020-12-03 16:55:20 -0700916 struct eth_pdata *pdata = dev_get_plat(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100917 const char *phy_mode;
918 const fdt32_t *cell;
919 int ret = 0;
920
Masahiro Yamadaa89b4de2020-07-17 14:36:48 +0900921 pdata->iobase = dev_read_addr(dev);
Marek Vasut020d3942018-01-19 18:57:17 +0100922 pdata->phy_interface = -1;
923 phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
924 NULL);
925 if (phy_mode)
926 pdata->phy_interface = phy_get_interface_by_name(phy_mode);
927 if (pdata->phy_interface == -1) {
928 debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
929 return -EINVAL;
930 }
931
932 pdata->max_speed = 1000;
933 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
934 if (cell)
935 pdata->max_speed = fdt32_to_cpu(*cell);
936
937 sprintf(bb_miiphy_buses[0].name, dev->name);
938
939 return ret;
940}
941
/* DT compatibles handled by this driver (R7S72100, R-Car Gen2, V3H). */
static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }	/* sentinel */
};
951
/*
 * Driver-model binding.  DM_FLAG_ALLOC_PRIV_DMA makes the core allocate
 * priv data with DMA-capable alignment (the descriptor rings live in it).
 */
U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.of_to_plat = sh_ether_of_to_plat,
	.probe		= sh_ether_probe,
	.remove		= sh_ether_remove,
	.ops		= &sh_ether_ops,
	.priv_auto	= sizeof(struct sh_ether_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};
964#endif
965
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900966/******* for bb_miiphy *******/
/* Bit-bang bus init hook: nothing to do, PIR access needs no setup here. */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
971
972static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
973{
974 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900975 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900976
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900977 sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900978
979 return 0;
980}
981
982static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
983{
984 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900985 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900986
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900987 sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900988
989 return 0;
990}
991
992static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
993{
994 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900995 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +0900996
997 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +0900998 sh_eth_write(port_info,
999 sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001000 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001001 sh_eth_write(port_info,
1002 sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001003
1004 return 0;
1005}
1006
1007static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
1008{
1009 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001010 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001011
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001012 *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001013
1014 return 0;
1015}
1016
1017static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
1018{
1019 struct sh_eth_dev *eth = bus->priv;
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001020 struct sh_eth_info *port_info = &eth->port_info[eth->port];
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001021
1022 if (v)
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001023 sh_eth_write(port_info,
1024 sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001025 else
Nobuhiro Iwamatsuec921f12017-12-01 08:10:32 +09001026 sh_eth_write(port_info,
1027 sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
Yoshihiro Shimoda677f6cd2011-10-11 18:10:14 +09001028
1029 return 0;
1030}
1031
/* Half-period delay between MDC edges (10 us per transition). */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
1038
/*
 * Single bit-bang MDIO bus description; .priv is filled in at
 * initialisation/probe time with the owning sh_eth_dev.
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};
Nobuhiro Iwamatsuca36b0e2017-12-01 08:08:00 +09001051
/* Number of bit-bang buses exported to the bb_miiphy framework. */
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);