// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <linux/errno.h>
#include <asm/io.h>

#ifdef CONFIG_DM_ETH
#include <clk.h>
#include <dm.h>
#include <linux/mii.h>
#include <asm/gpio.h>
#endif

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
	flush_dcache_range((unsigned long)addr, \
			   (unsigned long)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)		\
	{	\
		unsigned long line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		unsigned long start, end;	\
		\
		start = (unsigned long)addr;	\
		end = start + len;		\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
		\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

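/*
 * Cache handling note (summarizing the macros above): descriptors and packet
 * buffers are shared with the E-DMAC, so the driver writes them back with
 * flush_cache_wback() before handing them to the hardware and invalidates
 * them with invalidate_cache() before the CPU reads what the hardware wrote.
 * On SH the descriptor pointers are additionally remapped through
 * ADDR_TO_P2() to an uncached alias, so either macro may compile away on
 * configurations that do not need it.
 */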
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be aligned on a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
		       , __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

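/*
 * Receive handshake shared by the legacy and DM paths: sh_eth_recv_start()
 * inspects the current RX descriptor and returns the frame length from RD1
 * once the hardware has cleared RD_RACT (or a negative value while the
 * descriptor is still owned by the hardware or is flagged with RD_RFE), and
 * sh_eth_recv_finish() hands the descriptor back to the E-DMAC and advances
 * the ring pointer.
 */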
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EINVAL;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return -EINVAL;

	return port_info->rx_desc_cur->rd1 & 0xffff;
}

static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}

static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

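/*
 * The TX and RX rings below are allocated with memalign() so each ring is
 * aligned to its descriptor size, then accessed through an uncached P2
 * alias. The last descriptor in each ring carries TD_TDLE/RD_RDLE so the
 * controller wraps back to the base address instead of running off the end.
 */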
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
	return ret;
}

static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

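/*
 * The MAC address is programmed in two halves: the first four bytes go into
 * MAHR and the remaining two into MALR, most significant byte first.
 */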
static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
				unsigned char *mac)
{
	u32 val;

	val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	sh_eth_write(port_info, val, MAHR);

	val = (mac[4] << 8) | mac[5];
	sh_eth_write(port_info, val, MALR);
}

static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}

static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled when
	 * we have something to transmit
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}

static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret = 0;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}

static int sh_eth_start_common(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	int ret;

	ret = phy_startup(port_info->phydev);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	ret = sh_eth_phy_regs_config(eth);
	if (ret)
		return ret;

	sh_eth_start(eth);

	return 0;
}

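/*
 * Everything above is shared. The remainder of the file provides two front
 * ends around the *_common helpers: a legacy struct eth_device binding when
 * CONFIG_DM_ETH is disabled, and a driver-model UCLASS_ETH driver otherwise.
 */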
#ifndef CONFIG_DM_ETH
static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
{
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_eth_recv_common(struct sh_eth_dev *eth)
{
	int len = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		net_process_received_packet(packet, len);
		sh_eth_recv_finish(eth);
	} else {
		len = 0;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}

static int sh_eth_recv_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_recv_common(eth);
}

static int sh_eth_init_legacy(struct eth_device *dev, bd_t *bd)
{
	struct sh_eth_dev *eth = dev->priv;
	int ret;

	ret = sh_eth_init_common(eth, dev->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_phy_config_legacy(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_start;
	}

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

void sh_eth_halt_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	sh_eth_stop(eth);
}

int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;
	struct mii_dev *mdiodev;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init_legacy;
	dev->halt = sh_eth_halt_legacy;
	dev->send = sh_eth_send_legacy;
	dev->recv = sh_eth_recv_legacy;
	eth->port_info[eth->port].dev = dev;

	strcpy(dev->name, SHETHER_NAME);

	/* Register Device to EtherNet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;

	ret = mdio_register(mdiodev);
	if (ret < 0)
		return ret;

	if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

#else /* CONFIG_DM_ETH */

struct sh_ether_priv {
	struct sh_eth_dev	shdev;

	struct mii_dev		*bus;
	phys_addr_t		iobase;
	struct clk		clk;
	struct gpio_desc	reset_gpio;
};

static int sh_ether_send(struct udevice *dev, void *packet, int len)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	} else {
		len = 0;

		/* Restart the receiver if disabled */
		if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
			sh_eth_write(port_info, EDRRR_R, EDRRR);

		return -EAGAIN;
	}
}

static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_recv_finish(eth);

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return 0;
}

static int sh_ether_write_hwaddr(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_pdata *pdata = dev_get_platdata(dev);

	sh_eth_write_hwaddr(port_info, pdata->enetaddr);

	return 0;
}

static int sh_eth_phy_config(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phydev;
	int mask = 0xffffffff;

	phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_ether_start(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret;

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

static void sh_ether_stop(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	phy_shutdown(port_info->phydev);
	sh_eth_stop(&priv->shdev);
}

static int sh_ether_probe(struct udevice *udev)
{
	struct eth_pdata *pdata = dev_get_platdata(udev);
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct ofnode_phandle_args phandle_args;
	struct mii_dev *mdiodev;
	int ret;

	priv->iobase = pdata->iobase;

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_get_by_index(udev, 0, &priv->clk);
	if (ret < 0)
		return ret;
#endif

	ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args);
	if (!ret) {
		gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
					   &priv->reset_gpio, GPIOD_IS_OUT);
	}

	if (!dm_gpio_is_valid(&priv->reset_gpio)) {
		gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
				     GPIOD_IS_OUT);
	}

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		return ret;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	priv->bus = miiphy_get_dev_by_name(udev->name);

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_enable(&priv->clk);
	if (ret)
		goto err_mdio_register;
#endif

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		goto err_phy_config;

	ret = sh_eth_phy_config(udev);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_config;
	}

	return 0;

err_phy_config:
#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
err_mdio_register:
	mdio_free(mdiodev);
	return ret;
}

static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	if (dm_gpio_is_valid(&priv->reset_gpio))
		dm_gpio_free(udev, &priv->reset_gpio);

	return 0;
}

static const struct eth_ops sh_ether_ops = {
	.start			= sh_ether_start,
	.send			= sh_ether_send,
	.recv			= sh_ether_recv,
	.free_pkt		= sh_ether_free_pkt,
	.stop			= sh_ether_stop,
	.write_hwaddr		= sh_ether_write_hwaddr,
};

int sh_ether_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return ret;
}

static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }
};

U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.ofdata_to_platdata = sh_ether_ofdata_to_platdata,
	.probe		= sh_ether_probe,
	.remove		= sh_ether_remove,
	.ops		= &sh_ether_ops,
	.priv_auto_alloc_size = sizeof(struct sh_ether_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};
#endif

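/*
 * MDIO is bit-banged through the PIR register: the callbacks below toggle
 * the MMD (direction), MDO (data out) and MDC (clock) bits, sample MDI, and
 * are plugged into the generic bb_miiphy layer via bb_miiphy_buses[].
 */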
/******* for bb_miiphy *******/
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	*v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);