// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */

#include <config.h>
#include <common.h>
#include <env.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <linux/errno.h>
#include <asm/io.h>

#ifdef CONFIG_DM_ETH
#include <clk.h>
#include <dm.h>
#include <linux/mii.h>
#include <asm/gpio.h>
#endif

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
	flush_dcache_range((unsigned long)addr, \
		(unsigned long)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)	\
	{	\
		unsigned long line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		unsigned long start, end;	\
		\
		start = (unsigned long)addr;	\
		end = start + len;	\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
		\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

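/*
 * Queue a single frame on the current TX descriptor and wait for the
 * controller to finish with it: the frame and the descriptor are flushed
 * to memory, the transmitter is kicked via EDTRR if it is idle, and
 * TD_TACT is polled (up to TIMEOUT_CNT iterations of 100us) until the
 * DMA engine releases the descriptor.
 */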
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be on a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

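/*
 * Check the current RX descriptor. Returns the length of a received frame
 * (from rd1), or -EINVAL when the descriptor is still owned by the
 * controller (RD_RACT) or flagged with a receive error (RD_RFE).
 */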
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EINVAL;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return -EINVAL;

	return port_info->rx_desc_cur->rd1 & 0xffff;
}

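/* Hand the current RX descriptor back to the controller and advance the ring */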
static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}

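/*
 * On GETHER/RZ variants the controller performs a self-clearing software
 * reset that is polled via EDMR_SRST; on the other variants the reset bit
 * is simply toggled with a fixed delay in between.
 */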
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR); /* Last descriptor bit */
#endif

err:
	return ret;
}

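/*
 * Set up the RX ring: NUM_RX_DESC descriptors, each pointing at a
 * MAX_BUF_SIZE receive buffer, with the last descriptor marked RD_RDLE to
 * terminate the list.
 */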
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

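/* Program the MAC address: the first four bytes go to MAHR, the last two to MALR */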
static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
				unsigned char *mac)
{
	u32 val;

	val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	sh_eth_write(port_info, val, MAHR);

	val = (mac[4] << 8) | mac[5];
	sh_eth_write(port_info, val, MALR);
}

static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}

static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled
	 * when we have something to transmit.
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}

static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret = 0;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}

static int sh_eth_start_common(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	int ret;

	ret = phy_startup(port_info->phydev);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	ret = sh_eth_phy_regs_config(eth);
	if (ret)
		return ret;

	sh_eth_start(eth);

	return 0;
}

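/* Legacy (non driver-model) network interface */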
#ifndef CONFIG_DM_ETH
static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
{
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_eth_recv_common(struct sh_eth_dev *eth)
{
	int len = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		net_process_received_packet(packet, len);
		sh_eth_recv_finish(eth);
	} else {
		len = 0;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}

static int sh_eth_recv_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_recv_common(eth);
}

static int sh_eth_init_legacy(struct eth_device *dev, bd_t *bd)
{
	struct sh_eth_dev *eth = dev->priv;
	int ret;

	ret = sh_eth_init_common(eth, dev->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_phy_config_legacy(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_start;
	}

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

void sh_eth_halt_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	sh_eth_stop(eth);
}

int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;
	struct mii_dev *mdiodev;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init_legacy;
	dev->halt = sh_eth_halt_legacy;
	dev->send = sh_eth_send_legacy;
	dev->recv = sh_eth_recv_legacy;
	eth->port_info[eth->port].dev = dev;

	strcpy(dev->name, SHETHER_NAME);

	/* Register Device to EtherNet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;

	ret = mdio_register(mdiodev);
	if (ret < 0)
		return ret;

	if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

#else /* CONFIG_DM_ETH */

struct sh_ether_priv {
	struct sh_eth_dev shdev;

	struct mii_dev *bus;
	phys_addr_t iobase;
	struct clk clk;
	struct gpio_desc reset_gpio;
};

static int sh_ether_send(struct udevice *dev, void *packet, int len)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;

	return sh_eth_send_common(eth, packet, len);
}

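/*
 * DM receive path: return a pointer into the current RX buffer together
 * with the frame length, or -EAGAIN when nothing is pending. The descriptor
 * is handed back to the controller later, in sh_ether_free_pkt().
 */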
static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	} else {
		len = 0;

		/* Restart the receiver if disabled */
		if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
			sh_eth_write(port_info, EDRRR_R, EDRRR);

		return -EAGAIN;
	}
}

static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_recv_finish(eth);

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return 0;
}

static int sh_ether_write_hwaddr(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_pdata *pdata = dev_get_platdata(dev);

	sh_eth_write_hwaddr(port_info, pdata->enetaddr);

	return 0;
}

static int sh_eth_phy_config(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phydev;
	int mask = 0xffffffff;

	phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_ether_start(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret;

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

static void sh_ether_stop(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	phy_shutdown(port_info->phydev);
	sh_eth_stop(&priv->shdev);
}

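/*
 * Probe: optionally acquire the clock and reset GPIO, register a bitbang
 * MDIO bus named after the device, then configure the PHY. The clock is
 * only handled when CONFIG_IS_ENABLED(CLK).
 */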
static int sh_ether_probe(struct udevice *udev)
{
	struct eth_pdata *pdata = dev_get_platdata(udev);
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct ofnode_phandle_args phandle_args;
	struct mii_dev *mdiodev;
	int ret;

	priv->iobase = pdata->iobase;

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_get_by_index(udev, 0, &priv->clk);
	if (ret < 0)
		return ret;
#endif

	ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args);
	if (!ret) {
		gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
					   &priv->reset_gpio, GPIOD_IS_OUT);
	}

	if (!dm_gpio_is_valid(&priv->reset_gpio)) {
		gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
				     GPIOD_IS_OUT);
	}

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		return ret;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	priv->bus = miiphy_get_dev_by_name(udev->name);

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_enable(&priv->clk);
	if (ret)
		goto err_mdio_register;
#endif

	ret = sh_eth_phy_config(udev);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_config;
	}

	return 0;

err_phy_config:
#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
err_mdio_register:
	mdio_free(mdiodev);
	return ret;
}

static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	if (dm_gpio_is_valid(&priv->reset_gpio))
		dm_gpio_free(udev, &priv->reset_gpio);

	return 0;
}

static const struct eth_ops sh_ether_ops = {
	.start			= sh_ether_start,
	.send			= sh_ether_send,
	.recv			= sh_ether_recv,
	.free_pkt		= sh_ether_free_pkt,
	.stop			= sh_ether_stop,
	.write_hwaddr		= sh_ether_write_hwaddr,
};

int sh_ether_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return ret;
}

static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }
};

U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.ofdata_to_platdata = sh_ether_ofdata_to_platdata,
	.probe		= sh_ether_probe,
	.remove		= sh_ether_remove,
	.ops		= &sh_ether_ops,
	.priv_auto_alloc_size = sizeof(struct sh_ether_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};
#endif

/******* for bb_miiphy *******/
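/*
 * Bit-banged MDIO helpers: bus direction, data out, data in and clock are
 * all driven through individual bits of the PIR register (PIR_MMD, PIR_MDO,
 * PIR_MDI, PIR_MDC).
 */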
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	*v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);