// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */

#include <config.h>
#include <common.h>
#include <environment.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <linux/errno.h>
#include <asm/io.h>

#ifdef CONFIG_DM_ETH
#include <clk.h>
#include <dm.h>
#include <linux/mii.h>
#include <asm/gpio.h>
#endif

#include "sh_eth.h"

#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
		flush_dcache_range((u32)addr, \
		(u32)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)		\
	{					\
		u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE;	\
		u32 start, end;			\
						\
		start = (u32)addr;		\
		end = start + len;		\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
						\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

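/*
 * Queue one packet on the current TX descriptor, kick the transmitter if it
 * is idle and poll TD_TACT until the controller has sent the frame or
 * TIMEOUT_CNT expires.
 */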
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* The packet must be on a 4 byte boundary */
	if ((int)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

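/*
 * Check the current RX descriptor: return the received frame length, or a
 * negative value if the descriptor is still owned by the controller or
 * flags a receive error.
 */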
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EINVAL;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return -EINVAL;

	return port_info->rx_desc_cur->rd1 & 0xffff;
}

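/* Hand the current RX descriptor back to the controller and advance the ring. */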
static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}

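/*
 * Software-reset the controller. The GETHER/RZ variants poll EDMR_SRST until
 * the hardware clears it; the others just toggle the bit with a short delay.
 */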
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

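/* Allocate the TX descriptor ring, clear it and point the controller at it. */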
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to the size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((u32)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);	/* Last descriptor bit */
#endif

err:
	return ret;
}

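/*
 * Allocate the RX descriptor ring and its data buffers, attach one buffer to
 * each descriptor and point the controller at the ring.
 */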
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to the size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

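/* Program the MAC address: the first four bytes go to MAHR, the last two to MALR. */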
static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
				unsigned char *mac)
{
	u32 val;

	val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	sh_eth_write(port_info, val, MAHR);

	val = (mac[4] << 8) | mac[5];
	sh_eth_write(port_info, val, MALR);
}

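/* Program the E-DMAC/E-MAC registers with their initial settings, including the MAC address. */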
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Configure e-dmac registers */
	sh_eth_write(port_info, (sh_eth_read(port_info, EDMR) & ~EMDR_DESC_R) |
			(EMDR_DESC | EDMR_EL), EDMR);

	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}

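/*
 * Propagate the negotiated PHY speed and duplex into the controller
 * (GECMR/RTRATE/ECMR, depending on the SoC variant).
 */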
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_RCAR_GEN2)
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}

static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/*
	 * Enable the e-dmac receiver only. The transmitter will be enabled
	 * when we have something to transmit.
	 */
	sh_eth_write(port_info, EDRRR_R, EDRRR);
}

static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}

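/*
 * Common initialization shared by the legacy and DM paths: reset the
 * controller, set up the descriptor rings and program the MAC registers.
 */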
static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
{
	int ret = 0;

	ret = sh_eth_reset(eth);
	if (ret)
		return ret;

	ret = sh_eth_desc_init(eth);
	if (ret)
		return ret;

	sh_eth_mac_regs_config(eth, mac);

	return 0;
}

static int sh_eth_start_common(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	int ret;

	ret = phy_startup(port_info->phydev);
	if (ret) {
		printf(SHETHER_NAME ": phy startup failure\n");
		return ret;
	}

	ret = sh_eth_phy_regs_config(eth);
	if (ret)
		return ret;

	sh_eth_start(eth);

	return 0;
}

#ifndef CONFIG_DM_ETH
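/* Legacy (non-driver-model) network interface */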
static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
{
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_device *dev = port_info->dev;
	struct phy_device *phydev;

	phydev = phy_connect(
			miiphy_get_dev_by_name(dev->name),
			port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_eth_recv_common(struct sh_eth_dev *eth)
{
	int len = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		net_process_received_packet(packet, len);
		sh_eth_recv_finish(eth);
	} else
		len = 0;

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}

static int sh_eth_recv_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	return sh_eth_recv_common(eth);
}

static int sh_eth_init_legacy(struct eth_device *dev, bd_t *bd)
{
	struct sh_eth_dev *eth = dev->priv;
	int ret;

	ret = sh_eth_init_common(eth, dev->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_phy_config_legacy(eth);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_start;
	}

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

void sh_eth_halt_legacy(struct eth_device *dev)
{
	struct sh_eth_dev *eth = dev->priv;

	sh_eth_stop(eth);
}

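/*
 * Legacy entry point: allocate the eth_device, register it with the network
 * core and register the bit-banged MDIO bus.
 */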
int sh_eth_initialize(bd_t *bd)
{
	int ret = 0;
	struct sh_eth_dev *eth = NULL;
	struct eth_device *dev = NULL;
	struct mii_dev *mdiodev;

	eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
	if (!eth) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev) {
		printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
		ret = -ENOMEM;
		goto err;
	}
	memset(dev, 0, sizeof(struct eth_device));
	memset(eth, 0, sizeof(struct sh_eth_dev));

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);

	dev->priv = (void *)eth;
	dev->iobase = 0;
	dev->init = sh_eth_init_legacy;
	dev->halt = sh_eth_halt_legacy;
	dev->send = sh_eth_send_legacy;
	dev->recv = sh_eth_recv_legacy;
	eth->port_info[eth->port].dev = dev;

	strcpy(dev->name, SHETHER_NAME);

	/* Register Device to EtherNet subsystem */
	eth_register(dev);

	bb_miiphy_buses[0].priv = eth;
	mdiodev = mdio_alloc();
	if (!mdiodev)
		return -ENOMEM;
	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;

	ret = mdio_register(mdiodev);
	if (ret < 0)
		return ret;

	if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
		puts("Please set MAC address\n");

	return ret;

err:
	if (dev)
		free(dev);

	if (eth)
		free(eth);

	printf(SHETHER_NAME ": Failed\n");
	return ret;
}

#else /* CONFIG_DM_ETH */

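/* Private driver data for the driver-model (DM_ETH) implementation */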
struct sh_ether_priv {
	struct sh_eth_dev	shdev;

	struct mii_dev		*bus;
	phys_addr_t		iobase;
	struct clk		clk;
	struct gpio_desc	reset_gpio;
};

static int sh_ether_send(struct udevice *dev, void *packet, int len)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;

	return sh_eth_send_common(eth, packet, len);
}

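/*
 * DM receive callback: return a pointer to the frame held in the current RX
 * buffer, or -EAGAIN when nothing is pending. The descriptor is handed back
 * to the controller later, in sh_ether_free_pkt().
 */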
static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	} else {
		len = 0;

		/* Restart the receiver if disabled */
		if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
			sh_eth_write(port_info, EDRRR_R, EDRRR);

		return -EAGAIN;
	}
}

static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_recv_finish(eth);

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return 0;
}

static int sh_ether_write_hwaddr(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_pdata *pdata = dev_get_platdata(dev);

	sh_eth_write_hwaddr(port_info, pdata->enetaddr);

	return 0;
}

static int sh_eth_phy_config(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phydev;
	int mask = 0xffffffff;

	phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	phy_connect_dev(phydev, dev);

	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_ether_start(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret;

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

static void sh_ether_stop(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	phy_shutdown(port_info->phydev);
	sh_eth_stop(&priv->shdev);
}

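/*
 * Probe: take the I/O base from platform data, optionally grab the clock,
 * request the PHY reset GPIO, register the bit-banged MDIO bus and configure
 * the PHY.
 */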
static int sh_ether_probe(struct udevice *udev)
{
	struct eth_pdata *pdata = dev_get_platdata(udev);
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct ofnode_phandle_args phandle_args;
	struct mii_dev *mdiodev;
	int ret;

	priv->iobase = pdata->iobase;

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_get_by_index(udev, 0, &priv->clk);
	if (ret < 0)
		return ret;
#endif

	ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args);
	if (!ret) {
		gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
					   &priv->reset_gpio, GPIOD_IS_OUT);
	}

	if (!dm_gpio_is_valid(&priv->reset_gpio)) {
		gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
				     GPIOD_IS_OUT);
	}

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		return ret;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	priv->bus = miiphy_get_dev_by_name(udev->name);

	eth->port = CONFIG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_enable(&priv->clk);
	if (ret)
		goto err_mdio_register;
#endif

	ret = sh_eth_phy_config(udev);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_config;
	}

	return 0;

err_phy_config:
#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
err_mdio_register:
	mdio_free(mdiodev);
	return ret;
}

static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	if (dm_gpio_is_valid(&priv->reset_gpio))
		dm_gpio_free(udev, &priv->reset_gpio);

	return 0;
}

static const struct eth_ops sh_ether_ops = {
	.start			= sh_ether_start,
	.send			= sh_ether_send,
	.recv			= sh_ether_recv,
	.free_pkt		= sh_ether_free_pkt,
	.stop			= sh_ether_stop,
	.write_hwaddr		= sh_ether_write_hwaddr,
};

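/* Extract "reg", "phy-mode" and "max-speed" from the device tree into eth_pdata. */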
int sh_ether_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	const char *phy_mode;
	const fdt32_t *cell;
	int ret = 0;

	pdata->iobase = devfdt_get_addr(dev);
	pdata->phy_interface = -1;
	phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
			       NULL);
	if (phy_mode)
		pdata->phy_interface = phy_get_interface_by_name(phy_mode);
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return ret;
}

static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ }
};

U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.ofdata_to_platdata = sh_ether_ofdata_to_platdata,
	.probe		= sh_ether_probe,
	.remove		= sh_ether_remove,
	.ops		= &sh_ether_ops,
	.priv_auto_alloc_size = sizeof(struct sh_ether_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};
#endif

/******* for bb_miiphy *******/
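/*
 * Bit-banged MDIO helpers: the MDC/MDIO lines are driven through the PIR
 * register (PIR_MMD selects the pin direction, PIR_MDO/PIR_MDC drive the
 * pins, PIR_MDI reads the input back).
 */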
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	*v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);