// SPDX-License-Identifier: GPL-2.0+
/*
 * sh_eth.c - Driver for Renesas ethernet controller.
 *
 * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
 * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
 * Copyright (C) 2013, 2014 Renesas Electronics Corporation
 */

#include <config.h>
#include <common.h>
#include <cpu_func.h>
#include <env.h>
#include <log.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <miiphy.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/global_data.h>
#include <asm/io.h>

#include <clk.h>
#include <dm.h>
#include <linux/mii.h>
#include <asm/gpio.h>

#include "sh_eth.h"

#ifndef CFG_SH_ETHER_USE_PORT
# error "Please define CFG_SH_ETHER_USE_PORT"
#endif
#ifndef CFG_SH_ETHER_PHY_ADDR
# error "Please define CFG_SH_ETHER_PHY_ADDR"
#endif

#if defined(CFG_SH_ETHER_CACHE_WRITEBACK) && \
	!CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len)	\
	flush_dcache_range((unsigned long)addr, \
			   (unsigned long)(addr + ALIGN(len, CFG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

#if defined(CFG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len)	\
	{	\
		unsigned long line_size = CFG_SH_ETHER_ALIGNE_SIZE;	\
		unsigned long start, end;	\
		\
		start = (unsigned long)addr;	\
		end = start + len;	\
		start &= ~(line_size - 1);	\
		end = ((end + line_size - 1) & ~(line_size - 1));	\
		\
		invalidate_dcache_range(start, end);	\
	}
#else
#define invalidate_cache(...)
#endif

#define TIMEOUT_CNT 1000

static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* The packet must be aligned to a 4 byte boundary */
	if ((uintptr_t)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n",
		       __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}

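/*
 * Check the current RX descriptor: return the length of a pending frame,
 * 0 if the frame was received with an error (RD_RFE set), or -EAGAIN when
 * no frame is available yet (descriptor still owned by the controller).
 */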
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EAGAIN;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return 0;

	return port_info->rx_desc_cur->rd1 & 0xffff;
}

static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	invalidate_cache(ADDR_TO_P2(port_info->rx_desc_cur->rd2), MAX_BUF_SIZE);

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}

static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}

static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);
	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);	/* Last descriptor bit */
#endif

err:
	return ret;
}

static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_alloc);

	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((uintptr_t)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	invalidate_cache(port_info->rx_buf_alloc, NUM_RX_DESC * MAX_BUF_SIZE);
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}

static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->tx_desc_alloc) {
		free(port_info->tx_desc_alloc);
		port_info->tx_desc_alloc = NULL;
	}
}

static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (port_info->rx_desc_alloc) {
		free(port_info->rx_desc_alloc);
		port_info->rx_desc_alloc = NULL;
	}

	if (port_info->rx_buf_alloc) {
		free(port_info->rx_buf_alloc);
		port_info->rx_buf_alloc = NULL;
	}
}

static int sh_eth_desc_init(struct sh_eth_dev *eth)
{
	int ret = 0;

	ret = sh_eth_tx_desc_init(eth);
	if (ret)
		goto err_tx_init;

	ret = sh_eth_rx_desc_init(eth);
	if (ret)
		goto err_rx_init;

	return ret;
err_rx_init:
	sh_eth_tx_desc_free(eth);

err_tx_init:
	return ret;
}

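/*
 * Program the MAC address into the controller: the first four bytes go
 * into MAHR, the remaining two into MALR.
 */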
static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
				unsigned char *mac)
{
	u32 val;

	val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	sh_eth_write(port_info, val, MAHR);

	val = (mac[4] << 8) | mac[5];
	sh_eth_write(port_info, val, MALR);
}

static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	unsigned long edmr;

	/* Configure e-dmac registers */
	edmr = sh_eth_read(port_info, EDMR);
	edmr &= ~EMDR_DESC_R;
	edmr |= EMDR_DESC | EDMR_EL;
#if defined(CONFIG_R8A77980)
	edmr |= EDMR_NBST;
#endif
	sh_eth_write(port_info, edmr, EDMR);

	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set MAC address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980)
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 425 | |
Marek Vasut | c13be6a | 2018-01-21 15:10:21 +0100 | [diff] [blame] | 426 | static int sh_eth_phy_regs_config(struct sh_eth_dev *eth) |
| 427 | { |
| 428 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
| 429 | struct phy_device *phy = port_info->phydev; |
| 430 | int ret = 0; |
| 431 | u32 val = 0; |
Nobuhiro Iwamatsu | 9dfac0a | 2011-11-14 16:56:59 +0900 | [diff] [blame] | 432 | |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 433 | /* Set the transfer speed */ |
Yoshihiro Shimoda | 677f6cd | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 434 | if (phy->speed == 100) { |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 435 | printf(SHETHER_NAME ": 100Base/"); |
Yoshihiro Shimoda | 9d55303 | 2012-06-26 16:38:06 +0000 | [diff] [blame] | 436 | #if defined(SH_ETH_TYPE_GETHER) |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 437 | sh_eth_write(port_info, GECMR_100B, GECMR); |
Yoshihiro Shimoda | d27e8c9 | 2012-11-04 15:54:30 +0000 | [diff] [blame] | 438 | #elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752) |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 439 | sh_eth_write(port_info, 1, RTRATE); |
Marek Vasut | 3112450 | 2019-07-31 12:58:06 +0200 | [diff] [blame] | 440 | #elif defined(CONFIG_RCAR_GEN2) || defined(CONFIG_R8A77980) |
Nobuhiro Iwamatsu | 9dfac0a | 2011-11-14 16:56:59 +0900 | [diff] [blame] | 441 | val = ECMR_RTM; |
| 442 | #endif |
Yoshihiro Shimoda | 677f6cd | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 443 | } else if (phy->speed == 10) { |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 444 | printf(SHETHER_NAME ": 10Base/"); |
Yoshihiro Shimoda | 9d55303 | 2012-06-26 16:38:06 +0000 | [diff] [blame] | 445 | #if defined(SH_ETH_TYPE_GETHER) |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 446 | sh_eth_write(port_info, GECMR_10B, GECMR); |
Yoshihiro Shimoda | d27e8c9 | 2012-11-04 15:54:30 +0000 | [diff] [blame] | 447 | #elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752) |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 448 | sh_eth_write(port_info, 0, RTRATE); |
Yoshihiro Shimoda | 34cca92 | 2011-01-18 17:53:45 +0900 | [diff] [blame] | 449 | #endif |
Nobuhiro Iwamatsu | 9dfac0a | 2011-11-14 16:56:59 +0900 | [diff] [blame] | 450 | } |
Yoshihiro Shimoda | 9d55303 | 2012-06-26 16:38:06 +0000 | [diff] [blame] | 451 | #if defined(SH_ETH_TYPE_GETHER) |
Nobuhiro Iwamatsu | 475f40d | 2012-05-15 15:49:39 +0000 | [diff] [blame] | 452 | else if (phy->speed == 1000) { |
| 453 | printf(SHETHER_NAME ": 1000Base/"); |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 454 | sh_eth_write(port_info, GECMR_1000B, GECMR); |
Nobuhiro Iwamatsu | 475f40d | 2012-05-15 15:49:39 +0000 | [diff] [blame] | 455 | } |
| 456 | #endif |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 457 | |
| 458 | /* Check if full duplex mode is supported by the phy */ |
Yoshihiro Shimoda | 677f6cd | 2011-10-11 18:10:14 +0900 | [diff] [blame] | 459 | if (phy->duplex) { |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 460 | printf("Full\n"); |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 461 | sh_eth_write(port_info, |
Nobuhiro Iwamatsu | ca36b0e | 2017-12-01 08:08:00 +0900 | [diff] [blame] | 462 | val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM), |
Yoshihiro Shimoda | 4c4aa6c | 2012-06-26 16:38:09 +0000 | [diff] [blame] | 463 | ECMR); |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 464 | } else { |
| 465 | printf("Half\n"); |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 466 | sh_eth_write(port_info, |
Nobuhiro Iwamatsu | ca36b0e | 2017-12-01 08:08:00 +0900 | [diff] [blame] | 467 | val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE), |
| 468 | ECMR); |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 469 | } |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 470 | |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 471 | return ret; |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 472 | } |
| 473 | |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 474 | static void sh_eth_start(struct sh_eth_dev *eth) |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 475 | { |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 476 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
| 477 | |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 478 | /* |
| 479 | * Enable the e-dmac receiver only. The transmitter will be enabled when |
| 480 | * we have something to transmit |
| 481 | */ |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 482 | sh_eth_write(port_info, EDRRR_R, EDRRR); |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 483 | } |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 484 | |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 485 | static void sh_eth_stop(struct sh_eth_dev *eth) |
| 486 | { |
Nobuhiro Iwamatsu | ec921f1 | 2017-12-01 08:10:32 +0900 | [diff] [blame] | 487 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
| 488 | |
| 489 | sh_eth_write(port_info, ~EDRRR_R, EDRRR); |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 490 | } |
| 491 | |
Marek Vasut | c13be6a | 2018-01-21 15:10:21 +0100 | [diff] [blame] | 492 | static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac) |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 493 | { |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 494 | int ret = 0; |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 495 | |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 496 | ret = sh_eth_reset(eth); |
| 497 | if (ret) |
Marek Vasut | c13be6a | 2018-01-21 15:10:21 +0100 | [diff] [blame] | 498 | return ret; |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 499 | |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 500 | ret = sh_eth_desc_init(eth); |
| 501 | if (ret) |
Marek Vasut | c13be6a | 2018-01-21 15:10:21 +0100 | [diff] [blame] | 502 | return ret; |
| 503 | |
| 504 | sh_eth_mac_regs_config(eth, mac); |
| 505 | |
| 506 | return 0; |
| 507 | } |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 508 | |
Marek Vasut | c13be6a | 2018-01-21 15:10:21 +0100 | [diff] [blame] | 509 | static int sh_eth_start_common(struct sh_eth_dev *eth) |
| 510 | { |
| 511 | struct sh_eth_info *port_info = ð->port_info[eth->port]; |
| 512 | int ret; |
| 513 | |
| 514 | ret = phy_startup(port_info->phydev); |
| 515 | if (ret) { |
| 516 | printf(SHETHER_NAME ": phy startup failure\n"); |
| 517 | return ret; |
| 518 | } |
| 519 | |
| 520 | ret = sh_eth_phy_regs_config(eth); |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 521 | if (ret) |
Marek Vasut | c13be6a | 2018-01-21 15:10:21 +0100 | [diff] [blame] | 522 | return ret; |
Nobuhiro Iwamatsu | d8f5d50 | 2008-11-21 12:04:18 +0900 | [diff] [blame] | 523 | |
| 524 | sh_eth_start(eth); |
| 525 | |
Marek Vasut | c13be6a | 2018-01-21 15:10:21 +0100 | [diff] [blame] | 526 | return 0; |
| 527 | } |
Nobuhiro Iwamatsu | 240b723 | 2008-06-11 21:05:00 +0900 | [diff] [blame] | 528 | |
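/* Private driver data used by the driver-model (UCLASS_ETH) glue below */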
struct sh_ether_priv {
	struct sh_eth_dev	shdev;

	struct mii_dev		*bus;
	phys_addr_t		iobase;
	struct clk		clk;
};

static int sh_ether_send(struct udevice *dev, void *packet, int len)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;

	return sh_eth_send_common(eth, packet, len);
}

static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2((uintptr_t)port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	}

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}

static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_recv_finish(eth);

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return 0;
}

static int sh_ether_write_hwaddr(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct eth_pdata *pdata = dev_get_plat(dev);

	sh_eth_write_hwaddr(port_info, pdata->enetaddr);

	return 0;
}

static int sh_eth_phy_config(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phydev;

	phydev = phy_connect(priv->bus, -1, dev, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	port_info->phydev = phydev;
	phy_config(phydev);

	return ret;
}

static int sh_ether_start(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret;

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		return ret;

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
	return ret;
}

static void sh_ether_stop(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	phy_shutdown(port_info->phydev);
	sh_eth_stop(&priv->shdev);
}

static int sh_ether_probe(struct udevice *udev)
{
	struct eth_pdata *pdata = dev_get_plat(udev);
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct mii_dev *mdiodev;
	int ret;

	priv->iobase = pdata->iobase;

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_get_by_index(udev, 0, &priv->clk);
	if (ret < 0)
		return ret;
#endif
	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		return ret;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	priv->bus = miiphy_get_dev_by_name(udev->name);

	eth->port = CFG_SH_ETHER_USE_PORT;
	eth->port_info[eth->port].phy_addr = CFG_SH_ETHER_PHY_ADDR;
	eth->port_info[eth->port].iobase =
		(void __iomem *)(uintptr_t)(BASE_IO_ADDR + 0x800 * eth->port);

#if CONFIG_IS_ENABLED(CLK)
	ret = clk_enable(&priv->clk);
	if (ret)
		goto err_mdio_register;
#endif

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		goto err_phy_config;

	ret = sh_eth_phy_config(udev);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_phy_config;
	}

	return 0;

err_phy_config:
#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
err_mdio_register:
	mdio_free(mdiodev);
	return ret;
}

static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

#if CONFIG_IS_ENABLED(CLK)
	clk_disable(&priv->clk);
#endif
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	return 0;
}

static const struct eth_ops sh_ether_ops = {
	.start		= sh_ether_start,
	.send		= sh_ether_send,
	.recv		= sh_ether_recv,
	.free_pkt	= sh_ether_free_pkt,
	.stop		= sh_ether_stop,
	.write_hwaddr	= sh_ether_write_hwaddr,
};

int sh_ether_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	const fdt32_t *cell;

	pdata->iobase = dev_read_addr(dev);

	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return 0;
}

static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r7s72100" },
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ .compatible = "renesas,gether-r8a77980" },
	{ }
};

U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.of_to_plat	= sh_ether_of_to_plat,
	.probe		= sh_ether_probe,
	.remove		= sh_ether_remove,
	.ops		= &sh_ether_ops,
	.priv_auto	= sizeof(struct sh_ether_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};

/******* for bb_miiphy *******/
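/*
 * These callbacks back the bit-banging MII framework: they drive the MDC
 * and MDIO lines through the PIR register (PIR_MMD, PIR_MDO, PIR_MDI and
 * PIR_MDC bits) to talk to the PHY.
 */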
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);

	return 0;
}

static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);

	return 0;
}

static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	*v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;

	return 0;
}

static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (v)
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
	else
		sh_eth_write(port_info,
			     sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);

	return 0;
}

static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);