// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * U-Boot version:
 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
 *
 * Based on the Linux version which is:
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>

DECLARE_GLOBAL_DATA_PTR;

#if !defined(CONFIG_PHYLIB)
# error Marvell mvneta requires PHYLIB
#endif

#define CONFIG_NR_CPUS			1
#define ETH_HLEN			14	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP				(2 + ETH_HLEN + 4 + 32)
#define MTU				1500
#define RX_BUFFER_SIZE			(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
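/*
 * Worked example: with MTU 1500 and WRAP 52, the raw buffer need is 1552
 * bytes; ALIGN() rounds this up to the next multiple of ARCH_DMA_MINALIGN
 * (e.g. 1568 with 32-byte cache lines), so each RX buffer can be cache
 * invalidated without touching its neighbours.
 */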

#define MVNETA_SMI_TIMEOUT			10000

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define   MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define   MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define   MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define   MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define   MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define   MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define   MVNETA_RXQ_OCCUPIED_ALL_MASK		0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define   MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define   MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define   MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define   MVNETA_PHY_ADDR_MASK			0x1f
#define MVNETA_SMI				0x2004
#define   MVNETA_PHY_REG_MASK			0x1f
/* SMI register fields */
#define   MVNETA_SMI_DATA_OFFS			0	/* Data */
#define   MVNETA_SMI_DATA_MASK			(0xffff << MVNETA_SMI_DATA_OFFS)
#define   MVNETA_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define   MVNETA_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr */
#define   MVNETA_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define   MVNETA_SMI_OPCODE_READ		(1 << MVNETA_SMI_OPCODE_OFFS)
#define   MVNETA_SMI_READ_VALID			(1 << 27)	/* Read Valid */
#define   MVNETA_SMI_BUSY			(1 << 28)	/* Busy */
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define   MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_WIN_SIZE_MASK			(0xffff0000)
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_BASE_ADDR_ENABLE_BIT		0x1
#define MVNETA_PORT_ACCESS_PROTECT		0x2294
#define MVNETA_PORT_ACCESS_PROTECT_WIN0_RW	0x3
#define MVNETA_PORT_CONFIG			0x2400
#define   MVNETA_UNI_PROMISC_MODE		BIT(0)
#define   MVNETA_DEF_RXQ(q)			((q) << 1)
#define   MVNETA_DEF_RXQ_ARP(q)			((q) << 4)
#define   MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define   MVNETA_DEF_RXQ_TCP(q)			((q) << 16)
#define   MVNETA_DEF_RXQ_UDP(q)			((q) << 19)
#define   MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define   MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define   MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)   | \
						 MVNETA_DEF_RXQ_TCP(q)   | \
						 MVNETA_DEF_RXQ_UDP(q)   | \
						 MVNETA_DEF_RXQ_BPDU(q)  | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define   MVNETA_SDMA_BRST_SIZE_16		4
#define   MVNETA_RX_BRST_SZ_MASK(burst)		((burst) << 1)
#define   MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define   MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define   MVNETA_DESC_SWAP			BIT(6)
#define   MVNETA_TX_BRST_SZ_MASK(burst)		((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define   MVNETA_TX_IN_PRGRS			BIT(1)
#define   MVNETA_TX_FIFO_EMPTY			BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define   MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define   MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define   MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define   MVNETA_TXQ_DISABLE_SHIFT		8
#define   MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define   MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define   MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits  0..7  = TXQ SENT, one bit per queue.
 * bits  8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit  29     = OLD_REG_SUM, see old reg ?
 * bit  30     = TX_ERR_SUM, one bit for 4 ports
 * bit  31     = MISC_SUM, one bit for 4 ports
 */
#define   MVNETA_TX_INTR_MASK(nr_txqs)		(((1 << nr_txqs) - 1) << 0)
#define   MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define   MVNETA_RX_INTR_MASK(nr_rxqs)		(((1 << nr_rxqs) - 1) << 8)
#define   MVNETA_RX_INTR_MASK_ALL		(0xff << 8)

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4
#define MVNETA_INTR_ENABLE			0x25b8

#define MVNETA_RXQ_CMD				0x2680
#define   MVNETA_RXQ_DISABLE_SHIFT		8
#define   MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define   MVNETA_GMAC_MAX_RX_SIZE_SHIFT		2
#define   MVNETA_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define   MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define   MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define   MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define   MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define   MVNETA_GMAC_LINK_UP			BIT(0)
#define   MVNETA_GMAC_SPEED_1000		BIT(1)
#define   MVNETA_GMAC_SPEED_100			BIT(2)
#define   MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define   MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define   MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define   MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define   MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define   MVNETA_GMAC_FORCE_LINK_DOWN		BIT(0)
#define   MVNETA_GMAC_FORCE_LINK_PASS		BIT(1)
#define   MVNETA_GMAC_FORCE_LINK_UP		(BIT(0) | BIT(1))
#define   MVNETA_GMAC_IB_BYPASS_AN_EN		BIT(3)
#define   MVNETA_GMAC_CONFIG_MII_SPEED		BIT(5)
#define   MVNETA_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define   MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define   MVNETA_GMAC_SET_FC_EN			BIT(8)
#define   MVNETA_GMAC_ADVERT_FC_EN		BIT(9)
#define   MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define   MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define   MVNETA_GMAC_SAMPLE_TX_CFG_EN		BIT(15)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define   MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define   MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define   MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define   MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define   MVNETA_TXQ_SENT_DESC_SHIFT		16
#define   MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define   MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define   MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define   MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
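/*
 * Example: with a 16-entry ring (last_desc == 15), index 14 advances to 15
 * and index 15 wraps back around to 0.
 */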

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two-byte Marvell header. It either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they automatically leave the IP header aligned on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2
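/*
 * Alignment arithmetic: the 2-byte Marvell header plus the 14-byte
 * Ethernet header occupy 16 bytes, so the IP header that follows starts
 * on a 4-byte boundary.
 */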

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			16

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

struct mvneta_port {
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;

	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	bool fixed_link;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	int init;
	struct phy_device *phydev;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc sfp_tx_disable_gpio;
#endif
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD  | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting.*/
	u16 reserverd1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* U-Boot does not use multiple queues, so set the queue counts to 1 */
static int rxq_number = 1;
static int txq_number = 1;
static int rxq_def;

struct buffer_location {
	struct mvneta_tx_desc *tx_descs;
	struct mvneta_rx_desc *rx_descs;
	u32 rx_buffers;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set up as 1 MB sections (never smaller), and the
 * driver needs far fewer buffer descriptors than that, so a single 1 MB
 * region is reserved for all descriptor and buffer space.
 */
#define BD_SPACE	(1 << 20)
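/*
 * The probe code is expected to carve this region up into the tx_descs,
 * rx_descs and rx_buffers areas tracked by buffer_loc above.
 */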

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* The MIB counters are clear-on-read: dummy-read each one to zero it */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
			(rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
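/*
 * Example: rx_done = rx_filled = 300 is pushed to the hardware as two
 * status updates, 255/255 followed by 45/45.
 */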

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta of less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
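	/* The hardware expects the buffer size in units of 8 bytes,
	 * hence the shift by 3 into the 13-bit field
	 */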
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			dev_warn(pp->phydev->dev,
				 "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			dev_warn(pp->phydev->dev,
				 "TIMEOUT for TX stopped status=0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			dev_warn(pp->phydev->dev,
				 "TX FIFO empty timeout status=0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

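	/*
	 * Each table byte holds an "accept" bit plus a 3-bit queue number,
	 * so e.g. queue 0 yields the pattern 0x01010101, marking all four
	 * entries of a register as accepted to queue 0.
	 */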
	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Set the default RX queue for the various traffic types in the port config register */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware if not in fixed-link mode */
	if (!pp->fixed_link) {
		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
		val |= MVNETA_PHY_POLLING_ENABLE;
		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
	}

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;
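	/*
	 * Example: last_nibble 0x5 gives tbl_offset 4 (the second table
	 * register) and reg_offset 1 (byte 1 within that register).
	 */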

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

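	/*
	 * Example: 00:11:22:33:44:55 is programmed as mac_h = 0x00112233
	 * and mac_l = 0x4455.
	 */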
	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}

static int mvneta_write_hwaddr(struct udevice *dev)
{
	mvneta_mac_addr_set(dev_get_priv(dev),
			    ((struct eth_pdata *)dev_get_plat(dev))->enetaddr,
			    rxq_def);

	return 0;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		dev_err(pp->phydev->dev,
			"bad rx status %08x (buffer oversize), size=%d\n",
			status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (crc error), size=%d\n", status,
			rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (overrun error), size=%d\n", status,
			rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (max frame length error), size=%d\n",
			status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (resource error), size=%d\n",
			status, rx_desc->data_size);
		break;
	}
}

static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
						     int rxq)
{
	return &pp->rxqs[rxq];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		u32 addr;

		/* U-Boot special: Fill in the rx buffer addresses */
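		/*
		 * U-Boot typically runs with a 1:1 memory mapping, which is
		 * why the same value can serve as both the DMA address and
		 * the buf_cookie used on the CPU side.
		 */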
		addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
		mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return 0;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs_phys = (dma_addr_t)rxq->descs;
	if (rxq->descs == NULL)
		return -ENOMEM;

	WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs_phys = (dma_addr_t)txq->descs;
	if (txq->descs == NULL)
		return -ENOMEM;

	WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			dev_err(pp->phydev->dev, "%s: can't create rxq=%d\n",
				__func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			dev_err(pp->phydev->dev, "%s: can't create txq=%d\n",
				__func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);
}

static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	int status_change = 0;

	if (pp->fixed_link) {
		debug("Using fixed link, skip link adjust\n");
		return;
	}

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else if (phydev->speed == SPEED_100)
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}

static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);

	mvneta_start_dev(pp);

	return 0;
}

/* Initialize hw */
static int mvneta_init2(struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->txqs[0].descs = buffer_loc.tx_descs;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* U-Boot special: use preallocated area */
	pp->rxqs[0].descs = buffer_loc.rx_descs;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}

/* platform glue : initialize decoding windows */

/*
 * Unlike the Armada 380, the Armada 3700 has two layers of decode windows
 * for the GBE:
 * the first layer is the GbE address window that resides inside the GBE
 * unit; the second layer is the fabric address window, which is located
 * in the NIC400 (south fabric).
 * To simplify the address decode configuration for Armada 3700, we bypass
 * the first layer of GBE decode windows by setting the first window to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leave the
	 * work to MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by clearing its bit 0 */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full Access (read or write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}

static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
{
	const struct mbus_dram_target_info *dram;
	u32 win_enable;
	u32 win_protect;
	int i;

	dram = mvebu_mbus_dram_info();
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;
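	/*
	 * The bits in MVNETA_BASE_ADDR_ENABLE are active-low: start with
	 * all six windows disabled (0x3f) and clear one bit per DRAM chip
	 * select configured below.
	 */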
| 1325 | |
| 1326 | for (i = 0; i < dram->num_cs; i++) { |
| 1327 | const struct mbus_dram_window *cs = dram->cs + i; |
| 1328 | mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | |
| 1329 | (cs->mbus_attr << 8) | dram->mbus_dram_target_id); |
| 1330 | |
| 1331 | mvreg_write(pp, MVNETA_WIN_SIZE(i), |
| 1332 | (cs->size - 1) & 0xffff0000); |
| 1333 | |
| 1334 | win_enable &= ~(1 << i); |
| 1335 | win_protect |= 3 << (2 * i); |
| 1336 | } |
| 1337 | |
| 1338 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); |
| 1339 | } |
| 1340 | |
| 1341 | /* Power up the port */ |
| 1342 | static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) |
| 1343 | { |
| 1344 | u32 ctrl; |
| 1345 | |
| 1346 | /* MAC Cause register should be cleared */ |
| 1347 | mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); |
| 1348 | |
| 1349 | ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
| 1350 | |
| 1351 | /* Even though it might look weird, when we're configured in |
| 1352 | * SGMII or QSGMII mode, the RGMII bit needs to be set. |
| 1353 | */ |
| 1354 | switch (phy_mode) { |
| 1355 | case PHY_INTERFACE_MODE_QSGMII: |
| 1356 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); |
| 1357 | ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; |
| 1358 | break; |
| 1359 | case PHY_INTERFACE_MODE_SGMII: |
| 1360 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); |
| 1361 | ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; |
| 1362 | break; |
| 1363 | case PHY_INTERFACE_MODE_RGMII: |
| 1364 | case PHY_INTERFACE_MODE_RGMII_ID: |
| 1365 | ctrl |= MVNETA_GMAC2_PORT_RGMII; |
| 1366 | break; |
| 1367 | default: |
| 1368 | return -EINVAL; |
| 1369 | } |
| 1370 | |
| 1371 | /* Cancel Port Reset */ |
| 1372 | ctrl &= ~MVNETA_GMAC2_PORT_RESET; |
| 1373 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); |
| 1374 | |
| 1375 | while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & |
| 1376 | MVNETA_GMAC2_PORT_RESET) != 0) |
| 1377 | continue; |
| 1378 | |
| 1379 | return 0; |
| 1380 | } |
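| | |
| | /* |
| | * Note: the loop above polls MVNETA_GMAC2_PORT_RESET with no timeout; it |
| | * relies on the MAC deasserting the reset bit promptly after the write. |
| | */ |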
| 1381 | |
| 1382 | /* Device initialization routine */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1383 | static int mvneta_init(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1384 | { |
Simon Glass | fa20e93 | 2020-12-03 16:55:20 -0700 | [diff] [blame] | 1385 | struct eth_pdata *pdata = dev_get_plat(dev); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1386 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1387 | int err; |
| 1388 | |
| 1389 | pp->tx_ring_size = MVNETA_MAX_TXD; |
| 1390 | pp->rx_ring_size = MVNETA_MAX_RXD; |
| 1391 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1392 | err = mvneta_init2(pp); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1393 | if (err < 0) { |
Sean Anderson | e0d0004 | 2020-09-15 10:44:54 -0400 | [diff] [blame] | 1394 | dev_err(dev, "can't init eth hal\n"); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1395 | return err; |
| 1396 | } |
| 1397 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1398 | mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1399 | |
| 1400 | err = mvneta_port_power_up(pp, pp->phy_interface); |
| 1401 | if (err < 0) { |
Sean Anderson | e0d0004 | 2020-09-15 10:44:54 -0400 | [diff] [blame] | 1402 | dev_err(dev, "can't power up port\n"); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1403 | return err; |
| 1404 | } |
| 1405 | |
| 1406 | /* Call open() now as it needs to be done before running send() */ |
| 1407 | mvneta_open(dev); |
| 1408 | |
| 1409 | return 0; |
| 1410 | } |
| 1411 | |
| 1412 | /* U-Boot-only functions follow here */ |
| 1413 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1414 | static int mvneta_start(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1415 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1416 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1417 | struct phy_device *phydev; |
| 1418 | |
| 1419 | mvneta_port_power_up(pp, pp->phy_interface); |
| 1420 | |
| 1421 | if (!pp->init || pp->link == 0) { |
Marek Behún | e942d8e | 2022-04-27 12:41:47 +0200 | [diff] [blame] | 1422 | if (pp->fixed_link) { |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1423 | u32 val; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1424 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1425 | pp->init = 1; |
| 1426 | pp->link = 1; |
| 1427 | mvneta_init(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1428 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1429 | val = MVNETA_GMAC_FORCE_LINK_UP | |
| 1430 | MVNETA_GMAC_IB_BYPASS_AN_EN | |
| 1431 | MVNETA_GMAC_SET_FC_EN | |
| 1432 | MVNETA_GMAC_ADVERT_FC_EN | |
| 1433 | MVNETA_GMAC_SAMPLE_TX_CFG_EN; |
| 1434 | |
| 1435 | if (pp->duplex) |
| 1436 | val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; |
| 1437 | |
| 1438 | if (pp->speed == SPEED_1000) |
| 1439 | val |= MVNETA_GMAC_CONFIG_GMII_SPEED; |
| 1440 | else if (pp->speed == SPEED_100) |
| 1441 | val |= MVNETA_GMAC_CONFIG_MII_SPEED; |
| 1442 | |
| 1443 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); |
| 1444 | } else { |
Marek Behún | 99e296f | 2022-04-27 12:41:46 +0200 | [diff] [blame] | 1445 | phydev = dm_eth_phy_connect(dev); |
Marek Behún | 075ccb1 | 2018-04-24 17:21:29 +0200 | [diff] [blame] | 1446 | if (!phydev) { |
Marek Behún | 99e296f | 2022-04-27 12:41:46 +0200 | [diff] [blame] | 1447 | printf("dm_eth_phy_connect failed\n"); |
Marek Behún | 075ccb1 | 2018-04-24 17:21:29 +0200 | [diff] [blame] | 1448 | return -ENODEV; |
| 1449 | } |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1450 | |
Marek Behún | 99e296f | 2022-04-27 12:41:46 +0200 | [diff] [blame] | 1451 | /* Set PHY address in case HW polling is enabled later */ |
| 1452 | mvreg_write(pp, MVNETA_PHY_ADDR, phydev->addr); |
| 1453 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1454 | pp->phydev = phydev; |
| 1455 | phy_config(phydev); |
| 1456 | phy_startup(phydev); |
| 1457 | if (!phydev->link) { |
| 1458 | printf("%s: No link.\n", phydev->dev->name); |
| 1459 | return -1; |
| 1460 | } |
| 1461 | |
| 1462 | /* Full init on first call */ |
| 1463 | mvneta_init(dev); |
| 1464 | pp->init = 1; |
| 1465 | return 0; |
| 1466 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1467 | } |
| 1468 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1469 | /* On all subsequent calls, this is enough */ |
| 1470 | mvneta_port_up(pp); |
| 1471 | mvneta_port_enable(pp); |
| 1472 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1473 | return 0; |
| 1474 | } |
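| | |
| | /* |
| | * Call-flow summary: the first mvneta_start() performs the full one-time |
| | * init (mvneta_init(), which also calls mvneta_open()), either with link |
| | * parameters forced from a 'fixed-link' DT subnode or negotiated via the |
| | * PHY. Subsequent calls only re-enable the already-initialized port. |
| | */ |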
| 1475 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1476 | static int mvneta_send(struct udevice *dev, void *packet, int length) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1477 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1478 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1479 | struct mvneta_tx_queue *txq = &pp->txqs[0]; |
| 1480 | struct mvneta_tx_desc *tx_desc; |
| 1481 | int sent_desc; |
| 1482 | u32 timeout = 0; |
| 1483 | |
| 1484 | /* Get a descriptor for the first part of the packet */ |
| 1485 | tx_desc = mvneta_txq_next_desc_get(txq); |
| 1486 | |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1487 | tx_desc->buf_phys_addr = (u32)(uintptr_t)packet; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1488 | tx_desc->data_size = length; |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1489 | flush_dcache_range((ulong)packet, |
| 1490 | (ulong)packet + ALIGN(length, PKTALIGN)); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1491 | |
| 1492 | /* First and Last descriptor */ |
| 1493 | tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC; |
| 1494 | mvneta_txq_pend_desc_add(pp, txq, 1); |
| 1495 | |
| 1496 | /* Wait for packet to be sent (queue might help with speed here) */ |
| 1497 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); |
| 1498 | while (!sent_desc) { |
| 1499 | if (timeout++ > 10000) { |
| 1500 | printf("timeout: packet not sent\n"); |
| 1501 | return -1; |
| 1502 | } |
| 1503 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); |
| 1504 | } |
| 1505 | |
| 1506 | /* txDone has increased - hw sent packet */ |
| 1507 | mvneta_txq_sent_desc_dec(pp, txq, sent_desc); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1508 | |
| 1509 | return 0; |
| 1510 | } |
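| | |
| | /* |
| | * Note: the send path is fully synchronous - one descriptor per packet, |
| | * followed by a busy-poll of up to 10000 register reads for completion. |
| | * The bound is an iteration count, not a wall-clock timeout. |
| | */ |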
| 1511 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1512 | static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1513 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1514 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1515 | int rx_done; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1516 | struct mvneta_rx_queue *rxq; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1517 | int rx_bytes = 0; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1518 | |
| 1519 | /* get rx queue */ |
| 1520 | rxq = mvneta_rxq_handle_get(pp, rxq_def); |
| 1521 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1522 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1523 | if (rx_done) { |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1524 | struct mvneta_rx_desc *rx_desc; |
| 1525 | unsigned char *data; |
| 1526 | u32 rx_status; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1527 | |
| 1528 | /* |
| 1529 | * No cache invalidation needed here, since the descriptors are |
| 1530 | * located in an uncached memory region |
| 1531 | */ |
| 1532 | rx_desc = mvneta_rxq_next_desc_get(rxq); |
| 1533 | |
| 1534 | rx_status = rx_desc->status; |
| 1535 | if (!mvneta_rxq_desc_is_first_last(rx_status) || |
| 1536 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { |
| 1537 | mvneta_rx_error(pp, rx_desc); |
| 1538 | /* leave the descriptor untouched */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1539 | return -EIO; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1540 | } |
| 1541 | |
| 1542 | /* 2 bytes for the Marvell header, 4 bytes for the CRC */ |
| 1543 | rx_bytes = rx_desc->data_size - 6; |
| 1544 | |
| 1545 | /* give packet to stack - skip the first 2 bytes */ |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1546 | data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1547 | /* |
| 1548 | * No cache invalidation needed here, since the RX buffers are |
| 1549 | * located in an uncached memory region |
| 1550 | */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1551 | *packetp = data; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1552 | |
Jason Brown | c7bc183 | 2017-11-28 11:12:43 -0800 | [diff] [blame] | 1553 | /* |
| 1554 | * Only mark one descriptor as free |
| 1555 | * since only one was processed |
| 1556 | */ |
| 1557 | mvneta_rxq_desc_num_update(pp, rxq, 1, 1); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1558 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1559 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1560 | return rx_bytes; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1561 | } |
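| | |
| | /* |
| | * Note: the descriptor is handed straight back to the hardware above, so |
| | * the buffer returned in *packetp is only valid until further RX traffic |
| | * wraps the ring; callers are expected to consume it immediately. |
| | */ |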
| 1562 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1563 | static int mvneta_probe(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1564 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1565 | struct mvneta_port *pp = dev_get_priv(dev); |
Robert Marko | 58c9873 | 2022-03-24 10:57:37 +0100 | [diff] [blame] | 1566 | #if CONFIG_IS_ENABLED(DM_GPIO) |
| 1567 | struct ofnode_phandle_args sfp_args; |
| 1568 | #endif |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1569 | void *blob = (void *)gd->fdt_blob; |
Simon Glass | dd79d6e | 2017-01-17 16:52:55 -0700 | [diff] [blame] | 1570 | int node = dev_of_offset(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1571 | void *bd_space; |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1572 | int fl_node; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1573 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1574 | /* |
| 1575 | * Allocate buffer area for descs and rx_buffers. This is only |
| 1576 | * done once for all interfaces, as only one interface can |
Chris Packham | 0f81d7a | 2016-08-29 20:54:02 +1200 | [diff] [blame] | 1577 | * be active. Make this area DMA-safe by disabling the D-cache. |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1578 | */ |
| 1579 | if (!buffer_loc.tx_descs) { |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1580 | u32 size; |
| 1581 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1582 | /* Align buffer area for descs and rx_buffers to 1MiB */ |
| 1583 | bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); |
Rabeeh Khoury | 31ad3ce | 2018-06-19 21:36:50 +0300 | [diff] [blame] | 1584 | flush_dcache_range((ulong)bd_space, (ulong)bd_space + BD_SPACE); |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1585 | mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE, |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1586 | DCACHE_OFF); |
| 1587 | buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space; |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1588 | size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc), |
| 1589 | ARCH_DMA_MINALIGN); |
Rabeeh Khoury | f046bed | 2018-06-19 21:36:51 +0300 | [diff] [blame] | 1590 | memset(buffer_loc.tx_descs, 0, size); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1591 | buffer_loc.rx_descs = (struct mvneta_rx_desc *) |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1592 | ((phys_addr_t)bd_space + size); |
| 1593 | size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc), |
| 1594 | ARCH_DMA_MINALIGN); |
| 1595 | buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1596 | } |
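| | |
| | /* |
| | * Resulting layout of the uncached bd_space area, per the code above: |
| | * offset 0: TX descriptors (MVNETA_MAX_TXD entries); then, rounded up |
| | * to ARCH_DMA_MINALIGN: RX descriptors (MVNETA_MAX_RXD entries); then |
| | * the RX buffers, all within the BD_SPACE region. |
| | */ |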
| 1597 | |
Marek Behún | 645c744 | 2022-04-27 12:41:44 +0200 | [diff] [blame] | 1598 | pp->base = dev_read_addr_ptr(dev); |
| 1599 | pp->phy_interface = dev_read_phy_mode(dev); |
| 1600 | if (pp->phy_interface == PHY_INTERFACE_MODE_NA) |
| 1601 | return -EINVAL; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1602 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1603 | /* Configure MBUS address windows */ |
Simon Glass | 54cbcc8 | 2017-05-18 20:08:57 -0600 | [diff] [blame] | 1604 | if (device_is_compatible(dev, "marvell,armada-3700-neta")) |
Stefan Roese | 572be4a | 2016-05-19 17:46:36 +0200 | [diff] [blame] | 1605 | mvneta_bypass_mbus_windows(pp); |
| 1606 | else |
| 1607 | mvneta_conf_mbus_windows(pp); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1608 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1609 | /* fetch the 'fixed-link' subnode of the 'neta' node */ |
| 1610 | fl_node = fdt_subnode_offset(blob, node, "fixed-link"); |
| 1611 | if (fl_node != -FDT_ERR_NOTFOUND) { |
| 1612 | /* fixed link: read duplex and speed settings from the subnode */ |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1613 | pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex"); |
| 1614 | pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0); |
Marek Behún | e942d8e | 2022-04-27 12:41:47 +0200 | [diff] [blame] | 1615 | pp->fixed_link = true; |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1616 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1617 | |
Simon Glass | fa4689a | 2019-12-06 21:41:35 -0700 | [diff] [blame] | 1618 | #if CONFIG_IS_ENABLED(DM_GPIO) |
Marek Behún | 1cbd104 | 2022-04-27 12:41:52 +0200 | [diff] [blame^] | 1619 | if (!dev_read_phandle_with_args(dev, "sfp", NULL, 0, 0, &sfp_args) && |
| 1620 | ofnode_is_enabled(sfp_args.node)) |
Robert Marko | 58c9873 | 2022-03-24 10:57:37 +0100 | [diff] [blame] | 1621 | gpio_request_by_name_nodev(sfp_args.node, "tx-disable-gpio", 0, |
| 1622 | &pp->sfp_tx_disable_gpio, GPIOD_IS_OUT); |
| 1623 | |
Aditya Prayoga | c9fe02a | 2018-12-05 00:39:23 +0800 | [diff] [blame] | 1624 | gpio_request_by_name(dev, "phy-reset-gpios", 0, |
| 1625 | &pp->phy_reset_gpio, GPIOD_IS_OUT); |
| 1626 | |
| 1627 | if (dm_gpio_is_valid(&pp->phy_reset_gpio)) { |
| 1628 | dm_gpio_set_value(&pp->phy_reset_gpio, 1); |
| 1629 | mdelay(10); |
| 1630 | dm_gpio_set_value(&pp->phy_reset_gpio, 0); |
| 1631 | } |
Robert Marko | 58c9873 | 2022-03-24 10:57:37 +0100 | [diff] [blame] | 1632 | |
| 1633 | if (dm_gpio_is_valid(&pp->sfp_tx_disable_gpio)) |
| 1634 | dm_gpio_set_value(&pp->sfp_tx_disable_gpio, 0); |
Aditya Prayoga | c9fe02a | 2018-12-05 00:39:23 +0800 | [diff] [blame] | 1635 | #endif |
| 1636 | |
Marek Behún | b407baa | 2022-04-27 12:41:51 +0200 | [diff] [blame] | 1637 | return 0; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1638 | } |
| 1639 | |
| 1640 | static void mvneta_stop(struct udevice *dev) |
| 1641 | { |
| 1642 | struct mvneta_port *pp = dev_get_priv(dev); |
| 1643 | |
| 1644 | mvneta_port_down(pp); |
| 1645 | mvneta_port_disable(pp); |
| 1646 | } |
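| | |
| | /* |
| | * Note: stop only quiesces the port; the descriptor and buffer memory in |
| | * buffer_loc stays allocated, so a later mvneta_start() can bring the |
| | * port back up without repeating the one-time setup from mvneta_probe(). |
| | */ |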
| 1647 | |
| 1648 | static const struct eth_ops mvneta_ops = { |
| 1649 | .start = mvneta_start, |
| 1650 | .send = mvneta_send, |
| 1651 | .recv = mvneta_recv, |
| 1652 | .stop = mvneta_stop, |
Matt Pelland | 668a5f2 | 2018-03-27 13:18:25 -0400 | [diff] [blame] | 1653 | .write_hwaddr = mvneta_write_hwaddr, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1654 | }; |
| 1655 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1656 | static const struct udevice_id mvneta_ids[] = { |
| 1657 | { .compatible = "marvell,armada-370-neta" }, |
| 1658 | { .compatible = "marvell,armada-xp-neta" }, |
Stefan Roese | 572be4a | 2016-05-19 17:46:36 +0200 | [diff] [blame] | 1659 | { .compatible = "marvell,armada-3700-neta" }, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1660 | { } |
| 1661 | }; |
| 1662 | |
| 1663 | U_BOOT_DRIVER(mvneta) = { |
| 1664 | .name = "mvneta", |
| 1665 | .id = UCLASS_ETH, |
| 1666 | .of_match = mvneta_ids, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1667 | .probe = mvneta_probe, |
| 1668 | .ops = &mvneta_ops, |
Simon Glass | 8a2b47f | 2020-12-03 16:55:17 -0700 | [diff] [blame] | 1669 | .priv_auto = sizeof(struct mvneta_port), |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 1670 | .plat_auto = sizeof(struct eth_pdata), |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1671 | }; |
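| | |
| | /* |
| | * A minimal, hypothetical device tree fragment this driver would bind to. |
| | * The node name, unit address and register size are illustrative only; |
| | * the compatible string, phy-mode and fixed-link properties match what |
| | * mvneta_probe() parses above: |
| | * |
| | *	ethernet@70000 { |
| | *		compatible = "marvell,armada-370-neta"; |
| | *		reg = <0x70000 0x4000>; |
| | *		phy-mode = "rgmii-id"; |
| | *		fixed-link { |
| | *			speed = <1000>; |
| | *			full-duplex; |
| | *		}; |
| | *	}; |
| | */ |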