// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * U-Boot version:
 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
 *
 * Based on the Linux version which is:
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */

#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>

DECLARE_GLOBAL_DATA_PTR;

#define MVNETA_NR_CPUS			1
#define ETH_HLEN			14	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP				(2 + ETH_HLEN + 4 + 32)
#define MTU				1500
#define RX_BUFFER_SIZE			(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
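
/*
 * Illustrative sizing (not from the original sources): WRAP is
 * 2 + 14 + 4 + 32 = 52 bytes, so MTU + WRAP = 1552. Assuming an
 * ARCH_DMA_MINALIGN of 32 bytes for the sake of the example,
 * ALIGN(1552, 32) rounds up to 1568, so each RX buffer would occupy
 * 1568 bytes. The exact value depends on the platform's
 * ARCH_DMA_MINALIGN.
 */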

#define MVNETA_SMI_TIMEOUT		10000

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)	(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC		BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK	(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)	(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)	((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)	(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)		(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT	19
#define MVNETA_RXQ_BUF_SIZE_MASK	(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)	(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK	0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)	(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX	255
#define MVNETA_PORT_RX_RESET		0x1cc0
#define MVNETA_PORT_RX_DMA_RESET	BIT(0)
#define MVNETA_PHY_ADDR			0x2000
#define MVNETA_PHY_ADDR_MASK		0x1f
#define MVNETA_SMI			0x2004
#define MVNETA_PHY_REG_MASK		0x1f
/* SMI register fields */
#define MVNETA_SMI_DATA_OFFS		0	/* Data */
#define MVNETA_SMI_DATA_MASK		(0xffff << MVNETA_SMI_DATA_OFFS)
#define MVNETA_SMI_DEV_ADDR_OFFS	16	/* PHY device address */
#define MVNETA_SMI_REG_ADDR_OFFS	21	/* PHY device reg addr */
#define MVNETA_SMI_OPCODE_OFFS		26	/* Write/Read opcode */
#define MVNETA_SMI_OPCODE_READ		(1 << MVNETA_SMI_OPCODE_OFFS)
#define MVNETA_SMI_READ_VALID		(1 << 27)	/* Read Valid */
#define MVNETA_SMI_BUSY			(1 << 28)	/* Busy */
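
/*
 * Illustrative sketch (not part of the original file): an SMI read
 * command word for a hypothetical PHY at address 0x1, register 0x2,
 * would be composed from the fields above roughly as
 *
 *	u32 cmd = (0x1 << MVNETA_SMI_DEV_ADDR_OFFS) |
 *		  (0x2 << MVNETA_SMI_REG_ADDR_OFFS) |
 *		  MVNETA_SMI_OPCODE_READ;
 *
 * The caller would poll MVNETA_SMI_BUSY before writing the word to
 * MVNETA_SMI, then wait for MVNETA_SMI_READ_VALID before extracting
 * the result from the MVNETA_SMI_DATA_MASK bits.
 */
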
#define MVNETA_MBUS_RETRY		0x2010
#define MVNETA_UNIT_INTR_CAUSE		0x2080
#define MVNETA_UNIT_CONTROL		0x20B0
#define MVNETA_PHY_POLLING_ENABLE	BIT(1)
#define MVNETA_WIN_BASE(w)		(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)		(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)		(0x2280 + ((w) << 2))
#define MVNETA_WIN_SIZE_MASK		(0xffff0000)
#define MVNETA_BASE_ADDR_ENABLE		0x2290
#define MVNETA_BASE_ADDR_ENABLE_BIT	0x1
#define MVNETA_AC5_CNM_DDR_TARGET	0x2
#define MVNETA_AC5_CNM_DDR_ATTR		0xb
#define MVNETA_PORT_ACCESS_PROTECT	0x2294
#define MVNETA_PORT_ACCESS_PROTECT_WIN0_RW	0x3
#define MVNETA_PORT_CONFIG		0x2400
#define MVNETA_UNI_PROMISC_MODE		BIT(0)
#define MVNETA_DEF_RXQ(q)		((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)		((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM		BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)		((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)		((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)		((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR	BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q) | \
						 MVNETA_DEF_RXQ_ARP(q) | \
						 MVNETA_DEF_RXQ_TCP(q) | \
						 MVNETA_DEF_RXQ_UDP(q) | \
						 MVNETA_DEF_RXQ_BPDU(q) | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
#define MVNETA_PORT_CONFIG_EXTEND	0x2404
#define MVNETA_MAC_ADDR_LOW		0x2414
#define MVNETA_MAC_ADDR_HIGH		0x2418
#define MVNETA_SDMA_CONFIG		0x241c
#define MVNETA_SDMA_BRST_SIZE_16	4
#define MVNETA_RX_BRST_SZ_MASK(burst)	((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP		BIT(4)
#define MVNETA_TX_NO_DATA_SWAP		BIT(5)
#define MVNETA_DESC_SWAP		BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)	((burst) << 22)
#define MVNETA_PORT_STATUS		0x2444
#define MVNETA_TX_IN_PRGRS		BIT(1)
#define MVNETA_TX_FIFO_EMPTY		BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE	0x247c
#define MVNETA_SERDES_CFG		0x24A0
#define MVNETA_SGMII_SERDES_PROTO	0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO	0x0667
#define MVNETA_TYPE_PRIO		0x24bc
#define MVNETA_FORCE_UNI		BIT(21)
#define MVNETA_TXQ_CMD_1		0x24e4
#define MVNETA_TXQ_CMD			0x2448
#define MVNETA_TXQ_DISABLE_SHIFT	8
#define MVNETA_TXQ_ENABLE_MASK		0x000000ff
#define MVNETA_ACC_MODE			0x2500
#define MVNETA_CPU_MAP(cpu)		(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK	0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK	0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)	(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE		0x25a0
#define MVNETA_INTR_NEW_MASK		0x25a4

/* bits 0..7  = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)	(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL		(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)	(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL		(0xff << 8)
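
/*
 * Example (illustrative only): with a single queue, as this driver
 * uses, MVNETA_TX_INTR_MASK(1) evaluates to 0x001 (TXQ0 SENT) and
 * MVNETA_RX_INTR_MASK(1) to 0x100 (RXQ0 OCCUP).
 */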

#define MVNETA_INTR_OLD_CAUSE		0x25a8
#define MVNETA_INTR_OLD_MASK		0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE		0x25b0
#define MVNETA_INTR_MISC_MASK		0x25b4
#define MVNETA_INTR_ENABLE		0x25b8

#define MVNETA_RXQ_CMD			0x2680
#define MVNETA_RXQ_DISABLE_SHIFT	8
#define MVNETA_RXQ_ENABLE_MASK		0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)	(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)	(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0		0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT	2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK	0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE	BIT(0)
#define MVNETA_GMAC_CTRL_2		0x2c08
#define MVNETA_GMAC2_PCS_ENABLE		BIT(3)
#define MVNETA_GMAC2_PORT_RGMII		BIT(4)
#define MVNETA_GMAC2_PORT_RESET		BIT(6)
#define MVNETA_GMAC_STATUS		0x2c10
#define MVNETA_GMAC_LINK_UP		BIT(0)
#define MVNETA_GMAC_SPEED_1000		BIT(1)
#define MVNETA_GMAC_SPEED_100		BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX		BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE	BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE	BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE	BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE	BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG	0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN	BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS	BIT(1)
#define MVNETA_GMAC_IB_BYPASS_AN_EN	BIT(3)
#define MVNETA_GMAC_CONFIG_MII_SPEED	BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED	BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN		BIT(7)
#define MVNETA_GMAC_SET_FC_EN		BIT(8)
#define MVNETA_GMAC_ADVERT_FC_EN	BIT(9)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX	BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN	BIT(13)
#define MVNETA_GMAC_SAMPLE_TX_CFG_EN	BIT(15)
#define MVNETA_MIB_COUNTERS_BASE	0x3080
#define MVNETA_MIB_LATE_COLLISION	0x7c
#define MVNETA_DA_FILT_SPEC_MCAST	0x3400
#define MVNETA_DA_FILT_OTH_MCAST	0x3500
#define MVNETA_DA_FILT_UCAST_BASE	0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)	(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)		(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK	0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)	(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_STATUS_REG(q)	(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000
#define MVNETA_PORT_TX_RESET		0x3cf0
#define MVNETA_PORT_TX_DMA_RESET	BIT(0)
#define MVNETA_TX_MTU			0x3e0c
#define MVNETA_TX_TOKEN_SIZE		0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX	0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)	(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX	0x7fffffff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
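
/*
 * Example (illustrative only): with a 16-entry ring, last_desc is 15,
 * so MVNETA_QUEUE_NEXT_DESC() advances 0 -> 1 -> ... -> 15 and then
 * wraps 15 -> 0, giving a circular walk over the descriptor array.
 */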

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit at the front of the Ethernet
 * header, they automatically align the IP header on a 4-byte
 * boundary: the hardware skips those two bytes on its own.
 */
#define MVNETA_MH_SIZE			2
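
/*
 * Worked example (illustrative only): the 2-byte Marvell header plus
 * the 14-byte Ethernet header places the start of the IP header at
 * offset 16 within the buffer, which is 4-byte aligned as required.
 */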

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			16

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

struct mvneta_port {
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;

	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	int init;
	struct phy_device *phydev;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
	struct gpio_desc sfp_tx_disable_gpio;
#endif

	uintptr_t dma_base;	/* base address for DMA address decoding */
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD  | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

struct mvneta_tx_desc {
	u32 command;		/* Options used by HW for packet transmitting */
	u16 reserved1;		/* csum_l4 (for future use) */
	u16 data_size;		/* Data size of transmitted packet in bytes */
	u32 buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32 reserved2;		/* hw_cmd - (for future use, PMT) */
	u32 reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32 status;		/* Info about received packet */
	u16 reserved1;		/* pnc_info - (for future use, PnC) */
	u16 data_size;		/* Size of received packet in bytes */

	u32 buf_phys_addr;	/* Physical address of the buffer */
	u32 reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32 buf_cookie;		/* cookie for access to RX buffer in rx path */
	u16 reserved3;		/* prefetch_cmd, for future use */
	u16 reserved4;		/* csum_l4 - (for future use, PnC) */

	u32 reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32 reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};
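
/*
 * Note (illustrative, not part of the original driver): both
 * descriptor layouts add up to 32 bytes, matching
 * MVNETA_DESC_ALIGNED_SIZE above. A sanity check such as
 *
 *	BUILD_BUG_ON(sizeof(struct mvneta_rx_desc) !=
 *		     MVNETA_DESC_ALIGNED_SIZE);
 *
 * placed in an init function would catch accidental layout changes;
 * it is shown here only as a sketch.
 */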

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* U-Boot doesn't use multiple queues, so set both numbers to 1 */
static int rxq_number = 1;
static int txq_number = 1;
static int rxq_def;

struct buffer_location {
	struct mvneta_tx_desc *tx_descs;
	struct mvneta_rx_desc *rx_descs;
	u32 rx_buffers;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;

/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver needs only a few buffer descriptors, so
 * 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)
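
/*
 * Illustrative carve-up of the 1MB BD_SPACE region (assumed layout;
 * the actual carving happens in probe code outside this section):
 * TX descriptors first, RX descriptors next, then the RX data
 * buffers, each aligned to ARCH_DMA_MINALIGN. With 16 descriptors of
 * 32 bytes per ring and 16 RX buffers of RX_BUFFER_SIZE, well under
 * 64KiB of the region is actually used.
 */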

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}
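
/*
 * Example (illustrative only): a call with ndescs = 300 results in two
 * register writes, first adding 255 descriptors and then the remaining
 * 45, because the ADD_NON_OCCUPIED field is only 8 bits wide.
 */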

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
		      (rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the
	 * caller processes TX descriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}
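
/*
 * Example (illustrative only): the BUF_SIZE field holds the buffer
 * size in units of 8 bytes, hence the ">> 3" above. With an assumed
 * RX_BUFFER_SIZE of 1568, the value 196 is written into the field and
 * the hardware multiplies it back up to 1568.
 */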

/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			dev_warn(pp->phydev->dev,
				 "TIMEOUT for RX stopped! rx_queue_cmd: 0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			dev_warn(pp->phydev->dev,
				 "TIMEOUT for TX stopped status=0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			dev_warn(pp->phydev->dev,
				 "TX FIFO empty timeout status=0x%08x\n",
				 val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 *	Clears interrupt Cause and Mask registers.
 *	Clears all MAC tables.
 *	Sets defaults to all registers.
 *	Resets RX and TX descriptor rings.
 *	Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < MVNETA_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware if not in fixed-link mode */
	if (!IS_ENABLED(CONFIG_PHY_FIXED) ||
	    pp->phydev->phy_id != PHY_FIXED_ID) {
		mvreg_write(pp, MVNETA_PHY_ADDR, pp->phydev->addr);

		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
		val |= MVNETA_PHY_POLLING_ENABLE;
		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
	}

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
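
/*
 * Example (illustrative only): for a MAC address ending in 0x5A,
 * last_nibble is 0xA, so tbl_offset is 8 (the third 32-bit table
 * register) and reg_offset is 2, i.e. the entry lives in bits 23:16
 * of MVNETA_DA_FILT_UCAST_BASE + 8.
 */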

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
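
/*
 * Example (illustrative only): for the MAC address 00:11:22:33:44:55
 * the code above writes mac_h = 0x00112233 and mac_l = 0x00004455
 * before whitelisting the final byte (0x55) in the unicast table.
 */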

static int mvneta_write_hwaddr(struct udevice *dev)
{
	mvneta_mac_addr_set(dev_get_priv(dev),
			    ((struct eth_pdata *)dev_get_plat(dev))->enetaddr,
			    rxq_def);

	return 0;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		dev_err(pp->phydev->dev,
			"bad rx status %08x (buffer oversize), size=%d\n",
			status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (crc error), size=%d\n", status,
			rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (overrun error), size=%d\n", status,
			rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (max frame length error), size=%d\n",
			status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		dev_err(pp->phydev->dev,
			"bad rx status %08x (resource error), size=%d\n",
			status, rx_desc->data_size);
		break;
	}
}

static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
						     int rxq)
{
	return &pp->rxqs[rxq];
}

/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		u32 addr;

		/* U-Boot special: Fill in the rx buffer addresses */
		addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
		mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return 0;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)
{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs_phys = (dma_addr_t)rxq->descs;
	if (rxq->descs == NULL)
		return -ENOMEM;

	WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs_phys = (dma_addr_t)txq->descs;
	if (txq->descs == NULL)
		return -ENOMEM;

	WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	return 0;
}

/* Cleanup Tx queue; also used when mvneta_txq_init() fails */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}

/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			dev_err(pp->phydev->dev, "%s: can't create rxq=%d\n",
				__func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			dev_err(pp->phydev->dev, "%s: can't create txq=%d\n",
				__func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);
}

static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	bool status_change = false;

	if (phydev->link &&
	    (pp->speed != phydev->speed || pp->duplex != phydev->duplex)) {
		u32 val;

		val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
		val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
			 MVNETA_GMAC_CONFIG_GMII_SPEED |
			 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
			 MVNETA_GMAC_AN_SPEED_EN |
			 MVNETA_GMAC_AN_DUPLEX_EN);

		/* FIXME: For fixed-link case, these were the initial settings
		 * used before the code was converted to use PHY_FIXED. Some of
		 * these may look nonsensical (for example BYPASS_AN makes sense
		 * for 1000base-x and 2500base-x modes, AFAIK), and in fact this
		 * may be changed in the future (when support for inband AN will
		 * be added). Also, why is ADVERT_FC enabled if we don't enable
		 * inband AN at all?
		 */
		if (IS_ENABLED(CONFIG_PHY_FIXED) &&
		    pp->phydev->phy_id == PHY_FIXED_ID)
			val = MVNETA_GMAC_IB_BYPASS_AN_EN |
			      MVNETA_GMAC_SET_FC_EN |
			      MVNETA_GMAC_ADVERT_FC_EN |
			      MVNETA_GMAC_SAMPLE_TX_CFG_EN;

		if (phydev->duplex)
			val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

		if (phydev->speed == SPEED_1000)
			val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
		else if (phydev->speed == SPEED_100)
			val |= MVNETA_GMAC_CONFIG_MII_SPEED;

		mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

		pp->duplex = phydev->duplex;
		pp->speed = phydev->speed;
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = true;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}

static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);

	mvneta_start_dev(pp);

	return 0;
}

/* Initialize hw */
static int mvneta_init2(struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->txqs[0].descs = buffer_loc.tx_descs;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* U-Boot special: use preallocated area */
	pp->rxqs[0].descs = buffer_loc.rx_descs;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}

/* platform glue : initialize decoding windows */

/*
 * Unlike the Armada 380, the Armada 3700 has two layers of decode
 * windows for GBE: the first layer is the GbE address window that
 * resides inside the GBE unit; the second layer is the fabric address
 * window, which is located in the NIC400 (South Fabric).
 * To simplify the address decode configuration for Armada 3700, we
 * bypass the first layer of GBE decode window by setting the first
 * window to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leave the
	 * work to MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by setting bit 0 to 0 */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full Access (read or write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}

Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1315 | static void mvneta_conf_mbus_windows(struct mvneta_port *pp) |
| 1316 | { |
| 1317 | const struct mbus_dram_target_info *dram; |
| 1318 | u32 win_enable; |
| 1319 | u32 win_protect; |
| 1320 | int i; |
| 1321 | |
| 1322 | dram = mvebu_mbus_dram_info(); |
| 1323 | for (i = 0; i < 6; i++) { |
| 1324 | mvreg_write(pp, MVNETA_WIN_BASE(i), 0); |
| 1325 | mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); |
| 1326 | |
| 1327 | if (i < 4) |
| 1328 | mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); |
| 1329 | } |
| 1330 | |
| 1331 | win_enable = 0x3f; |
| 1332 | win_protect = 0; |
| 1333 | |
| 1334 | for (i = 0; i < dram->num_cs; i++) { |
| 1335 | const struct mbus_dram_window *cs = dram->cs + i; |
| 1336 | mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) | |
| 1337 | (cs->mbus_attr << 8) | dram->mbus_dram_target_id); |
| 1338 | |
| 1339 | mvreg_write(pp, MVNETA_WIN_SIZE(i), |
| 1340 | (cs->size - 1) & 0xffff0000); |
| 1341 | |
| 1342 | win_enable &= ~(1 << i); |
| 1343 | win_protect |= 3 << (2 * i); |
| 1344 | } |
| 1345 | |
| 1346 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); |
| 1347 | } |
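/*
 * Worked example (illustrative only; the base/size/attribute values are
 * assumed, not taken from a real board): for a single DRAM chip select
 * with base 0x00000000, size 1 GiB, mbus_attr 0x0e and DRAM target ID
 * 0x0, the loop above programs window 0 as:
 *
 *   MVNETA_WIN_BASE(0) = (0x00000000 & 0xffff0000) | (0x0e << 8) | 0x0
 *                      = 0x00000e00
 *   MVNETA_WIN_SIZE(0) = (0x40000000 - 1) & 0xffff0000 = 0x3fff0000
 *   win_enable         = 0x3f & ~BIT(0) = 0x3e
 *
 * win_protect is accumulated (3 = full access per window) but is not
 * written to the hardware in this version, so the access-protect
 * register keeps its reset default.
 */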
| 1348 | |
Chris Packham | 44b7cc7 | 2022-11-05 17:23:56 +1300 | [diff] [blame] | 1349 | static void mvneta_conf_ac5_cnm_xbar_windows(struct mvneta_port *pp) |
| 1350 | { |
| 1351 | int i; |
| 1352 | |
| 1353 | /* Clear all windows */ |
| 1354 | for (i = 0; i < 6; i++) { |
| 1355 | mvreg_write(pp, MVNETA_WIN_BASE(i), 0); |
| 1356 | mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); |
| 1357 | |
| 1358 | if (i < 4) |
| 1359 | mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); |
| 1360 | } |
| 1361 | |
| 1362 | /* |
| 1363 | 	 * Set up window #0 with base 0x0 targeting XBAR port 2 (AMB2), attribute 0xb, size 4GB. 
| 1364 | 	 * The AMB2 address decoder remaps 0x0 to the DDR 64-bit base address. 
| 1365 | */ |
| 1366 | mvreg_write(pp, MVNETA_WIN_BASE(0), |
| 1367 | (MVNETA_AC5_CNM_DDR_ATTR << 8) | MVNETA_AC5_CNM_DDR_TARGET); |
| 1368 | mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000); |
| 1369 | mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, 0x3e); |
| 1370 | } |
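/*
 * Assuming MVNETA_AC5_CNM_DDR_TARGET/_ATTR match the values named in
 * the comment above (0x2/0xb), window 0 ends up with BASE =
 * (0xb << 8) | 0x2 = 0x00000b02 and SIZE = 0xffff0000 (4 GB), while
 * BASE_ADDR_ENABLE = 0x3e leaves only window 0 enabled (the enable
 * bits are active-low).
 */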
| 1371 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1372 | /* Power up the port */ |
| 1373 | static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode) |
| 1374 | { |
| 1375 | u32 ctrl; |
| 1376 | |
| 1377 | /* MAC Cause register should be cleared */ |
| 1378 | mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0); |
| 1379 | |
| 1380 | ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2); |
| 1381 | |
| 1382 | /* Even though it might look weird, when we're configured in |
| 1383 | * SGMII or QSGMII mode, the RGMII bit needs to be set. |
| 1384 | */ |
| 1385 | switch (phy_mode) { |
| 1386 | case PHY_INTERFACE_MODE_QSGMII: |
| 1387 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO); |
| 1388 | ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; |
| 1389 | break; |
| 1390 | case PHY_INTERFACE_MODE_SGMII: |
| 1391 | mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO); |
| 1392 | ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII; |
| 1393 | break; |
| 1394 | case PHY_INTERFACE_MODE_RGMII: |
| 1395 | case PHY_INTERFACE_MODE_RGMII_ID: |
| 1396 | ctrl |= MVNETA_GMAC2_PORT_RGMII; |
| 1397 | break; |
| 1398 | default: |
| 1399 | return -EINVAL; |
| 1400 | } |
| 1401 | |
| 1402 | /* Cancel Port Reset */ |
| 1403 | ctrl &= ~MVNETA_GMAC2_PORT_RESET; |
| 1404 | mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); |
| 1405 | |
| 1406 | while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) & |
| 1407 | MVNETA_GMAC2_PORT_RESET) != 0) |
| 1408 | continue; |
| 1409 | |
| 1410 | return 0; |
| 1411 | } |
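/*
 * Summary of the per-mode configuration applied above:
 *
 *   QSGMII     - SERDES_CFG = QSGMII proto; GMAC2 |= PCS_ENABLE | PORT_RGMII
 *   SGMII      - SERDES_CFG = SGMII proto;  GMAC2 |= PCS_ENABLE | PORT_RGMII
 *   RGMII(_ID) - GMAC2 |= PORT_RGMII (SERDES left untouched)
 *
 * Any other PHY mode is rejected with -EINVAL before the port reset is
 * released; the final loop then busy-waits, without a timeout, for the
 * reset bit to self-clear.
 */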
| 1412 | |
| 1413 | /* Device initialization routine */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1414 | static int mvneta_init(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1415 | { |
Simon Glass | fa20e93 | 2020-12-03 16:55:20 -0700 | [diff] [blame] | 1416 | struct eth_pdata *pdata = dev_get_plat(dev); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1417 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1418 | int err; |
| 1419 | |
| 1420 | pp->tx_ring_size = MVNETA_MAX_TXD; |
| 1421 | pp->rx_ring_size = MVNETA_MAX_RXD; |
| 1422 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1423 | err = mvneta_init2(pp); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1424 | if (err < 0) { |
Sean Anderson | e0d0004 | 2020-09-15 10:44:54 -0400 | [diff] [blame] | 1425 | dev_err(dev, "can't init eth hal\n"); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1426 | return err; |
| 1427 | } |
| 1428 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1429 | mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1430 | |
| 1431 | err = mvneta_port_power_up(pp, pp->phy_interface); |
| 1432 | if (err < 0) { |
Sean Anderson | e0d0004 | 2020-09-15 10:44:54 -0400 | [diff] [blame] | 1433 | dev_err(dev, "can't power up port\n"); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1434 | return err; |
| 1435 | } |
| 1436 | |
| 1437 | 	/* Call open() now as it needs to be done before running send() */ 
| 1438 | mvneta_open(dev); |
| 1439 | |
| 1440 | return 0; |
| 1441 | } |
| 1442 | |
| 1443 | /* U-Boot-only functions follow here */ 
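/*
 * mvneta_init() is not called from probe; mvneta_start() below invokes
 * it on first use, so the descriptor rings and the port are only
 * brought up once the interface is actually needed.
 */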
| 1444 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1445 | static int mvneta_start(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1446 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1447 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1448 | struct phy_device *phydev; |
| 1449 | |
| 1450 | mvneta_port_power_up(pp, pp->phy_interface); |
| 1451 | |
| 1452 | if (!pp->init || pp->link == 0) { |
Marek Behún | 7eba470eb | 2022-04-27 12:41:58 +0200 | [diff] [blame] | 1453 | phydev = dm_eth_phy_connect(dev); |
| 1454 | if (!phydev) { |
| 1455 | printf("dm_eth_phy_connect failed\n"); |
| 1456 | return -ENODEV; |
| 1457 | } |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1458 | |
Marek Behún | 7eba470eb | 2022-04-27 12:41:58 +0200 | [diff] [blame] | 1459 | pp->phydev = phydev; |
| 1460 | phy_config(phydev); |
| 1461 | phy_startup(phydev); |
| 1462 | if (!phydev->link) { |
| 1463 | printf("%s: No link.\n", phydev->dev->name); |
| 1464 | return -1; |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1465 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1466 | |
Marek Behún | 7eba470eb | 2022-04-27 12:41:58 +0200 | [diff] [blame] | 1467 | /* Full init on first call */ |
| 1468 | mvneta_init(dev); |
| 1469 | pp->init = 1; |
| 1470 | } else { |
| 1471 | 		/* On all subsequent calls, this is enough */ 
| 1472 | mvneta_port_up(pp); |
| 1473 | mvneta_port_enable(pp); |
| 1474 | } |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1475 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1476 | return 0; |
| 1477 | } |
| 1478 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1479 | static int mvneta_send(struct udevice *dev, void *packet, int length) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1480 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1481 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1482 | struct mvneta_tx_queue *txq = &pp->txqs[0]; |
| 1483 | struct mvneta_tx_desc *tx_desc; |
| 1484 | int sent_desc; |
| 1485 | u32 timeout = 0; |
| 1486 | |
| 1487 | /* Get a descriptor for the first part of the packet */ |
| 1488 | tx_desc = mvneta_txq_next_desc_get(txq); |
| 1489 | |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1490 | tx_desc->buf_phys_addr = (u32)(uintptr_t)packet; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1491 | tx_desc->data_size = length; |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1492 | flush_dcache_range((ulong)packet, |
| 1493 | (ulong)packet + ALIGN(length, PKTALIGN)); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1494 | |
| 1495 | /* First and Last descriptor */ |
| 1496 | tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC; |
| 1497 | mvneta_txq_pend_desc_add(pp, txq, 1); |
| 1498 | |
| 1499 | /* Wait for packet to be sent (queue might help with speed here) */ |
| 1500 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); |
| 1501 | while (!sent_desc) { |
| 1502 | if (timeout++ > 10000) { |
| 1503 | printf("timeout: packet not sent\n"); |
| 1504 | return -1; |
| 1505 | } |
| 1506 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); |
| 1507 | } |
| 1508 | |
| 1509 | /* txDone has increased - hw sent packet */ |
| 1510 | mvneta_txq_sent_desc_dec(pp, txq, sent_desc); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1511 | |
| 1512 | return 0; |
| 1513 | } |
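/*
 * Minimal TX usage sketch from a caller's perspective ("pkt" and "len"
 * are hypothetical caller-owned variables):
 *
 *   mvneta_start(dev);
 *   mvneta_send(dev, pkt, len);
 *
 * Since mvneta_send() flushes the packet from the D-cache and then
 * polls until the hardware reports the descriptor as sent, the caller
 * may reuse "pkt" as soon as the call returns.
 */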
| 1514 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1515 | static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1516 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1517 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1518 | int rx_done; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1519 | struct mvneta_rx_queue *rxq; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1520 | int rx_bytes = 0; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1521 | |
| 1522 | /* get rx queue */ |
| 1523 | rxq = mvneta_rxq_handle_get(pp, rxq_def); |
| 1524 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1525 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1526 | if (rx_done) { |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1527 | struct mvneta_rx_desc *rx_desc; |
| 1528 | unsigned char *data; |
| 1529 | u32 rx_status; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1530 | |
| 1531 | /* |
| 1532 | 		 * No cache invalidation needed here, since the descriptors are 
| 1533 | 		 * located in an uncached memory region 
| 1534 | */ |
| 1535 | rx_desc = mvneta_rxq_next_desc_get(rxq); |
| 1536 | |
| 1537 | rx_status = rx_desc->status; |
| 1538 | if (!mvneta_rxq_desc_is_first_last(rx_status) || |
| 1539 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { |
| 1540 | mvneta_rx_error(pp, rx_desc); |
| 1541 | /* leave the descriptor untouched */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1542 | return -EIO; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1543 | } |
| 1544 | |
| 1545 | 		/* 2 bytes for the Marvell header, 4 bytes for the CRC */ 
| 1546 | rx_bytes = rx_desc->data_size - 6; |
| 1547 | |
| 1548 | 		/* Give the packet to the stack - skip the first 2 bytes */ 
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1549 | data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1550 | /* |
| 1551 | 		 * No cache invalidation needed here, since the rx buffers are 
| 1552 | 		 * located in an uncached memory region 
| 1553 | */ |
Chris Packham | 44b7cc7 | 2022-11-05 17:23:56 +1300 | [diff] [blame] | 1554 | *packetp = data + pp->dma_base; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1555 | |
Jason Brown | c7bc183 | 2017-11-28 11:12:43 -0800 | [diff] [blame] | 1556 | /* |
| 1557 | * Only mark one descriptor as free |
| 1558 | * since only one was processed |
| 1559 | */ |
| 1560 | mvneta_rxq_desc_num_update(pp, rxq, 1, 1); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1561 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1562 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1563 | return rx_bytes; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1564 | } |
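/*
 * Return convention: the number of payload bytes, with *packetp
 * pointing into the uncached RX buffer (2-byte Marvell header skipped,
 * 4-byte CRC excluded from the length, dma_base offset applied); 0
 * when no frame is pending; or -EIO on a descriptor error. No separate
 * free step is needed since the descriptor is recycled here via
 * mvneta_rxq_desc_num_update().
 */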
| 1565 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1566 | static int mvneta_probe(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1567 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1568 | struct mvneta_port *pp = dev_get_priv(dev); |
Robert Marko | 58c9873 | 2022-03-24 10:57:37 +0100 | [diff] [blame] | 1569 | #if CONFIG_IS_ENABLED(DM_GPIO) |
| 1570 | struct ofnode_phandle_args sfp_args; |
| 1571 | #endif |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1572 | void *bd_space; |
Chris Packham | 44b7cc7 | 2022-11-05 17:23:56 +1300 | [diff] [blame] | 1573 | phys_addr_t cpu; |
| 1574 | dma_addr_t bus; |
| 1575 | u64 size; |
| 1576 | int ret; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1577 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1578 | /* |
| 1579 | 	 * Allocate the buffer area for descriptors and rx buffers. This is 
| 1580 | 	 * done only once for all interfaces, as only one interface can be 
Chris Packham | 0f81d7a | 2016-08-29 20:54:02 +1200 | [diff] [blame] | 1581 | 	 * active at a time. Make this area DMA-safe by disabling the D-cache. 
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1582 | */ |
| 1583 | if (!buffer_loc.tx_descs) { |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1584 | u32 size; |
| 1585 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1586 | /* Align buffer area for descs and rx_buffers to 1MiB */ |
| 1587 | bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); |
Rabeeh Khoury | 31ad3ce | 2018-06-19 21:36:50 +0300 | [diff] [blame] | 1588 | flush_dcache_range((ulong)bd_space, (ulong)bd_space + BD_SPACE); |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1589 | mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE, |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1590 | DCACHE_OFF); |
| 1591 | buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space; |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1592 | size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc), |
| 1593 | ARCH_DMA_MINALIGN); |
Rabeeh Khoury | f046bed | 2018-06-19 21:36:51 +0300 | [diff] [blame] | 1594 | memset(buffer_loc.tx_descs, 0, size); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1595 | buffer_loc.rx_descs = (struct mvneta_rx_desc *) |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1596 | ((phys_addr_t)bd_space + size); |
| 1597 | size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc), |
| 1598 | ARCH_DMA_MINALIGN); |
| 1599 | buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1600 | } |
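/*
 * Resulting layout of the 1 MiB-aligned, uncached bd_space area (the
 * concrete offsets depend on MVNETA_MAX_TXD/MVNETA_MAX_RXD and
 * ARCH_DMA_MINALIGN; shown schematically):
 *
 *   bd_space + 0               : TX descriptor ring
 *   bd_space + tx_sz           : RX descriptor ring
 *   bd_space + tx_sz + rx_sz   : RX packet buffers
 *
 * where tx_sz and rx_sz are the roundup() values computed above.
 * Mapping the area DCACHE_OFF is what lets the RX/TX paths skip cache
 * maintenance on descriptors and RX buffers.
 */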
| 1601 | |
Marek Behún | 645c744 | 2022-04-27 12:41:44 +0200 | [diff] [blame] | 1602 | pp->base = dev_read_addr_ptr(dev); |
| 1603 | pp->phy_interface = dev_read_phy_mode(dev); |
| 1604 | if (pp->phy_interface == PHY_INTERFACE_MODE_NA) |
| 1605 | return -EINVAL; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1606 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1607 | /* Configure MBUS address windows */ |
Simon Glass | 54cbcc8 | 2017-05-18 20:08:57 -0600 | [diff] [blame] | 1608 | if (device_is_compatible(dev, "marvell,armada-3700-neta")) |
Stefan Roese | 572be4a | 2016-05-19 17:46:36 +0200 | [diff] [blame] | 1609 | mvneta_bypass_mbus_windows(pp); |
Chris Packham | 44b7cc7 | 2022-11-05 17:23:56 +1300 | [diff] [blame] | 1610 | else if (device_is_compatible(dev, "marvell,armada-ac5-neta")) |
| 1611 | mvneta_conf_ac5_cnm_xbar_windows(pp); |
Stefan Roese | 572be4a | 2016-05-19 17:46:36 +0200 | [diff] [blame] | 1612 | else |
| 1613 | mvneta_conf_mbus_windows(pp); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1614 | |
Chris Packham | 44b7cc7 | 2022-11-05 17:23:56 +1300 | [diff] [blame] | 1615 | 	/* Fetch the dma-ranges property */ 
| 1616 | ret = dev_get_dma_range(dev, &cpu, &bus, &size); |
| 1617 | if (!ret) |
| 1618 | pp->dma_base = cpu; |
| 1619 | else |
| 1620 | pp->dma_base = 0; |
| 1621 | |
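/*
 * pp->dma_base is the CPU-to-bus offset from an optional "dma-ranges"
 * DT property. mvneta_recv() adds it to the RX buffer cookie so that
 * the address handed back to the caller is valid from the CPU's point
 * of view on SoCs where the NIC's bus view of DRAM is offset (e.g.
 * AC5); it stays 0 when no such property is present.
 */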
Simon Glass | fa4689a | 2019-12-06 21:41:35 -0700 | [diff] [blame] | 1622 | #if CONFIG_IS_ENABLED(DM_GPIO) |
Marek Behún | 1cbd104 | 2022-04-27 12:41:52 +0200 | [diff] [blame] | 1623 | if (!dev_read_phandle_with_args(dev, "sfp", NULL, 0, 0, &sfp_args) && |
| 1624 | ofnode_is_enabled(sfp_args.node)) |
Robert Marko | 58c9873 | 2022-03-24 10:57:37 +0100 | [diff] [blame] | 1625 | gpio_request_by_name_nodev(sfp_args.node, "tx-disable-gpio", 0, |
| 1626 | &pp->sfp_tx_disable_gpio, GPIOD_IS_OUT); |
| 1627 | |
Aditya Prayoga | c9fe02a | 2018-12-05 00:39:23 +0800 | [diff] [blame] | 1628 | gpio_request_by_name(dev, "phy-reset-gpios", 0, |
| 1629 | &pp->phy_reset_gpio, GPIOD_IS_OUT); |
| 1630 | |
| 1631 | if (dm_gpio_is_valid(&pp->phy_reset_gpio)) { |
| 1632 | dm_gpio_set_value(&pp->phy_reset_gpio, 1); |
| 1633 | mdelay(10); |
| 1634 | dm_gpio_set_value(&pp->phy_reset_gpio, 0); |
| 1635 | } |
Robert Marko | 58c9873 | 2022-03-24 10:57:37 +0100 | [diff] [blame] | 1636 | |
| 1637 | if (dm_gpio_is_valid(&pp->sfp_tx_disable_gpio)) |
| 1638 | dm_gpio_set_value(&pp->sfp_tx_disable_gpio, 0); |
Aditya Prayoga | c9fe02a | 2018-12-05 00:39:23 +0800 | [diff] [blame] | 1639 | #endif |
| 1640 | |
Marek Behún | b407baa | 2022-04-27 12:41:51 +0200 | [diff] [blame] | 1641 | return 0; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1642 | } |
| 1643 | |
| 1644 | static void mvneta_stop(struct udevice *dev) |
| 1645 | { |
| 1646 | struct mvneta_port *pp = dev_get_priv(dev); |
| 1647 | |
| 1648 | mvneta_port_down(pp); |
| 1649 | mvneta_port_disable(pp); |
| 1650 | } |
| 1651 | |
| 1652 | static const struct eth_ops mvneta_ops = { |
| 1653 | .start = mvneta_start, |
| 1654 | .send = mvneta_send, |
| 1655 | .recv = mvneta_recv, |
| 1656 | .stop = mvneta_stop, |
Matt Pelland | 668a5f2 | 2018-03-27 13:18:25 -0400 | [diff] [blame] | 1657 | .write_hwaddr = mvneta_write_hwaddr, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1658 | }; |
| 1659 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1660 | static const struct udevice_id mvneta_ids[] = { |
| 1661 | { .compatible = "marvell,armada-370-neta" }, |
Chris Packham | 44b7cc7 | 2022-11-05 17:23:56 +1300 | [diff] [blame] | 1662 | { .compatible = "marvell,armada-ac5-neta" }, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1663 | { .compatible = "marvell,armada-xp-neta" }, |
Stefan Roese | 572be4a | 2016-05-19 17:46:36 +0200 | [diff] [blame] | 1664 | { .compatible = "marvell,armada-3700-neta" }, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1665 | { } |
| 1666 | }; |
| 1667 | |
| 1668 | U_BOOT_DRIVER(mvneta) = { |
| 1669 | .name = "mvneta", |
| 1670 | .id = UCLASS_ETH, |
| 1671 | .of_match = mvneta_ids, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1672 | .probe = mvneta_probe, |
| 1673 | .ops = &mvneta_ops, |
Simon Glass | 8a2b47f | 2020-12-03 16:55:17 -0700 | [diff] [blame] | 1674 | .priv_auto = sizeof(struct mvneta_port), |
Simon Glass | 71fa5b4 | 2020-12-03 16:55:18 -0700 | [diff] [blame] | 1675 | .plat_auto = sizeof(struct eth_pdata), |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1676 | }; |