// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell NETA network card for Armada XP and Armada 370 SoCs.
 *
 * U-Boot version:
 * Copyright (C) 2014-2015 Stefan Roese <sr@denx.de>
 *
 * Based on the Linux version which is:
 * Copyright (C) 2012 Marvell
 *
 * Rami Rosen <rosenr@marvell.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <log.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <malloc.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include <linux/compat.h>
#include <linux/mbus.h>
#include <asm-generic/gpio.h>

DECLARE_GLOBAL_DATA_PTR;

#if !defined(CONFIG_PHYLIB)
# error Marvell mvneta requires PHYLIB
#endif

#define CONFIG_NR_CPUS			1
#define ETH_HLEN			14	/* Total octets in header */

/* 2(HW hdr) 14(MAC hdr) 4(CRC) 32(extra for cache prefetch) */
#define WRAP				(2 + ETH_HLEN + 4 + 32)
#define MTU				1500
#define RX_BUFFER_SIZE			(ALIGN(MTU + WRAP, ARCH_DMA_MINALIGN))
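
/*
 * Worked example (illustrative): WRAP = 2 + 14 + 4 + 32 = 52, so
 * MTU + WRAP = 1552 bytes. With ARCH_DMA_MINALIGN = 32 this rounds up to
 * a 1568-byte RX buffer; with a 64-byte cache line it would be 1600. The
 * actual value depends on the platform's ARCH_DMA_MINALIGN.
 */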

#define MVNETA_SMI_TIMEOUT			10000

/* Registers */
#define MVNETA_RXQ_CONFIG_REG(q)		(0x1400 + ((q) << 2))
#define MVNETA_RXQ_HW_BUF_ALLOC			BIT(1)
#define MVNETA_RXQ_PKT_OFFSET_ALL_MASK		(0xf << 8)
#define MVNETA_RXQ_PKT_OFFSET_MASK(offs)	((offs) << 8)
#define MVNETA_RXQ_THRESHOLD_REG(q)		(0x14c0 + ((q) << 2))
#define MVNETA_RXQ_NON_OCCUPIED(v)		((v) << 16)
#define MVNETA_RXQ_BASE_ADDR_REG(q)		(0x1480 + ((q) << 2))
#define MVNETA_RXQ_SIZE_REG(q)			(0x14a0 + ((q) << 2))
#define MVNETA_RXQ_BUF_SIZE_SHIFT		19
#define MVNETA_RXQ_BUF_SIZE_MASK		(0x1fff << 19)
#define MVNETA_RXQ_STATUS_REG(q)		(0x14e0 + ((q) << 2))
#define MVNETA_RXQ_OCCUPIED_ALL_MASK		0x3fff
#define MVNETA_RXQ_STATUS_UPDATE_REG(q)		(0x1500 + ((q) << 2))
#define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT	16
#define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX		255
#define MVNETA_PORT_RX_RESET			0x1cc0
#define MVNETA_PORT_RX_DMA_RESET		BIT(0)
#define MVNETA_PHY_ADDR				0x2000
#define MVNETA_PHY_ADDR_MASK			0x1f
#define MVNETA_SMI				0x2004
#define MVNETA_PHY_REG_MASK			0x1f
/* SMI register fields */
#define MVNETA_SMI_DATA_OFFS			0	/* Data */
#define MVNETA_SMI_DATA_MASK			(0xffff << MVNETA_SMI_DATA_OFFS)
#define MVNETA_SMI_DEV_ADDR_OFFS		16	/* PHY device address */
#define MVNETA_SMI_REG_ADDR_OFFS		21	/* PHY device reg addr */
#define MVNETA_SMI_OPCODE_OFFS			26	/* Write/Read opcode */
#define MVNETA_SMI_OPCODE_READ			(1 << MVNETA_SMI_OPCODE_OFFS)
#define MVNETA_SMI_READ_VALID			(1 << 27)	/* Read Valid */
#define MVNETA_SMI_BUSY				(1 << 28)	/* Busy */
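
/*
 * For illustration, a read of register 2 on the PHY at address 1 is
 * encoded as:
 *
 *	smi_reg = (1 << MVNETA_SMI_DEV_ADDR_OFFS) |
 *		  (2 << MVNETA_SMI_REG_ADDR_OFFS) |
 *		  MVNETA_SMI_OPCODE_READ;		// = 0x04410000
 *
 * Completion is signalled by MVNETA_SMI_READ_VALID; the result is in the
 * low 16 bits (MVNETA_SMI_DATA_MASK).
 */
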
#define MVNETA_MBUS_RETRY			0x2010
#define MVNETA_UNIT_INTR_CAUSE			0x2080
#define MVNETA_UNIT_CONTROL			0x20B0
#define MVNETA_PHY_POLLING_ENABLE		BIT(1)
#define MVNETA_WIN_BASE(w)			(0x2200 + ((w) << 3))
#define MVNETA_WIN_SIZE(w)			(0x2204 + ((w) << 3))
#define MVNETA_WIN_REMAP(w)			(0x2280 + ((w) << 2))
#define MVNETA_WIN_SIZE_MASK			(0xffff0000)
#define MVNETA_BASE_ADDR_ENABLE			0x2290
#define MVNETA_BASE_ADDR_ENABLE_BIT		0x1
#define MVNETA_PORT_ACCESS_PROTECT		0x2294
#define MVNETA_PORT_ACCESS_PROTECT_WIN0_RW	0x3
#define MVNETA_PORT_CONFIG			0x2400
#define MVNETA_UNI_PROMISC_MODE			BIT(0)
#define MVNETA_DEF_RXQ(q)			((q) << 1)
#define MVNETA_DEF_RXQ_ARP(q)			((q) << 4)
#define MVNETA_TX_UNSET_ERR_SUM			BIT(12)
#define MVNETA_DEF_RXQ_TCP(q)			((q) << 16)
#define MVNETA_DEF_RXQ_UDP(q)			((q) << 19)
#define MVNETA_DEF_RXQ_BPDU(q)			((q) << 22)
#define MVNETA_RX_CSUM_WITH_PSEUDO_HDR		BIT(25)
#define MVNETA_PORT_CONFIG_DEFL_VALUE(q)	(MVNETA_DEF_RXQ(q)       | \
						 MVNETA_DEF_RXQ_ARP(q)   | \
						 MVNETA_DEF_RXQ_TCP(q)   | \
						 MVNETA_DEF_RXQ_UDP(q)   | \
						 MVNETA_DEF_RXQ_BPDU(q)  | \
						 MVNETA_TX_UNSET_ERR_SUM | \
						 MVNETA_RX_CSUM_WITH_PSEUDO_HDR)
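
/*
 * Note: for the single default queue 0 used by this driver, all (q)
 * fields collapse to zero and MVNETA_PORT_CONFIG_DEFL_VALUE(0) evaluates
 * to MVNETA_TX_UNSET_ERR_SUM | MVNETA_RX_CSUM_WITH_PSEUDO_HDR, i.e.
 * BIT(12) | BIT(25) = 0x02001000.
 */
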
#define MVNETA_PORT_CONFIG_EXTEND		0x2404
#define MVNETA_MAC_ADDR_LOW			0x2414
#define MVNETA_MAC_ADDR_HIGH			0x2418
#define MVNETA_SDMA_CONFIG			0x241c
#define MVNETA_SDMA_BRST_SIZE_16		4
#define MVNETA_RX_BRST_SZ_MASK(burst)		((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP			BIT(4)
#define MVNETA_TX_NO_DATA_SWAP			BIT(5)
#define MVNETA_DESC_SWAP			BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst)		((burst) << 22)
#define MVNETA_PORT_STATUS			0x2444
#define MVNETA_TX_IN_PRGRS			BIT(1)
#define MVNETA_TX_FIFO_EMPTY			BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE		0x247c
#define MVNETA_SERDES_CFG			0x24A0
#define MVNETA_SGMII_SERDES_PROTO		0x0cc7
#define MVNETA_QSGMII_SERDES_PROTO		0x0667
#define MVNETA_TYPE_PRIO			0x24bc
#define MVNETA_FORCE_UNI			BIT(21)
#define MVNETA_TXQ_CMD_1			0x24e4
#define MVNETA_TXQ_CMD				0x2448
#define MVNETA_TXQ_DISABLE_SHIFT		8
#define MVNETA_TXQ_ENABLE_MASK			0x000000ff
#define MVNETA_ACC_MODE				0x2500
#define MVNETA_CPU_MAP(cpu)			(0x2540 + ((cpu) << 2))
#define MVNETA_CPU_RXQ_ACCESS_ALL_MASK		0x000000ff
#define MVNETA_CPU_TXQ_ACCESS_ALL_MASK		0x0000ff00
#define MVNETA_RXQ_TIME_COAL_REG(q)		(0x2580 + ((q) << 2))

/* Exception Interrupt Port/Queue Cause register */

#define MVNETA_INTR_NEW_CAUSE			0x25a0
#define MVNETA_INTR_NEW_MASK			0x25a4

/* bits 0..7 = TXQ SENT, one bit per queue.
 * bits 8..15 = RXQ OCCUP, one bit per queue.
 * bits 16..23 = RXQ FREE, one bit per queue.
 * bit 29 = OLD_REG_SUM, see old reg ?
 * bit 30 = TX_ERR_SUM, one bit for 4 ports
 * bit 31 = MISC_SUM, one bit for 4 ports
 */
#define MVNETA_TX_INTR_MASK(nr_txqs)		(((1 << nr_txqs) - 1) << 0)
#define MVNETA_TX_INTR_MASK_ALL			(0xff << 0)
#define MVNETA_RX_INTR_MASK(nr_rxqs)		(((1 << nr_rxqs) - 1) << 8)
#define MVNETA_RX_INTR_MASK_ALL			(0xff << 8)

#define MVNETA_INTR_OLD_CAUSE			0x25a8
#define MVNETA_INTR_OLD_MASK			0x25ac

/* Data Path Port/Queue Cause Register */
#define MVNETA_INTR_MISC_CAUSE			0x25b0
#define MVNETA_INTR_MISC_MASK			0x25b4
#define MVNETA_INTR_ENABLE			0x25b8

#define MVNETA_RXQ_CMD				0x2680
#define MVNETA_RXQ_DISABLE_SHIFT		8
#define MVNETA_RXQ_ENABLE_MASK			0x000000ff
#define MVETH_TXQ_TOKEN_COUNT_REG(q)		(0x2700 + ((q) << 4))
#define MVETH_TXQ_TOKEN_CFG_REG(q)		(0x2704 + ((q) << 4))
#define MVNETA_GMAC_CTRL_0			0x2c00
#define MVNETA_GMAC_MAX_RX_SIZE_SHIFT		2
#define MVNETA_GMAC_MAX_RX_SIZE_MASK		0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE		BIT(0)
#define MVNETA_GMAC_CTRL_2			0x2c08
#define MVNETA_GMAC2_PCS_ENABLE			BIT(3)
#define MVNETA_GMAC2_PORT_RGMII			BIT(4)
#define MVNETA_GMAC2_PORT_RESET			BIT(6)
#define MVNETA_GMAC_STATUS			0x2c10
#define MVNETA_GMAC_LINK_UP			BIT(0)
#define MVNETA_GMAC_SPEED_1000			BIT(1)
#define MVNETA_GMAC_SPEED_100			BIT(2)
#define MVNETA_GMAC_FULL_DUPLEX			BIT(3)
#define MVNETA_GMAC_RX_FLOW_CTRL_ENABLE		BIT(4)
#define MVNETA_GMAC_TX_FLOW_CTRL_ENABLE		BIT(5)
#define MVNETA_GMAC_RX_FLOW_CTRL_ACTIVE		BIT(6)
#define MVNETA_GMAC_TX_FLOW_CTRL_ACTIVE		BIT(7)
#define MVNETA_GMAC_AUTONEG_CONFIG		0x2c0c
#define MVNETA_GMAC_FORCE_LINK_DOWN		BIT(0)
#define MVNETA_GMAC_FORCE_LINK_PASS		BIT(1)
#define MVNETA_GMAC_FORCE_LINK_UP		(BIT(0) | BIT(1))
#define MVNETA_GMAC_IB_BYPASS_AN_EN		BIT(3)
#define MVNETA_GMAC_CONFIG_MII_SPEED		BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED		BIT(6)
#define MVNETA_GMAC_AN_SPEED_EN			BIT(7)
#define MVNETA_GMAC_SET_FC_EN			BIT(8)
#define MVNETA_GMAC_ADVERT_FC_EN		BIT(9)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX		BIT(12)
#define MVNETA_GMAC_AN_DUPLEX_EN		BIT(13)
#define MVNETA_GMAC_SAMPLE_TX_CFG_EN		BIT(15)
#define MVNETA_MIB_COUNTERS_BASE		0x3080
#define MVNETA_MIB_LATE_COLLISION		0x7c
#define MVNETA_DA_FILT_SPEC_MCAST		0x3400
#define MVNETA_DA_FILT_OTH_MCAST		0x3500
#define MVNETA_DA_FILT_UCAST_BASE		0x3600
#define MVNETA_TXQ_BASE_ADDR_REG(q)		(0x3c00 + ((q) << 2))
#define MVNETA_TXQ_SIZE_REG(q)			(0x3c20 + ((q) << 2))
#define MVNETA_TXQ_SENT_THRESH_ALL_MASK		0x3fff0000
#define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT		16
#define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT		16
#define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
#define MVNETA_PORT_TX_RESET			0x3cf0
#define MVNETA_PORT_TX_DMA_RESET		BIT(0)
#define MVNETA_TX_MTU				0x3e0c
#define MVNETA_TX_TOKEN_SIZE			0x3e14
#define MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
#define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
#define MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff

/* Descriptor ring Macros */
#define MVNETA_QUEUE_NEXT_DESC(q, index)	\
	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
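
/*
 * Example: with a 16-entry ring (last_desc = 15), MVNETA_QUEUE_NEXT_DESC
 * advances index 14 to 15 and wraps index 15 back to 0.
 */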

/* Various constants */

/* Coalescing */
#define MVNETA_TXDONE_COAL_PKTS		16
#define MVNETA_RX_COAL_PKTS		32
#define MVNETA_RX_COAL_USEC		100

/* The two bytes Marvell header. Either contains a special value used
 * by Marvell switches when a specific hardware mode is enabled (not
 * supported by this driver) or is filled automatically with zeroes on
 * the RX side. Since those two bytes sit in front of the Ethernet
 * header, they leave the IP header aligned on a 4-byte boundary
 * automatically: the hardware skips them on its own.
 */
#define MVNETA_MH_SIZE			2

#define MVNETA_VLAN_TAG_LEN		4

#define MVNETA_CPU_D_CACHE_LINE_SIZE	32
#define MVNETA_TX_CSUM_MAX_SIZE		9800
#define MVNETA_ACC_MODE_EXT		1

/* Timeout constants */
#define MVNETA_TX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_RX_DISABLE_TIMEOUT_MSEC	1000
#define MVNETA_TX_FIFO_EMPTY_TIMEOUT	10000

#define MVNETA_TX_MTU_MAX		0x3ffff

/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD			16

/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD			16

/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE	32

struct mvneta_port {
	void __iomem *base;
	struct mvneta_rx_queue *rxqs;
	struct mvneta_tx_queue *txqs;

	u8 mcast_count[256];
	u16 tx_ring_size;
	u16 rx_ring_size;

	phy_interface_t phy_interface;
	unsigned int link;
	unsigned int duplex;
	unsigned int speed;

	int init;
	int phyaddr;
	struct phy_device *phydev;
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct gpio_desc phy_reset_gpio;
#endif
	struct mii_dev *bus;
};

/* The mvneta_tx_desc and mvneta_rx_desc structures describe the
 * layout of the transmit and reception DMA descriptors, and their
 * layout is therefore defined by the hardware design
 */

#define MVNETA_TX_L3_OFF_SHIFT		0
#define MVNETA_TX_IP_HLEN_SHIFT		8
#define MVNETA_TX_L4_UDP		BIT(16)
#define MVNETA_TX_L3_IP6		BIT(17)
#define MVNETA_TXD_IP_CSUM		BIT(18)
#define MVNETA_TXD_Z_PAD		BIT(19)
#define MVNETA_TXD_L_DESC		BIT(20)
#define MVNETA_TXD_F_DESC		BIT(21)
#define MVNETA_TXD_FLZ_DESC		(MVNETA_TXD_Z_PAD  | \
					 MVNETA_TXD_L_DESC | \
					 MVNETA_TXD_F_DESC)
#define MVNETA_TX_L4_CSUM_FULL		BIT(30)
#define MVNETA_TX_L4_CSUM_NOT		BIT(31)

#define MVNETA_RXD_ERR_CRC		0x0
#define MVNETA_RXD_ERR_SUMMARY		BIT(16)
#define MVNETA_RXD_ERR_OVERRUN		BIT(17)
#define MVNETA_RXD_ERR_LEN		BIT(18)
#define MVNETA_RXD_ERR_RESOURCE		(BIT(17) | BIT(18))
#define MVNETA_RXD_ERR_CODE_MASK	(BIT(17) | BIT(18))
#define MVNETA_RXD_L3_IP4		BIT(25)
#define MVNETA_RXD_FIRST_LAST_DESC	(BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK		BIT(30)

struct mvneta_tx_desc {
	u32  command;		/* Options used by HW for packet transmitting.*/
	u16  reserverd1;	/* csum_l4 (for future use) */
	u16  data_size;		/* Data size of transmitted packet in bytes */
	u32  buf_phys_addr;	/* Physical addr of transmitted buffer */
	u32  reserved2;		/* hw_cmd - (for future use, PMT) */
	u32  reserved3[4];	/* Reserved - (for future use) */
};

struct mvneta_rx_desc {
	u32  status;		/* Info about received packet */
	u16  reserved1;		/* pnc_info - (for future use, PnC) */
	u16  data_size;		/* Size of received packet in bytes */

	u32  buf_phys_addr;	/* Physical address of the buffer */
	u32  reserved2;		/* pnc_flow_id (for future use, PnC) */

	u32  buf_cookie;	/* cookie for access to RX buffer in rx path */
	u16  reserved3;		/* prefetch_cmd, for future use */
	u16  reserved4;		/* csum_l4 - (for future use, PnC) */

	u32  reserved5;		/* pnc_extra PnC (for future use, PnC) */
	u32  reserved6;		/* hw_cmd (for future use, PnC and HWF) */
};

struct mvneta_tx_queue {
	/* Number of this TX queue, in the range 0-7 */
	u8 id;

	/* Number of TX DMA descriptors in the descriptor ring */
	int size;

	/* Index of last TX DMA descriptor that was inserted */
	int txq_put_index;

	/* Index of the TX DMA descriptor to be cleaned up */
	int txq_get_index;

	/* Virtual address of the TX DMA descriptors array */
	struct mvneta_tx_desc *descs;

	/* DMA address of the TX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last TX DMA descriptor */
	int last_desc;

	/* Index of the next TX DMA descriptor to process */
	int next_desc_to_proc;
};

struct mvneta_rx_queue {
	/* rx queue number, in the range 0-7 */
	u8 id;

	/* num of rx descriptors in the rx descriptor ring */
	int size;

	/* Virtual address of the RX DMA descriptors array */
	struct mvneta_rx_desc *descs;

	/* DMA address of the RX DMA descriptors array */
	dma_addr_t descs_phys;

	/* Index of the last RX DMA descriptor */
	int last_desc;

	/* Index of the next RX DMA descriptor to process */
	int next_desc_to_proc;
};

/* U-Boot doesn't use the queues, so set the number to 1 */
static int rxq_number = 1;
static int txq_number = 1;
static int rxq_def;

struct buffer_location {
	struct mvneta_tx_desc *tx_descs;
	struct mvneta_rx_desc *rx_descs;
	u32 rx_buffers;
};

/*
 * All 4 interfaces use the same global buffer, since only one interface
 * can be enabled at once
 */
static struct buffer_location buffer_loc;
/*
 * Page table entries are set to 1MB, or multiples of 1MB
 * (not < 1MB). The driver uses fewer buffer descriptors, so
 * 1MB of BD space is sufficient.
 */
#define BD_SPACE	(1 << 20)
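
/*
 * A minimal sketch of how this area is typically carved up (the actual
 * assignment happens in the probe code outside this excerpt, so treat the
 * exact layout as an assumption): TX descriptor ring first, RX descriptor
 * ring next, RX buffers last, each naturally aligned:
 *
 *	void *bd_space = memalign(ARCH_DMA_MINALIGN, BD_SPACE);
 *
 *	buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
 *	buffer_loc.rx_descs = (struct mvneta_rx_desc *)
 *		(bd_space + MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
 *	buffer_loc.rx_buffers = (u32)
 *		(bd_space + MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
 *		 MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
 */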

/*
 * Dummy implementation that can be overwritten by a board
 * specific function
 */
__weak int board_network_enable(struct mii_dev *bus)
{
	return 0;
}

/* Utility/helper methods */

/* Write helper method */
static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data)
{
	writel(data, pp->base + offset);
}

/* Read helper method */
static u32 mvreg_read(struct mvneta_port *pp, u32 offset)
{
	return readl(pp->base + offset);
}

/* Clear all MIB counters */
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
	int i;

	/* Perform dummy reads from MIB counters */
	for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
		mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
}

/* Rx descriptors helper methods */

/* Checks whether the RX descriptor having this status is both the first
 * and the last descriptor for the RX packet. Each RX packet is currently
 * received through a single RX descriptor, so not having each RX
 * descriptor with its first and last bits set is an error
 */
static int mvneta_rxq_desc_is_first_last(u32 status)
{
	return (status & MVNETA_RXD_FIRST_LAST_DESC) ==
		MVNETA_RXD_FIRST_LAST_DESC;
}

/* Add number of descriptors ready to receive new packets */
static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp,
					  struct mvneta_rx_queue *rxq,
					  int ndescs)
{
	/* Only MVNETA_RXQ_ADD_NON_OCCUPIED_MAX (255) descriptors can
	 * be added at once
	 */
	while (ndescs > MVNETA_RXQ_ADD_NON_OCCUPIED_MAX) {
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
			    (MVNETA_RXQ_ADD_NON_OCCUPIED_MAX <<
			     MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
		ndescs -= MVNETA_RXQ_ADD_NON_OCCUPIED_MAX;
	}

	mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id),
		    (ndescs << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT));
}

/* Get number of RX descriptors occupied by received packets */
static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp,
					struct mvneta_rx_queue *rxq)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id));
	return val & MVNETA_RXQ_OCCUPIED_ALL_MASK;
}

/* Update num of rx desc called upon return from rx path or
 * from mvneta_rxq_drop_pkts().
 */
static void mvneta_rxq_desc_num_update(struct mvneta_port *pp,
				       struct mvneta_rx_queue *rxq,
				       int rx_done, int rx_filled)
{
	u32 val;

	if ((rx_done <= 0xff) && (rx_filled <= 0xff)) {
		val = rx_done |
			(rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT);
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
		return;
	}

	/* Only 255 descriptors can be added at once */
	while ((rx_done > 0) || (rx_filled > 0)) {
		if (rx_done <= 0xff) {
			val = rx_done;
			rx_done = 0;
		} else {
			val = 0xff;
			rx_done -= 0xff;
		}
		if (rx_filled <= 0xff) {
			val |= rx_filled << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled = 0;
		} else {
			val |= 0xff << MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT;
			rx_filled -= 0xff;
		}
		mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val);
	}
}
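
/*
 * Worked example: rx_done = rx_filled = 300 exceeds the 255-per-write
 * limit, so the loop above issues two writes: 0xff | (0xff << 16) first,
 * then 45 | (45 << 16) for the remainder.
 */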

/* Get pointer to next RX descriptor to be processed by SW */
static struct mvneta_rx_desc *
mvneta_rxq_next_desc_get(struct mvneta_rx_queue *rxq)
{
	int rx_desc = rxq->next_desc_to_proc;

	rxq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(rxq, rx_desc);
	return rxq->descs + rx_desc;
}

/* Tx descriptors helper methods */

/* Update HW with number of TX descriptors to be sent */
static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int pend_desc)
{
	u32 val;

	/* Only 255 descriptors can be added at once; assume the caller
	 * processes TX descriptors in quanta less than 256
	 */
	val = pend_desc;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get pointer to next TX descriptor to be processed (send) by HW */
static struct mvneta_tx_desc *
mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
{
	int tx_desc = txq->next_desc_to_proc;

	txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
	return txq->descs + tx_desc;
}

/* Set rxq buf size */
static void mvneta_rxq_buf_size_set(struct mvneta_port *pp,
				    struct mvneta_rx_queue *rxq,
				    int buf_size)
{
	u32 val;

	val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id));

	val &= ~MVNETA_RXQ_BUF_SIZE_MASK;
	val |= ((buf_size >> 3) << MVNETA_RXQ_BUF_SIZE_SHIFT);

	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val);
}

static int mvneta_port_is_fixed_link(struct mvneta_port *pp)
{
	/* phy_addr is set to invalid value for fixed link */
	return pp->phyaddr > PHY_MAX_ADDR;
}
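
/*
 * For reference: in fixed-link setups (e.g. a MAC wired directly to an
 * Ethernet switch) there is no PHY to manage, and the device tree
 * typically carries a fixed-link subnode instead of a phy handle.
 * Illustrative snippet (not taken from this file):
 *
 *	fixed-link {
 *		speed = <1000>;
 *		full-duplex;
 *	};
 */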


/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
	int queue;
	u32 q_map;

	/* Enable all initialized TXs. */
	mvneta_mib_counters_clear(pp);
	q_map = 0;
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		if (txq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

	/* Enable all initialized RXQs. */
	q_map = 0;
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		if (rxq->descs != NULL)
			q_map |= (1 << queue);
	}
	mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
}

/* Stop the Ethernet port activity */
static void mvneta_port_down(struct mvneta_port *pp)
{
	u32 val;
	int count;

	/* Stop Rx port activity. Check port Rx activity. */
	val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK;

	/* Issue stop command for active channels only */
	if (val != 0)
		mvreg_write(pp, MVNETA_RXQ_CMD,
			    val << MVNETA_RXQ_DISABLE_SHIFT);

	/* Wait for all Rx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_RXQ_CMD);
	} while (val & 0xff);

	/* Stop Tx port activity. Check port Tx activity. Issue stop
	 * command for active channels only
	 */
	val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK;

	if (val != 0)
		mvreg_write(pp, MVNETA_TXQ_CMD,
			    (val << MVNETA_TXQ_DISABLE_SHIFT));

	/* Wait for all Tx activity to terminate. */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_DISABLE_TIMEOUT_MSEC) {
			netdev_warn(pp->dev,
				    "TIMEOUT for TX stopped status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		/* Check TX Command reg that all Txqs are stopped */
		val = mvreg_read(pp, MVNETA_TXQ_CMD);

	} while (val & 0xff);

	/* Double check to verify that TX FIFO is empty */
	count = 0;
	do {
		if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) {
			netdev_warn(pp->dev,
				    "TX FIFO empty timeout status=0x%08x\n",
				    val);
			break;
		}
		mdelay(1);

		val = mvreg_read(pp, MVNETA_PORT_STATUS);
	} while (!(val & MVNETA_TX_FIFO_EMPTY) &&
		 (val & MVNETA_TX_IN_PRGRS));

	udelay(200);
}

/* Enable the port by setting the port enable bit of the MAC control register */
static void mvneta_port_enable(struct mvneta_port *pp)
{
	u32 val;

	/* Enable port */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val |= MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
}

/* Disable the port and wait for about 200 usec before returning */
static void mvneta_port_disable(struct mvneta_port *pp)
{
	u32 val;

	/* Reset the Enable bit in the Serial Control Register */
	val = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
	val &= ~MVNETA_GMAC0_PORT_ENABLE;
	mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);

	udelay(200);
}

/* Multicast tables methods */

/* Set all entries in Unicast MAC Table; queue==-1 means reject all */
static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val);
}

/* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */
static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		val = 0;
	} else {
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val);
}

/* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */
static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue)
{
	int offset;
	u32 val;

	if (queue == -1) {
		memset(pp->mcast_count, 0, sizeof(pp->mcast_count));
		val = 0;
	} else {
		memset(pp->mcast_count, 1, sizeof(pp->mcast_count));
		val = 0x1 | (queue << 1);
		val |= (val << 24) | (val << 16) | (val << 8);
	}

	for (offset = 0; offset <= 0xfc; offset += 4)
		mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val);
}

/* This method sets defaults to the NETA port:
 * Clears interrupt Cause and Mask registers.
 * Clears all MAC tables.
 * Sets defaults to all registers.
 * Resets RX and TX descriptor rings.
 * Resets PHY.
 * This method can be called after mvneta_port_down() to return the port
 * settings to defaults.
 */
static void mvneta_defaults_set(struct mvneta_port *pp)
{
	int cpu;
	int queue;
	u32 val;

	/* Clear all Cause registers */
	mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);

	/* Mask all interrupts */
	mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
	mvreg_write(pp, MVNETA_INTR_ENABLE, 0);

	/* Enable MBUS Retry bit16 */
	mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

	/* Set CPU queue access map - all CPUs have access to all RX
	 * queues and to all TX queues
	 */
	for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++)
		mvreg_write(pp, MVNETA_CPU_MAP(cpu),
			    (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
			     MVNETA_CPU_TXQ_ACCESS_ALL_MASK));

	/* Reset RX and TX DMAs */
	mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
	mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET);

	/* Disable Legacy WRR, Disable EJP, Release from reset */
	mvreg_write(pp, MVNETA_TXQ_CMD_1, 0);
	for (queue = 0; queue < txq_number; queue++) {
		mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0);
		mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0);
	}

	mvreg_write(pp, MVNETA_PORT_TX_RESET, 0);
	mvreg_write(pp, MVNETA_PORT_RX_RESET, 0);

	/* Set Port Acceleration Mode */
	val = MVNETA_ACC_MODE_EXT;
	mvreg_write(pp, MVNETA_ACC_MODE, val);

	/* Update val of portCfg register accordingly with all RxQueue types */
	val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
	mvreg_write(pp, MVNETA_PORT_CONFIG, val);

	val = 0;
	mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val);
	mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64);

	/* Build PORT_SDMA_CONFIG_REG */
	val = 0;

	/* Default burst size */
	val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
	val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;

	/* Assign port SDMA configuration */
	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

	/* Enable PHY polling in hardware if not in fixed-link mode */
	if (!mvneta_port_is_fixed_link(pp)) {
		val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
		val |= MVNETA_PHY_POLLING_ENABLE;
		mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
	}

	mvneta_set_ucast_table(pp, -1);
	mvneta_set_special_mcast_table(pp, -1);
	mvneta_set_other_mcast_table(pp, -1);
}

/* Set unicast address */
static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
				  int queue)
{
	unsigned int unicast_reg;
	unsigned int tbl_offset;
	unsigned int reg_offset;

	/* Locate the Unicast table entry */
	last_nibble = (0xf & last_nibble);

	/* offset from unicast tbl base */
	tbl_offset = (last_nibble / 4) * 4;

	/* offset within the above reg */
	reg_offset = last_nibble % 4;

	unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset));

	if (queue == -1) {
		/* Clear accepts frame bit at specified unicast DA tbl entry */
		unicast_reg &= ~(0xff << (8 * reg_offset));
	} else {
		unicast_reg &= ~(0xff << (8 * reg_offset));
		unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset));
	}

	mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg);
}
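
/*
 * Worked example: for last_nibble = 0x5, tbl_offset = (5 / 4) * 4 = 4 and
 * reg_offset = 5 % 4 = 1, so byte 1 of the register at
 * MVNETA_DA_FILT_UCAST_BASE + 4 holds the filter entry for this address.
 */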

/* Set mac address */
static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
				int queue)
{
	unsigned int mac_h;
	unsigned int mac_l;

	if (queue != -1) {
		mac_l = (addr[4] << 8) | (addr[5]);
		mac_h = (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | (addr[3] << 0);

		mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
		mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);
	}

	/* Accept frames of this address */
	mvneta_set_ucast_addr(pp, addr[5], queue);
}
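
/*
 * Example: for the MAC address 00:11:22:33:44:55 the registers are
 * written as mac_h = 0x00112233 and mac_l = 0x00004455.
 */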

static int mvneta_write_hwaddr(struct udevice *dev)
{
	mvneta_mac_addr_set(dev_get_priv(dev),
			    ((struct eth_pdata *)dev_get_platdata(dev))->enetaddr,
			    rxq_def);

	return 0;
}

/* Handle rx descriptor fill by setting buf_cookie and buf_phys_addr */
static void mvneta_rx_desc_fill(struct mvneta_rx_desc *rx_desc,
				u32 phys_addr, u32 cookie)
{
	rx_desc->buf_cookie = cookie;
	rx_desc->buf_phys_addr = phys_addr;
}

/* Decrement sent descriptors counter */
static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp,
				     struct mvneta_tx_queue *txq,
				     int sent_desc)
{
	u32 val;

	/* Only 255 TX descriptors can be updated at once */
	while (sent_desc > 0xff) {
		val = 0xff << MVNETA_TXQ_DEC_SENT_SHIFT;
		mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
		sent_desc = sent_desc - 0xff;
	}

	val = sent_desc << MVNETA_TXQ_DEC_SENT_SHIFT;
	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
}

/* Get number of TX descriptors already sent by HW */
static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp,
					struct mvneta_tx_queue *txq)
{
	u32 val;
	int sent_desc;

	val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
	sent_desc = (val & MVNETA_TXQ_SENT_DESC_MASK) >>
		MVNETA_TXQ_SENT_DESC_SHIFT;

	return sent_desc;
}

/* Display more error info */
static void mvneta_rx_error(struct mvneta_port *pp,
			    struct mvneta_rx_desc *rx_desc)
{
	u32 status = rx_desc->status;

	if (!mvneta_rxq_desc_is_first_last(status)) {
		netdev_err(pp->dev,
			   "bad rx status %08x (buffer oversize), size=%d\n",
			   status, rx_desc->data_size);
		return;
	}

	switch (status & MVNETA_RXD_ERR_CODE_MASK) {
	case MVNETA_RXD_ERR_CRC:
		netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_OVERRUN:
		netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_LEN:
		netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	case MVNETA_RXD_ERR_RESOURCE:
		netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n",
			   status, rx_desc->data_size);
		break;
	}
}

static struct mvneta_rx_queue *mvneta_rxq_handle_get(struct mvneta_port *pp,
						     int rxq)
{
	return &pp->rxqs[rxq];
}


/* Drop packets received by the RXQ and free buffers */
static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
				 struct mvneta_rx_queue *rxq)
{
	int rx_done;

	rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq);
	if (rx_done)
		mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
}

/* Handle rxq fill: allocates rxq skbs; called when initializing a port */
static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
			   int num)
{
	int i;

	for (i = 0; i < num; i++) {
		u32 addr;

		/* U-Boot special: Fill in the rx buffer addresses */
		addr = buffer_loc.rx_buffers + (i * RX_BUFFER_SIZE);
		mvneta_rx_desc_fill(rxq->descs + i, addr, addr);
	}

	/* Add this number of RX descriptors as non occupied (ready to
	 * get packets)
	 */
	mvneta_rxq_non_occup_desc_add(pp, rxq, i);

	return 0;
}

/* Rx/Tx queue initialization/cleanup methods */

/* Create a specified RX queue */
static int mvneta_rxq_init(struct mvneta_port *pp,
			   struct mvneta_rx_queue *rxq)

{
	rxq->size = pp->rx_ring_size;

	/* Allocate memory for RX descriptors */
	rxq->descs_phys = (dma_addr_t)rxq->descs;
	if (rxq->descs == NULL)
		return -ENOMEM;

	WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));

	rxq->last_desc = rxq->size - 1;

	/* Set Rx descriptors queue starting address */
	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);

	/* Fill RXQ with buffers from RX pool */
	mvneta_rxq_buf_size_set(pp, rxq, RX_BUFFER_SIZE);
	mvneta_rxq_fill(pp, rxq, rxq->size);

	return 0;
}

/* Cleanup Rx queue */
static void mvneta_rxq_deinit(struct mvneta_port *pp,
			      struct mvneta_rx_queue *rxq)
{
	mvneta_rxq_drop_pkts(pp, rxq);

	rxq->descs = NULL;
	rxq->last_desc = 0;
	rxq->next_desc_to_proc = 0;
	rxq->descs_phys = 0;
}

/* Create and initialize a tx queue */
static int mvneta_txq_init(struct mvneta_port *pp,
			   struct mvneta_tx_queue *txq)
{
	txq->size = pp->tx_ring_size;

	/* Allocate memory for TX descriptors */
	txq->descs_phys = (dma_addr_t)txq->descs;
	if (txq->descs == NULL)
		return -ENOMEM;

	WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));

	txq->last_desc = txq->size - 1;

	/* Set maximum bandwidth for enabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);

	/* Set Tx descriptors queue starting address */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);

	return 0;
}

/* Free allocated resources when mvneta_txq_init() fails to allocate memory */
static void mvneta_txq_deinit(struct mvneta_port *pp,
			      struct mvneta_tx_queue *txq)
{
	txq->descs = NULL;
	txq->last_desc = 0;
	txq->next_desc_to_proc = 0;
	txq->descs_phys = 0;

	/* Set minimum bandwidth for disabled TXQs */
	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);

	/* Set Tx descriptors queue starting address and size */
	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
}

/* Cleanup all Tx queues */
static void mvneta_cleanup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++)
		mvneta_txq_deinit(pp, &pp->txqs[queue]);
}

/* Cleanup all Rx queues */
static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++)
		mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
}


/* Init all Rx queues */
static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < rxq_number; queue++) {
		int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create rxq=%d\n",
				   __func__, queue);
			mvneta_cleanup_rxqs(pp);
			return err;
		}
	}

	return 0;
}

/* Init all tx queues */
static int mvneta_setup_txqs(struct mvneta_port *pp)
{
	int queue;

	for (queue = 0; queue < txq_number; queue++) {
		int err = mvneta_txq_init(pp, &pp->txqs[queue]);
		if (err) {
			netdev_err(pp->dev, "%s: can't create txq=%d\n",
				   __func__, queue);
			mvneta_cleanup_txqs(pp);
			return err;
		}
	}

	return 0;
}

static void mvneta_start_dev(struct mvneta_port *pp)
{
	/* start the Rx/Tx activity */
	mvneta_port_enable(pp);
}

static void mvneta_adjust_link(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	struct phy_device *phydev = pp->phydev;
	int status_change = 0;

	if (mvneta_port_is_fixed_link(pp)) {
		debug("Using fixed link, skip link adjust\n");
		return;
	}

	if (phydev->link) {
		if ((pp->speed != phydev->speed) ||
		    (pp->duplex != phydev->duplex)) {
			u32 val;

			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
				 MVNETA_GMAC_CONFIG_GMII_SPEED |
				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
				 MVNETA_GMAC_AN_SPEED_EN |
				 MVNETA_GMAC_AN_DUPLEX_EN);

			if (phydev->duplex)
				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;

			if (phydev->speed == SPEED_1000)
				val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
			else
				val |= MVNETA_GMAC_CONFIG_MII_SPEED;

			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);

			pp->duplex = phydev->duplex;
			pp->speed = phydev->speed;
		}
	}

	if (phydev->link != pp->link) {
		if (!phydev->link) {
			pp->duplex = -1;
			pp->speed = 0;
		}

		pp->link = phydev->link;
		status_change = 1;
	}

	if (status_change) {
		if (phydev->link) {
			u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
			val |= (MVNETA_GMAC_FORCE_LINK_PASS |
				MVNETA_GMAC_FORCE_LINK_DOWN);
			mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
			mvneta_port_up(pp);
		} else {
			mvneta_port_down(pp);
		}
	}
}

static int mvneta_open(struct udevice *dev)
{
	struct mvneta_port *pp = dev_get_priv(dev);
	int ret;

	ret = mvneta_setup_rxqs(pp);
	if (ret)
		return ret;

	ret = mvneta_setup_txqs(pp);
	if (ret)
		return ret;

	mvneta_adjust_link(dev);

	mvneta_start_dev(pp);

	return 0;
}

/* Initialize hw */
static int mvneta_init2(struct mvneta_port *pp)
{
	int queue;

	/* Disable port */
	mvneta_port_disable(pp);

	/* Set port default values */
	mvneta_defaults_set(pp);

	pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
			   GFP_KERNEL);
	if (!pp->txqs)
		return -ENOMEM;

	/* U-Boot special: use preallocated area */
	pp->txqs[0].descs = buffer_loc.tx_descs;

	/* Initialize TX descriptor rings */
	for (queue = 0; queue < txq_number; queue++) {
		struct mvneta_tx_queue *txq = &pp->txqs[queue];
		txq->id = queue;
		txq->size = pp->tx_ring_size;
	}

	pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
			   GFP_KERNEL);
	if (!pp->rxqs) {
		kfree(pp->txqs);
		return -ENOMEM;
	}

	/* U-Boot special: use preallocated area */
	pp->rxqs[0].descs = buffer_loc.rx_descs;

	/* Create Rx descriptor rings */
	for (queue = 0; queue < rxq_number; queue++) {
		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
		rxq->id = queue;
		rxq->size = pp->rx_ring_size;
	}

	return 0;
}

/* platform glue : initialize decoding windows */

/*
 * Unlike the Armada 380, the Armada 3700 has two layers of decode windows
 * for the GBE:
 * the first layer is the GbE address window that resides inside the GBE
 * unit; the second layer is the fabric address window located in the
 * NIC400 (South Fabric).
 * To simplify the address decode configuration for Armada 3700, we bypass
 * the first layer of GBE decode windows by setting the first window to 4GB.
 */
static void mvneta_bypass_mbus_windows(struct mvneta_port *pp)
{
	/*
	 * Set window size to 4GB, to bypass GBE address decode, leave the
	 * work to MBUS decode window
	 */
	mvreg_write(pp, MVNETA_WIN_SIZE(0), MVNETA_WIN_SIZE_MASK);

	/* Enable GBE address decode window 0 by setting bit 0 to 0 */
	clrbits_le32(pp->base + MVNETA_BASE_ADDR_ENABLE,
		     MVNETA_BASE_ADDR_ENABLE_BIT);

	/* Set GBE address decode window 0 to full Access (read or write) */
	setbits_le32(pp->base + MVNETA_PORT_ACCESS_PROTECT,
		     MVNETA_PORT_ACCESS_PROTECT_WIN0_RW);
}

static void mvneta_conf_mbus_windows(struct mvneta_port *pp)
{
	const struct mbus_dram_target_info *dram;
	u32 win_enable;
	u32 win_protect;
	int i;

	dram = mvebu_mbus_dram_info();
	for (i = 0; i < 6; i++) {
		mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
		mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);

		if (i < 4)
			mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
	}

	win_enable = 0x3f;
	win_protect = 0;

	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;
		mvreg_write(pp, MVNETA_WIN_BASE(i), (cs->base & 0xffff0000) |
			    (cs->mbus_attr << 8) | dram->mbus_dram_target_id);

		mvreg_write(pp, MVNETA_WIN_SIZE(i),
			    (cs->size - 1) & 0xffff0000);

		win_enable &= ~(1 << i);
		win_protect |= 3 << (2 * i);
	}

	mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
}
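
/*
 * Illustrative window programming (the values are an assumption, not
 * taken from this file): a single DRAM chip-select at base 0x00000000,
 * size 1 GiB, attribute 0xe and target id 0x0 yields
 * MVNETA_WIN_BASE(0) = 0x00000e00 and MVNETA_WIN_SIZE(0) = 0x3fff0000,
 * with bit 0 cleared in win_enable to activate the window.
 */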

/* Power up the port */
static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
	u32 ctrl;

	/* MAC Cause register should be cleared */
	mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);

	ctrl = mvreg_read(pp, MVNETA_GMAC_CTRL_2);

	/* Even though it might look weird, when we're configured in
	 * SGMII or QSGMII mode, the RGMII bit needs to be set.
	 */
	switch (phy_mode) {
	case PHY_INTERFACE_MODE_QSGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_SGMII:
		mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
		ctrl |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
		ctrl |= MVNETA_GMAC2_PORT_RGMII;
		break;
	default:
		return -EINVAL;
	}

	/* Cancel Port Reset */
	ctrl &= ~MVNETA_GMAC2_PORT_RESET;
	mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl);

	while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
		MVNETA_GMAC2_PORT_RESET) != 0)
		continue;

	return 0;
}

/* Device initialization routine */
static int mvneta_init(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct mvneta_port *pp = dev_get_priv(dev);
	int err;

	pp->tx_ring_size = MVNETA_MAX_TXD;
	pp->rx_ring_size = MVNETA_MAX_RXD;

	err = mvneta_init2(pp);
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1404 | if (err < 0) { |
| 1405 | dev_err(&pdev->dev, "can't init eth hal\n"); |
| 1406 | return err; |
| 1407 | } |

	mvneta_mac_addr_set(pp, pdata->enetaddr, rxq_def);

	err = mvneta_port_power_up(pp, pp->phy_interface);
| 1412 | if (err < 0) { |
| 1413 | dev_err(dev, "can't power up port\n"); |
| 1414 | return err; |
| 1415 | } |
| 1416 | |
| 1417 | /* Call open() now as it needs to be done before running send() */ |
| 1418 | mvneta_open(dev); |
| 1419 | |
| 1420 | return 0; |
| 1421 | } |
| 1422 | |
| 1423 | /* U-Boot only functions follow here */ |
| 1424 | |
| 1425 | /* SMI / MDIO functions */ |
| 1426 | |
| 1427 | static int smi_wait_ready(struct mvneta_port *pp) |
| 1428 | { |
| 1429 | u32 timeout = MVNETA_SMI_TIMEOUT; |
| 1430 | u32 smi_reg; |
| 1431 | |
| 1432 | /* wait till the SMI is not busy */ |
| 1433 | do { |
| 1434 | /* read smi register */ |
| 1435 | smi_reg = mvreg_read(pp, MVNETA_SMI); |
| 1436 | if (timeout-- == 0) { |
| 1437 | printf("Error: SMI busy timeout\n"); |
| 1438 | return -EFAULT; |
| 1439 | } |
| 1440 | } while (smi_reg & MVNETA_SMI_BUSY); |
| 1441 | |
| 1442 | return 0; |
| 1443 | } |
| 1444 | |
| 1445 | /* |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1446 | * mvneta_mdio_read - miiphy_read callback function. |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1447 | * |
| 1448 | * Returns the 16-bit PHY register value, or a negative error code on failure |
| 1449 | */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1450 | static int mvneta_mdio_read(struct mii_dev *bus, int addr, int devad, int reg) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1451 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1452 | struct mvneta_port *pp = bus->priv; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1453 | u32 smi_reg; |
| 1454 | u32 timeout; |
| 1455 | |
| 1456 | /* check parameters */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1457 | if (addr > MVNETA_PHY_ADDR_MASK) { |
| 1458 | printf("Error: Invalid PHY address %d\n", addr); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1459 | return -EFAULT; |
| 1460 | } |
| 1461 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1462 | if (reg > MVNETA_PHY_REG_MASK) { |
| 1463 | printf("Error: Invalid register offset %d\n", reg); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1464 | return -EFAULT; |
| 1465 | } |
| 1466 | |
| 1467 | /* wait till the SMI is not busy */ |
| 1468 | if (smi_wait_ready(pp) < 0) |
| 1469 | return -EFAULT; |
| 1470 | |
| 1471 | /* fill the PHY address, register offset and read opcode */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1472 | smi_reg = (addr << MVNETA_SMI_DEV_ADDR_OFFS) |
| 1473 | | (reg << MVNETA_SMI_REG_ADDR_OFFS) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1474 | | MVNETA_SMI_OPCODE_READ; |
| 1475 | |
| 1476 | /* write the smi register */ |
| 1477 | mvreg_write(pp, MVNETA_SMI, smi_reg); |
| 1478 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1479 | /* wait till read value is ready */ |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1480 | timeout = MVNETA_SMI_TIMEOUT; |
| 1481 | |
| 1482 | do { |
| 1483 | /* read smi register */ |
| 1484 | smi_reg = mvreg_read(pp, MVNETA_SMI); |
| 1485 | if (timeout-- == 0) { |
| 1486 | printf("Error: SMI read ready timeout\n"); |
| 1487 | return -EFAULT; |
| 1488 | } |
| 1489 | } while (!(smi_reg & MVNETA_SMI_READ_VALID)); |
| 1490 | |
| 1491 | /* Wait for the data to update in the SMI register */ |
| 1492 | for (timeout = 0; timeout < MVNETA_SMI_TIMEOUT; timeout++) |
| 1493 | ; |
| 1494 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1495 | return mvreg_read(pp, MVNETA_SMI) & MVNETA_SMI_DATA_MASK; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1496 | } |
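| | |
| | /* |
| |  * Minimal usage sketch (illustrative only, compiled out): reading the |
| |  * PHY ID registers through this callback once the bus has been |
| |  * registered in mvneta_probe(). The helper name and the phyaddr |
| |  * parameter are assumptions for illustration; MII_PHYSID1/2 are |
| |  * expected to come in via <miiphy.h>/<linux/mii.h>. |
| |  */ |
| | #if 0 |
| | static void mvneta_example_read_phy_id(struct mii_dev *bus, int phyaddr) |
| | { |
| | 	/* devad is unused by this SMI bus, so pass MDIO_DEVAD_NONE */ |
| | 	int id1 = mvneta_mdio_read(bus, phyaddr, MDIO_DEVAD_NONE, MII_PHYSID1); |
| | 	int id2 = mvneta_mdio_read(bus, phyaddr, MDIO_DEVAD_NONE, MII_PHYSID2); |
| | |
| | 	if (id1 >= 0 && id2 >= 0) |
| | 		printf("PHY ID: 0x%04x%04x\n", id1, id2); |
| | } |
| | #endif |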
| 1497 | |
| 1498 | /* |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1499 | * mvneta_mdio_write - miiphy_write callback function. |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1500 | * |
| 1501 | * Returns 0 if the write succeeded, or -EFAULT on bad |
| 1502 | * parameters or SMI timeout |
| 1503 | */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1504 | static int mvneta_mdio_write(struct mii_dev *bus, int addr, int devad, int reg, |
| 1505 | u16 value) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1506 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1507 | struct mvneta_port *pp = bus->priv; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1508 | u32 smi_reg; |
| 1509 | |
| 1510 | /* check parameters */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1511 | if (addr > MVNETA_PHY_ADDR_MASK) { |
| 1512 | printf("Error: Invalid PHY address %d\n", addr); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1513 | return -EFAULT; |
| 1514 | } |
| 1515 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1516 | if (reg > MVNETA_PHY_REG_MASK) { |
| 1517 | printf("Error: Invalid register offset %d\n", reg); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1518 | return -EFAULT; |
| 1519 | } |
| 1520 | |
| 1521 | /* wait till the SMI is not busy */ |
| 1522 | if (smi_wait_ready(pp) < 0) |
| 1523 | return -EFAULT; |
| 1524 | |
| 1525 | /* fill the PHY address, register offset, write opcode and data */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1526 | smi_reg = value << MVNETA_SMI_DATA_OFFS; |
| 1527 | smi_reg |= (addr << MVNETA_SMI_DEV_ADDR_OFFS) |
| 1528 | | (reg << MVNETA_SMI_REG_ADDR_OFFS); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1529 | smi_reg &= ~MVNETA_SMI_OPCODE_READ; |
| 1530 | |
| 1531 | /* write the smi register */ |
| 1532 | mvreg_write(pp, MVNETA_SMI, smi_reg); |
| 1533 | |
| 1534 | return 0; |
| 1535 | } |
| 1536 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1537 | static int mvneta_start(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1538 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1539 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1540 | struct phy_device *phydev; |
| 1541 | |
| 1542 | mvneta_port_power_up(pp, pp->phy_interface); |
| 1543 | |
| 1544 | if (!pp->init || pp->link == 0) { |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1545 | if (mvneta_port_is_fixed_link(pp)) { |
| 1546 | u32 val; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1547 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1548 | pp->init = 1; |
| 1549 | pp->link = 1; |
| 1550 | mvneta_init(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1551 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1552 | val = MVNETA_GMAC_FORCE_LINK_UP | |
| 1553 | MVNETA_GMAC_IB_BYPASS_AN_EN | |
| 1554 | MVNETA_GMAC_SET_FC_EN | |
| 1555 | MVNETA_GMAC_ADVERT_FC_EN | |
| 1556 | MVNETA_GMAC_SAMPLE_TX_CFG_EN; |
| 1557 | |
| 1558 | if (pp->duplex) |
| 1559 | val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; |
| 1560 | |
| 1561 | if (pp->speed == SPEED_1000) |
| 1562 | val |= MVNETA_GMAC_CONFIG_GMII_SPEED; |
| 1563 | else if (pp->speed == SPEED_100) |
| 1564 | val |= MVNETA_GMAC_CONFIG_MII_SPEED; |
| 1565 | |
| 1566 | mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); |
| 1567 | } else { |
| 1568 | /* Set phy address of the port */ |
| 1569 | mvreg_write(pp, MVNETA_PHY_ADDR, pp->phyaddr); |
| 1570 | |
| 1571 | phydev = phy_connect(pp->bus, pp->phyaddr, dev, |
| 1572 | pp->phy_interface); |
Marek Behún | 075ccb1 | 2018-04-24 17:21:29 +0200 | [diff] [blame] | 1573 | if (!phydev) { |
| 1574 | printf("phy_connect failed\n"); |
| 1575 | return -ENODEV; |
| 1576 | } |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1577 | |
| 1578 | pp->phydev = phydev; |
| 1579 | phy_config(phydev); |
| 1580 | phy_startup(phydev); |
| 1581 | if (!phydev->link) { |
| 1582 | printf("%s: No link.\n", phydev->dev->name); |
| 1583 | return -1; |
| 1584 | } |
| 1585 | |
| 1586 | /* Full init on first call */ |
| 1587 | mvneta_init(dev); |
| 1588 | pp->init = 1; |
| 1589 | return 0; |
| 1590 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1591 | } |
| 1592 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1593 | /* On all subsequent calls, this is enough */ |
| 1594 | mvneta_port_up(pp); |
| 1595 | mvneta_port_enable(pp); |
| 1596 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1597 | return 0; |
| 1598 | } |
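| | |
| | /* |
| |  * For reference: the ethernet uclass drives this driver roughly as |
| |  * start() once, then send()/recv() repeatedly, then stop(), so the |
| |  * full initialization above runs only on the first start and later |
| |  * calls take the short mvneta_port_up()/mvneta_port_enable() path. |
| |  */ |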
| 1599 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1600 | static int mvneta_send(struct udevice *dev, void *packet, int length) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1601 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1602 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1603 | struct mvneta_tx_queue *txq = &pp->txqs[0]; |
| 1604 | struct mvneta_tx_desc *tx_desc; |
| 1605 | int sent_desc; |
| 1606 | u32 timeout = 0; |
| 1607 | |
| 1608 | /* Get a descriptor for the first part of the packet */ |
| 1609 | tx_desc = mvneta_txq_next_desc_get(txq); |
| 1610 | |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1611 | tx_desc->buf_phys_addr = (u32)(uintptr_t)packet; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1612 | tx_desc->data_size = length; |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1613 | flush_dcache_range((ulong)packet, |
| 1614 | (ulong)packet + ALIGN(length, PKTALIGN)); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1615 | |
| 1616 | /* First and Last descriptor */ |
| 1617 | tx_desc->command = MVNETA_TX_L4_CSUM_NOT | MVNETA_TXD_FLZ_DESC; |
| 1618 | mvneta_txq_pend_desc_add(pp, txq, 1); |
| 1619 | |
| 1620 | /* Wait for the packet to be sent (a deeper queue could speed this up) */ |
| 1621 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); |
| 1622 | while (!sent_desc) { |
| 1623 | if (timeout++ > 10000) { |
| 1624 | printf("timeout: packet not sent\n"); |
| 1625 | return -1; |
| 1626 | } |
| 1627 | sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); |
| 1628 | } |
| 1629 | |
| 1630 | /* txDone has increased - the HW sent the packet */ |
| 1631 | mvneta_txq_sent_desc_dec(pp, txq, sent_desc); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1632 | |
| 1633 | return 0; |
| 1634 | } |
| 1635 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1636 | static int mvneta_recv(struct udevice *dev, int flags, uchar **packetp) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1637 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1638 | struct mvneta_port *pp = dev_get_priv(dev); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1639 | int rx_done; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1640 | struct mvneta_rx_queue *rxq; |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1641 | int rx_bytes = 0; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1642 | |
| 1643 | /* get rx queue */ |
| 1644 | rxq = mvneta_rxq_handle_get(pp, rxq_def); |
| 1645 | rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1646 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1647 | if (rx_done) { |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1648 | struct mvneta_rx_desc *rx_desc; |
| 1649 | unsigned char *data; |
| 1650 | u32 rx_status; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1651 | |
| 1652 | /* |
| 1653 | * No cache invalidation needed here, since the descriptors are |
| 1654 | * located in an uncached memory region |
| 1655 | */ |
| 1656 | rx_desc = mvneta_rxq_next_desc_get(rxq); |
| 1657 | |
| 1658 | rx_status = rx_desc->status; |
| 1659 | if (!mvneta_rxq_desc_is_first_last(rx_status) || |
| 1660 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { |
| 1661 | mvneta_rx_error(pp, rx_desc); |
| 1662 | /* leave the descriptor untouched */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1663 | return -EIO; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1664 | } |
| 1665 | |
| 1666 | /* 2 bytes for the Marvell header, 4 bytes for the CRC */ |
| 1667 | rx_bytes = rx_desc->data_size - 6; |
| 1668 | |
| 1669 | /* hand the packet to the stack - skip the first 2 bytes */ |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1670 | data = (u8 *)(uintptr_t)rx_desc->buf_cookie + 2; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1671 | /* |
| 1672 | * No cache invalidation needed here, since the RX buffers are |
| 1673 | * located in an uncached memory region |
| 1674 | */ |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1675 | *packetp = data; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1676 | |
Jason Brown | c7bc183 | 2017-11-28 11:12:43 -0800 | [diff] [blame] | 1677 | /* |
| 1678 | * Only mark one descriptor as free |
| 1679 | * since only one was processed |
| 1680 | */ |
| 1681 | mvneta_rxq_desc_num_update(pp, rxq, 1, 1); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1682 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1683 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1684 | return rx_bytes; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1685 | } |
| 1686 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1687 | static int mvneta_probe(struct udevice *dev) |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1688 | { |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1689 | struct eth_pdata *pdata = dev_get_platdata(dev); |
| 1690 | struct mvneta_port *pp = dev_get_priv(dev); |
| 1691 | void *blob = (void *)gd->fdt_blob; |
Simon Glass | dd79d6e | 2017-01-17 16:52:55 -0700 | [diff] [blame] | 1692 | int node = dev_of_offset(dev); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1693 | struct mii_dev *bus; |
| 1694 | unsigned long addr; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1695 | void *bd_space; |
Konstantin Porotchkin | fb8e202 | 2017-02-16 13:52:27 +0200 | [diff] [blame] | 1696 | int ret; |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1697 | int fl_node; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1698 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1699 | /* |
| 1700 | * Allocate the buffer area for descriptors and RX buffers. This is |
| 1701 | * done only once for all interfaces, since only one interface can |
Chris Packham | 0f81d7a | 2016-08-29 20:54:02 +1200 | [diff] [blame] | 1702 | * be active at a time. Make this area DMA-safe by disabling the D-cache |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1703 | */ |
| 1704 | if (!buffer_loc.tx_descs) { |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1705 | u32 size; |
| 1706 | |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1707 | /* Align buffer area for descs and rx_buffers to 1MiB */ |
| 1708 | bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE); |
Rabeeh Khoury | 31ad3ce | 2018-06-19 21:36:50 +0300 | [diff] [blame] | 1709 | flush_dcache_range((ulong)bd_space, (ulong)bd_space + BD_SPACE); |
Stefan Roese | 6564d99 | 2016-05-19 18:09:17 +0200 | [diff] [blame] | 1710 | mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE, |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1711 | DCACHE_OFF); |
| 1712 | buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space; |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1713 | size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc), |
| 1714 | ARCH_DMA_MINALIGN); |
Rabeeh Khoury | f046bed | 2018-06-19 21:36:51 +0300 | [diff] [blame] | 1715 | memset(buffer_loc.tx_descs, 0, size); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1716 | buffer_loc.rx_descs = (struct mvneta_rx_desc *) |
Jon Nettleton | 543efd1 | 2018-05-30 08:52:29 +0300 | [diff] [blame] | 1717 | ((phys_addr_t)bd_space + size); |
| 1718 | size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc), |
| 1719 | ARCH_DMA_MINALIGN); |
| 1720 | buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1721 | } |
| 1722 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1723 | pp->base = (void __iomem *)pdata->iobase; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1724 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1725 | /* Configure MBUS address windows */ |
Simon Glass | 54cbcc8 | 2017-05-18 20:08:57 -0600 | [diff] [blame] | 1726 | if (device_is_compatible(dev, "marvell,armada-3700-neta")) |
Stefan Roese | 572be4a | 2016-05-19 17:46:36 +0200 | [diff] [blame] | 1727 | mvneta_bypass_mbus_windows(pp); |
| 1728 | else |
| 1729 | mvneta_conf_mbus_windows(pp); |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1730 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1731 | /* PHY interface is already decoded in mvneta_ofdata_to_platdata() */ |
| 1732 | pp->phy_interface = pdata->phy_interface; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1733 | |
Konstantin Porotchkin | 95d0af3 | 2017-02-16 13:52:28 +0200 | [diff] [blame] | 1734 | /* look up the optional 'fixed-link' subnode of the 'neta' node */ |
| 1735 | fl_node = fdt_subnode_offset(blob, node, "fixed-link"); |
| 1736 | if (fl_node != -FDT_ERR_NOTFOUND) { |
| 1737 | /* set phyaddr to an invalid value for fixed link */ |
| 1738 | pp->phyaddr = PHY_MAX_ADDR + 1; |
| 1739 | pp->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex"); |
| 1740 | pp->speed = fdtdec_get_int(blob, fl_node, "speed", 0); |
| 1741 | } else { |
| 1742 | /* Now read phyaddr from DT */ |
| 1743 | addr = fdtdec_get_int(blob, node, "phy", 0); |
| 1744 | addr = fdt_node_offset_by_phandle(blob, addr); |
| 1745 | pp->phyaddr = fdtdec_get_int(blob, addr, "reg", 0); |
| 1746 | } |
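| | |
| | /* |
| |  * Hedged example of the two device tree layouts parsed above (node |
| |  * name, unit address and PHY label are assumptions, not taken from |
| |  * a real .dts): |
| |  * |
| |  *   ethernet@70000 { |
| |  *           phy = <&phy0>;       <- PHY case: the 'reg' property of |
| |  *                                   the referenced node is phyaddr |
| |  *   }; |
| |  * |
| |  *   ethernet@70000 { |
| |  *           fixed-link {         <- fixed-link case: no PHY |
| |  *                   speed = <1000>; |
| |  *                   full-duplex; |
| |  *           }; |
| |  *   }; |
| |  */ |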
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1747 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1748 | bus = mdio_alloc(); |
| 1749 | if (!bus) { |
| 1750 | printf("Failed to allocate MDIO bus\n"); |
| 1751 | return -ENOMEM; |
| 1752 | } |
| 1753 | |
| 1754 | bus->read = mvneta_mdio_read; |
| 1755 | bus->write = mvneta_mdio_write; |
| 1756 | snprintf(bus->name, sizeof(bus->name), "%s", dev->name); |
| 1757 | bus->priv = (void *)pp; |
| 1758 | pp->bus = bus; |
| 1759 | |
Konstantin Porotchkin | fb8e202 | 2017-02-16 13:52:27 +0200 | [diff] [blame] | 1760 | ret = mdio_register(bus); |
| 1761 | if (ret) |
| 1762 | return ret; |
| 1763 | |
Simon Glass | fa4689a | 2019-12-06 21:41:35 -0700 | [diff] [blame] | 1764 | #if CONFIG_IS_ENABLED(DM_GPIO) |
Aditya Prayoga | c9fe02a | 2018-12-05 00:39:23 +0800 | [diff] [blame] | 1765 | gpio_request_by_name(dev, "phy-reset-gpios", 0, |
| 1766 | &pp->phy_reset_gpio, GPIOD_IS_OUT); |
| 1767 | |
| 1768 | if (dm_gpio_is_valid(&pp->phy_reset_gpio)) { |
| 1769 | dm_gpio_set_value(&pp->phy_reset_gpio, 1); |
| 1770 | mdelay(10); |
| 1771 | dm_gpio_set_value(&pp->phy_reset_gpio, 0); |
| 1772 | } |
| 1773 | #endif |
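| | |
| | /* |
| |  * Matching device tree snippet, as a hedged sketch (controller, pin |
| |  * and polarity are assumptions): the reset line is asserted for |
| |  * 10 ms and released before the network interface is enabled. |
| |  * |
| |  *   phy-reset-gpios = <&gpio0 19 GPIO_ACTIVE_HIGH>; |
| |  */ |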
| 1774 | |
Konstantin Porotchkin | fb8e202 | 2017-02-16 13:52:27 +0200 | [diff] [blame] | 1775 | return board_network_enable(bus); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1776 | } |
| 1777 | |
| 1778 | static void mvneta_stop(struct udevice *dev) |
| 1779 | { |
| 1780 | struct mvneta_port *pp = dev_get_priv(dev); |
| 1781 | |
| 1782 | mvneta_port_down(pp); |
| 1783 | mvneta_port_disable(pp); |
| 1784 | } |
| 1785 | |
| 1786 | static const struct eth_ops mvneta_ops = { |
| 1787 | .start = mvneta_start, |
| 1788 | .send = mvneta_send, |
| 1789 | .recv = mvneta_recv, |
| 1790 | .stop = mvneta_stop, |
Matt Pelland | 668a5f2 | 2018-03-27 13:18:25 -0400 | [diff] [blame] | 1791 | .write_hwaddr = mvneta_write_hwaddr, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1792 | }; |
| 1793 | |
| 1794 | static int mvneta_ofdata_to_platdata(struct udevice *dev) |
| 1795 | { |
| 1796 | struct eth_pdata *pdata = dev_get_platdata(dev); |
| 1797 | const char *phy_mode; |
| 1798 | |
Masahiro Yamada | a89b4de | 2020-07-17 14:36:48 +0900 | [diff] [blame] | 1799 | pdata->iobase = dev_read_addr(dev); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1800 | |
| 1801 | /* Get phy-mode / phy_interface from DT */ |
| 1802 | pdata->phy_interface = -1; |
Simon Glass | dd79d6e | 2017-01-17 16:52:55 -0700 | [diff] [blame] | 1803 | phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode", |
| 1804 | NULL); |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1805 | if (phy_mode) |
| 1806 | pdata->phy_interface = phy_get_interface_by_name(phy_mode); |
| 1807 | if (pdata->phy_interface == -1) { |
| 1808 | debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode); |
| 1809 | return -EINVAL; |
| 1810 | } |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1811 | |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1812 | return 0; |
Stefan Roese | 3e10381 | 2014-10-22 12:13:14 +0200 | [diff] [blame] | 1813 | } |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1814 | |
| 1815 | static const struct udevice_id mvneta_ids[] = { |
| 1816 | { .compatible = "marvell,armada-370-neta" }, |
| 1817 | { .compatible = "marvell,armada-xp-neta" }, |
Stefan Roese | 572be4a | 2016-05-19 17:46:36 +0200 | [diff] [blame] | 1818 | { .compatible = "marvell,armada-3700-neta" }, |
Stefan Roese | 05b38c1 | 2015-11-19 07:46:15 +0100 | [diff] [blame] | 1819 | { } |
| 1820 | }; |
| 1821 | |
| 1822 | U_BOOT_DRIVER(mvneta) = { |
| 1823 | .name = "mvneta", |
| 1824 | .id = UCLASS_ETH, |
| 1825 | .of_match = mvneta_ids, |
| 1826 | .ofdata_to_platdata = mvneta_ofdata_to_platdata, |
| 1827 | .probe = mvneta_probe, |
| 1828 | .ops = &mvneta_ops, |
| 1829 | .priv_auto_alloc_size = sizeof(struct mvneta_port), |
| 1830 | .platdata_auto_alloc_size = sizeof(struct eth_pdata), |
| 1831 | }; |