// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2020 Sartura Ltd.
 *
 * Author: Robert Marko <robert.marko@sartura.hr>
 *
 * Copyright (c) 2021 Toco Technologies FZE <contact@toco.ae>
 * Copyright (c) 2021 Gabor Juhos <j4g8y7@gmail.com>
 *
 * Qualcomm ESS EDMA ethernet driver
 */

#include <asm/io.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <errno.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <log.h>
#include <malloc.h>
#include <miiphy.h>
#include <net.h>
#include <reset.h>

#include "essedma.h"

#define EDMA_MAX_PKT_SIZE	(PKTSIZE_ALIGN + PKTALIGN)

#define EDMA_RXQ_ID	0
#define EDMA_TXQ_ID	0

/* descriptor ring */
struct edma_ring {
	u16 count;		/* number of descriptors in the ring */
	void *hw_desc;		/* descriptor ring virtual address */
	unsigned int hw_size;	/* hw descriptor ring length in bytes */
	dma_addr_t dma;		/* descriptor ring physical address */
	u16 head;		/* next descriptor to fill (producer index) */
	u16 tail;		/* next descriptor to clean (consumer index) */
};

struct ess_switch {
	phys_addr_t base;
	struct phy_device *phydev[ESS_PORTS_NUM];
	u32 phy_mask;
	ofnode ports_node;
	phy_interface_t port_wrapper_mode;
	int num_phy;
};

struct essedma_priv {
	phys_addr_t base;
	struct udevice *dev;
	struct clk ess_clk;
	struct reset_ctl ess_rst;
	struct udevice *mdio_dev;
	struct ess_switch esw;
	phys_addr_t psgmii_base;
	struct edma_ring tpd_ring;
	struct edma_ring rfd_ring;
};

static void esw_port_loopback_set(struct ess_switch *esw, int port,
				  bool enable)
{
	u32 t;

	t = readl(esw->base + ESS_PORT_LOOKUP_CTRL(port));
	if (enable)
		t |= ESS_PORT_LOOP_BACK_EN;
	else
		t &= ~ESS_PORT_LOOP_BACK_EN;
	writel(t, esw->base + ESS_PORT_LOOKUP_CTRL(port));
}

static void esw_port_loopback_set_all(struct ess_switch *esw, bool enable)
{
	int i;

	for (i = 1; i < ESS_PORTS_NUM; i++)
		esw_port_loopback_set(esw, i, enable);
}

static void ess_reset(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);

	reset_assert(&priv->ess_rst);
	mdelay(10);

	reset_deassert(&priv->ess_rst);
	mdelay(10);
}

void qca8075_ess_reset(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	struct phy_device *psgmii_phy;
	int i, val;
	/* Find the PSGMII PHY */
	psgmii_phy = priv->esw.phydev[priv->esw.num_phy - 1];

	/* Fix phy psgmii RX 20bit */
	phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x005b);

	/* Reset phy psgmii */
	phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x001b);

	/* Release reset phy psgmii */
	phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x005b);
	for (i = 0; i < 100; i++) {
		val = phy_read_mmd(psgmii_phy, MDIO_MMD_PMAPMD, 0x28);
		if (val & 0x1)
			break;
		mdelay(1);
	}
	if (i >= 100)
		printf("QCA807x PSGMII PLL_VCO_CALIB Not Ready\n");

	/*
	 * Check qca8075 psgmii calibration done end.
	 * Freeze phy psgmii RX CDR
	 */
	phy_write(psgmii_phy, MDIO_DEVAD_NONE, 0x1a, 0x2230);

	ess_reset(dev);

	/* Check ipq psgmii calibration done start */
	for (i = 0; i < 100; i++) {
		val = readl(priv->psgmii_base + PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_2);
		if (val & 0x1)
			break;
		mdelay(1);
	}
	if (i >= 100)
		printf("PSGMII PLL_VCO_CALIB Not Ready\n");
	/*
	 * Check ipq psgmii calibration done end.
	 * Release phy psgmii RX CDR
	 */
	phy_write(psgmii_phy, MDIO_DEVAD_NONE, 0x1a, 0x3230);

	/* Release phy psgmii RX 20bit */
	phy_write(psgmii_phy, MDIO_DEVAD_NONE, MII_BMCR, 0x005f);
}

#define PSGMII_ST_NUM_RETRIES	20
#define PSGMII_ST_PKT_COUNT	(4 * 1024)
#define PSGMII_ST_PKT_SIZE	1504

/*
 * Transmitting one byte over a 1000Mbps link requires 8 ns.
 * Additionally, use + 1 ns for safety to compensate for
 * latencies and such.
 */
#define PSGMII_ST_TRAFFIC_TIMEOUT_NS	\
	(PSGMII_ST_PKT_COUNT * PSGMII_ST_PKT_SIZE * (8 + 1))

#define PSGMII_ST_TRAFFIC_TIMEOUT	\
	DIV_ROUND_UP(PSGMII_ST_TRAFFIC_TIMEOUT_NS, 1000000)
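
/*
 * Worked example with the values above: 4096 packets of 1504 bytes
 * at (8 + 1) ns per byte gives 55,443,456 ns, which DIV_ROUND_UP()
 * turns into a 56 ms traffic timeout.
 */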

static bool psgmii_self_test_repeat;

static void psgmii_st_phy_power_down(struct phy_device *phydev)
{
	int val;

	val = phy_read(phydev, MDIO_DEVAD_NONE, MII_BMCR);
	val |= QCA807X_POWER_DOWN;
	phy_write(phydev, MDIO_DEVAD_NONE, MII_BMCR, val);
}

static void psgmii_st_phy_prepare(struct phy_device *phydev)
{
	int val;

	/* check phydev combo port */
	val = phy_read(phydev, MDIO_DEVAD_NONE,
		       QCA807X_CHIP_CONFIGURATION);
	if (val) {
		/* Select copper page */
		val |= QCA807X_MEDIA_PAGE_SELECT;
		phy_write(phydev, MDIO_DEVAD_NONE,
			  QCA807X_CHIP_CONFIGURATION, val);
	}

	/* Force no link by power down */
	psgmii_st_phy_power_down(phydev);

	/* Packet number (Undocumented) */
	phy_write_mmd(phydev, MDIO_MMD_AN, 0x8021, PSGMII_ST_PKT_COUNT);
	phy_write_mmd(phydev, MDIO_MMD_AN, 0x8062, PSGMII_ST_PKT_SIZE);

	/* Fix MDI status */
	val = phy_read(phydev, MDIO_DEVAD_NONE, QCA807X_FUNCTION_CONTROL);
	val &= ~QCA807X_MDI_CROSSOVER_MODE_MASK;
	val |= FIELD_PREP(QCA807X_MDI_CROSSOVER_MODE_MASK,
			  QCA807X_MDI_CROSSOVER_MODE_MANUAL_MDI);
	val &= ~QCA807X_POLARITY_REVERSAL;
	phy_write(phydev, MDIO_DEVAD_NONE, QCA807X_FUNCTION_CONTROL, val);
}

static void psgmii_st_phy_recover(struct phy_device *phydev)
{
	int val;

	/* Packet number (Undocumented) */
	phy_write_mmd(phydev, MDIO_MMD_AN, 0x8021, 0x0);

	/* Disable CRC checker and packet counter */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER);
	val &= ~QCA807X_MMD7_PACKET_COUNTER_SELFCLR;
	val &= ~QCA807X_MMD7_CRC_PACKET_COUNTER_EN;
	phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER, val);

	/* Disable traffic (Undocumented) */
	phy_write_mmd(phydev, MDIO_MMD_AN, 0x8020, 0x0);
}

static void psgmii_st_phy_start_traffic(struct phy_device *phydev)
{
	int val;

	/* Enable CRC checker and packet counter */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER);
	val |= QCA807X_MMD7_CRC_PACKET_COUNTER_EN;
	phy_write_mmd(phydev, MDIO_MMD_AN, QCA807X_MMD7_CRC_PACKET_COUNTER, val);

	/* Start traffic (Undocumented) */
	phy_write_mmd(phydev, MDIO_MMD_AN, 0x8020, 0xa000);
}

static bool psgmii_st_phy_check_counters(struct phy_device *phydev)
{
	u32 tx_ok;

	/*
	 * The number of test packets is limited to 65535 so
	 * only read the lower 16 bits of the counter.
	 */
	tx_ok = phy_read_mmd(phydev, MDIO_MMD_AN,
			     QCA807X_MMD7_VALID_EGRESS_COUNTER_2);

	return (tx_ok == PSGMII_ST_PKT_COUNT);
}

static void psgmii_st_phy_reset_loopback(struct phy_device *phydev)
{
	/* reset the PHY */
	phy_write(phydev, MDIO_DEVAD_NONE, MII_BMCR, 0x9000);

	/* enable loopback mode */
	phy_write(phydev, MDIO_DEVAD_NONE, MII_BMCR, 0x4140);
}

static inline bool psgmii_st_phy_link_is_up(struct phy_device *phydev)
{
	int val;

	val = phy_read(phydev, MDIO_DEVAD_NONE, QCA807X_PHY_SPECIFIC);
	return !!(val & QCA807X_PHY_SPECIFIC_LINK);
}

static bool psgmii_st_phy_wait(struct ess_switch *esw, u32 mask,
			       int retries, int delay,
			       bool (*check)(struct phy_device *))
{
	int i;

	for (i = 0; i < retries; i++) {
		int phy;

		for (phy = 0; phy < esw->num_phy - 1; phy++) {
			u32 phybit = BIT(phy);

			if (!(mask & phybit))
				continue;

			if (check(esw->phydev[phy]))
				mask &= ~phybit;
		}

		if (!mask)
			break;

		mdelay(delay);
	}

	return (!mask);
}

static bool psgmii_st_phy_wait_link(struct ess_switch *esw, u32 mask)
{
	return psgmii_st_phy_wait(esw, mask, 100, 10,
				  psgmii_st_phy_link_is_up);
}

static bool psgmii_st_phy_wait_tx_complete(struct ess_switch *esw, u32 mask)
{
	return psgmii_st_phy_wait(esw, mask, PSGMII_ST_TRAFFIC_TIMEOUT, 1,
				  psgmii_st_phy_check_counters);
}

static bool psgmii_st_run_test_serial(struct ess_switch *esw)
{
	bool result = true;
	int i;

	for (i = 0; i < esw->num_phy - 1; i++) {
		struct phy_device *phydev = esw->phydev[i];

		psgmii_st_phy_reset_loopback(phydev);

		psgmii_st_phy_wait_link(esw, BIT(i));

		psgmii_st_phy_start_traffic(phydev);

		/* wait for the traffic to complete */
		result &= psgmii_st_phy_wait_tx_complete(esw, BIT(i));

		/* Power down */
		psgmii_st_phy_power_down(phydev);

		if (!result)
			break;
	}

	return result;
}

static bool psgmii_st_run_test_parallel(struct ess_switch *esw)
{
	bool result;
	int i;

	/* enable loopback mode on all PHYs */
	for (i = 0; i < esw->num_phy - 1; i++)
		psgmii_st_phy_reset_loopback(esw->phydev[i]);

	psgmii_st_phy_wait_link(esw, esw->phy_mask);

	/* start traffic on all PHYs in parallel */
	for (i = 0; i < esw->num_phy - 1; i++)
		psgmii_st_phy_start_traffic(esw->phydev[i]);

	/* wait for the traffic to complete on all PHYs */
	result = psgmii_st_phy_wait_tx_complete(esw, esw->phy_mask);

	/* Power down all PHYs */
	for (i = 0; i < esw->num_phy - 1; i++)
		psgmii_st_phy_power_down(esw->phydev[i]);

	return result;
}

struct psgmii_st_stats {
	int succeed;
	int failed;
	int failed_max;
	int failed_cont;
};

static void psgmii_st_update_stats(struct psgmii_st_stats *stats,
				   bool success)
{
	if (success) {
		stats->succeed++;
		stats->failed_cont = 0;
		return;
	}

	stats->failed++;
	stats->failed_cont++;
	if (stats->failed_max < stats->failed_cont)
		stats->failed_max = stats->failed_cont;
}

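/*
 * Run the PSGMII self-test: every LAN PHY is placed into loopback and
 * asked to generate PSGMII_ST_PKT_COUNT test packets, and the PHY
 * egress counters are checked to verify that all of them made it
 * through the PSGMII link. On failure the whole sequence (including
 * an ESS reset) is retried up to PSGMII_ST_NUM_RETRIES times.
 */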
static void psgmii_self_test(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	struct ess_switch *esw = &priv->esw;
	struct psgmii_st_stats stats;
	bool result = false;
	unsigned long tm;
	int i;

	memset(&stats, 0, sizeof(stats));

	tm = get_timer(0);

	for (i = 0; i < esw->num_phy - 1; i++)
		psgmii_st_phy_prepare(esw->phydev[i]);

	for (i = 0; i < PSGMII_ST_NUM_RETRIES; i++) {
		qca8075_ess_reset(dev);

		/* enable loopback mode on the switch's ports */
		esw_port_loopback_set_all(esw, true);

		/* run the test on each PHY individually, one after the other */
		result = psgmii_st_run_test_serial(esw);

		if (result) {
			/* run the test on all PHYs in parallel */
			result = psgmii_st_run_test_parallel(esw);
		}
		psgmii_st_update_stats(&stats, result);

		if (psgmii_self_test_repeat)
			continue;

		if (result)
			break;
	}

	for (i = 0; i < esw->num_phy - 1; i++) {
		/* Recover the configuration */
		psgmii_st_phy_recover(esw->phydev[i]);

		/* Disable loopback */
		phy_write(esw->phydev[i], MDIO_DEVAD_NONE,
			  QCA807X_FUNCTION_CONTROL, 0x6860);
		phy_write(esw->phydev[i], MDIO_DEVAD_NONE, MII_BMCR, 0x9040);
	}

	/* disable loopback mode on the switch's ports */
	esw_port_loopback_set_all(esw, false);

	tm = get_timer(tm);
	dev_dbg(priv->dev, "\nPSGMII self-test: succeed %d, failed %d (max %d), duration %lu.%03lu secs\n",
		stats.succeed, stats.failed, stats.failed_max,
		tm / 1000, tm % 1000);
}

static int ess_switch_disable_lookup(struct ess_switch *esw)
{
	int val;
	int i;

	/* Disable port lookup for all ports */
	for (i = 0; i < ESS_PORTS_NUM; i++) {
		int ess_port_vid;

		val = readl(esw->base + ESS_PORT_LOOKUP_CTRL(i));
		val &= ~ESS_PORT_VID_MEM_MASK;

		switch (i) {
		case 0:
			fallthrough;
		case 5:
			/* CPU, WAN port -> nothing */
			ess_port_vid = 0;
			break;
		case 1 ... 4:
			/* LAN ports -> all other LAN ports */
			ess_port_vid = GENMASK(4, 1);
			ess_port_vid &= ~BIT(i);
			break;
		default:
			return -EINVAL;
		}

		val |= FIELD_PREP(ESS_PORT_VID_MEM_MASK, ess_port_vid);

		writel(val, esw->base + ESS_PORT_LOOKUP_CTRL(i));
	}

	/* Set magic value for the global forwarding register 1 */
	writel(0x3e3e3e, esw->base + ESS_GLOBAL_FW_CTRL1);

	return 0;
}

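/*
 * Port membership set up by ess_switch_enable_lookup() (bit N of the
 * VID member field = port N): the CPU port 0 forwards to ports 1-5
 * (0x3e), each LAN port 1-4 forwards to the CPU port and the other
 * LAN ports, and the WAN port 5 forwards to the CPU port only (0x01).
 */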
static int ess_switch_enable_lookup(struct ess_switch *esw)
{
	int val;
	int i;

	/* Enable port lookup for all ports */
	for (i = 0; i < ESS_PORTS_NUM; i++) {
		int ess_port_vid;

		val = readl(esw->base + ESS_PORT_LOOKUP_CTRL(i));
		val &= ~ESS_PORT_VID_MEM_MASK;

		switch (i) {
		case 0:
			/* CPU port -> all other ports */
			ess_port_vid = GENMASK(5, 1);
			break;
		case 1 ... 4:
			/* LAN ports -> CPU and all other LAN ports */
			ess_port_vid = GENMASK(4, 0);
			ess_port_vid &= ~BIT(i);
			break;
		case 5:
			/* WAN port -> CPU port only */
			ess_port_vid = BIT(0);
			break;
		default:
			return -EINVAL;
		}

		val |= FIELD_PREP(ESS_PORT_VID_MEM_MASK, ess_port_vid);

		writel(val, esw->base + ESS_PORT_LOOKUP_CTRL(i));
	}

	/* Set magic value for the global forwarding register 1 */
	writel(0x3f3f3f, esw->base + ESS_GLOBAL_FW_CTRL1);

	return 0;
}

static void ess_switch_init(struct ess_switch *esw)
{
	int val = 0;
	int i;

	/* Set magic value for the global forwarding register 1 */
	writel(0x3e3e3e, esw->base + ESS_GLOBAL_FW_CTRL1);

	/* Set 1000M speed, full duplex and RX/TX flow control for the CPU port */
	val &= ~ESS_PORT_SPEED_MASK;
	val |= FIELD_PREP(ESS_PORT_SPEED_MASK, ESS_PORT_SPEED_1000);
	val |= ESS_PORT_DUPLEX_MODE;
	val |= ESS_PORT_TX_FLOW_EN;
	val |= ESS_PORT_RX_FLOW_EN;

	writel(val, esw->base + ESS_PORT0_STATUS);

	/* Disable port lookup for all ports */
	for (i = 0; i < ESS_PORTS_NUM; i++) {
		val = readl(esw->base + ESS_PORT_LOOKUP_CTRL(i));
		val &= ~ESS_PORT_VID_MEM_MASK;

		writel(val, esw->base + ESS_PORT_LOOKUP_CTRL(i));
	}

	/* Set HOL settings for all ports */
	for (i = 0; i < ESS_PORTS_NUM; i++) {
		val = 0;

		val |= FIELD_PREP(EG_PORT_QUEUE_NUM_MASK, 30);
		if (i == 0 || i == 5) {
			val |= FIELD_PREP(EG_PRI5_QUEUE_NUM_MASK, 4);
			val |= FIELD_PREP(EG_PRI4_QUEUE_NUM_MASK, 4);
		}
		val |= FIELD_PREP(EG_PRI3_QUEUE_NUM_MASK, 4);
		val |= FIELD_PREP(EG_PRI2_QUEUE_NUM_MASK, 4);
		val |= FIELD_PREP(EG_PRI1_QUEUE_NUM_MASK, 4);
		val |= FIELD_PREP(EG_PRI0_QUEUE_NUM_MASK, 4);

		writel(val, esw->base + ESS_PORT_HOL_CTRL0(i));

		val = readl(esw->base + ESS_PORT_HOL_CTRL1(i));
		val &= ~ESS_ING_BUF_NUM_0_MASK;
		val |= FIELD_PREP(ESS_ING_BUF_NUM_0_MASK, 6);

		writel(val, esw->base + ESS_PORT_HOL_CTRL1(i));
	}

	/* Give the switch some time to settle */
	mdelay(1);

	/* Enable the RX and TX MACs */
	val = readl(esw->base + ESS_PORT0_STATUS);
	val |= ESS_PORT_TXMAC_EN;
	val |= ESS_PORT_RXMAC_EN;

	writel(val, esw->base + ESS_PORT0_STATUS);

	/* Set magic value for the global forwarding register 1 */
	writel(0x7f7f7f, esw->base + ESS_GLOBAL_FW_CTRL1);
}

static int essedma_of_phy(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	struct ess_switch *esw = &priv->esw;
	int num_phy = 0, ret = 0;
	ofnode node;
	int i;

	ofnode_for_each_subnode(node, esw->ports_node) {
		struct ofnode_phandle_args phandle_args;
		struct phy_device *phydev;
		u32 phy_addr;

		if (ofnode_is_enabled(node)) {
			if (ofnode_parse_phandle_with_args(node, "phy-handle", NULL, 0, 0,
							   &phandle_args)) {
				dev_dbg(priv->dev, "Failed to find phy-handle\n");
				return -ENODEV;
			}

			ret = ofnode_read_u32(phandle_args.node, "reg", &phy_addr);
			if (ret) {
				dev_dbg(priv->dev, "Missing reg property in PHY node %s\n",
					ofnode_get_name(phandle_args.node));
				return ret;
			}

			phydev = dm_mdio_phy_connect(priv->mdio_dev, phy_addr,
						     dev, priv->esw.port_wrapper_mode);
			if (!phydev) {
				dev_dbg(priv->dev, "Failed to find phy on addr %d\n", phy_addr);
				return -ENODEV;
			}

			phydev->node = phandle_args.node;
			ret = phy_config(phydev);

			esw->phydev[num_phy] = phydev;

			num_phy++;
		}
	}

	esw->num_phy = num_phy;

	for (i = 0; i < esw->num_phy - 1; i++)
		esw->phy_mask |= BIT(i);

	return ret;
}

static int essedma_of_switch(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	int port_wrapper_mode = -1;

	priv->esw.ports_node = ofnode_find_subnode(dev_ofnode(dev), "ports");
	if (!ofnode_valid(priv->esw.ports_node)) {
		printf("Failed to find ports node\n");
		return -EINVAL;
	}

	port_wrapper_mode = ofnode_read_phy_mode(priv->esw.ports_node);
	if (port_wrapper_mode == -1)
		return -EINVAL;

	priv->esw.port_wrapper_mode = port_wrapper_mode;

	return essedma_of_phy(dev);
}

static void ipq40xx_edma_start_rx_tx(struct essedma_priv *priv)
{
	volatile u32 data;

	/* enable RX queues */
	data = readl(priv->base + EDMA_REG_RXQ_CTRL);
	data |= EDMA_RXQ_CTRL_EN;
	writel(data, priv->base + EDMA_REG_RXQ_CTRL);

	/* enable TX queues */
	data = readl(priv->base + EDMA_REG_TXQ_CTRL);
	data |= EDMA_TXQ_CTRL_TXQ_EN;
	writel(data, priv->base + EDMA_REG_TXQ_CTRL);
}
/*
 * ipq40xx_edma_init_desc()
 * Update the descriptor ring sizes and the buffer and
 * producer/consumer indices.
 */
static void ipq40xx_edma_init_desc(struct essedma_priv *priv)
{
	struct edma_ring *rfd_ring;
	struct edma_ring *etdr;
	volatile u32 data = 0;
	u16 hw_cons_idx = 0;

	/* Set the base address of every TPD ring. */
	etdr = &priv->tpd_ring;

	/* Update TX descriptor ring base address. */
	writel((u32)(etdr->dma & 0xffffffff),
	       priv->base + EDMA_REG_TPD_BASE_ADDR_Q(EDMA_TXQ_ID));
	data = readl(priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID));

	/* Calculate hardware consumer index for Tx. */
	hw_cons_idx = FIELD_GET(EDMA_TPD_CONS_IDX_MASK, data);
	etdr->head = hw_cons_idx;
	etdr->tail = hw_cons_idx;
	data &= ~EDMA_TPD_PROD_IDX_MASK;
	data |= hw_cons_idx;

	/* Update producer index for Tx. */
	writel(data, priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID));

	/* Update SW consumer index register for Tx. */
	writel(hw_cons_idx,
	       priv->base + EDMA_REG_TX_SW_CONS_IDX_Q(EDMA_TXQ_ID));

	/* Set TPD ring size. */
	writel((u32)(etdr->count & EDMA_TPD_RING_SIZE_MASK),
	       priv->base + EDMA_REG_TPD_RING_SIZE);

	/* Configure Rx ring. */
	rfd_ring = &priv->rfd_ring;

	/* Update Receive Free descriptor ring base address. */
	writel((u32)(rfd_ring->dma & 0xffffffff),
	       priv->base + EDMA_REG_RFD_BASE_ADDR_Q(EDMA_RXQ_ID));
	data = readl(priv->base + EDMA_REG_RFD_BASE_ADDR_Q(EDMA_RXQ_ID));

	/* Update RFD ring size and RX buffer size. */
	data = (rfd_ring->count & EDMA_RFD_RING_SIZE_MASK)
		<< EDMA_RFD_RING_SIZE_SHIFT;
	data |= (EDMA_MAX_PKT_SIZE & EDMA_RX_BUF_SIZE_MASK)
		<< EDMA_RX_BUF_SIZE_SHIFT;
	writel(data, priv->base + EDMA_REG_RX_DESC0);

	/* Disable the TX FIFO low and high watermarks */
	writel(0, priv->base + EDMA_REG_TXF_WATER_MARK);

	/* Load all of the base addresses above */
	data = readl(priv->base + EDMA_REG_TX_SRAM_PART);
	data |= 1 << EDMA_LOAD_PTR_SHIFT;
	writel(data, priv->base + EDMA_REG_TX_SRAM_PART);
}

static void ipq40xx_edma_init_rfd_ring(struct essedma_priv *priv)
{
	struct edma_ring *erdr = &priv->rfd_ring;
	struct edma_rfd *rfds = erdr->hw_desc;
	int i;

	for (i = 0; i < erdr->count; i++)
		rfds[i].buffer_addr = virt_to_phys(net_rx_packets[i]);

	flush_dcache_range(erdr->dma, erdr->dma + erdr->hw_size);

	/* setup producer index */
	erdr->head = erdr->count - 1;
	writel(erdr->head, priv->base + EDMA_REG_RFD_IDX_Q(EDMA_RXQ_ID));
}

static void ipq40xx_edma_configure(struct essedma_priv *priv)
{
	u32 tmp;
	int i;

	/* Set RSS type */
	writel(IPQ40XX_EDMA_RSS_TYPE_NONE, priv->base + EDMA_REG_RSS_TYPE);

	/*
	 * Configure the RSS indirection table.
	 * All 128 hash values are configured in the following
	 * pattern: hash {0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
	 * and so on.
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		writel(EDMA_RSS_IDT_VALUE, priv->base + EDMA_REG_RSS_IDT(i));

	/* Set RFD burst number */
	tmp = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);

	/* Set RFD prefetch threshold */
	tmp |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);

	/* Set the RFD low threshold in the host ring to generate an interrupt */
	tmp |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
	writel(tmp, priv->base + EDMA_REG_RX_DESC1);

	/* configure reception control data. */

	/* Set Rx FIFO threshold to start to DMA data to host */
	tmp = EDMA_FIFO_THRESH_128_BYTE;

	/* Set RX remove vlan bit */
	tmp |= EDMA_RXQ_CTRL_RMV_VLAN;
	writel(tmp, priv->base + EDMA_REG_RXQ_CTRL);

	/* Configure transmission control data */
	tmp = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
	tmp |= EDMA_TXQ_CTRL_TPD_BURST_EN;
	tmp |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
	writel(tmp, priv->base + EDMA_REG_TXQ_CTRL);
}

static void ipq40xx_edma_stop_rx_tx(struct essedma_priv *priv)
{
	volatile u32 data;

	data = readl(priv->base + EDMA_REG_RXQ_CTRL);
	data &= ~EDMA_RXQ_CTRL_EN;
	writel(data, priv->base + EDMA_REG_RXQ_CTRL);
	data = readl(priv->base + EDMA_REG_TXQ_CTRL);
	data &= ~EDMA_TXQ_CTRL_TXQ_EN;
	writel(data, priv->base + EDMA_REG_TXQ_CTRL);
}

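/*
 * Every Rx buffer starts with an EDMA_RRD_SIZE bytes long receive
 * return descriptor (RRD) written by the hardware; the actual frame
 * follows it and its length is reported in the rrd6 field.
 */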
static int ipq40xx_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	struct edma_ring *erdr = &priv->rfd_ring;
	struct edma_rrd *rrd;
	u32 hw_tail;
	u8 *rx_pkt;

	hw_tail = readl(priv->base + EDMA_REG_RFD_IDX_Q(EDMA_RXQ_ID));
	hw_tail = FIELD_GET(EDMA_RFD_CONS_IDX_MASK, hw_tail);

	if (hw_tail == erdr->tail)
		return -EAGAIN;

	rx_pkt = net_rx_packets[erdr->tail];
	invalidate_dcache_range((unsigned long)rx_pkt,
				(unsigned long)(rx_pkt + EDMA_MAX_PKT_SIZE));

	rrd = (struct edma_rrd *)rx_pkt;

	/* Check if RRD is valid */
	if (!(rrd->rrd7 & EDMA_RRD7_DESC_VALID))
		return 0;

	*packetp = rx_pkt + EDMA_RRD_SIZE;

	/* get the packet size */
	return rrd->rrd6;
}

static int ipq40xx_eth_free_pkt(struct udevice *dev, uchar *packet,
				int length)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	struct edma_ring *erdr;

	erdr = &priv->rfd_ring;

	/* Update the producer index */
	writel(erdr->head, priv->base + EDMA_REG_RFD_IDX_Q(EDMA_RXQ_ID));

	erdr->head++;
	if (erdr->head == erdr->count)
		erdr->head = 0;

	/* Update the consumer index */
	erdr->tail++;
	if (erdr->tail == erdr->count)
		erdr->tail = 0;

	writel(erdr->tail,
	       priv->base + EDMA_REG_RX_SW_CONS_IDX_Q(EDMA_RXQ_ID));

	return 0;
}

static int ipq40xx_eth_start(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);

	ipq40xx_edma_init_rfd_ring(priv);

	ipq40xx_edma_start_rx_tx(priv);
	ess_switch_enable_lookup(&priv->esw);

	return 0;
}

/*
 * One TPD would be enough for sending a packet, however, because the
 * minimum cache line size is larger than the size of a TPD, it is not
 * possible to flush only one at a time. To overcome this limitation,
 * multiple TPDs are used for sending a single packet.
 */
#define EDMA_TPDS_PER_PACKET	4
#define EDMA_TPD_MIN_BYTES	4
#define EDMA_MIN_PKT_SIZE	(EDMA_TPDS_PER_PACKET * EDMA_TPD_MIN_BYTES)

#define EDMA_TX_COMPLETE_TIMEOUT	1000000
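
/*
 * Example: a 62-byte frame is queued as four TPDs carrying 4, 4, 4 and
 * 50 bytes; only the last TPD has the EOP bit set, so the hardware
 * still transmits it as a single packet.
 */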

static int ipq40xx_eth_send(struct udevice *dev, void *packet, int length)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	struct edma_tpd *first_tpd;
	struct edma_tpd *tpds;
	int i;

	if (length < EDMA_MIN_PKT_SIZE)
		return 0;

	flush_dcache_range((unsigned long)(packet),
			   (unsigned long)(packet) +
			   roundup(length, ARCH_DMA_MINALIGN));

	tpds = priv->tpd_ring.hw_desc;
	for (i = 0; i < EDMA_TPDS_PER_PACKET; i++) {
		struct edma_tpd *tpd;
		void *frag;

		frag = packet + (i * EDMA_TPD_MIN_BYTES);

		/* get the next TPD */
		tpd = &tpds[priv->tpd_ring.head];
		if (i == 0)
			first_tpd = tpd;

		/* update the software index */
		priv->tpd_ring.head++;
		if (priv->tpd_ring.head == priv->tpd_ring.count)
			priv->tpd_ring.head = 0;

		tpd->svlan_tag = 0;
		tpd->addr = virt_to_phys(frag);
		tpd->word3 = EDMA_PORT_ENABLE_ALL << EDMA_TPD_PORT_BITMAP_SHIFT;

		if (i < (EDMA_TPDS_PER_PACKET - 1)) {
			tpd->len = EDMA_TPD_MIN_BYTES;
			tpd->word1 = 0;
		} else {
			tpd->len = length;
			tpd->word1 = 1 << EDMA_TPD_EOP_SHIFT;
		}

		length -= EDMA_TPD_MIN_BYTES;
	}

	/* make sure that memory writing completes */
	wmb();

	flush_dcache_range((unsigned long)first_tpd,
			   (unsigned long)first_tpd +
			   EDMA_TPDS_PER_PACKET * sizeof(struct edma_tpd));

	/* update the TX producer index */
	writel(priv->tpd_ring.head,
	       priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID));

	/* Wait for TX DMA completion */
	for (i = 0; i < EDMA_TX_COMPLETE_TIMEOUT; i++) {
		u32 r, prod, cons;

		r = readl(priv->base + EDMA_REG_TPD_IDX_Q(EDMA_TXQ_ID));
		prod = FIELD_GET(EDMA_TPD_PROD_IDX_MASK, r);
		cons = FIELD_GET(EDMA_TPD_CONS_IDX_MASK, r);

		if (cons == prod)
			break;

		udelay(1);
	}

	if (i == EDMA_TX_COMPLETE_TIMEOUT)
		printf("TX timeout: packet not sent!\n");

	/* update the software TX consumer index register */
	writel(priv->tpd_ring.head,
	       priv->base + EDMA_REG_TX_SW_CONS_IDX_Q(EDMA_TXQ_ID));

	return 0;
}

static void ipq40xx_eth_stop(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);

	ess_switch_disable_lookup(&priv->esw);
	ipq40xx_edma_stop_rx_tx(priv);
}

static void ipq40xx_edma_free_ring(struct edma_ring *ring)
{
	free(ring->hw_desc);
}

/*
 * Free Tx and Rx rings
 */
static void ipq40xx_edma_free_rings(struct essedma_priv *priv)
{
	ipq40xx_edma_free_ring(&priv->tpd_ring);
	ipq40xx_edma_free_ring(&priv->rfd_ring);
}

/*
 * ipq40xx_edma_alloc_ring()
 * allocate edma ring descriptors.
 */
static int ipq40xx_edma_alloc_ring(struct edma_ring *erd,
				   unsigned int desc_size)
{
	erd->head = 0;
	erd->tail = 0;

	/* Alloc HW descriptors */
	erd->hw_size = roundup(desc_size * erd->count,
			       ARCH_DMA_MINALIGN);

	erd->hw_desc = memalign(CONFIG_SYS_CACHELINE_SIZE, erd->hw_size);
	if (!erd->hw_desc)
		return -ENOMEM;

	memset(erd->hw_desc, 0, erd->hw_size);
	erd->dma = virt_to_phys(erd->hw_desc);

	return 0;
}

/*
 * ipq40xx_edma_alloc_tx_rx_rings()
 */
static int ipq40xx_edma_alloc_tx_rx_rings(struct essedma_priv *priv)
{
	int ret;

	ret = ipq40xx_edma_alloc_ring(&priv->tpd_ring,
				      sizeof(struct edma_tpd));
	if (ret)
		return ret;

	ret = ipq40xx_edma_alloc_ring(&priv->rfd_ring,
				      sizeof(struct edma_rfd));
	if (ret)
		goto err_free_tpd;

	return 0;

err_free_tpd:
	ipq40xx_edma_free_ring(&priv->tpd_ring);
	return ret;
}

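/*
 * The MAC address is split across two registers: REG_MAC_CTRL1 takes
 * the two most significant bytes and REG_MAC_CTRL0 the remaining four,
 * e.g. 00:11:22:33:44:55 is written as 0x0011 and 0x22334455.
 */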
static int ipq40xx_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct essedma_priv *priv = dev_get_priv(dev);
	unsigned char *mac = pdata->enetaddr;
	u32 mac_lo, mac_hi;

	mac_hi = ((u32)mac[0]) << 8 | (u32)mac[1];
	mac_lo = ((u32)mac[2]) << 24 | ((u32)mac[3]) << 16 |
		 ((u32)mac[4]) << 8 | (u32)mac[5];

	writel(mac_lo, priv->base + REG_MAC_CTRL0);
	writel(mac_hi, priv->base + REG_MAC_CTRL1);

	return 0;
}

static int edma_init(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	int ret;

	priv->tpd_ring.count = IPQ40XX_EDMA_TX_RING_SIZE;
	priv->rfd_ring.count = PKTBUFSRX;

	ret = ipq40xx_edma_alloc_tx_rx_rings(priv);
	if (ret)
		return -ENOMEM;

	ipq40xx_edma_stop_rx_tx(priv);

	/* Configure EDMA. */
	ipq40xx_edma_configure(priv);

	/* Configure descriptor rings */
	ipq40xx_edma_init_desc(priv);

	ess_switch_disable_lookup(&priv->esw);

	return 0;
}

static int essedma_probe(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);
	int ret;

	priv->dev = dev;

	priv->base = dev_read_addr_name(dev, "edma");
	if (priv->base == FDT_ADDR_T_NONE)
		return -EINVAL;

	priv->psgmii_base = dev_read_addr_name(dev, "psgmii_phy");
	if (priv->psgmii_base == FDT_ADDR_T_NONE)
		return -EINVAL;

	priv->esw.base = dev_read_addr_name(dev, "base");
	if (priv->esw.base == FDT_ADDR_T_NONE)
		return -EINVAL;

	ret = clk_get_by_name(dev, "ess", &priv->ess_clk);
	if (ret)
		return ret;

	ret = reset_get_by_name(dev, "ess", &priv->ess_rst);
	if (ret)
		return ret;

	ret = clk_enable(&priv->ess_clk);
	if (ret)
		return ret;

	ess_reset(dev);

	ret = uclass_get_device_by_driver(UCLASS_MDIO,
					  DM_DRIVER_GET(ipq4019_mdio),
					  &priv->mdio_dev);
	if (ret) {
		dev_dbg(dev, "Can't find IPQ4019 MDIO: %d\n", ret);
		goto err;
	}

	/* OF switch and PHY parsing and configuration */
	ret = essedma_of_switch(dev);
	if (ret)
		goto err;

	switch (priv->esw.port_wrapper_mode) {
	case PHY_INTERFACE_MODE_PSGMII:
		writel(PSGMIIPHY_PLL_VCO_VAL,
		       priv->psgmii_base + PSGMIIPHY_PLL_VCO_RELATED_CTRL);
		writel(PSGMIIPHY_VCO_VAL, priv->psgmii_base +
		       PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_1);
		/* wait for 10ms */
		mdelay(10);
		writel(PSGMIIPHY_VCO_RST_VAL, priv->psgmii_base +
		       PSGMIIPHY_VCO_CALIBRATION_CTRL_REGISTER_1);
		break;
	case PHY_INTERFACE_MODE_RGMII:
		writel(0x1, RGMII_TCSR_ESS_CFG);
		writel(0x400, priv->esw.base + ESS_RGMII_CTRL);
		break;
	default:
		printf("Unknown MII interface\n");
	}

	if (priv->esw.port_wrapper_mode == PHY_INTERFACE_MODE_PSGMII)
		psgmii_self_test(dev);

	ess_switch_init(&priv->esw);

	ret = edma_init(dev);
	if (ret)
		goto err;

	return 0;

err:
	reset_assert(&priv->ess_rst);
	clk_disable(&priv->ess_clk);
	return ret;
}

static int essedma_remove(struct udevice *dev)
{
	struct essedma_priv *priv = dev_get_priv(dev);

	ipq40xx_edma_free_rings(priv);

	clk_disable(&priv->ess_clk);
	reset_assert(&priv->ess_rst);

	return 0;
}

static const struct eth_ops essedma_eth_ops = {
	.start = ipq40xx_eth_start,
	.send = ipq40xx_eth_send,
	.recv = ipq40xx_eth_recv,
	.free_pkt = ipq40xx_eth_free_pkt,
	.stop = ipq40xx_eth_stop,
	.write_hwaddr = ipq40xx_eth_write_hwaddr,
};

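/*
 * Illustrative sketch of the device tree node this driver expects,
 * inferred from the dev_read_addr_name(), clk_get_by_name() and
 * reset_get_by_name() lookups in probe; the reg entries and PHY
 * labels are placeholders, see the SoC/board .dts files for the
 * real bindings:
 *
 *	ethernet {
 *		compatible = "qcom,ipq4019-ess";
 *		reg-names = "base", "psgmii_phy", "edma";
 *		clock-names = "ess";
 *		reset-names = "ess";
 *		ports {
 *			phy-mode = "psgmii";
 *			port@1 {
 *				phy-handle = <&ethphy0>;
 *			};
 *		};
 *	};
 */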
static const struct udevice_id essedma_ids[] = {
	{ .compatible = "qcom,ipq4019-ess", },
	{ }
};

U_BOOT_DRIVER(essedma) = {
	.name = "essedma",
	.id = UCLASS_ETH,
	.of_match = essedma_ids,
	.probe = essedma_probe,
	.remove = essedma_remove,
	.priv_auto = sizeof(struct essedma_priv),
	.plat_auto = sizeof(struct eth_pdata),
	.ops = &essedma_eth_ops,
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};