// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */

#include <log.h>
#include <malloc.h>
#include <linux/delay.h>
#include <net/pfe_eth/pfe_eth.h>
#include <net/pfe_eth/pfe_firmware.h>

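/* Software state for the single HIF Tx and Rx buffer-descriptor rings */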
static struct tx_desc_s *g_tx_desc;
static struct rx_desc_s *g_rx_desc;

/*
 * HIF Rx interface function
 * Reads the Rx descriptor at the current location (rx_to_read):
 * - if the descriptor holds a valid packet, get its data pointer
 * - read the ingress phy (gemac) port number from the HIF header
 * - advance the packet pointer past the HIF header
 * - subtract the HIF header size from the reported length
 * - hand the packet over to the caller.
 *
 * @param[out] pkt_ptr - Pointer to store the Rx packet
 * @param[out] phy_port - Pointer to store the receive phy port
 *
 * @return 0 if no packet is pending, else the packet length
 *	   (HIF header excluded).
 */
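/*
 * Illustrative polled-Rx sequence a caller might use (sketch only; the
 * real driver glue lives in the pfe_eth driver, and the variable names
 * here are hypothetical):
 *
 *	uchar *pkt;
 *	int port, len;
 *
 *	len = pfe_recv(&pkt, &port);
 *	if (len > 0) {
 *		... consume len bytes at pkt ...
 *		pfe_eth_free_pkt(dev, pkt, len);	recycle the Rx BD
 *	}
 */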
int pfe_recv(uchar **pkt_ptr, int *phy_port)
{
	struct rx_desc_s *rx_desc = g_rx_desc;
	struct buf_desc *bd;
	int len = 0;

	struct hif_header_s *hif_header;

	bd = rx_desc->rx_base + rx_desc->rx_to_read;

	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return len; /* No pending Rx packet */

	/* this length includes the hif_header (8 bytes) */
	len = readl(&bd->ctrl) & 0xFFFF;

	hif_header = (struct hif_header_s *)DDR_PFE_TO_VIRT(readl(&bd->data));

	/* Get the receive port info from the packet */
	debug("Pkt received:");
	debug(" Pkt ptr(%p), len(%d), gemac_port(%d) status(%08x)\n",
	      hif_header, len, hif_header->port_no, readl(&bd->status));
#ifdef DEBUG
	{
		int i;
		unsigned char *p = (unsigned char *)hif_header;

		for (i = 0; i < len; i++) {
			if (!(i % 16))
				printf("\n");
			printf(" %02x", p[i]);
		}
		printf("\n");
	}
#endif

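	/*
	 * Hand the payload (just past the HIF header) and the ingress port
	 * back to the caller; the header is dropped from the reported length.
	 */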
	*pkt_ptr = (uchar *)(hif_header + 1);
	*phy_port = hif_header->port_no;
	len -= sizeof(struct hif_header_s);

	return len;
}

/*
 * HIF Rx buffer recycling function
 * Hands the descriptor at the current rx_to_read location back to the
 * hardware and advances rx_to_read to the next location.
 */
int pfe_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct rx_desc_s *rx_desc = g_rx_desc;
	struct buf_desc *bd;

	debug("%s:rx_base: %p, rx_to_read: %d\n", __func__, rx_desc->rx_base,
	      rx_desc->rx_to_read);

	bd = rx_desc->rx_base + rx_desc->rx_to_read;

	/* reset the control field and hand the descriptor back to the BDP */
	writel((MAX_FRAME_SIZE | BD_CTRL_LIFM | BD_CTRL_DESC_EN
		| BD_CTRL_DIR), &bd->ctrl);
	writel(0, &bd->status);

	debug("Rx Done : status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/*
	 * Give a START_STROBE to the BDP so it fetches the descriptor
	 * __NOW__ instead of waiting for the next rx_poll_cycle. In the
	 * idle state (i.e. no Rx packet), the BDP will not fetch the
	 * descriptor even if the strobe is given.
	 */
	writel((readl(HIF_RX_CTRL) | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);

	/* advance rx_to_read to the next location (ring wrap-around) */
	rx_desc->rx_to_read = (rx_desc->rx_to_read + 1)
			       & (rx_desc->rx_ring_size - 1);

	debug("Rx next pkt location: %d\n", rx_desc->rx_to_read);

	return 0;
}

/*
 * HIF Tx interface function
 * Sends a single packet to the PFE through the HIF interface.
 * - No interrupt indication on Tx completion.
 * - The data is copied into the Tx buffer before the Tx descriptor is
 *   updated and the Tx DMA is enabled.
 *
 * @param[in] phy_port	Phy port number to send this packet out on
 * @param[in] data	Pointer to the packet data
 * @param[in] length	Length of the Ethernet packet to be transferred
 *
 * @return -1 if the Tx queue is full, else the Tx ring location where
 *	   the packet was placed.
 */
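/*
 * Illustrative polled-Tx sequence a caller might use (sketch only; the
 * real driver glue lives in the pfe_eth driver, and the error handling
 * here is hypothetical):
 *
 *	if (pfe_send(phy_port, frame, frame_len) < 0)
 *		return -EAGAIN;		ring full
 *	while (pfe_tx_done())
 *		udelay(10);		poll until the hw releases the BD
 */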
int pfe_send(int phy_port, void *data, int length)
{
	struct tx_desc_s *tx_desc = g_tx_desc;
	struct buf_desc *bd;
	struct hif_header_s hif_header;
	u8 *tx_buf_va;

	debug("%s:pkt: %p, len: %d, tx_base: %p, tx_to_send: %d\n", __func__,
	      data, length, tx_desc->tx_base, tx_desc->tx_to_send);

	bd = tx_desc->tx_base + tx_desc->tx_to_send;

	/* check queue-full condition */
	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return -1;

	/* PFE checks for min pkt size */
	if (length < MIN_PKT_SIZE)
		length = MIN_PKT_SIZE;

	tx_buf_va = (void *)DDR_PFE_TO_VIRT(readl(&bd->data));
	debug("%s: tx_buf_va: %p, tx_buf_pa: %08x\n", __func__, tx_buf_va,
	      readl(&bd->data));

	/* Fill the gemac/phy port number to send this packet out */
	memset(&hif_header, 0, sizeof(struct hif_header_s));
	hif_header.port_no = phy_port;

	memcpy(tx_buf_va, (u8 *)&hif_header, sizeof(struct hif_header_s));
	memcpy(tx_buf_va + sizeof(struct hif_header_s), data, length);
	length += sizeof(struct hif_header_s);

#ifdef DEBUG
	{
		int i;
		unsigned char *p = (unsigned char *)tx_buf_va;

		for (i = 0; i < length; i++) {
			if (!(i % 16))
				printf("\n");
			printf("%02x ", p[i]);
		}
	}
#endif

	debug("Tx Done: status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/* fill the tx desc */
	writel((u32)(BD_CTRL_DESC_EN | BD_CTRL_LIFM | (length & 0xFFFF)),
	       &bd->ctrl);
	writel(0, &bd->status);

	writel((HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB), HIF_TX_CTRL);

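	/*
	 * There is no Tx-completion interrupt in this polled model, so give
	 * the BDP/DMA a short moment to fetch the descriptor; completion is
	 * checked later via pfe_tx_done().
	 */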
	udelay(100);

	return tx_desc->tx_to_send;
}

/*
 * HIF function to check for Tx done
 * Checks the Tx done indication of the current tx_to_send location and,
 * on success, advances tx_to_send to the next location.
 *
 * @return -1 if the Tx ownership bit has not been cleared by the hw,
 *	   else zero on success (Tx done completion).
 */
int pfe_tx_done(void)
{
	struct tx_desc_s *tx_desc = g_tx_desc;
	struct buf_desc *bd;

	debug("%s:tx_base: %p, tx_to_send: %d\n", __func__, tx_desc->tx_base,
	      tx_desc->tx_to_send);

	bd = tx_desc->tx_base + tx_desc->tx_to_send;

	/* descriptor still owned by the hardware: Tx not done yet */
	if (readl(&bd->ctrl) & BD_CTRL_DESC_EN)
		return -1;

	/* reset the control field */
	writel(0, &bd->ctrl);
	writel(0, &bd->status);

	debug("Tx Done : status: %08x, ctrl: %08x\n", readl(&bd->status),
	      readl(&bd->ctrl));

	/* advance tx_to_send to the next location (ring wrap-around) */
	tx_desc->tx_to_send = (tx_desc->tx_to_send + 1)
			       & (tx_desc->tx_ring_size - 1);

	debug("Tx next pkt location: %d\n", tx_desc->tx_to_send);

	return 0;
}

/*
 * Helper function to dump Rx descriptors.
 */
static inline void hif_rx_desc_dump(void)
{
	struct buf_desc *bd_va;
	int i;
	struct rx_desc_s *rx_desc;

	if (!g_rx_desc) {
		printf("%s: HIF Rx desc not initialized\n", __func__);
		return;
	}

	rx_desc = g_rx_desc;
	bd_va = rx_desc->rx_base;

	debug("HIF rx desc: base_va: %p, base_pa: %08x\n", rx_desc->rx_base,
	      rx_desc->rx_base_pa);
	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		debug("status: %08x, ctrl: %08x, data: %08x, next: 0x%08x\n",
		      readl(&bd_va->status),
		      readl(&bd_va->ctrl),
		      readl(&bd_va->data),
		      readl(&bd_va->next));
		bd_va++;
	}
}

/*
 * This function marks all Rx descriptors as LAST_BD.
 */
void hif_rx_desc_disable(void)
{
	int i;
	struct rx_desc_s *rx_desc;
	struct buf_desc *bd_va;

	if (!g_rx_desc) {
		printf("%s: HIF Rx desc not initialized\n", __func__);
		return;
	}

	rx_desc = g_rx_desc;
	bd_va = rx_desc->rx_base;

	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		writel(readl(&bd_va->ctrl) | BD_CTRL_LAST_BD, &bd_va->ctrl);
		bd_va++;
	}
}

/*
 * HIF Rx descriptor initialization function.
 */
static int hif_rx_desc_init(struct pfe_ddr_address *pfe_addr)
{
	u32 ctrl;
	struct buf_desc *bd_va;
	struct buf_desc *bd_pa;
	struct rx_desc_s *rx_desc;
	u32 rx_buf_pa;
	int i;

	/* sanity check */
	if (g_rx_desc) {
		printf("%s: HIF Rx desc re-init request\n", __func__);
		return 0;
	}

	rx_desc = (struct rx_desc_s *)malloc(sizeof(struct rx_desc_s));
	if (!rx_desc) {
		printf("%s: Memory allocation failure\n", __func__);
		return -ENOMEM;
	}
	memset(rx_desc, 0, sizeof(struct rx_desc_s));

	/* init: Rx ring buffer */
	rx_desc->rx_ring_size = HIF_RX_DESC_NT;

	/* NOTE: must be 64-bit aligned */
	bd_va = (struct buf_desc *)(pfe_addr->ddr_pfe_baseaddr
		 + RX_BD_BASEADDR);
	bd_pa = (struct buf_desc *)(pfe_addr->ddr_pfe_phys_baseaddr
		 + RX_BD_BASEADDR);

	rx_desc->rx_base = bd_va;
	rx_desc->rx_base_pa = (unsigned long)bd_pa;

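	/*
	 * Rx packet buffers are carved out of PFE DDR starting at
	 * HIF_RX_PKT_DDR_BASEADDR, one MAX_FRAME_SIZE buffer per descriptor.
	 */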
	rx_buf_pa = pfe_addr->ddr_pfe_phys_baseaddr + HIF_RX_PKT_DDR_BASEADDR;

	debug("%s: Rx desc base: %p, base_pa: %08x, desc_count: %d\n",
	      __func__, rx_desc->rx_base, rx_desc->rx_base_pa,
	      rx_desc->rx_ring_size);

	memset(bd_va, 0, sizeof(struct buf_desc) * rx_desc->rx_ring_size);

	ctrl = (MAX_FRAME_SIZE | BD_CTRL_DESC_EN | BD_CTRL_DIR | BD_CTRL_LIFM);

	for (i = 0; i < rx_desc->rx_ring_size; i++) {
		writel((unsigned long)(bd_pa + 1), &bd_va->next);
		writel(ctrl, &bd_va->ctrl);
		writel(rx_buf_pa + (i * MAX_FRAME_SIZE), &bd_va->data);
		bd_va++;
		bd_pa++;
	}
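	/* Close the ring: point the last descriptor back to the first one */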
	--bd_va;
	writel((u32)rx_desc->rx_base_pa, &bd_va->next);

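	/* Program the Rx ring base into the HIF BDP and kick it once */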
	writel(rx_desc->rx_base_pa, HIF_RX_BDP_ADDR);
	writel((readl(HIF_RX_CTRL) | HIF_CTRL_BDP_CH_START_WSTB), HIF_RX_CTRL);

	g_rx_desc = rx_desc;

	return 0;
}

/*
 * Helper function to dump Tx descriptors.
 */
static inline void hif_tx_desc_dump(void)
{
	struct tx_desc_s *tx_desc;
	int i;
	struct buf_desc *bd_va;

	if (!g_tx_desc) {
		printf("%s: HIF Tx desc not initialized\n", __func__);
		return;
	}

	tx_desc = g_tx_desc;
	bd_va = tx_desc->tx_base;

	debug("HIF tx desc: base_va: %p, base_pa: %08x\n", tx_desc->tx_base,
	      tx_desc->tx_base_pa);

	for (i = 0; i < tx_desc->tx_ring_size; i++) {
		debug("status: %08x, ctrl: %08x, data: %08x, next: 0x%08x\n",
		      readl(&bd_va->status),
		      readl(&bd_va->ctrl),
		      readl(&bd_va->data),
		      readl(&bd_va->next));
		bd_va++;
	}
}

/*
 * HIF Tx descriptor initialization function.
 */
static int hif_tx_desc_init(struct pfe_ddr_address *pfe_addr)
{
	struct buf_desc *bd_va;
	struct buf_desc *bd_pa;
	int i;
	struct tx_desc_s *tx_desc;
	u32 tx_buf_pa;

	/* sanity check */
	if (g_tx_desc) {
		printf("%s: HIF Tx desc re-init request\n", __func__);
		return 0;
	}

	tx_desc = (struct tx_desc_s *)malloc(sizeof(struct tx_desc_s));
	if (!tx_desc) {
		printf("%s:%d: Memory allocation failure\n", __func__,
		       __LINE__);
		return -ENOMEM;
	}
	memset(tx_desc, 0, sizeof(struct tx_desc_s));

	/* init: Tx ring buffer */
	tx_desc->tx_ring_size = HIF_TX_DESC_NT;

	/* NOTE: must be 64-bit aligned */
	bd_va = (struct buf_desc *)(pfe_addr->ddr_pfe_baseaddr
		 + TX_BD_BASEADDR);
	bd_pa = (struct buf_desc *)(pfe_addr->ddr_pfe_phys_baseaddr
		 + TX_BD_BASEADDR);

	tx_desc->tx_base_pa = (unsigned long)bd_pa;
	tx_desc->tx_base = bd_va;

	debug("%s: Tx desc_base: %p, base_pa: %08x, desc_count: %d\n",
	      __func__, tx_desc->tx_base, tx_desc->tx_base_pa,
	      tx_desc->tx_ring_size);

	memset(bd_va, 0, sizeof(struct buf_desc) * tx_desc->tx_ring_size);

	tx_buf_pa = pfe_addr->ddr_pfe_phys_baseaddr + HIF_TX_PKT_DDR_BASEADDR;

	for (i = 0; i < tx_desc->tx_ring_size; i++) {
		writel((unsigned long)(bd_pa + 1), &bd_va->next);
		writel(tx_buf_pa + (i * MAX_FRAME_SIZE), &bd_va->data);
		bd_va++;
		bd_pa++;
	}
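	/* Close the ring: point the last descriptor back to the first one */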
	--bd_va;
	writel((u32)tx_desc->tx_base_pa, &bd_va->next);

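	/*
	 * Program the Tx ring base into the HIF BDP; the channel itself is
	 * kicked per packet from pfe_send().
	 */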
	writel(tx_desc->tx_base_pa, HIF_TX_BDP_ADDR);

	g_tx_desc = tx_desc;

	return 0;
}

/*
 * PFE/Class initialization.
 */
static void pfe_class_init(struct pfe_ddr_address *pfe_addr)
{
	struct class_cfg class_cfg = {
		.route_table_baseaddr = pfe_addr->ddr_pfe_phys_baseaddr +
					ROUTE_TABLE_BASEADDR,
		.route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
	};

	class_init(&class_cfg);

	debug("class init complete\n");
}

/*
 * PFE/TMU initialization.
 */
static void pfe_tmu_init(struct pfe_ddr_address *pfe_addr)
{
	struct tmu_cfg tmu_cfg = {
		.llm_base_addr = pfe_addr->ddr_pfe_phys_baseaddr
				 + TMU_LLM_BASEADDR,
		.llm_queue_len = TMU_LLM_QUEUE_LEN,
	};

	tmu_init(&tmu_cfg);

	debug("tmu init complete\n");
}

/*
 * PFE/BMU (both BMU1 & BMU2) initialization.
 */
static void pfe_bmu_init(struct pfe_ddr_address *pfe_addr)
{
	struct bmu_cfg bmu1_cfg = {
		.baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
					     BMU1_LMEM_BASEADDR),
		.count = BMU1_BUF_COUNT,
		.size = BMU1_BUF_SIZE,
	};

	struct bmu_cfg bmu2_cfg = {
		.baseaddr = pfe_addr->ddr_pfe_phys_baseaddr + BMU2_DDR_BASEADDR,
		.count = BMU2_BUF_COUNT,
		.size = BMU2_BUF_SIZE,
	};

	bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
	debug("bmu1 init: done\n");

	bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
	debug("bmu2 init: done\n");
}

/*
 * PFE/GPI initialization function.
 * - egpi1, egpi2, hgpi
 */
static void pfe_gpi_init(struct pfe_ddr_address *pfe_addr)
{
	struct gpi_cfg egpi1_cfg = {
		.lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
		.tmlf_txthres = EGPI1_TMLF_TXTHRES,
		.aseq_len = EGPI1_ASEQ_LEN,
	};

	struct gpi_cfg egpi2_cfg = {
		.lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
		.tmlf_txthres = EGPI2_TMLF_TXTHRES,
		.aseq_len = EGPI2_ASEQ_LEN,
	};

	struct gpi_cfg hgpi_cfg = {
		.lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
		.tmlf_txthres = HGPI_TMLF_TXTHRES,
		.aseq_len = HGPI_ASEQ_LEN,
	};

	gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
	debug("GPI1 init complete\n");

	gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
	debug("GPI2 init complete\n");

	gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
	debug("HGPI init complete\n");
}

/*
 * PFE/HIF initialization function.
 */
static int pfe_hif_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	hif_tx_disable();
	hif_rx_disable();

	ret = hif_tx_desc_init(pfe_addr);
	if (ret)
		return ret;
	ret = hif_rx_desc_init(pfe_addr);
	if (ret)
		return ret;

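	/* Configure the HIF block itself now that both rings are in place */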
	hif_init();

	hif_tx_enable();
	hif_rx_enable();

	hif_rx_desc_dump();
	hif_tx_desc_dump();

	debug("HIF init complete\n");
	return ret;
}

/*
 * PFE initialization
 * - Firmware loading (CLASS-PE and TMU-PE)
 * - BMU1 and BMU2 init
 * - GEMAC init
 * - GPI init
 * - CLASS-PE init
 * - TMU-PE init
 * - HIF Tx and Rx descriptor init
 *
 * @param[in] pfe_addr	Pointer to the PFE DDR address configuration
 *
 * @return 0 on success, negative error code otherwise.
 */
static int pfe_hw_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	debug("%s: start\n", __func__);

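	/* Program the CLASS, TMU and UTIL PE system clock ratios first */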
	writel(0x3, CLASS_PE_SYS_CLK_RATIO);
	writel(0x3, TMU_PE_SYS_CLK_RATIO);
	writel(0x3, UTIL_PE_SYS_CLK_RATIO);
	udelay(10);

	pfe_class_init(pfe_addr);

	pfe_tmu_init(pfe_addr);

	pfe_bmu_init(pfe_addr);

	pfe_gpi_init(pfe_addr);

	ret = pfe_hif_init(pfe_addr);
	if (ret)
		return ret;

	bmu_enable(BMU1_BASE_ADDR);
	debug("bmu1 enabled\n");

	bmu_enable(BMU2_BASE_ADDR);
	debug("bmu2 enabled\n");

	debug("%s: done\n", __func__);

	return ret;
}

/*
 * PFE driver init function.
 * - Initializes pfe_lib
 * - Initializes the PFE hardware
 * - Loads the firmware and enables the PEs
 * - Should be executed once.
 *
 * @param[in] pfe_addr	Pointer to the PFE DDR address configuration
 */
int pfe_drv_init(struct pfe_ddr_address *pfe_addr)
{
	int ret = 0;

	pfe_lib_init();

	ret = pfe_hw_init(pfe_addr);
	if (ret)
		return ret;

	/*
	 * Load the Class, TMU and Util firmware.
	 * By now the PFE is out of reset, disabled and configured.
	 * Firmware loading must be done after pfe_hw_init().
	 * This loads the default built-in sbl firmware.
	 */
	pfe_firmware_init();

	return ret;
}

/*
 * PFE remove function
 * - Stops the PEs
 * - Frees the Tx/Rx descriptor resources
 * - Should be called once.
 *
 * @param[in] dev	Pointer to the ethernet udevice
 */
int pfe_eth_remove(struct udevice *dev)
{
	if (g_tx_desc)
		free(g_tx_desc);

	if (g_rx_desc)
		free(g_rx_desc);

	/* allow a clean re-init if the device is probed again */
	g_tx_desc = NULL;
	g_rx_desc = NULL;

	pfe_firmware_exit();

	return 0;
}