Suneel Garapati | 53dc448 | 2020-08-26 14:37:33 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Copyright (C) 2018 Marvell International Ltd. |
| 4 | */ |
| 5 | |
| 6 | #include <dm.h> |
| 7 | #include <malloc.h> |
| 8 | #include <misc.h> |
| 9 | #include <net.h> |
| 10 | #include <pci.h> |
| 11 | #include <pci_ids.h> |
| 12 | #include <phy.h> |
| 13 | #include <asm/io.h> |
| 14 | #include <linux/delay.h> |
| 15 | |
| 16 | #include "nic_reg.h" |
| 17 | #include "nic.h" |
| 18 | #include "nicvf_queues.h" |
| 19 | |
| 20 | /* Register read/write APIs */ |
| 21 | void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val) |
| 22 | { |
| 23 | writeq(val, nic->reg_base + offset); |
| 24 | } |
| 25 | |
| 26 | u64 nicvf_reg_read(struct nicvf *nic, u64 offset) |
| 27 | { |
| 28 | return readq(nic->reg_base + offset); |
| 29 | } |
| 30 | |
| 31 | void nicvf_queue_reg_write(struct nicvf *nic, u64 offset, |
| 32 | u64 qidx, u64 val) |
| 33 | { |
| 34 | void *addr = nic->reg_base + offset; |
| 35 | |
| 36 | writeq(val, (void *)(addr + (qidx << NIC_Q_NUM_SHIFT))); |
| 37 | } |
| 38 | |
| 39 | u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx) |
| 40 | { |
| 41 | void *addr = nic->reg_base + offset; |
| 42 | |
| 43 | return readq((void *)(addr + (qidx << NIC_Q_NUM_SHIFT))); |
| 44 | } |
| 45 | |
| 46 | static void nicvf_handle_mbx_intr(struct nicvf *nic); |
| 47 | |
| 48 | /* VF -> PF mailbox communication */ |
| 49 | static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx) |
| 50 | { |
| 51 | u64 *msg = (u64 *)mbx; |
| 52 | |
| 53 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]); |
| 54 | nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]); |
| 55 | } |
| 56 | |
| 57 | int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) |
| 58 | { |
| 59 | int timeout = NIC_PF_VF_MBX_TIMEOUT; |
| 60 | int sleep = 10; |
| 61 | |
| 62 | nic->pf_acked = false; |
| 63 | nic->pf_nacked = false; |
| 64 | |
| 65 | nicvf_write_to_mbx(nic, mbx); |
| 66 | |
| 67 | nic_handle_mbx_intr(nic->nicpf, nic->vf_id); |
| 68 | |
| 69 | /* Wait for previous message to be acked, timeout 2sec */ |
| 70 | while (!nic->pf_acked) { |
| 71 | if (nic->pf_nacked) |
| 72 | return -1; |
| 73 | mdelay(sleep); |
| 74 | nicvf_handle_mbx_intr(nic); |
| 75 | |
| 76 | if (nic->pf_acked) |
| 77 | break; |
| 78 | timeout -= sleep; |
| 79 | if (!timeout) { |
| 80 | printf("PF didn't ack to mbox msg %d from VF%d\n", |
| 81 | (mbx->msg.msg & 0xFF), nic->vf_id); |
| 82 | return -1; |
| 83 | } |
| 84 | } |
| 85 | |
| 86 | return 0; |
| 87 | } |
| 88 | |
| 89 | /* Checks if VF is able to comminicate with PF |
| 90 | * and also gets the VNIC number this VF is associated to. |
| 91 | */ |
| 92 | static int nicvf_check_pf_ready(struct nicvf *nic) |
| 93 | { |
| 94 | union nic_mbx mbx = {}; |
| 95 | |
| 96 | mbx.msg.msg = NIC_MBOX_MSG_READY; |
| 97 | if (nicvf_send_msg_to_pf(nic, &mbx)) { |
| 98 | printf("PF didn't respond to READY msg\n"); |
| 99 | return 0; |
| 100 | } |
| 101 | |
| 102 | return 1; |
| 103 | } |
| 104 | |
/* Read the PF->VF mailbox CSRs and dispatch the message found there.
 * Updates ack/nack flags and, depending on message type, the VF's
 * configuration or link state.  Called from the polling loop in
 * nicvf_send_msg_to_pf() — there is no interrupt context in U-Boot.
 */
static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct eth_pdata *pdata = dev_get_platdata(nic->dev);
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	/* Copy the whole mailbox (NIC_PF_VF_MAILBOX_SIZE 64-bit words)
	 * out of the CSRs into the local union.
	 */
	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	debug("Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		/* PF answered the READY handshake: record our identity
		 * and take the PF-provided MAC unless the user already
		 * requested a specific one.
		 */
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			memcpy(pdata->enetaddr,
			       mbx.nic_cfg.mac_addr, 6);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		/* Link state is unknown until a LINK_CHANGE arrives */
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		/* Counts as an ACK so a pending sender stops polling */
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			printf("%s: Link is Up %d Mbps %s\n",
			       nic->dev->name, nic->speed,
			       nic->duplex == 1 ?
			       "Full duplex" : "Half duplex");
		} else {
			printf("%s: Link is Down\n", nic->dev->name);
		}
		break;
	default:
		printf("Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}

	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}
| 164 | |
| 165 | static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct udevice *dev) |
| 166 | { |
| 167 | union nic_mbx mbx = {}; |
| 168 | struct eth_pdata *pdata = dev_get_platdata(dev); |
| 169 | |
| 170 | mbx.mac.msg = NIC_MBOX_MSG_SET_MAC; |
| 171 | mbx.mac.vf_id = nic->vf_id; |
| 172 | memcpy(mbx.mac.mac_addr, pdata->enetaddr, 6); |
| 173 | |
| 174 | return nicvf_send_msg_to_pf(nic, &mbx); |
| 175 | } |
| 176 | |
| 177 | static void nicvf_config_cpi(struct nicvf *nic) |
| 178 | { |
| 179 | union nic_mbx mbx = {}; |
| 180 | |
| 181 | mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG; |
| 182 | mbx.cpi_cfg.vf_id = nic->vf_id; |
| 183 | mbx.cpi_cfg.cpi_alg = nic->cpi_alg; |
| 184 | mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt; |
| 185 | |
| 186 | nicvf_send_msg_to_pf(nic, &mbx); |
| 187 | } |
| 188 | |
| 189 | static int nicvf_init_resources(struct nicvf *nic) |
| 190 | { |
| 191 | int err; |
| 192 | |
| 193 | nic->num_qs = 1; |
| 194 | |
| 195 | /* Enable Qset */ |
| 196 | nicvf_qset_config(nic, true); |
| 197 | |
| 198 | /* Initialize queues and HW for data transfer */ |
| 199 | err = nicvf_config_data_transfer(nic, true); |
| 200 | |
| 201 | if (err) { |
| 202 | printf("Failed to alloc/config VF's QSet resources\n"); |
| 203 | return err; |
| 204 | } |
| 205 | return 0; |
| 206 | } |
| 207 | |
| 208 | static void nicvf_snd_pkt_handler(struct nicvf *nic, |
| 209 | struct cmp_queue *cq, |
| 210 | void *cq_desc, int cqe_type) |
| 211 | { |
| 212 | struct cqe_send_t *cqe_tx; |
| 213 | struct snd_queue *sq; |
| 214 | struct sq_hdr_subdesc *hdr; |
| 215 | |
| 216 | cqe_tx = (struct cqe_send_t *)cq_desc; |
| 217 | sq = &nic->qs->sq[cqe_tx->sq_idx]; |
| 218 | |
| 219 | hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); |
| 220 | if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) |
| 221 | return; |
| 222 | |
| 223 | nicvf_check_cqe_tx_errs(nic, cq, cq_desc); |
| 224 | nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); |
| 225 | } |
| 226 | |
| 227 | static int nicvf_rcv_pkt_handler(struct nicvf *nic, |
| 228 | struct cmp_queue *cq, void *cq_desc, |
| 229 | void **ppkt, int cqe_type) |
| 230 | { |
| 231 | void *pkt; |
| 232 | |
| 233 | size_t pkt_len; |
| 234 | struct cqe_rx_t *cqe_rx = (struct cqe_rx_t *)cq_desc; |
| 235 | int err = 0; |
| 236 | |
| 237 | /* Check for errors */ |
| 238 | err = nicvf_check_cqe_rx_errs(nic, cq, cq_desc); |
| 239 | if (err && !cqe_rx->rb_cnt) |
| 240 | return -1; |
| 241 | |
| 242 | pkt = nicvf_get_rcv_pkt(nic, cq_desc, &pkt_len); |
| 243 | if (!pkt) { |
| 244 | debug("Packet not received\n"); |
| 245 | return -1; |
| 246 | } |
| 247 | |
| 248 | if (pkt) |
| 249 | *ppkt = pkt; |
| 250 | |
| 251 | return pkt_len; |
| 252 | } |
| 253 | |
/* Process at most one pending completion-queue entry on CQ0.
 *
 * An RX CQE hands the packet buffer and length back through @ppkt and
 * @pkt_len; a TX CQE releases the packet's send-queue descriptors.
 *
 * Return: non-zero when an RX or TX CQE was handled, 0 when the CQ was
 * empty (or only an unrecognized CQE type was dequeued).
 */
int nicvf_cq_handler(struct nicvf *nic, void **ppkt, int *pkt_len)
{
	int cq_qnum = 0;
	int processed_sq_cqe = 0;
	int processed_rq_cqe = 0;
	int processed_cqe = 0;

	unsigned long cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_qnum];
	struct cqe_rx_t *cq_desc;

	/* Get num of valid CQ entries expect next one to be SQ completion */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_qnum);
	cqe_count &= 0xFFFF;
	if (!cqe_count)
		return 0;

	/* Get head of the valid CQ entries */
	/* NOTE(review): the >>9 / &0xFFFF unpacking presumably matches
	 * the CQ HEAD register field layout — confirm against nic_reg.h.
	 */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_qnum);
	cqe_head >>= 9;
	cqe_head &= 0xFFFF;

	if (cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			debug("%s: Got Rx CQE\n", nic->dev->name);
			*pkt_len = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
							 ppkt, CQE_TYPE_RX);
			processed_rq_cqe++;
			break;
		case CQE_TYPE_SEND:
			debug("%s: Got Tx CQE\n", nic->dev->name);
			nicvf_snd_pkt_handler(nic, cq, cq_desc, CQE_TYPE_SEND);
			processed_sq_cqe++;
			break;
		default:
			debug("%s: Got CQ type %u\n", nic->dev->name,
			      cq_desc->cqe_type);
			break;
		}
		processed_cqe++;
	}

	/* Dequeue CQE */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_qnum, processed_cqe);

	/* Barrier so the doorbell write is ordered before we return */
	asm volatile ("dsb sy");

	return (processed_sq_cqe | processed_rq_cqe);
}
| 313 | |
/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
void nicvf_handle_qs_err(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ: stop the
		 * SQ first, reinitialize the CQ, drop SQ descriptors that
		 * were in flight, then re-enable — the order matters.
		 */
		nicvf_sq_disable(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->dev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
	}
}
| 337 | |
| 338 | static int nicvf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len) |
| 339 | { |
| 340 | struct nicvf *nic = dev_get_priv(dev); |
| 341 | |
| 342 | if (pkt && pkt_len) |
| 343 | free(pkt); |
| 344 | nicvf_refill_rbdr(nic); |
| 345 | return 0; |
| 346 | } |
| 347 | |
| 348 | static int nicvf_xmit(struct udevice *dev, void *pkt, int pkt_len) |
| 349 | { |
| 350 | struct nicvf *nic = dev_get_priv(dev); |
| 351 | int ret = 0; |
| 352 | int rcv_len = 0; |
| 353 | unsigned int timeout = 5000; |
| 354 | void *rpkt = NULL; |
| 355 | |
| 356 | if (!nicvf_sq_append_pkt(nic, pkt, pkt_len)) { |
| 357 | printf("VF%d: TX ring full\n", nic->vf_id); |
| 358 | return -1; |
| 359 | } |
| 360 | |
| 361 | /* check and update CQ for pkt sent */ |
| 362 | while (!ret && timeout--) { |
| 363 | ret = nicvf_cq_handler(nic, &rpkt, &rcv_len); |
| 364 | if (!ret) { |
| 365 | debug("%s: %d, Not sent\n", __func__, __LINE__); |
| 366 | udelay(10); |
| 367 | } |
| 368 | } |
| 369 | |
| 370 | return 0; |
| 371 | } |
| 372 | |
| 373 | static int nicvf_recv(struct udevice *dev, int flags, uchar **packetp) |
| 374 | { |
| 375 | struct nicvf *nic = dev_get_priv(dev); |
| 376 | void *pkt; |
| 377 | int pkt_len = 0; |
| 378 | #ifdef DEBUG |
| 379 | u8 *dpkt; |
| 380 | int i, j; |
| 381 | #endif |
| 382 | |
| 383 | nicvf_cq_handler(nic, &pkt, &pkt_len); |
| 384 | |
| 385 | if (pkt_len) { |
| 386 | #ifdef DEBUG |
| 387 | dpkt = pkt; |
| 388 | printf("RX packet contents:\n"); |
| 389 | for (i = 0; i < 8; i++) { |
| 390 | puts("\t"); |
| 391 | for (j = 0; j < 10; j++) |
| 392 | printf("%02x ", dpkt[i * 10 + j]); |
| 393 | puts("\n"); |
| 394 | } |
| 395 | #endif |
| 396 | *packetp = pkt; |
| 397 | } |
| 398 | |
| 399 | return pkt_len; |
| 400 | } |
| 401 | |
| 402 | void nicvf_stop(struct udevice *dev) |
| 403 | { |
| 404 | struct nicvf *nic = dev_get_priv(dev); |
| 405 | |
| 406 | if (!nic->open) |
| 407 | return; |
| 408 | |
| 409 | /* Free resources */ |
| 410 | nicvf_config_data_transfer(nic, false); |
| 411 | |
| 412 | /* Disable HW Qset */ |
| 413 | nicvf_qset_config(nic, false); |
| 414 | |
| 415 | nic->open = false; |
| 416 | } |
| 417 | |
| 418 | int nicvf_open(struct udevice *dev) |
| 419 | { |
| 420 | int err; |
| 421 | struct nicvf *nic = dev_get_priv(dev); |
| 422 | |
| 423 | nicvf_hw_set_mac_addr(nic, dev); |
| 424 | |
| 425 | /* Configure CPI alorithm */ |
| 426 | nic->cpi_alg = CPI_ALG_NONE; |
| 427 | nicvf_config_cpi(nic); |
| 428 | |
| 429 | /* Initialize the queues */ |
| 430 | err = nicvf_init_resources(nic); |
| 431 | if (err) |
| 432 | return -1; |
| 433 | |
| 434 | if (!nicvf_check_pf_ready(nic)) |
| 435 | return -1; |
| 436 | |
| 437 | nic->open = true; |
| 438 | |
| 439 | /* Make sure queue initialization is written */ |
| 440 | asm volatile("dsb sy"); |
| 441 | |
| 442 | return 0; |
| 443 | } |
| 444 | |
| 445 | int nicvf_write_hwaddr(struct udevice *dev) |
| 446 | { |
| 447 | unsigned char ethaddr[ARP_HLEN]; |
| 448 | struct eth_pdata *pdata = dev_get_platdata(dev); |
| 449 | struct nicvf *nic = dev_get_priv(dev); |
| 450 | |
| 451 | /* If lower level firmware fails to set proper MAC |
| 452 | * u-boot framework updates MAC to random address. |
| 453 | * Use this hook to update mac address in environment. |
| 454 | */ |
| 455 | if (!eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr)) { |
| 456 | eth_env_set_enetaddr_by_index("eth", dev->seq, pdata->enetaddr); |
| 457 | debug("%s: pMAC %pM\n", __func__, pdata->enetaddr); |
| 458 | } |
| 459 | eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr); |
| 460 | if (memcmp(ethaddr, pdata->enetaddr, ARP_HLEN)) { |
| 461 | debug("%s: pMAC %pM\n", __func__, pdata->enetaddr); |
| 462 | nicvf_hw_set_mac_addr(nic, dev); |
| 463 | } |
| 464 | return 0; |
| 465 | } |
| 466 | |
| 467 | static void nicvf_probe_mdio_devices(void) |
| 468 | { |
| 469 | struct udevice *pdev; |
| 470 | int err; |
| 471 | static int probed; |
| 472 | |
| 473 | if (probed) |
| 474 | return; |
| 475 | |
| 476 | err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM, |
| 477 | PCI_DEVICE_ID_CAVIUM_SMI, 0, |
| 478 | &pdev); |
| 479 | if (err) |
| 480 | debug("%s couldn't find SMI device\n", __func__); |
| 481 | probed = 1; |
| 482 | } |
| 483 | |
| 484 | int nicvf_initialize(struct udevice *dev) |
| 485 | { |
| 486 | struct nicvf *nicvf = dev_get_priv(dev); |
| 487 | struct eth_pdata *pdata = dev_get_platdata(dev); |
| 488 | int ret = 0, bgx, lmac; |
| 489 | char name[16]; |
| 490 | unsigned char ethaddr[ARP_HLEN]; |
| 491 | struct udevice *pfdev; |
| 492 | struct nicpf *pf; |
| 493 | static int vfid; |
| 494 | |
| 495 | if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM, |
| 496 | PCI_DEVICE_ID_CAVIUM_NIC, 0, &pfdev)) { |
| 497 | printf("%s NIC PF device not found..VF probe failed\n", |
| 498 | __func__); |
| 499 | return -1; |
| 500 | } |
| 501 | pf = dev_get_priv(pfdev); |
| 502 | nicvf->vf_id = vfid++; |
| 503 | nicvf->dev = dev; |
| 504 | nicvf->nicpf = pf; |
| 505 | |
| 506 | nicvf_probe_mdio_devices(); |
| 507 | |
| 508 | /* Enable TSO support */ |
| 509 | nicvf->hw_tso = true; |
| 510 | |
| 511 | nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, |
| 512 | PCI_REGION_MEM); |
| 513 | |
| 514 | debug("nicvf->reg_base: %p\n", nicvf->reg_base); |
| 515 | |
| 516 | if (!nicvf->reg_base) { |
| 517 | printf("Cannot map config register space, aborting\n"); |
| 518 | ret = -1; |
| 519 | goto fail; |
| 520 | } |
| 521 | |
| 522 | ret = nicvf_set_qset_resources(nicvf); |
| 523 | if (ret) |
| 524 | return -1; |
| 525 | |
| 526 | sprintf(name, "vnic%u", nicvf->vf_id); |
| 527 | debug("%s name %s\n", __func__, name); |
| 528 | device_set_name(dev, name); |
| 529 | |
| 530 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]); |
| 531 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]); |
| 532 | debug("%s VF %d BGX %d LMAC %d\n", __func__, nicvf->vf_id, bgx, lmac); |
| 533 | debug("%s PF %p pfdev %p VF %p vfdev %p vf->pdata %p\n", |
| 534 | __func__, nicvf->nicpf, nicvf->nicpf->udev, nicvf, nicvf->dev, |
| 535 | pdata); |
| 536 | |
| 537 | fdt_board_get_ethaddr(bgx, lmac, ethaddr); |
| 538 | |
| 539 | debug("%s bgx %d lmac %d ethaddr %pM\n", __func__, bgx, lmac, ethaddr); |
| 540 | |
| 541 | if (is_valid_ethaddr(ethaddr)) { |
| 542 | memcpy(pdata->enetaddr, ethaddr, ARP_HLEN); |
| 543 | eth_env_set_enetaddr_by_index("eth", dev->seq, ethaddr); |
| 544 | } |
| 545 | debug("%s enetaddr %pM ethaddr %pM\n", __func__, |
| 546 | pdata->enetaddr, ethaddr); |
| 547 | |
| 548 | fail: |
| 549 | return ret; |
| 550 | } |
| 551 | |
/* UCLASS_ETH probe hook: thin wrapper around nicvf_initialize() */
int octeontx_vnic_probe(struct udevice *dev)
{
	return nicvf_initialize(dev);
}
| 556 | |
/* U-Boot ethernet operations exposed by the NIC VF driver */
static const struct eth_ops octeontx_vnic_ops = {
	.start = nicvf_open,
	.stop = nicvf_stop,
	.send = nicvf_xmit,
	.recv = nicvf_recv,
	.free_pkt = nicvf_free_pkt,
	.write_hwaddr = nicvf_write_hwaddr,
};
| 565 | |
/* Driver-model registration: one "vnic" ethernet device per NIC VF
 * PCI function; priv/platdata are allocated by the DM core.
 */
U_BOOT_DRIVER(octeontx_vnic) = {
	.name	= "vnic",
	.id	= UCLASS_ETH,
	.probe	= octeontx_vnic_probe,
	.ops	= &octeontx_vnic_ops,
	.priv_auto_alloc_size = sizeof(struct nicvf),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};

/* PCI ids this driver binds to (both NIC VF device id variants) */
static struct pci_device_id octeontx_vnic_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF) },
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF_1) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_vnic, octeontx_vnic_supported);