// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2019-2021 Broadcom.
 */

#include <asm/io.h>
#include <dm.h>
#include <linux/delay.h>
#include <memalign.h>
#include <net.h>

#include "bnxt.h"
#include "bnxt_dbg.h"

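/*
 * Chip teardown and bring-up are table driven: down_chip[] and
 * bring_chip[] (defined near the end of this file) list the HWRM
 * commands to run in order, and the last argument of bnxt_hwrm_run()
 * selects whether to abort on the first failure (bring-up) or keep
 * going so cleanup completes (teardown).
 */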
#define bnxt_down_chip(bp) bnxt_hwrm_run(down_chip, bp, 0)
#define bnxt_bring_chip(bp) bnxt_hwrm_run(bring_chip, bp, 1)

/* Broadcom ethernet driver PCI APIs. */
static void bnxt_bring_pci(struct bnxt *bp)
{
	u16 cmd_reg = 0;

	dm_pci_read_config16(bp->pdev, PCI_VENDOR_ID, &bp->vendor_id);
	dm_pci_read_config16(bp->pdev, PCI_DEVICE_ID, &bp->device_id);
	dm_pci_read_config16(bp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &bp->subsystem_vendor);
	dm_pci_read_config16(bp->pdev, PCI_SUBSYSTEM_ID, &bp->subsystem_device);
	dm_pci_read_config16(bp->pdev, PCI_COMMAND, &bp->cmd_reg);
	dm_pci_read_config8(bp->pdev, PCI_INTERRUPT_LINE, &bp->irq);
	bp->bar0 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_0, 0, 0,
				  PCI_REGION_TYPE, PCI_REGION_MEM);
	bp->bar1 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_2, 0, 0,
				  PCI_REGION_TYPE, PCI_REGION_MEM);
	bp->bar2 = dm_pci_map_bar(bp->pdev, PCI_BASE_ADDRESS_4, 0, 0,
				  PCI_REGION_TYPE, PCI_REGION_MEM);
	cmd_reg = bp->cmd_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
	cmd_reg |= PCI_COMMAND_INTX_DISABLE; /* disable intr */
	dm_pci_write_config16(bp->pdev, PCI_COMMAND, cmd_reg);
	dm_pci_read_config16(bp->pdev, PCI_COMMAND, &cmd_reg);
	dbg_pci(bp, __func__, cmd_reg);
}

int bnxt_free_rx_iob(struct bnxt *bp)
{
	unsigned int i;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RX_IOB)))
		return STATUS_SUCCESS;

	for (i = 0; i < bp->rx.buf_cnt; i++) {
		if (bp->rx.iob[i]) {
			free(bp->rx.iob[i]);
			bp->rx.iob[i] = NULL;
		}
	}

	FLAG_RESET(bp->flag_hwrm, VALID_RX_IOB);

	return STATUS_SUCCESS;
}

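/*
 * Fill one RX producer BD: point it at the I/O buffer, advertise the
 * maximum buffer size, and stash the buffer index in the opaque field
 * so the RX completion handler can locate the buffer again.
 */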
static void set_rx_desc(u8 *buf, void *iob, u16 cons_id, u32 iob_idx)
{
	struct rx_prod_pkt_bd *desc;
	u16 off = cons_id * sizeof(struct rx_prod_pkt_bd);

	desc = (struct rx_prod_pkt_bd *)&buf[off];
	desc->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT;
	desc->len = MAX_ETHERNET_PACKET_BUFFER_SIZE;
	desc->opaque = iob_idx;
	desc->dma.addr = virt_to_bus(iob);
}

static int bnxt_alloc_rx_iob(struct bnxt *bp, u16 cons_id, u16 iob_idx)
{
	void *iob;

	iob = memalign(BNXT_DMA_ALIGNMENT, RX_STD_DMA_ALIGNED);
	if (!iob)
		return -ENOMEM;

	dbg_rx_iob(iob, iob_idx, cons_id);
	set_rx_desc((u8 *)bp->rx.bd_virt, iob, cons_id, (u32)iob_idx);
	bp->rx.iob[iob_idx] = iob;

	return 0;
}

void bnxt_mm_init(struct bnxt *bp, const char *func)
{
	memset(bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE);
	memset(bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE);
	memset(bp->cq.bd_virt, 0, CQ_RING_DMA_BUFFER_SIZE);
	memset(bp->tx.bd_virt, 0, TX_RING_DMA_BUFFER_SIZE);
	memset(bp->rx.bd_virt, 0, RX_RING_DMA_BUFFER_SIZE);

	bp->data_addr_mapping = virt_to_bus(bp->hwrm_addr_data);
	bp->req_addr_mapping = virt_to_bus(bp->hwrm_addr_req);
	bp->resp_addr_mapping = virt_to_bus(bp->hwrm_addr_resp);
	bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
	bp->link_status = STATUS_LINK_DOWN;
	bp->media_change = 1;
	bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE;
	bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
	bp->rx.buf_cnt = NUM_RX_BUFFERS;
	bp->rx.ring_cnt = MAX_RX_DESC_CNT;
	bp->tx.ring_cnt = MAX_TX_DESC_CNT;
	bp->cq.ring_cnt = MAX_CQ_DESC_CNT;
	bp->cq.completion_bit = 0x1;
	bp->link_set = LINK_SPEED_DRV_100G;
	dbg_mem(bp, func);
}

void bnxt_free_mem(struct bnxt *bp)
{
	if (bp->cq.bd_virt) {
		free(bp->cq.bd_virt);
		bp->cq.bd_virt = NULL;
	}

	if (bp->rx.bd_virt) {
		free(bp->rx.bd_virt);
		bp->rx.bd_virt = NULL;
	}

	if (bp->tx.bd_virt) {
		free(bp->tx.bd_virt);
		bp->tx.bd_virt = NULL;
	}

	if (bp->hwrm_addr_resp) {
		free(bp->hwrm_addr_resp);
		bp->resp_addr_mapping = 0;
		bp->hwrm_addr_resp = NULL;
	}

	if (bp->hwrm_addr_req) {
		free(bp->hwrm_addr_req);
		bp->req_addr_mapping = 0;
		bp->hwrm_addr_req = NULL;
	}

	if (bp->hwrm_addr_data) {
		free(bp->hwrm_addr_data);
		bp->data_addr_mapping = 0;
		bp->hwrm_addr_data = NULL;
	}

	dbg_mem_free_done(__func__);
}

int bnxt_alloc_mem(struct bnxt *bp)
{
	bp->hwrm_addr_data = memalign(BNXT_DMA_ALIGNMENT, DMA_BUF_SIZE_ALIGNED);
	bp->hwrm_addr_req = memalign(BNXT_DMA_ALIGNMENT, REQ_BUF_SIZE_ALIGNED);
	bp->hwrm_addr_resp = MEM_HWRM_RESP;

	memset(&bp->tx, 0, sizeof(struct lm_tx_info_t));
	memset(&bp->rx, 0, sizeof(struct lm_rx_info_t));
	memset(&bp->cq, 0, sizeof(struct lm_cmp_info_t));

	bp->tx.bd_virt = memalign(BNXT_DMA_ALIGNMENT, TX_RING_DMA_BUFFER_SIZE);
	bp->rx.bd_virt = memalign(BNXT_DMA_ALIGNMENT, RX_RING_DMA_BUFFER_SIZE);
	bp->cq.bd_virt = memalign(BNXT_DMA_ALIGNMENT, CQ_RING_DMA_BUFFER_SIZE);

	if (bp->hwrm_addr_req &&
	    bp->hwrm_addr_resp &&
	    bp->hwrm_addr_data &&
	    bp->tx.bd_virt &&
	    bp->rx.bd_virt &&
	    bp->cq.bd_virt) {
		bnxt_mm_init(bp, __func__);
		return STATUS_SUCCESS;
	}

	dbg_mem_alloc_fail(__func__);
	bnxt_free_mem(bp);

	return -ENOMEM;
}

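/*
 * Broadcom ethernet driver HWRM APIs.
 * Every HWRM request starts with this common header: the command type,
 * no completion/target ring, the DMA address where firmware must write
 * the response, and a monotonically increasing sequence id that
 * wait_resp() later matches against the response.
 */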
static void hwrm_init(struct bnxt *bp, struct input *req, u16 cmd, u16 len)
{
	memset(req, 0, len);
	req->req_type = cmd;
	req->cmpl_ring = (u16)HWRM_NA_SIGNATURE;
	req->target_id = (u16)HWRM_NA_SIGNATURE;
	req->resp_addr = bp->resp_addr_mapping;
	req->seq_id = bp->seq_id++;
}

static void hwrm_write_req(struct bnxt *bp, void *req, u32 cnt)
{
	u32 i = 0;

	for (i = 0; i < cnt; i++)
		writel(((u32 *)req)[i], bp->bar0 + GRC_COM_CHAN_BASE + (i * 4));

	writel(0x1, (bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG));
}

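/*
 * For requests longer than the firmware's advertised maximum, send a
 * hwrm_short_input descriptor instead; it carries only the command
 * type, signature, size and the DMA address of the full request buffer.
 */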
static void short_hwrm_cmd_req(struct bnxt *bp, u16 len)
{
	struct hwrm_short_input sreq;

	memset(&sreq, 0, sizeof(struct hwrm_short_input));
	sreq.req_type = (u16)((struct input *)bp->hwrm_addr_req)->req_type;
	sreq.signature = SHORT_REQ_SIGNATURE_SHORT_CMD;
	sreq.size = len;
	sreq.req_addr = bp->req_addr_mapping;
	dbg_short_cmd((u8 *)&sreq, __func__, sizeof(struct hwrm_short_input));
	hwrm_write_req(bp, &sreq, sizeof(struct hwrm_short_input) / 4);
}

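/*
 * Issue the request through the GRC communication channel, then poll
 * the response buffer. A response is complete once its sequence id and
 * command type match the request and firmware has written the trailing
 * valid byte (last byte of the response == 1).
 */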
static int wait_resp(struct bnxt *bp, u32 tmo, u16 len, const char *func)
{
	struct input *req = (struct input *)bp->hwrm_addr_req;
	struct output *resp = (struct output *)bp->hwrm_addr_resp;
	u8 *ptr = (u8 *)resp;
	u32 idx;
	u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER((u32)tmo);
	u16 resp_len = 0;
	u16 ret = STATUS_TIMEOUT;

	if (len > bp->hwrm_max_req_len)
		short_hwrm_cmd_req(bp, len);
	else
		hwrm_write_req(bp, req, (u32)(len / 4));

	for (idx = 0; idx < wait_cnt; idx++) {
		resp_len = resp->resp_len;
		if (resp->seq_id == req->seq_id && resp->req_type == req->req_type &&
		    ptr[resp_len - 1] == 1) {
			bp->last_resp_code = resp->error_code;
			ret = resp->error_code;
			break;
		}

		udelay(HWRM_CMD_POLL_WAIT_TIME);
	}

	dbg_hw_cmd(bp, func, len, resp_len, tmo, ret);

	return (int)ret;
}

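/*
 * Doorbell helpers: the doorbell region is mapped through BAR1, and the
 * key in the upper bits of the written value tells the chip which ring
 * type (CQ, RX or TX) the index update belongs to.
 */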
static void bnxt_db_cq(struct bnxt *bp)
{
	writel(CQ_DOORBELL_KEY_IDX(bp->cq.cons_idx), bp->bar1);
}

static void bnxt_db_rx(struct bnxt *bp, u32 idx)
{
	writel(RX_DOORBELL_KEY_RX | idx, bp->bar1);
}

static void bnxt_db_tx(struct bnxt *bp, u32 idx)
{
	writel((u32)(TX_DOORBELL_KEY_TX | idx), bp->bar1);
}

int iob_pad(void *packet, int length)
{
	if (length >= ETH_ZLEN)
		return length;

	memset(((u8 *)packet + length), 0x00, (ETH_ZLEN - length));

	return ETH_ZLEN;
}

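/*
 * Free TX slots = usable ring size minus in-flight BDs; ring_cnt is a
 * power of two, so the in-flight count reduces to a mask. Assuming the
 * usual ring convention that TX_AVAIL() reserves one descriptor to tell
 * full from empty: with ring_cnt = 64, prod_id = 10, cons_id = 5 this
 * yields 63 - 5 = 58 free descriptors.
 */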
static inline u32 bnxt_tx_avail(struct bnxt *bp)
{
	barrier();

	return TX_AVAIL(bp->tx.ring_cnt) -
			((bp->tx.prod_id - bp->tx.cons_id) &
			(bp->tx.ring_cnt - 1));
}

void set_txq(struct bnxt *bp, int entry, dma_addr_t mapping, int len)
{
	struct tx_bd_short *prod_bd;

	prod_bd = (struct tx_bd_short *)BD_NOW(bp->tx.bd_virt,
					       entry,
					       sizeof(struct tx_bd_short));
	if (len < 512)
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT512;
	else if (len < 1024)
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT1K;
	else if (len < 2048)
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_LT2K;
	else
		prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_GTE2K;

	prod_bd->flags_type |= TX_BD_FLAGS;
	prod_bd->dma.addr = mapping;
	prod_bd->len = len;
	prod_bd->opaque = (u32)entry;
	dump_tx_bd(prod_bd, (u16)(sizeof(struct tx_bd_short)));
}

static void bnxt_tx_complete(struct bnxt *bp)
{
	bp->tx.cons_id = NEXT_IDX(bp->tx.cons_id, bp->tx.ring_cnt);
	bp->tx.cnt++;
	dump_tx_stat(bp);
}

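/*
 * Replenish the RX ring up to buf_cnt buffers, then publish the new
 * consumer index to the chip with a single RX doorbell.
 */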
int post_rx_buffers(struct bnxt *bp)
{
	u16 cons_id = (bp->rx.cons_idx % bp->rx.ring_cnt);
	u16 iob_idx;

	while (bp->rx.iob_cnt < bp->rx.buf_cnt) {
		iob_idx = (cons_id % bp->rx.buf_cnt);
		if (!bp->rx.iob[iob_idx]) {
			if (bnxt_alloc_rx_iob(bp, cons_id, iob_idx) < 0) {
				dbg_rx_alloc_iob_fail(iob_idx, cons_id);
				break;
			}
		}

		cons_id = NEXT_IDX(cons_id, bp->rx.ring_cnt);
		bp->rx.iob_cnt++;
	}

	if (cons_id != bp->rx.cons_idx) {
		dbg_rx_cid(bp->rx.cons_idx, cons_id);
		bp->rx.cons_idx = cons_id;
		bnxt_db_rx(bp, (u32)cons_id);
	}

	FLAG_SET(bp->flag_hwrm, VALID_RX_IOB);

	return STATUS_SUCCESS;
}

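/*
 * Decide whether a received frame must be dropped: returns 1 for real
 * receive errors (checksum errors alone are tolerated) and 2 when the
 * source MAC at offset 6 is our own address, i.e. a looped-back
 * transmit.
 */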
u8 bnxt_rx_drop(struct bnxt *bp, u8 *rx_buf, struct rx_pkt_cmpl_hi *rx_cmp_hi)
{
	u8 chksum_err = 0;
	u8 i;
	u16 error_flags;

	error_flags = (rx_cmp_hi->errors_v2 >>
		       RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT);
	if (rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21)
		chksum_err = 1;

	if (error_flags && !chksum_err) {
		bp->rx.err++;
		return 1;
	}

	for (i = 0; i < 6; i++) {
		if (rx_buf[6 + i] != bp->mac_set[i])
			break;
	}

	if (i == 6) {
		bp->rx.dropped++;
		return 2; /* Drop the loopback packets */
	}

	return 0;
}

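/*
 * Completion-ring ownership is tracked by a valid bit whose polarity
 * the hardware inverts on every pass, so flip the expected value
 * whenever the consumer index wraps.
 */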
static void bnxt_adv_cq_index(struct bnxt *bp, u16 count)
{
	u16 cons_idx = bp->cq.cons_idx + count;

	if (cons_idx >= MAX_CQ_DESC_CNT) {
		/* Toggle completion bit when the ring wraps. */
		bp->cq.completion_bit ^= 1;
		cons_idx = cons_idx - MAX_CQ_DESC_CNT;
	}

	bp->cq.cons_idx = cons_idx;
}

void bnxt_adv_rx_index(struct bnxt *bp, u8 *iob, u32 iob_idx)
{
	u16 cons_id = (bp->rx.cons_idx % bp->rx.ring_cnt);

	set_rx_desc((u8 *)bp->rx.bd_virt, (void *)iob, cons_id, iob_idx);
	cons_id = NEXT_IDX(cons_id, bp->rx.ring_cnt);
	if (cons_id != bp->rx.cons_idx) {
		dbg_rx_cid(bp->rx.cons_idx, cons_id);
		bp->rx.cons_idx = cons_id;
		bnxt_db_rx(bp, (u32)cons_id);
	}
}

void rx_process(struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp,
		struct rx_pkt_cmpl_hi *rx_cmp_hi)
{
	u32 desc_idx = rx_cmp->opaque;
	u8 *iob = bp->rx.iob[desc_idx];

	dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx);
	bp->rx.iob_len = rx_cmp->len;
	bp->rx.iob_rx = iob;
	if (bnxt_rx_drop(bp, iob, rx_cmp_hi))
		bp->rx.iob_recv = PKT_DROPPED;
	else
		bp->rx.iob_recv = PKT_RECEIVED;

	bp->rx.rx_cnt++;

	dbg_rxp(bp->rx.iob_rx, bp->rx.iob_len, bp->rx.iob_recv);
	bnxt_adv_rx_index(bp, iob, desc_idx);
	bnxt_adv_cq_index(bp, 2); /* Rx completion is 2 entries. */
}

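/*
 * An RX completion spans two CQ entries; the high part normally follows
 * the low part, but wraps to the start of the ring (with inverted
 * valid-bit polarity) when the low part occupies the last slot.
 */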
static int bnxt_rx_complete(struct bnxt *bp, struct rx_pkt_cmpl *rx_cmp)
{
	struct rx_pkt_cmpl_hi *rx_cmp_hi;
	u8 completion_bit = bp->cq.completion_bit;

	if (bp->cq.cons_idx == (bp->cq.ring_cnt - 1)) {
		rx_cmp_hi = (struct rx_pkt_cmpl_hi *)bp->cq.bd_virt;
		completion_bit ^= 0x1; /* Ring has wrapped. */
	} else {
		rx_cmp_hi = (struct rx_pkt_cmpl_hi *)(rx_cmp + 1);
	}

	if (!((rx_cmp_hi->errors_v2 & RX_PKT_CMPL_V2) ^ completion_bit))
		rx_process(bp, rx_cmp, rx_cmp_hi);

	return NO_MORE_CQ_BD_TO_SERVICE;
}

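/*
 * HWRM_VER_GET is the first command of the bring-up sequence: it
 * negotiates the interface version and harvests firmware limits such as
 * the command timeout, the maximum request length and short-command
 * support.
 */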
static int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ver_get_input);
	struct hwrm_ver_get_input *req;
	struct hwrm_ver_get_output *resp;
	int rc;

	req = (struct hwrm_ver_get_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_ver_get_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_VER_GET, cmd_len);
	req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req->hwrm_intf_min = HWRM_VERSION_MINOR;
	req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
	rc = wait_resp(bp, HWRM_CMD_DEFAULT_TIMEOUT, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	bp->hwrm_spec_code =
		resp->hwrm_intf_maj_8b << 16 |
		resp->hwrm_intf_min_8b << 8 |
		resp->hwrm_intf_upd_8b;
	bp->hwrm_cmd_timeout = (u32)resp->def_req_timeout;
	if (!bp->hwrm_cmd_timeout)
		bp->hwrm_cmd_timeout = (u32)HWRM_CMD_DEFAULT_TIMEOUT;

	if (resp->hwrm_intf_maj_8b >= 1)
		bp->hwrm_max_req_len = resp->max_req_win_len;

	bp->chip_id =
		resp->chip_rev << 24 |
		resp->chip_metal << 16 |
		resp->chip_bond_id << 8 |
		resp->chip_platform_type;
	bp->chip_num = resp->chip_num;
	if ((resp->dev_caps_cfg & SHORT_CMD_SUPPORTED) &&
	    (resp->dev_caps_cfg & SHORT_CMD_REQUIRED))
		FLAG_SET(bp->flags, BNXT_FLAG_HWRM_SHORT_CMD_SUPP);

	bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
	bp->fw_maj = resp->hwrm_fw_maj_8b;
	bp->fw_min = resp->hwrm_fw_min_8b;
	bp->fw_bld = resp->hwrm_fw_bld_8b;
	bp->fw_rsvd = resp->hwrm_fw_rsvd_8b;
	print_fw_ver(resp, bp->hwrm_cmd_timeout);

	return STATUS_SUCCESS;
}

/* Broadcom ethernet driver Function HW cmds APIs. */
static int bnxt_hwrm_func_resource_qcaps(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_resource_qcaps_input);
	struct hwrm_func_resource_qcaps_input *req;
	struct hwrm_func_resource_qcaps_output *resp;
	int rc;

	req = (struct hwrm_func_resource_qcaps_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_func_resource_qcaps_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_RESOURCE_QCAPS, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	/* Not fatal if the command fails; the support flag simply stays clear. */
	if (rc != STATUS_SUCCESS)
		return STATUS_SUCCESS;

	FLAG_SET(bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT);
	/* VFs */
	bp->max_vfs = resp->max_vfs;
	bp->vf_res_strategy = resp->vf_reservation_strategy;
	/* vNICs */
	bp->min_vnics = resp->min_vnics;
	bp->max_vnics = resp->max_vnics;
	/* MSI-X */
	bp->max_msix = resp->max_msix;
	/* Ring Groups */
	bp->min_hw_ring_grps = resp->min_hw_ring_grps;
	bp->max_hw_ring_grps = resp->max_hw_ring_grps;
	/* TX Rings */
	bp->min_tx_rings = resp->min_tx_rings;
	bp->max_tx_rings = resp->max_tx_rings;
	/* RX Rings */
	bp->min_rx_rings = resp->min_rx_rings;
	bp->max_rx_rings = resp->max_rx_rings;
	/* Completion Rings */
	bp->min_cp_rings = resp->min_cmpl_rings;
	bp->max_cp_rings = resp->max_cmpl_rings;
	/* RSS Contexts */
	bp->min_rsscos_ctxs = resp->min_rsscos_ctx;
	bp->max_rsscos_ctxs = resp->max_rsscos_ctx;
	/* L2 Contexts */
	bp->min_l2_ctxs = resp->min_l2_ctxs;
	bp->max_l2_ctxs = resp->max_l2_ctxs;
	/* Statistic Contexts */
	bp->min_stat_ctxs = resp->min_stat_ctx;
	bp->max_stat_ctxs = resp->max_stat_ctx;
	dbg_func_resource_qcaps(bp);

	return STATUS_SUCCESS;
}

static u32 set_ring_info(struct bnxt *bp)
{
	u32 enables = 0;

	bp->num_cmpl_rings = DEFAULT_NUMBER_OF_CMPL_RINGS;
	bp->num_tx_rings = DEFAULT_NUMBER_OF_TX_RINGS;
	bp->num_rx_rings = DEFAULT_NUMBER_OF_RX_RINGS;
	bp->num_hw_ring_grps = DEFAULT_NUMBER_OF_RING_GRPS;
	bp->num_stat_ctxs = DEFAULT_NUMBER_OF_STAT_CTXS;
	if (bp->min_cp_rings <= DEFAULT_NUMBER_OF_CMPL_RINGS)
		bp->num_cmpl_rings = bp->min_cp_rings;

	if (bp->min_tx_rings <= DEFAULT_NUMBER_OF_TX_RINGS)
		bp->num_tx_rings = bp->min_tx_rings;

	if (bp->min_rx_rings <= DEFAULT_NUMBER_OF_RX_RINGS)
		bp->num_rx_rings = bp->min_rx_rings;

	if (bp->min_hw_ring_grps <= DEFAULT_NUMBER_OF_RING_GRPS)
		bp->num_hw_ring_grps = bp->min_hw_ring_grps;

	if (bp->min_stat_ctxs <= DEFAULT_NUMBER_OF_STAT_CTXS)
		bp->num_stat_ctxs = bp->min_stat_ctxs;

	print_num_rings(bp);
	enables = (FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
		   FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
		   FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
		   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
		   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

	return enables;
}

static void bnxt_hwrm_assign_resources(struct bnxt *bp)
{
	struct hwrm_func_cfg_input *req;
	u32 enables = 0;

	if (FLAG_TEST(bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT))
		enables = set_ring_info(bp);

	req = (struct hwrm_func_cfg_input *)bp->hwrm_addr_req;
	req->num_cmpl_rings = bp->num_cmpl_rings;
	req->num_tx_rings = bp->num_tx_rings;
	req->num_rx_rings = bp->num_rx_rings;
	req->num_stat_ctxs = bp->num_stat_ctxs;
	req->num_hw_ring_grps = bp->num_hw_ring_grps;
	req->enables = enables;
}

int bnxt_hwrm_nvm_flush(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_nvm_flush_input);
	struct hwrm_nvm_flush_input *req;
	int rc;

	req = (struct hwrm_nvm_flush_input *)bp->hwrm_addr_req;

	hwrm_init(bp, (void *)req, (u16)HWRM_NVM_FLUSH, cmd_len);

	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_func_qcaps_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_qcaps_input);
	struct hwrm_func_qcaps_input *req;
	struct hwrm_func_qcaps_output *resp;
	int rc;

	req = (struct hwrm_func_qcaps_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_func_qcaps_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_QCAPS, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	bp->fid = resp->fid;
	bp->port_idx = (u8)resp->port_id;

	/* Get MAC address for this PF */
	memcpy(&bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN);

	memcpy(&bp->mac_set[0], &bp->mac_addr[0], ETH_ALEN);

	print_func_qcaps(bp);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_func_qcfg_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_qcfg_input);
	struct hwrm_func_qcfg_input *req;
	struct hwrm_func_qcfg_output *resp;
	int rc;

	req = (struct hwrm_func_qcfg_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_func_qcfg_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_QCFG, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	if (resp->flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
		FLAG_SET(bp->flags, BNXT_FLAG_MULTI_HOST);

	if (resp->port_partition_type &
	    FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0)
		FLAG_SET(bp->flags, BNXT_FLAG_NPAR_MODE);

	bp->ordinal_value = (u8)resp->pci_id & 0x0F;
	bp->stat_ctx_id = resp->stat_ctx_id;
	memcpy(&bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN);
	print_func_qcfg(bp);
	dbg_flags(__func__, bp->flags);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_func_reset_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_reset_input);
	struct hwrm_func_reset_input *req;

	req = (struct hwrm_func_reset_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_RESET, cmd_len);
	req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_func_cfg_req(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_cfg_input);
	struct hwrm_func_cfg_input *req;

	req = (struct hwrm_func_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_CFG, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	bnxt_hwrm_assign_resources(bp);

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_drv_rgtr_input);
	struct hwrm_func_drv_rgtr_input *req;
	int rc;

	req = (struct hwrm_func_drv_rgtr_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_DRV_RGTR, cmd_len);
	/* Register with HWRM */
	req->enables = FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
		       FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD |
		       FUNC_DRV_RGTR_REQ_ENABLES_VER;
	req->async_event_fwd[0] |= 0x01;
	req->os_type = FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER;
	req->ver_maj = DRIVER_VERSION_MAJOR;
	req->ver_min = DRIVER_VERSION_MINOR;
	req->ver_upd = DRIVER_VERSION_UPDATE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_DRIVER_REG);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_func_drv_unrgtr_input);
	struct hwrm_func_drv_unrgtr_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_DRIVER_REG)))
		return STATUS_SUCCESS;

	req = (struct hwrm_func_drv_unrgtr_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_DRV_UNRGTR, cmd_len);
	req->flags = FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_DRIVER_REG);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_cfa_l2_filter_alloc(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_cfa_l2_filter_alloc_input);
	struct hwrm_cfa_l2_filter_alloc_input *req;
	struct hwrm_cfa_l2_filter_alloc_output *resp;
	int rc;
	u32 flags = CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX;
	u32 enables;

	req = (struct hwrm_cfa_l2_filter_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_cfa_l2_filter_alloc_output *)bp->hwrm_addr_resp;
	enables = CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
		  CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
		  CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK;

	hwrm_init(bp, (void *)req, (u16)HWRM_CFA_L2_FILTER_ALLOC, cmd_len);
	req->flags = flags;
	req->enables = enables;
	memcpy((char *)&req->l2_addr[0], (char *)&bp->mac_set[0], ETH_ALEN);
	memset((char *)&req->l2_addr_mask[0], 0xff, ETH_ALEN);
	memcpy((char *)&req->t_l2_addr[0], (char *)&bp->mac_set[0], ETH_ALEN);
	memset((char *)&req->t_l2_addr_mask[0], 0xff, ETH_ALEN);
	req->src_type = CFA_L2_FILTER_ALLOC_REQ_SRC_TYPE_NPORT;
	req->src_id = (u32)bp->port_idx;
	req->dst_id = bp->vnic_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_L2_FILTER);
	bp->l2_filter_id = resp->l2_filter_id;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_cfa_l2_filter_free(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_cfa_l2_filter_free_input);
	struct hwrm_cfa_l2_filter_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_L2_FILTER)))
		return STATUS_SUCCESS;

	req = (struct hwrm_cfa_l2_filter_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_CFA_L2_FILTER_FREE, cmd_len);
	req->l2_filter_id = bp->l2_filter_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_L2_FILTER);

	return STATUS_SUCCESS;
}

u32 bnxt_set_rx_mask(u32 rx_mask)
{
	u32 mask = 0;

	if (!rx_mask)
		return mask;
	mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
	if (rx_mask != RX_MASK_ACCEPT_NONE) {
		if (rx_mask & RX_MASK_ACCEPT_MULTICAST)
			mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;

		if (rx_mask & RX_MASK_ACCEPT_ALL_MULTICAST)
			mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;

		if (rx_mask & RX_MASK_PROMISCUOUS_MODE)
			mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
	}

	return mask;
}

static int bnxt_hwrm_set_rx_mask(struct bnxt *bp, u32 rx_mask)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_cfa_l2_set_rx_mask_input);
	struct hwrm_cfa_l2_set_rx_mask_input *req;
	u32 mask = bnxt_set_rx_mask(rx_mask);

	req = (struct hwrm_cfa_l2_set_rx_mask_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_CFA_L2_SET_RX_MASK, cmd_len);
	req->vnic_id = bp->vnic_id;
	req->mask = mask;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_port_mac_cfg(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_port_mac_cfg_input);
	struct hwrm_port_mac_cfg_input *req;

	req = (struct hwrm_port_mac_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_PORT_MAC_CFG, cmd_len);
	req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, u16 idx)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_port_phy_qcfg_input);
	struct hwrm_port_phy_qcfg_input *req;
	struct hwrm_port_phy_qcfg_output *resp;
	int rc;

	req = (struct hwrm_port_phy_qcfg_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_port_phy_qcfg_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_PORT_PHY_QCFG, cmd_len);
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	if (idx & SUPPORT_SPEEDS)
		bp->support_speeds = resp->support_speeds;

	if (idx & DETECT_MEDIA)
		bp->media_detect = resp->module_status;

	if (idx & PHY_SPEED)
		bp->current_link_speed = resp->link_speed;

	if (idx & PHY_STATUS) {
		if (resp->link == PORT_PHY_QCFG_RESP_LINK_LINK)
			bp->link_status = STATUS_LINK_ACTIVE;
		else
			bp->link_status = STATUS_LINK_DOWN;
	}

	return STATUS_SUCCESS;
}

u16 set_link_speed_mask(u16 link_cap)
{
	u16 speed_mask = 0;

	if (link_cap & SPEED_CAPABILITY_DRV_100M)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB;

	if (link_cap & SPEED_CAPABILITY_DRV_1G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_1GB;

	if (link_cap & SPEED_CAPABILITY_DRV_10G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10GB;

	if (link_cap & SPEED_CAPABILITY_DRV_25G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB;

	if (link_cap & SPEED_CAPABILITY_DRV_40G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB;

	if (link_cap & SPEED_CAPABILITY_DRV_50G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB;

	if (link_cap & SPEED_CAPABILITY_DRV_100G)
		speed_mask |= PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB;

	return speed_mask;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_port_phy_cfg_input);
	struct hwrm_port_phy_cfg_input *req;
	u32 flags;
	u32 enables = 0;
	u16 force_link_speed = 0;
	u16 auto_link_speed_mask = 0;
	u8 auto_mode = 0;
	u8 auto_pause = 0;
	u8 auto_duplex = 0;

	/*
	 * If multi_host or NPAR mode is enabled,
	 * do not issue hwrm_port_phy_cfg.
	 */
	if (FLAG_TEST(bp->flags, PORT_PHY_FLAGS)) {
		dbg_flags(__func__, bp->flags);
		return STATUS_SUCCESS;
	}

	req = (struct hwrm_port_phy_cfg_input *)bp->hwrm_addr_req;
	flags = PORT_PHY_CFG_REQ_FLAGS_FORCE |
		PORT_PHY_CFG_REQ_FLAGS_RESET_PHY;

	switch (GET_MEDIUM_SPEED(bp->medium)) {
	case MEDIUM_SPEED_1000MBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
		break;
	case MEDIUM_SPEED_10GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
		break;
	case MEDIUM_SPEED_25GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
		break;
	case MEDIUM_SPEED_40GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
		break;
	case MEDIUM_SPEED_50GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
		break;
	case MEDIUM_SPEED_100GBPS:
		force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
		break;
	default:
		/* Enable AUTONEG by default */
		auto_mode = PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
		flags &= ~PORT_PHY_CFG_REQ_FLAGS_FORCE;
		enables |= PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE |
			   PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK |
			   PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX |
			   PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE;
		auto_pause = PORT_PHY_CFG_REQ_AUTO_PAUSE_TX |
			     PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
		auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH;
		auto_link_speed_mask = bp->support_speeds;
		break;
	}

	hwrm_init(bp, (void *)req, (u16)HWRM_PORT_PHY_CFG, cmd_len);
	req->flags = flags;
	req->enables = enables;
	req->port_id = bp->port_idx;
	req->force_link_speed = force_link_speed;
	req->auto_mode = auto_mode;
	req->auto_duplex = auto_duplex;
	req->auto_pause = auto_pause;
	req->auto_link_speed_mask = auto_link_speed_mask;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_qphy_link(struct bnxt *bp)
{
	u16 flag = QCFG_PHY_ALL;

	/* Query Link Status */
	if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	if (bp->link_status != STATUS_LINK_ACTIVE) {
		/*
		 * Link is down; try to bring it up, but do not return
		 * failure if port_phy_cfg() fails.
		 */
		bnxt_hwrm_port_phy_cfg(bp);
		/* Refresh link speed values after bringing link up. */
		if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
			return STATUS_FAILURE;
	}

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_stat_ctx_alloc_input);
	struct hwrm_stat_ctx_alloc_input *req;
	struct hwrm_stat_ctx_alloc_output *resp;
	int rc;

	req = (struct hwrm_stat_ctx_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_stat_ctx_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_STAT_CTX_ALLOC, cmd_len);
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_STAT_CTX);
	bp->stat_ctx_id = (u16)resp->stat_ctx_id;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_stat_ctx_free_input);
	struct hwrm_stat_ctx_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_STAT_CTX)))
		return STATUS_SUCCESS;

	req = (struct hwrm_stat_ctx_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_STAT_CTX_FREE, cmd_len);
	req->stat_ctx_id = (u32)bp->stat_ctx_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_STAT_CTX);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_ring_free_grp(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_grp_free_input);
	struct hwrm_ring_grp_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_GRP)))
		return STATUS_SUCCESS;

	req = (struct hwrm_ring_grp_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_GRP_FREE, cmd_len);
	req->ring_group_id = (u32)bp->ring_grp_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_RING_GRP);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_ring_alloc_grp(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_grp_alloc_input);
	struct hwrm_ring_grp_alloc_input *req;
	struct hwrm_ring_grp_alloc_output *resp;
	int rc;

	req = (struct hwrm_ring_grp_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_ring_grp_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_GRP_ALLOC, cmd_len);
	req->cr = bp->cq_ring_id;
	req->rr = bp->rx_ring_id;
	req->ar = (u16)HWRM_NA_SIGNATURE;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_RING_GRP);
	bp->ring_grp_id = (u16)resp->ring_group_id;

	return STATUS_SUCCESS;
}

int bnxt_hwrm_ring_free(struct bnxt *bp, u16 ring_id, u8 ring_type)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_free_input);
	struct hwrm_ring_free_input *req;

	req = (struct hwrm_ring_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_FREE, cmd_len);
	req->ring_type = ring_type;
	req->ring_id = ring_id;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int bnxt_hwrm_ring_alloc(struct bnxt *bp,
				dma_addr_t ring_map,
				u16 length,
				u16 ring_id,
				u8 ring_type,
				u8 int_mode)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_ring_alloc_input);
	struct hwrm_ring_alloc_input *req;
	struct hwrm_ring_alloc_output *resp;
	int rc;

	req = (struct hwrm_ring_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_ring_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_RING_ALLOC, cmd_len);
	req->ring_type = ring_type;
	req->page_tbl_addr = ring_map;
	req->page_size = LM_PAGE_SIZE;
	req->length = (u32)length;
	req->cmpl_ring_id = ring_id;
	req->int_mode = int_mode;
	if (ring_type == RING_ALLOC_REQ_RING_TYPE_TX) {
		req->queue_id = TX_RING_QID;
	} else if (ring_type == RING_ALLOC_REQ_RING_TYPE_RX) {
		req->queue_id = RX_RING_QID;
		req->enables = RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID;
		req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE;
	}

	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	if (ring_type == RING_ALLOC_REQ_RING_TYPE_L2_CMPL) {
		FLAG_SET(bp->flag_hwrm, VALID_RING_CQ);
		bp->cq_ring_id = resp->ring_id;
	} else if (ring_type == RING_ALLOC_REQ_RING_TYPE_TX) {
		FLAG_SET(bp->flag_hwrm, VALID_RING_TX);
		bp->tx_ring_id = resp->ring_id;
	} else if (ring_type == RING_ALLOC_REQ_RING_TYPE_RX) {
		FLAG_SET(bp->flag_hwrm, VALID_RING_RX);
		bp->rx_ring_id = resp->ring_id;
	}

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_ring_alloc_cq(struct bnxt *bp)
{
	return bnxt_hwrm_ring_alloc(bp,
				    virt_to_bus(bp->cq.bd_virt),
				    bp->cq.ring_cnt,
				    0,
				    RING_ALLOC_REQ_RING_TYPE_L2_CMPL,
				    BNXT_CQ_INTR_MODE());
}

static int bnxt_hwrm_ring_alloc_tx(struct bnxt *bp)
{
	return bnxt_hwrm_ring_alloc(bp,
				    virt_to_bus(bp->tx.bd_virt),
				    bp->tx.ring_cnt, bp->cq_ring_id,
				    RING_ALLOC_REQ_RING_TYPE_TX,
				    BNXT_INTR_MODE());
}

static int bnxt_hwrm_ring_alloc_rx(struct bnxt *bp)
{
	return bnxt_hwrm_ring_alloc(bp,
				    virt_to_bus(bp->rx.bd_virt),
				    bp->rx.ring_cnt,
				    bp->cq_ring_id,
				    RING_ALLOC_REQ_RING_TYPE_RX,
				    BNXT_INTR_MODE());
}

static int bnxt_hwrm_ring_free_cq(struct bnxt *bp)
{
	int ret = STATUS_SUCCESS;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_CQ)))
		return ret;

	ret = RING_FREE(bp, bp->cq_ring_id, RING_FREE_REQ_RING_TYPE_L2_CMPL);
	if (ret == STATUS_SUCCESS)
		FLAG_RESET(bp->flag_hwrm, VALID_RING_CQ);

	return ret;
}

static int bnxt_hwrm_ring_free_tx(struct bnxt *bp)
{
	int ret = STATUS_SUCCESS;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_TX)))
		return ret;

	ret = RING_FREE(bp, bp->tx_ring_id, RING_FREE_REQ_RING_TYPE_TX);
	if (ret == STATUS_SUCCESS)
		FLAG_RESET(bp->flag_hwrm, VALID_RING_TX);

	return ret;
}

static int bnxt_hwrm_ring_free_rx(struct bnxt *bp)
{
	int ret = STATUS_SUCCESS;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_RING_RX)))
		return ret;

	ret = RING_FREE(bp, bp->rx_ring_id, RING_FREE_REQ_RING_TYPE_RX);
	if (ret == STATUS_SUCCESS)
		FLAG_RESET(bp->flag_hwrm, VALID_RING_RX);

	return ret;
}

static int bnxt_hwrm_vnic_alloc(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_vnic_alloc_input);
	struct hwrm_vnic_alloc_input *req;
	struct hwrm_vnic_alloc_output *resp;
	int rc;

	req = (struct hwrm_vnic_alloc_input *)bp->hwrm_addr_req;
	resp = (struct hwrm_vnic_alloc_output *)bp->hwrm_addr_resp;
	hwrm_init(bp, (void *)req, (u16)HWRM_VNIC_ALLOC, cmd_len);
	req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_SET(bp->flag_hwrm, VALID_VNIC_ID);
	bp->vnic_id = resp->vnic_id;

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_vnic_free_input);
	struct hwrm_vnic_free_input *req;
	int rc;

	if (!(FLAG_TEST(bp->flag_hwrm, VALID_VNIC_ID)))
		return STATUS_SUCCESS;

	req = (struct hwrm_vnic_free_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_VNIC_FREE, cmd_len);
	req->vnic_id = bp->vnic_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
	if (rc)
		return STATUS_FAILURE;

	FLAG_RESET(bp->flag_hwrm, VALID_VNIC_ID);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_vnic_cfg(struct bnxt *bp)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_vnic_cfg_input);
	struct hwrm_vnic_cfg_input *req;

	req = (struct hwrm_vnic_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_VNIC_CFG, cmd_len);
	req->enables = VNIC_CFG_REQ_ENABLES_MRU;
	req->mru = bp->mtu;
	req->enables |= VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP;
	req->dflt_ring_grp = bp->ring_grp_id;
	req->vnic_id = bp->vnic_id;

	return wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);
}

static int set_phy_speed(struct bnxt *bp)
{
	char name[20];
	u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA;

	/* Query Link Status */
	if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	switch (bp->current_link_speed) {
	case PORT_PHY_QCFG_RESP_LINK_SPEED_100GB:
		sprintf(name, "%s %s", str_100, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_50GB:
		sprintf(name, "%s %s", str_50, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_40GB:
		sprintf(name, "%s %s", str_40, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_25GB:
		sprintf(name, "%s %s", str_25, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_20GB:
		sprintf(name, "%s %s", str_20, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_10GB:
		sprintf(name, "%s %s", str_10, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_2_5GB:
		sprintf(name, "%s %s", str_2_5, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_2GB:
		sprintf(name, "%s %s", str_2, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_1GB:
		sprintf(name, "%s %s", str_1, str_gbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_100MB:
		sprintf(name, "%s %s", str_100, str_mbps);
		break;
	case PORT_PHY_QCFG_RESP_LINK_SPEED_10MB:
		sprintf(name, "%s %s", str_10, str_mbps);
		break;
	default:
		sprintf(name, "%s %x", str_unknown, bp->current_link_speed);
	}

	dbg_phy_speed(bp, name);

	return STATUS_SUCCESS;
}

static int set_phy_link(struct bnxt *bp, u32 tmo)
{
	int ret;

	set_phy_speed(bp);
	dbg_link_status(bp);
	ret = STATUS_FAILURE;
	if (bp->link_status == STATUS_LINK_ACTIVE) {
		dbg_link_state(bp, tmo);
		ret = STATUS_SUCCESS;
	}

	return ret;
}

static int get_phy_link(struct bnxt *bp)
{
	u16 flag = PHY_STATUS | PHY_SPEED | DETECT_MEDIA;

	dbg_chip_info(bp);
	/* Query Link Status */
	if (bnxt_hwrm_port_phy_qcfg(bp, flag) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	set_phy_link(bp, 100);

	return STATUS_SUCCESS;
}

static int bnxt_hwrm_set_async_event(struct bnxt *bp)
{
	int rc;
	u16 cmd_len = (u16)sizeof(struct hwrm_func_cfg_input);
	struct hwrm_func_cfg_input *req;

	req = (struct hwrm_func_cfg_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_FUNC_CFG, cmd_len);
	req->fid = (u16)HWRM_NA_SIGNATURE;
	req->enables = FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR;
	req->async_event_cr = bp->cq_ring_id;
	rc = wait_resp(bp, bp->hwrm_cmd_timeout, cmd_len, __func__);

	return rc;
}

int bnxt_hwrm_get_nvmem(struct bnxt *bp,
			u16 data_len,
			u16 option_num,
			u16 dimensions,
			u16 index_0)
{
	u16 cmd_len = (u16)sizeof(struct hwrm_nvm_get_variable_input);
	struct hwrm_nvm_get_variable_input *req;

	req = (struct hwrm_nvm_get_variable_input *)bp->hwrm_addr_req;
	hwrm_init(bp, (void *)req, (u16)HWRM_NVM_GET_VARIABLE, cmd_len);
	req->dest_data_addr = bp->data_addr_mapping;
	req->data_len = data_len;
	req->option_num = option_num;
	req->dimensions = dimensions;
	req->index_0 = index_0;

	return wait_resp(bp,
			 HWRM_CMD_FLASH_MULTIPLAYER(bp->hwrm_cmd_timeout),
			 cmd_len,
			 __func__);
}

static void set_medium(struct bnxt *bp)
{
	switch (bp->link_set & LINK_SPEED_DRV_MASK) {
	case LINK_SPEED_DRV_1G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_1000MBPS);
		break;
	case LINK_SPEED_DRV_2_5G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_2500MBPS);
		break;
	case LINK_SPEED_DRV_10G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_10GBPS);
		break;
	case LINK_SPEED_DRV_25G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_25GBPS);
		break;
	case LINK_SPEED_DRV_40G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_40GBPS);
		break;
	case LINK_SPEED_DRV_50G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_50GBPS);
		break;
	case LINK_SPEED_DRV_100G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_100GBPS);
		break;
	case LINK_SPEED_DRV_200G:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_200GBPS);
		break;
	case LINK_SPEED_DRV_AUTONEG:
		bp->medium = SET_MEDIUM_SPEED(bp, MEDIUM_SPEED_AUTONEG);
		break;
	default:
		bp->medium = SET_MEDIUM_DUPLEX(bp, MEDIUM_FULL_DUPLEX);
		break;
	}
}

static int bnxt_hwrm_get_link_speed(struct bnxt *bp)
{
	u32 *ptr32 = (u32 *)bp->hwrm_addr_data;

	if (bnxt_hwrm_get_nvmem(bp,
				4,
				(u16)LINK_SPEED_DRV_NUM,
				1,
				(u16)bp->port_idx) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	bp->link_set = *ptr32;
	bp->link_set &= SPEED_DRV_MASK;
	set_medium(bp);

	return STATUS_SUCCESS;
}

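/*
 * Ordered HWRM command tables consumed by bnxt_hwrm_run(); see the
 * bnxt_down_chip()/bnxt_bring_chip() macros at the top of this file.
 */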
typedef int (*hwrm_func_t)(struct bnxt *bp);

hwrm_func_t down_chip[] = {
	bnxt_hwrm_cfa_l2_filter_free,	/* Free l2 filter */
	bnxt_free_rx_iob,		/* Free rx iob */
	bnxt_hwrm_vnic_free,		/* Free vnic */
	bnxt_hwrm_ring_free_grp,	/* Free ring group */
	bnxt_hwrm_ring_free_rx,		/* Free rx ring */
	bnxt_hwrm_ring_free_tx,		/* Free tx ring */
	bnxt_hwrm_ring_free_cq,		/* Free CQ ring */
	bnxt_hwrm_stat_ctx_free,	/* Free Stat ctx */
	bnxt_hwrm_func_drv_unrgtr,	/* unreg driver */
	NULL,
};

hwrm_func_t bring_chip[] = {
	bnxt_hwrm_ver_get,		/* HWRM_VER_GET */
	bnxt_hwrm_func_reset_req,	/* HWRM_FUNC_RESET */
	bnxt_hwrm_func_drv_rgtr,	/* HWRM_FUNC_DRV_RGTR */
	bnxt_hwrm_func_resource_qcaps,	/* HWRM_FUNC_RESOURCE_QCAPS */
	bnxt_hwrm_func_qcfg_req,	/* HWRM_FUNC_QCFG */
	bnxt_hwrm_func_qcaps_req,	/* HWRM_FUNC_QCAPS */
	bnxt_hwrm_get_link_speed,	/* HWRM_NVM_GET_VARIABLE - 203 */
	bnxt_hwrm_port_mac_cfg,		/* HWRM_PORT_MAC_CFG */
	bnxt_qphy_link,			/* HWRM_PORT_PHY_QCFG */
	bnxt_hwrm_func_cfg_req,		/* HWRM_FUNC_CFG - ring resource */
	bnxt_hwrm_stat_ctx_alloc,	/* Allocate Stat Ctx ID */
	bnxt_hwrm_ring_alloc_cq,	/* Allocate CQ Ring */
	bnxt_hwrm_ring_alloc_tx,	/* Allocate Tx ring */
	bnxt_hwrm_ring_alloc_rx,	/* Allocate Rx Ring */
	bnxt_hwrm_ring_alloc_grp,	/* Create Ring Group */
	post_rx_buffers,		/* Post RX buffers */
	bnxt_hwrm_set_async_event,	/* ENABLES_ASYNC_EVENT_CR */
	bnxt_hwrm_vnic_alloc,		/* Alloc VNIC */
	bnxt_hwrm_vnic_cfg,		/* Config VNIC */
	bnxt_hwrm_cfa_l2_filter_alloc,	/* Alloc L2 Filter */
	get_phy_link,			/* Get Physical Link */
	NULL,
};

int bnxt_hwrm_run(hwrm_func_t cmds[], struct bnxt *bp, int flag)
{
	hwrm_func_t *ptr;
	int ret;
	int status = STATUS_SUCCESS;

	for (ptr = cmds; *ptr; ++ptr) {
		ret = (*ptr)(bp);
		if (ret) {
			status = STATUS_FAILURE;
			/*
			 * Abort on bring-up (flag set); on teardown,
			 * continue so all cleanup routines are called.
			 */
			if (flag)
				return STATUS_FAILURE;
		}
	}

	return status;
}

/* Broadcom ethernet driver Network interface APIs. */
static int bnxt_start(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);

	if (bnxt_hwrm_set_rx_mask(bp, RX_MASK) != STATUS_SUCCESS)
		return STATUS_FAILURE;

	bp->card_en = true;

	return STATUS_SUCCESS;
}

static int bnxt_send(struct udevice *dev, void *packet, int length)
{
	struct bnxt *bp = dev_get_priv(dev);
	int len;
	u16 entry;
	dma_addr_t mapping;

	if (bnxt_tx_avail(bp) < 1) {
		dbg_no_tx_bd();
		return -ENOBUFS;
	}

	entry = bp->tx.prod_id;
	len = iob_pad(packet, length);
	mapping = virt_to_bus(packet);
	set_txq(bp, entry, mapping, len);
	entry = NEXT_IDX(entry, bp->tx.ring_cnt);
	dump_tx_pkt(packet, mapping, len);
	bnxt_db_tx(bp, (u32)entry);
	bp->tx.prod_id = entry;
	bp->tx.cnt_req++;
	bnxt_tx_complete(bp);

	return 0;
}

static void bnxt_link_evt(struct bnxt *bp, struct cmpl_base *cmp)
{
	struct hwrm_async_event_cmpl *evt;

	evt = (struct hwrm_async_event_cmpl *)cmp;
	switch (evt->event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		if (evt->event_data1 & 0x01)
			bp->link_status = STATUS_LINK_ACTIVE;
		else
			bp->link_status = STATUS_LINK_DOWN;

		set_phy_link(bp, 0);
		break;
	default:
		break;
	}
}

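/*
 * Service the completion queue until the valid bit says nothing is
 * left: async events update the link state, TX and stat completions
 * just advance the consumer index, and an RX completion hands the
 * packet buffer up through *packetp.
 */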
static int bnxt_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct bnxt *bp = dev_get_priv(dev);
	struct cmpl_base *cmp;
	u16 old_cons_idx = bp->cq.cons_idx;
	int done = SERVICE_NEXT_CQ_BD;
	u32 cq_type;

	while (done == SERVICE_NEXT_CQ_BD) {
		cmp = (struct cmpl_base *)BD_NOW(bp->cq.bd_virt,
						 bp->cq.cons_idx,
						 sizeof(struct cmpl_base));
		if ((cmp->info3_v & CMPL_BASE_V) ^ bp->cq.completion_bit)
			break;

		cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
		dump_evt((u8 *)cmp, cq_type, bp->cq.cons_idx);
		dump_CQ(cmp, bp->cq.cons_idx);
		switch (cq_type) {
		case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
			bnxt_link_evt(bp, cmp);
			fallthrough;
		case CMPL_BASE_TYPE_TX_L2:
		case CMPL_BASE_TYPE_STAT_EJECT:
			bnxt_adv_cq_index(bp, 1);
			break;
		case CMPL_BASE_TYPE_RX_L2:
			done = bnxt_rx_complete(bp, (struct rx_pkt_cmpl *)cmp);
			break;
		default:
			done = NO_MORE_CQ_BD_TO_SERVICE;
			break;
		}
	}

	if (bp->cq.cons_idx != old_cons_idx)
		bnxt_db_cq(bp);

	if (bp->rx.iob_recv == PKT_RECEIVED) {
		*packetp = bp->rx.iob_rx;
		return bp->rx.iob_len;
	}

	return -EAGAIN;
}

static void bnxt_stop(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);

	if (bp->card_en) {
		bnxt_hwrm_set_rx_mask(bp, 0);
		bp->card_en = false;
	}
}

static int bnxt_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct bnxt *bp = dev_get_priv(dev);

	dbg_rx_pkt(bp, __func__, packet, length);
	bp->rx.iob_recv = PKT_DONE;
	bp->rx.iob_len = 0;
	bp->rx.iob_rx = NULL;

	return 0;
}

static int bnxt_read_rom_hwaddr(struct udevice *dev)
{
	struct eth_pdata *plat = dev_get_plat(dev);
	struct bnxt *bp = dev_get_priv(dev);

	memcpy(plat->enetaddr, bp->mac_set, ETH_ALEN);

	return 0;
}

static const struct eth_ops bnxt_eth_ops = {
	.start = bnxt_start,
	.send = bnxt_send,
	.recv = bnxt_recv,
	.stop = bnxt_stop,
	.free_pkt = bnxt_free_pkt,
	.read_rom_hwaddr = bnxt_read_rom_hwaddr,
};

static const struct udevice_id bnxt_eth_ids[] = {
	{ .compatible = "broadcom,nxe" },
	{ }
};

static int bnxt_eth_bind(struct udevice *dev)
{
	char name[20];

	sprintf(name, "bnxt_eth%u", dev_seq(dev));

	return device_set_name(dev, name);
}

static int bnxt_eth_probe(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);
	int ret;

	ret = bnxt_alloc_mem(bp);
	if (ret) {
		printf("*** error: bnxt_alloc_mem failed! ***\n");
		return ret;
	}

	bp->cardnum = dev_seq(dev);
	bp->name = dev->name;
	bp->pdev = (struct udevice *)dev;

	bnxt_bring_pci(bp);

	ret = bnxt_bring_chip(bp);
	if (ret) {
		printf("*** error: bnxt_bring_chip failed! ***\n");
		return -ENODATA;
	}

	return 0;
}

static int bnxt_eth_remove(struct udevice *dev)
{
	struct bnxt *bp = dev_get_priv(dev);

	bnxt_down_chip(bp);
	bnxt_free_mem(bp);

	return 0;
}

static struct pci_device_id bnxt_nics[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NXT_57320)},
	{}
};

U_BOOT_DRIVER(eth_bnxt) = {
	.name = "eth_bnxt",
	.id = UCLASS_ETH,
	.of_match = bnxt_eth_ids,
	.bind = bnxt_eth_bind,
	.probe = bnxt_eth_probe,
	.remove = bnxt_eth_remove,
	.ops = &bnxt_eth_ops,
	.priv_auto = sizeof(struct bnxt),
	.plat_auto = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ACTIVE_DMA,
};

U_BOOT_PCI_DEVICE(eth_bnxt, bnxt_nics);