// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Marvell International Ltd.
 *
 * Helper functions for common, but complicated tasks.
 */

#include <log.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-asxx-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-dbg-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-l2c-defs.h>
#include <mach/cvmx-npi-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pexp-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-smix-defs.h>
#include <mach/cvmx-sriox-defs.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-fdt.h>
#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-cfg.h>
#include <mach/cvmx-helper-ipd.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>
#include <mach/cvmx-helper-pko.h>
#include <mach/cvmx-helper-pko3.h>
#include <mach/cvmx-global-resources.h>
#include <mach/cvmx-pko-internal-ports-range.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-gmx.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-pip.h>

/**
 * @INTERNAL
 * This structure specifies the interface methods used by an interface.
 *
 * @param mode Interface mode.
 *
 * @param enumerate Method to get the number of interface ports.
 *
 * @param probe Method to probe an interface to get the number of
 *              connected ports.
 *
 * @param enable Method to enable an interface.
 *
 * @param link_get Method to get the state of an interface link.
 *
 * @param link_set Method to configure an interface link to the specified
 *                 state.
 *
 * @param loopback Method to configure a port in loopback.
 */
struct iface_ops {
	cvmx_helper_interface_mode_t mode;
	int (*enumerate)(int xiface);
	int (*probe)(int xiface);
	int (*enable)(int xiface);
	cvmx_helper_link_info_t (*link_get)(int ipd_port);
	int (*link_set)(int ipd_port, cvmx_helper_link_info_t link_info);
	int (*loopback)(int ipd_port, int en_in, int en_ex);
};
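
/*
 * Illustrative sketch (not part of the original driver): once a mode has
 * been cached in the iface_node_ops[][] table defined further below,
 * callers dispatch through the method table instead of switching on the
 * mode:
 *
 *	const struct iface_ops *ops = iface_node_ops[node][iface];
 *
 *	if (ops->link_get)
 *		link = ops->link_get(ipd_port);
 *
 * A NULL method simply means the operation is not supported by that
 * interface type.
 */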

/**
 * @INTERNAL
 * This structure is used by disabled interfaces.
 */
static const struct iface_ops iface_ops_dis = {
	.mode = CVMX_HELPER_INTERFACE_MODE_DISABLED,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as gmii.
 */
static const struct iface_ops iface_ops_gmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_GMII,
	.enumerate = __cvmx_helper_rgmii_probe,
	.probe = __cvmx_helper_rgmii_probe,
	.enable = __cvmx_helper_rgmii_enable,
	.link_get = __cvmx_helper_gmii_link_get,
	.link_set = __cvmx_helper_rgmii_link_set,
	.loopback = __cvmx_helper_rgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rgmii.
 */
static const struct iface_ops iface_ops_rgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RGMII,
	.enumerate = __cvmx_helper_rgmii_probe,
	.probe = __cvmx_helper_rgmii_probe,
	.enable = __cvmx_helper_rgmii_enable,
	.link_get = __cvmx_helper_rgmii_link_get,
	.link_set = __cvmx_helper_rgmii_link_set,
	.loopback = __cvmx_helper_rgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as sgmii that use the gmx mac.
 */
static const struct iface_ops iface_ops_sgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
	.enumerate = __cvmx_helper_sgmii_enumerate,
	.probe = __cvmx_helper_sgmii_probe,
	.enable = __cvmx_helper_sgmii_enable,
	.link_get = __cvmx_helper_sgmii_link_get,
	.link_set = __cvmx_helper_sgmii_link_set,
	.loopback = __cvmx_helper_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as sgmii that use the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_sgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_SGMII,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_sgmii_enable,
	.link_get = __cvmx_helper_bgx_sgmii_link_get,
	.link_set = __cvmx_helper_bgx_sgmii_link_set,
	.loopback = __cvmx_helper_bgx_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as qsgmii.
 */
static const struct iface_ops iface_ops_qsgmii = {
	.mode = CVMX_HELPER_INTERFACE_MODE_QSGMII,
	.enumerate = __cvmx_helper_sgmii_enumerate,
	.probe = __cvmx_helper_sgmii_probe,
	.enable = __cvmx_helper_sgmii_enable,
	.link_get = __cvmx_helper_sgmii_link_get,
	.link_set = __cvmx_helper_sgmii_link_set,
	.loopback = __cvmx_helper_sgmii_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xaui using the gmx mac.
 */
static const struct iface_ops iface_ops_xaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
	.enumerate = __cvmx_helper_xaui_enumerate,
	.probe = __cvmx_helper_xaui_probe,
	.enable = __cvmx_helper_xaui_enable,
	.link_get = __cvmx_helper_xaui_link_get,
	.link_set = __cvmx_helper_xaui_link_set,
	.loopback = __cvmx_helper_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xaui using the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_xaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rxaui.
 */
static const struct iface_ops iface_ops_rxaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
	.enumerate = __cvmx_helper_xaui_enumerate,
	.probe = __cvmx_helper_xaui_probe,
	.enable = __cvmx_helper_xaui_enable,
	.link_get = __cvmx_helper_xaui_link_get,
	.link_set = __cvmx_helper_xaui_link_set,
	.loopback = __cvmx_helper_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as rxaui using the bgx mac.
 */
static const struct iface_ops iface_ops_bgx_rxaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_RXAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xlaui.
 */
static const struct iface_ops iface_ops_bgx_xlaui = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XLAUI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as xfi.
 */
static const struct iface_ops iface_ops_bgx_xfi = {
	.mode = CVMX_HELPER_INTERFACE_MODE_XFI,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as 10G_KR.
 */
static const struct iface_ops iface_ops_bgx_10G_KR = {
	.mode = CVMX_HELPER_INTERFACE_MODE_10G_KR,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as 40G_KR4.
 */
static const struct iface_ops iface_ops_bgx_40G_KR4 = {
	.mode = CVMX_HELPER_INTERFACE_MODE_40G_KR4,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_xaui_enable,
	.link_get = __cvmx_helper_bgx_xaui_link_get,
	.link_set = __cvmx_helper_bgx_xaui_link_set,
	.loopback = __cvmx_helper_bgx_xaui_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as ilk.
 */
static const struct iface_ops iface_ops_ilk = {
	.mode = CVMX_HELPER_INTERFACE_MODE_ILK,
	.enumerate = __cvmx_helper_ilk_enumerate,
	.probe = __cvmx_helper_ilk_probe,
	.enable = __cvmx_helper_ilk_enable,
	.link_get = __cvmx_helper_ilk_link_get,
	.link_set = __cvmx_helper_ilk_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as npi.
 */
static const struct iface_ops iface_ops_npi = {
	.mode = CVMX_HELPER_INTERFACE_MODE_NPI,
	.enumerate = __cvmx_helper_npi_probe,
	.probe = __cvmx_helper_npi_probe,
	.enable = __cvmx_helper_npi_enable,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as agl.
 */
static const struct iface_ops iface_ops_agl = {
	.mode = CVMX_HELPER_INTERFACE_MODE_AGL,
	.enumerate = __cvmx_helper_agl_enumerate,
	.probe = __cvmx_helper_agl_probe,
	.enable = __cvmx_helper_agl_enable,
	.link_get = __cvmx_helper_agl_link_get,
	.link_set = __cvmx_helper_agl_link_set,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as mixed mode, some ports are sgmii and some are xfi.
 */
static const struct iface_ops iface_ops_bgx_mixed = {
	.mode = CVMX_HELPER_INTERFACE_MODE_MIXED,
	.enumerate = __cvmx_helper_bgx_enumerate,
	.probe = __cvmx_helper_bgx_probe,
	.enable = __cvmx_helper_bgx_mixed_enable,
	.link_get = __cvmx_helper_bgx_mixed_link_get,
	.link_set = __cvmx_helper_bgx_mixed_link_set,
	.loopback = __cvmx_helper_bgx_mixed_configure_loopback,
};

/**
 * @INTERNAL
 * This structure specifies the interface methods used by interfaces
 * configured as loop.
 */
static const struct iface_ops iface_ops_loop = {
	.mode = CVMX_HELPER_INTERFACE_MODE_LOOP,
	.enumerate = __cvmx_helper_loop_enumerate,
	.probe = __cvmx_helper_loop_probe,
};

const struct iface_ops *iface_node_ops[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];
#define iface_ops iface_node_ops[0]

struct cvmx_iface {
	int cvif_ipd_nports;
	int cvif_has_fcs; /* PKO fcs for this interface. */
	enum cvmx_pko_padding cvif_padding;
	cvmx_helper_link_info_t *cvif_ipd_port_link_info;
};

/*
 * This has to be static as U-Boot expects to probe an interface and
 * get the number of its ports.
 */
static struct cvmx_iface cvmx_interfaces[CVMX_MAX_NODES][CVMX_HELPER_MAX_IFACE];

int __cvmx_helper_get_num_ipd_ports(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	return piface->cvif_ipd_nports;
}

enum cvmx_pko_padding __cvmx_helper_get_pko_padding(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return CVMX_PKO_PADDING_NONE;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	return piface->cvif_padding;
}

int __cvmx_helper_init_interface(int xiface, int num_ipd_ports, int has_fcs,
				 enum cvmx_pko_padding pad)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;
	cvmx_helper_link_info_t *p;
	int i;
	int sz;
	u64 addr;
	char name[32];

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];
	piface->cvif_ipd_nports = num_ipd_ports;
	piface->cvif_padding = pad;

	piface->cvif_has_fcs = has_fcs;

	/*
	 * Allocate the per-ipd_port link_info structure
	 */
	sz = piface->cvif_ipd_nports * sizeof(cvmx_helper_link_info_t);
	snprintf(name, sizeof(name), "__int_%d_link_info", xi.interface);
	addr = CAST64(cvmx_bootmem_alloc_named_range_once(sz, 0, 0,
							  __alignof(cvmx_helper_link_info_t),
							  name, NULL));
	piface->cvif_ipd_port_link_info =
		(cvmx_helper_link_info_t *)__cvmx_phys_addr_to_ptr(addr, sz);
	if (!piface->cvif_ipd_port_link_info) {
		if (sz != 0)
			debug("iface %d failed to alloc link info\n", xi.interface);
		return -1;
	}

	/* Initialize them */
	p = piface->cvif_ipd_port_link_info;

	for (i = 0; i < piface->cvif_ipd_nports; i++) {
		(*p).u64 = 0;
		p++;
	}
	return 0;
}

int __cvmx_helper_set_link_info(int xiface, int index, cvmx_helper_link_info_t link_info)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return -1;

	piface = &cvmx_interfaces[xi.node][xi.interface];

	if (piface->cvif_ipd_port_link_info) {
		piface->cvif_ipd_port_link_info[index] = link_info;
		return 0;
	}

	return -1;
}

cvmx_helper_link_info_t __cvmx_helper_get_link_info(int xiface, int port)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_iface *piface;
	cvmx_helper_link_info_t err;

	err.u64 = 0;

	if (xi.interface >= cvmx_helper_get_number_of_interfaces())
		return err;
	piface = &cvmx_interfaces[xi.node][xi.interface];

	if (piface->cvif_ipd_port_link_info)
		return piface->cvif_ipd_port_link_info[port];

	return err;
}

/**
 * Returns whether FCS is enabled for the specified interface and port
 *
 * @param xiface - interface to check
 *
 * Return: zero if FCS is not used, otherwise FCS is used.
 */
int __cvmx_helper_get_has_fcs(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	return cvmx_interfaces[xi.node][xi.interface].cvif_has_fcs;
}

u64 cvmx_rgmii_backpressure_dis = 1;

typedef int (*cvmx_export_config_t)(void);
cvmx_export_config_t cvmx_export_app_config;

/*
 * Internal functions that are not exported in the .h file but must be
 * declared to make gcc happy.
 */
extern cvmx_helper_link_info_t __cvmx_helper_get_link_info(int interface, int port);

/**
 * cvmx_override_ipd_port_setup(int ipd_port) is a function
 * pointer. It is meant to allow customization of the IPD
 * port/port kind setup before packet input/output comes online.
 * It is called after cvmx-helper does the default IPD configuration,
 * but before IPD is enabled. Users should set this pointer to a
 * function before calling any cvmx-helper operations.
 */
void (*cvmx_override_ipd_port_setup)(int ipd_port) = NULL;

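/*
 * Illustrative sketch (an assumption, not code from this file): a board
 * support file could hook the IPD port setup like this, before invoking
 * any cvmx-helper operations:
 *
 *	static void board_ipd_fixup(int ipd_port)
 *	{
 *		// board-specific port/port-kind tweaks go here
 *	}
 *
 *	cvmx_override_ipd_port_setup = board_ipd_fixup;
 */
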
/**
 * Return the number of interfaces the chip has. Each interface
 * may have multiple ports. Most chips support two interfaces,
 * but the CNX0XX and CNX1XX are exceptions. These only support
 * one interface.
 *
 * Return: Number of interfaces on chip
 */
int cvmx_helper_get_number_of_interfaces(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return 9;
	else if (OCTEON_IS_MODEL(OCTEON_CN66XX))
		if (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0))
			return 7;
		else
			return 8;
	else if (OCTEON_IS_MODEL(OCTEON_CN63XX))
		return 6;
	else if (OCTEON_IS_MODEL(OCTEON_CN61XX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return 10;
	else if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 5;
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		return 5;
	else
		return 3;
}

int __cvmx_helper_early_ports_on_interface(int interface)
{
	int ports;

	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_helper_interface_enumerate(interface);

	ports = cvmx_helper_interface_enumerate(interface);
	ports = __cvmx_helper_board_interface_probe(interface, ports);

	return ports;
}

/**
 * Return the number of ports on an interface. Depending on the
 * chip and configuration, this can be 1-16. A value of 0
 * specifies that the interface doesn't exist or isn't usable.
 *
 * @param xiface Interface to get the port count for
 *
 * Return: Number of ports on interface. Can be zero.
 */
int cvmx_helper_ports_on_interface(int xiface)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return cvmx_helper_interface_enumerate(xiface);
	else
		return __cvmx_helper_get_num_ipd_ports(xiface);
}

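/*
 * Illustrative usage sketch (not from the original file): walking every
 * port in the system with the helpers above might look like this:
 *
 *	int node = cvmx_get_node_num();
 *	int num_if = cvmx_helper_get_number_of_interfaces();
 *
 *	for (int i = 0; i < num_if; i++) {
 *		int xiface = cvmx_helper_node_interface_to_xiface(node, i);
 *		int nports = cvmx_helper_ports_on_interface(xiface);
 *
 *		for (int p = 0; p < nports; p++)
 *			; // cvmx_helper_get_ipd_port(xiface, p) is the IPD port
 *	}
 */
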
/**
 * @INTERNAL
 * Return interface mode for CN70XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn70xx(int interface)
{
	/* SGMII/RXAUI/QSGMII */
	if (interface < 2) {
		enum cvmx_qlm_mode qlm_mode =
			cvmx_qlm_get_dlm_mode(0, interface);

		if (qlm_mode == CVMX_QLM_MODE_SGMII)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_mode == CVMX_QLM_MODE_QSGMII)
			iface_ops[interface] = &iface_ops_qsgmii;
		else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
			iface_ops[interface] = &iface_ops_rxaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (interface == 2) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 3) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else if (interface == 4) { /* RGMII (AGL) */
		cvmx_agl_prtx_ctl_t prtx_ctl;

		prtx_ctl.u64 = csr_rd(CVMX_AGL_PRTX_CTL(0));
		if (prtx_ctl.s.mode == 0)
			iface_ops[interface] = &iface_ops_agl;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CN78XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn78xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	/* SGMII/RXAUI/XAUI */
	if (xi.interface < 6) {
		int qlm = cvmx_qlm_lmac(xiface, 0);
		enum cvmx_qlm_mode qlm_mode;

		if (qlm == -1) {
			iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
			return iface_node_ops[xi.node][xi.interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, qlm);

		if (qlm_mode == CVMX_QLM_MODE_SGMII)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_sgmii;
		else if (qlm_mode == CVMX_QLM_MODE_XAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xaui;
		else if (qlm_mode == CVMX_QLM_MODE_XLAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xlaui;
		else if (qlm_mode == CVMX_QLM_MODE_XFI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_xfi;
		else if (qlm_mode == CVMX_QLM_MODE_RXAUI)
			iface_node_ops[xi.node][xi.interface] = &iface_ops_bgx_rxaui;
		else
			iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	} else if (xi.interface < 8) {
		enum cvmx_qlm_mode qlm_mode;
		int found = 0;
		int i;
		int intf, lane_mask;

		if (xi.interface == 6) {
			intf = 6;
			lane_mask = cvmx_ilk_lane_mask[xi.node][0];
		} else {
			intf = 7;
			lane_mask = cvmx_ilk_lane_mask[xi.node][1];
		}
		switch (lane_mask) {
		default:
		case 0x0:
			iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 4);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xff:
			found = 0;
			for (i = 4; i < 6; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 2)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xfff:
			found = 0;
			for (i = 4; i < 7; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 3)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xff00:
			found = 0;
			for (i = 6; i < 8; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 2)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf0:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 5);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf00:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 6);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xf000:
			qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, 7);
			if (qlm_mode == CVMX_QLM_MODE_ILK)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		case 0xfff0:
			found = 0;
			for (i = 5; i < 8; i++) {
				qlm_mode = cvmx_qlm_get_mode_cn78xx(xi.node, i);
				if (qlm_mode == CVMX_QLM_MODE_ILK)
					found++;
			}
			if (found == 3)
				iface_node_ops[xi.node][intf] = &iface_ops_ilk;
			else
				iface_node_ops[xi.node][intf] = &iface_ops_dis;
			break;
		}
	} else if (xi.interface == 8) { /* DPI */
		int qlm = 0;

		for (qlm = 0; qlm < 5; qlm++) {
			/* if GSERX_CFG[pcie] == 1, then enable npi */
			if (csr_rd_node(xi.node, CVMX_GSERX_CFG(qlm)) & 0x1) {
				iface_node_ops[xi.node][xi.interface] =
					&iface_ops_npi;
				return iface_node_ops[xi.node][xi.interface]->mode;
			}
		}
		iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	} else if (xi.interface == 9) { /* LOOP */
		iface_node_ops[xi.node][xi.interface] = &iface_ops_loop;
	} else {
		iface_node_ops[xi.node][xi.interface] = &iface_ops_dis;
	}

	return iface_node_ops[xi.node][xi.interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CN73XX.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn73xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	/* SGMII/XAUI/XLAUI/XFI */
	if (interface < 3) {
		int qlm = cvmx_qlm_lmac(xiface, 0);
		enum cvmx_qlm_mode qlm_mode;

		if (qlm == -1) {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode(qlm);

		switch (qlm_mode) {
		case CVMX_QLM_MODE_SGMII:
		case CVMX_QLM_MODE_SGMII_2X1:
		case CVMX_QLM_MODE_RGMII_SGMII:
		case CVMX_QLM_MODE_RGMII_SGMII_1X1:
			iface_ops[interface] = &iface_ops_bgx_sgmii;
			break;
		case CVMX_QLM_MODE_XAUI:
		case CVMX_QLM_MODE_RGMII_XAUI:
			iface_ops[interface] = &iface_ops_bgx_xaui;
			break;
		case CVMX_QLM_MODE_RXAUI:
		case CVMX_QLM_MODE_RXAUI_1X2:
		case CVMX_QLM_MODE_RGMII_RXAUI:
			iface_ops[interface] = &iface_ops_bgx_rxaui;
			break;
		case CVMX_QLM_MODE_XLAUI:
		case CVMX_QLM_MODE_RGMII_XLAUI:
			iface_ops[interface] = &iface_ops_bgx_xlaui;
			break;
		case CVMX_QLM_MODE_XFI:
		case CVMX_QLM_MODE_XFI_1X2:
		case CVMX_QLM_MODE_RGMII_XFI:
			iface_ops[interface] = &iface_ops_bgx_xfi;
			break;
		case CVMX_QLM_MODE_10G_KR:
		case CVMX_QLM_MODE_10G_KR_1X2:
		case CVMX_QLM_MODE_RGMII_10G_KR:
			iface_ops[interface] = &iface_ops_bgx_10G_KR;
			break;
		case CVMX_QLM_MODE_40G_KR4:
		case CVMX_QLM_MODE_RGMII_40G_KR4:
			iface_ops[interface] = &iface_ops_bgx_40G_KR4;
			break;
		case CVMX_QLM_MODE_MIXED:
			iface_ops[interface] = &iface_ops_bgx_mixed;
			break;
		default:
			iface_ops[interface] = &iface_ops_dis;
			break;
		}
	} else if (interface == 3) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 4) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CNF75XX.
 *
 * CNF75XX has a single BGX block, which is attached to two DLMs,
 * the first, GSER4 only supports SGMII mode, while the second,
 * GSER5 supports 1G/10G single lane modes, i.e. SGMII, XFI, 10G-KR.
 * Each half-BGX is thus designated as a separate interface with two ports each.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cnf75xx(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	/* BGX0: SGMII (DLM4/DLM5)/XFI(DLM5) */
	if (interface < 1) {
		enum cvmx_qlm_mode qlm_mode;
		int qlm = cvmx_qlm_lmac(xiface, 0);

		if (qlm == -1) {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}
		qlm_mode = cvmx_qlm_get_mode(qlm);

		switch (qlm_mode) {
		case CVMX_QLM_MODE_SGMII:
		case CVMX_QLM_MODE_SGMII_2X1:
			iface_ops[interface] = &iface_ops_bgx_sgmii;
			break;
		case CVMX_QLM_MODE_XFI_1X2:
			iface_ops[interface] = &iface_ops_bgx_xfi;
			break;
		case CVMX_QLM_MODE_10G_KR_1X2:
			iface_ops[interface] = &iface_ops_bgx_10G_KR;
			break;
		case CVMX_QLM_MODE_MIXED:
			iface_ops[interface] = &iface_ops_bgx_mixed;
			break;
		default:
			iface_ops[interface] = &iface_ops_dis;
			break;
		}
	} else if ((interface < 3) && OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		/* SRIO is disabled for now */
		printf("SRIO disabled for now!\n");
		iface_ops[interface] = &iface_ops_dis;
	} else if (interface == 3) { /* DPI */
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 4) { /* LOOP */
		iface_ops[interface] = &iface_ops_loop;
	} else {
		iface_ops[interface] = &iface_ops_dis;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for CN68xx.
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_cn68xx(int interface)
{
	union cvmx_mio_qlmx_cfg qlm_cfg;

	switch (interface) {
	case 0:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 7)
			iface_ops[interface] = &iface_ops_rxaui;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 1:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 7)
			iface_ops[interface] = &iface_ops_rxaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 2:
	case 3:
	case 4:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 5:
	case 6:
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(interface - 4));
		/* QLM is disabled when QLM SPD is 15. */
		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 1)
			iface_ops[interface] = &iface_ops_ilk;
		else
			iface_ops[interface] = &iface_ops_dis;
		break;

	case 7: {
		union cvmx_mio_qlmx_cfg qlm_cfg1;
		/* Check if PCIe0/PCIe1 is configured for PCIe */
		qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(3));
		qlm_cfg1.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
		/* QLM is disabled when QLM SPD is 15. */
		if ((qlm_cfg.s.qlm_spd != 15 && qlm_cfg.s.qlm_cfg == 0) ||
		    (qlm_cfg1.s.qlm_spd != 15 && qlm_cfg1.s.qlm_cfg == 0))
			iface_ops[interface] = &iface_ops_npi;
		else
			iface_ops[interface] = &iface_ops_dis;
	} break;

	case 8:
		iface_ops[interface] = &iface_ops_loop;
		break;

	default:
		iface_ops[interface] = &iface_ops_dis;
		break;
	}

	return iface_ops[interface]->mode;
}

/**
 * @INTERNAL
 * Return interface mode for an Octeon II
 */
static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
{
	union cvmx_gmxx_inf_mode mode;

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		return __cvmx_get_mode_cn68xx(interface);

	if (interface == 2) {
		iface_ops[interface] = &iface_ops_npi;
	} else if (interface == 3) {
		iface_ops[interface] = &iface_ops_loop;
	} else if ((OCTEON_IS_MODEL(OCTEON_CN63XX) &&
		    (interface == 4 || interface == 5)) ||
		   (OCTEON_IS_MODEL(OCTEON_CN66XX) && interface >= 4 &&
		    interface <= 7)) {
		/* Only present in CN63XX & CN66XX Octeon model */

		/* cn66xx pass1.0 has only 2 SRIO interfaces. */
		if ((interface == 5 || interface == 7) &&
		    OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_0)) {
			iface_ops[interface] = &iface_ops_dis;
		} else if (interface == 5 && OCTEON_IS_MODEL(OCTEON_CN66XX)) {
			/*
			 * Later passes of cn66xx support SRIO0 - x4/x2/x1,
			 * SRIO2 - x2/x1, SRIO3 - x1
			 */
			iface_ops[interface] = &iface_ops_dis;
		} else {
			/* SRIO is disabled for now */
			printf("SRIO disabled for now!\n");
			iface_ops[interface] = &iface_ops_dis;
		}
	} else if (OCTEON_IS_MODEL(OCTEON_CN66XX)) {
		union cvmx_mio_qlmx_cfg mio_qlm_cfg;

		/* QLM2 is SGMII0 and QLM1 is SGMII1 */
		if (interface == 0) {
			mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
		} else if (interface == 1) {
			mio_qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(1));
		} else {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}

		if (mio_qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (mio_qlm_cfg.s.qlm_cfg == 9)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (mio_qlm_cfg.s.qlm_cfg == 11)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (OCTEON_IS_MODEL(OCTEON_CN61XX)) {
		union cvmx_mio_qlmx_cfg qlm_cfg;

		if (interface == 0) {
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(2));
		} else if (interface == 1) {
			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
		} else {
			iface_ops[interface] = &iface_ops_dis;
			return iface_ops[interface]->mode;
		}

		if (qlm_cfg.s.qlm_spd == 15)
			iface_ops[interface] = &iface_ops_dis;
		else if (qlm_cfg.s.qlm_cfg == 2)
			iface_ops[interface] = &iface_ops_sgmii;
		else if (qlm_cfg.s.qlm_cfg == 3)
			iface_ops[interface] = &iface_ops_xaui;
		else
			iface_ops[interface] = &iface_ops_dis;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF71XX)) {
		if (interface == 0) {
			union cvmx_mio_qlmx_cfg qlm_cfg;

			qlm_cfg.u64 = csr_rd(CVMX_MIO_QLMX_CFG(0));
			if (qlm_cfg.s.qlm_cfg == 2)
				iface_ops[interface] = &iface_ops_sgmii;
			else
				iface_ops[interface] = &iface_ops_dis;
		} else {
			iface_ops[interface] = &iface_ops_dis;
		}
	} else if (interface == 1 && OCTEON_IS_MODEL(OCTEON_CN63XX)) {
		iface_ops[interface] = &iface_ops_dis;
	} else {
		mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(interface));

		if (OCTEON_IS_MODEL(OCTEON_CN63XX)) {
			switch (mode.cn63xx.mode) {
			case 0:
				iface_ops[interface] = &iface_ops_sgmii;
				break;

			case 1:
				iface_ops[interface] = &iface_ops_xaui;
				break;

			default:
				iface_ops[interface] = &iface_ops_dis;
				break;
			}
		} else {
			if (!mode.s.en)
				iface_ops[interface] = &iface_ops_dis;
			else if (mode.s.type)
				iface_ops[interface] = &iface_ops_gmii;
			else
				iface_ops[interface] = &iface_ops_rgmii;
		}
	}

	return iface_ops[interface]->mode;
}

/**
 * Get the operating mode of an interface. Depending on the Octeon
 * chip and configuration, this function returns an enumeration
 * of the type of packet I/O supported by an interface.
 *
 * @param xiface Interface to probe
 *
 * Return: Mode of the interface. Unknown or unsupported interfaces return
 *         DISABLED.
 */
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (xi.interface < 0 ||
	    xi.interface >= cvmx_helper_get_number_of_interfaces())
		return CVMX_HELPER_INTERFACE_MODE_DISABLED;

	/*
	 * Check if the interface mode has been already cached. If it has,
	 * simply return it. Otherwise, fall through the rest of the code to
	 * determine the interface mode and cache it in iface_ops.
	 */
	if (iface_node_ops[xi.node][xi.interface]) {
		cvmx_helper_interface_mode_t mode;

		mode = iface_node_ops[xi.node][xi.interface]->mode;
		return mode;
	}

	/*
	 * OCTEON III models
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return __cvmx_get_mode_cn70xx(xi.interface);

	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		return __cvmx_get_mode_cn78xx(xiface);

	if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		cvmx_helper_interface_mode_t mode;

		mode = __cvmx_get_mode_cnf75xx(xiface);
		return mode;
	}

	if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		cvmx_helper_interface_mode_t mode;

		mode = __cvmx_get_mode_cn73xx(xiface);
		return mode;
	}

	/*
	 * Octeon II models
	 */
	if (OCTEON_IS_OCTEON2())
		return __cvmx_get_mode_octeon2(xi.interface);

	/*
	 * Octeon and Octeon Plus models
	 */
	if (xi.interface == 2) {
		iface_ops[xi.interface] = &iface_ops_npi;
	} else if (xi.interface == 3) {
		iface_ops[xi.interface] = &iface_ops_dis;
	} else {
		union cvmx_gmxx_inf_mode mode;

		mode.u64 = csr_rd(CVMX_GMXX_INF_MODE(xi.interface));

		if (!mode.s.en)
			iface_ops[xi.interface] = &iface_ops_dis;
		else if (mode.s.type)
			iface_ops[xi.interface] = &iface_ops_gmii;
		else
			iface_ops[xi.interface] = &iface_ops_rgmii;
	}

	return iface_ops[xi.interface]->mode;
}

/**
 * Determine the actual number of hardware ports connected to an
 * interface. It doesn't set up the ports or enable them.
 *
 * @param xiface Interface to enumerate
 *
 * Return: The number of ports on the interface, negative on failure
 */
int cvmx_helper_interface_enumerate(int xiface)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int result = 0;

	cvmx_helper_interface_get_mode(xiface);
	if (iface_node_ops[xi.node][xi.interface]->enumerate)
		result = iface_node_ops[xi.node][xi.interface]->enumerate(xiface);

	return result;
}

/**
 * This function probes an interface to determine the actual number of
 * hardware ports connected to it. It does some setup of the ports but
 * doesn't enable them. The main goal here is to set the global
 * interface_port_count[interface] correctly. Final hardware setup of
 * the ports will be performed later.
 *
 * @param xiface Interface to probe
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_helper_interface_probe(int xiface)
{
	/*
	 * At this stage in the game we don't want packets to be
	 * moving yet. The following probe calls should perform
	 * hardware setup needed to determine port counts. Receive
	 * must still be disabled.
	 */
	int nports;
	int has_fcs;
	enum cvmx_pko_padding padding = CVMX_PKO_PADDING_NONE;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	nports = -1;
	has_fcs = 0;

	cvmx_helper_interface_get_mode(xiface);
	if (iface_node_ops[xi.node][xi.interface]->probe)
		nports = iface_node_ops[xi.node][xi.interface]->probe(xiface);

	switch (iface_node_ops[xi.node][xi.interface]->mode) {
	/* These types don't support ports to IPD/PKO */
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		nports = 0;
		break;
	/* XAUI is a single high speed port */
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
	case CVMX_HELPER_INTERFACE_MODE_XFI:
	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
	case CVMX_HELPER_INTERFACE_MODE_MIXED:
		has_fcs = 1;
		padding = CVMX_PKO_PADDING_60;
		break;
	/*
	 * RGMII/GMII/MII are all treated about the same. Most
	 * functions refer to these ports as RGMII.
	 */
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		padding = CVMX_PKO_PADDING_60;
		break;
	/*
	 * SPI4 can have 1-16 ports depending on the device at
	 * the other end.
	 */
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		padding = CVMX_PKO_PADDING_60;
		break;
	/*
	 * SGMII can have 1-4 ports depending on how many are
	 * hooked up.
	 */
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		padding = CVMX_PKO_PADDING_60;
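		/* fall through - SGMII/QSGMII also use FCS */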
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		has_fcs = 1;
		break;
	/* PCI target Network Packet Interface */
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		break;
	/*
	 * Special loopback only ports. These are not the same
	 * as other ports in loopback mode.
	 */
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		break;
	/* SRIO has 2^N ports, where N is number of interfaces */
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		break;
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		padding = CVMX_PKO_PADDING_60;
		has_fcs = 1;
		break;
	case CVMX_HELPER_INTERFACE_MODE_AGL:
		has_fcs = 1;
		break;
	}

	if (nports == -1)
		return -1;

	if (!octeon_has_feature(OCTEON_FEATURE_PKND))
		has_fcs = 0;

	nports = __cvmx_helper_board_interface_probe(xiface, nports);
	__cvmx_helper_init_interface(xiface, nports, has_fcs, padding);
	/* Make sure all global variables propagate to other cores */
	CVMX_SYNCWS;

	return 0;
}

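/*
 * Illustrative sketch (an assumption, not from this file): board init code
 * normally probes an interface before using any of its port helpers:
 *
 *	if (cvmx_helper_interface_probe(xiface) == 0) {
 *		int nports = cvmx_helper_ports_on_interface(xiface);
 *
 *		// nports now reflects what the board probe reported
 *	}
 */
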
/**
 * @INTERNAL
 * Setup backpressure.
 *
 * Return: Zero on success, negative on failure
 */
static int __cvmx_helper_global_setup_backpressure(int node)
{
	cvmx_qos_proto_t qos_proto;
	cvmx_qos_pkt_mode_t qos_mode;
	int port, xipdport;
	unsigned int bpmask;
	int interface, xiface, ports;
	int num_interfaces = cvmx_helper_get_number_of_interfaces();

	if (cvmx_rgmii_backpressure_dis) {
		qos_proto = CVMX_QOS_PROTO_NONE;
		qos_mode = CVMX_QOS_PKT_MODE_DROP;
	} else {
		qos_proto = CVMX_QOS_PROTO_PAUSE;
		qos_mode = CVMX_QOS_PKT_MODE_HWONLY;
	}

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		ports = cvmx_helper_ports_on_interface(xiface);

		switch (cvmx_helper_interface_get_mode(xiface)) {
		case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		case CVMX_HELPER_INTERFACE_MODE_PCIE:
		case CVMX_HELPER_INTERFACE_MODE_SRIO:
		case CVMX_HELPER_INTERFACE_MODE_ILK:
		case CVMX_HELPER_INTERFACE_MODE_NPI:
		case CVMX_HELPER_INTERFACE_MODE_PICMG:
			break;
		case CVMX_HELPER_INTERFACE_MODE_LOOP:
		case CVMX_HELPER_INTERFACE_MODE_XAUI:
		case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		case CVMX_HELPER_INTERFACE_MODE_XFI:
		case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
			if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
				for (port = 0; port < ports; port++) {
					xipdport = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
				}
				cvmx_bgx_set_backpressure_override(xiface, bpmask);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_RGMII:
		case CVMX_HELPER_INTERFACE_MODE_GMII:
		case CVMX_HELPER_INTERFACE_MODE_SPI:
		case CVMX_HELPER_INTERFACE_MODE_SGMII:
		case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		case CVMX_HELPER_INTERFACE_MODE_MIXED:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0xF : 0;
			if (octeon_has_feature(OCTEON_FEATURE_BGX)) {
				for (port = 0; port < ports; port++) {
					xipdport = cvmx_helper_get_ipd_port(xiface, port);
					cvmx_bgx_set_flowctl_mode(xipdport, qos_proto, qos_mode);
				}
				cvmx_bgx_set_backpressure_override(xiface, bpmask);
			} else {
				cvmx_gmx_set_backpressure_override(interface, bpmask);
			}
			break;
		case CVMX_HELPER_INTERFACE_MODE_AGL:
			bpmask = (cvmx_rgmii_backpressure_dis) ? 0x1 : 0;
			cvmx_agl_set_backpressure_override(interface, bpmask);
			break;
		}
	}
	return 0;
}

/**
 * @INTERNAL
 * Enable packet input/output from the hardware. This function is
 * called after all internal setup is complete and IPD is enabled.
 * After this function completes, packets will be accepted from the
 * hardware ports. PKO should still be disabled to make sure packets
 * aren't sent out of partially set up hardware.
 *
 * @param xiface Interface to enable
 *
 * Return: Zero on success, negative on failure
 */
int __cvmx_helper_packet_hardware_enable(int xiface)
{
	int result = 0;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (iface_node_ops[xi.node][xi.interface]->enable)
		result = iface_node_ops[xi.node][xi.interface]->enable(xiface);

	return result;
}

int cvmx_helper_ipd_and_packet_input_enable(void)
{
	return cvmx_helper_ipd_and_packet_input_enable_node(cvmx_get_node_num());
}

/**
 * Called after all internal packet IO paths are setup. This
 * function enables IPD/PIP and begins packet input and output.
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_helper_ipd_and_packet_input_enable_node(int node)
{
	int num_interfaces;
	int interface;
	int num_ports;

	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		cvmx_helper_pki_enable(node);
	} else {
		/* Enable IPD */
		cvmx_ipd_enable();
	}

	/*
	 * Time to enable hardware ports packet input and output. Note
	 * that at this point IPD/PIP must be fully functional and PKO
	 * must be disabled.
	 */
	num_interfaces = cvmx_helper_get_number_of_interfaces();
	for (interface = 0; interface < num_interfaces; interface++) {
		int xiface = cvmx_helper_node_interface_to_xiface(node, interface);

		num_ports = cvmx_helper_ports_on_interface(xiface);
		if (num_ports > 0)
			__cvmx_helper_packet_hardware_enable(xiface);
	}

	/* Finally enable PKO now that the entire path is up and running */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		; /* cvmx_pko_enable_78xx(0); already enabled */
	else
		cvmx_pko_enable();

	return 0;
}

/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * Return: Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_node(unsigned int node)
{
	int result = 0;
	int interface;
	int xiface;
	union cvmx_l2c_cfg l2c_cfg;
	union cvmx_smix_en smix_en;
	const int num_interfaces = cvmx_helper_get_number_of_interfaces();

	/*
	 * Tell L2 to give the IOB statically higher priority compared
	 * to the cores. This avoids conditions where IO blocks might
	 * be starved under very high L2 loads.
	 */
	if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
		union cvmx_l2c_ctl l2c_ctl;

		l2c_ctl.u64 = csr_rd_node(node, CVMX_L2C_CTL);
		l2c_ctl.s.rsp_arb_mode = 1;
		l2c_ctl.s.xmc_arb_mode = 0;
		csr_wr_node(node, CVMX_L2C_CTL, l2c_ctl.u64);
	} else {
		l2c_cfg.u64 = csr_rd(CVMX_L2C_CFG);
		l2c_cfg.s.lrf_arb_mode = 0;
		l2c_cfg.s.rfb_arb_mode = 0;
		csr_wr(CVMX_L2C_CFG, l2c_cfg.u64);
	}

	int smi_inf;
	int i;

	/* Newer chips have more than one SMI/MDIO interface */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX) || OCTEON_IS_MODEL(OCTEON_CN78XX))
		smi_inf = 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))
		smi_inf = 2;
	else
		smi_inf = 2;

	for (i = 0; i < smi_inf; i++) {
		/* Make sure SMI/MDIO is enabled so we can query PHYs */
		smix_en.u64 = csr_rd_node(node, CVMX_SMIX_EN(i));
		if (!smix_en.s.en) {
			smix_en.s.en = 1;
			csr_wr_node(node, CVMX_SMIX_EN(i), smix_en.u64);
		}
	}

	/* vinita_to_do: check whether this needs to be modified for multi-node */
	__cvmx_helper_init_port_valid();

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		result |= cvmx_helper_interface_probe(xiface);
	}

	/* PKO3 init precedes that of interfaces */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		__cvmx_helper_init_port_config_data(node);
		result = cvmx_helper_pko3_init_global(node);
	} else {
		result = cvmx_helper_pko_init();
	}

	/* Errata SSO-29000, disabling power saving SSO conditional clocking */
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_sso_ws_cfg_t cfg;

		cfg.u64 = csr_rd_node(node, CVMX_SSO_WS_CFG);
		cfg.s.sso_cclk_dis = 1;
		csr_wr_node(node, CVMX_SSO_WS_CFG, cfg.u64);
	}

	if (result < 0)
		return result;

	for (interface = 0; interface < num_interfaces; interface++) {
		xiface = cvmx_helper_node_interface_to_xiface(node, interface);
		/* Skip invalid/disabled interfaces */
		if (cvmx_helper_ports_on_interface(xiface) <= 0)
			continue;
		debug("Node %d Interface %d has %d ports (%s)\n",
		      node, interface,
		      cvmx_helper_ports_on_interface(xiface),
		      cvmx_helper_interface_mode_to_string(
			      cvmx_helper_interface_get_mode(xiface)));

		result |= __cvmx_helper_ipd_setup_interface(xiface);
		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
			result |= cvmx_helper_pko3_init_interface(xiface);
		else
			result |= __cvmx_helper_interface_setup_pko(interface);
	}

	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		result |= __cvmx_helper_pki_global_setup(node);
	else
		result |= __cvmx_helper_ipd_global_setup();

	/* Enable any flow control and backpressure */
	result |= __cvmx_helper_global_setup_backpressure(node);

	/* Export the app config if set */
	if (cvmx_export_app_config)
		result |= (*cvmx_export_app_config)();

	if (cvmx_ipd_cfg.ipd_enable && cvmx_pki_dflt_init[node])
		result |= cvmx_helper_ipd_and_packet_input_enable_node(node);
	return result;
}

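/*
 * Illustrative bring-up order (a sketch based on the calls in this file,
 * not a verbatim excerpt): an application would typically run
 *
 *	cvmx_helper_initialize_packet_io_global();	// once per node
 *	cvmx_helper_initialize_packet_io_local();	// on each core
 *
 * and afterwards poll link state with cvmx_helper_link_autoconf().
 */
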
/**
 * Initialize the PIP, IPD, and PKO hardware to support
 * simple priority based queues for the ethernet ports. Each
 * port is configured with a number of priority queues based
 * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
 * priority than the previous.
 *
 * Return: Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_global(void)
{
	unsigned int node = cvmx_get_node_num();

	return cvmx_helper_initialize_packet_io_node(node);
}

/**
 * Does core local initialization for packet io
 *
 * Return: Zero on success, non-zero on failure
 */
int cvmx_helper_initialize_packet_io_local(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE))
		__cvmx_pko3_dq_table_setup();

	return 0;
}

struct cvmx_buffer_list {
	struct cvmx_buffer_list *next;
};

/**
 * Disables the sending of flow control (pause) frames on the specified
 * GMX port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4 bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * Return: 0 on success
 *         -1 on error
 */
int cvmx_gmx_set_backpressure_override(u32 interface, uint32_t port_mask)
{
	union cvmx_gmxx_tx_ovr_bp gmxx_tx_ovr_bp;

	/* Check for valid arguments */
	if (port_mask & ~0xf || interface & ~0x1)
		return -1;
	if (interface >= CVMX_HELPER_MAX_GMX)
		return -1;

	gmxx_tx_ovr_bp.u64 = 0;
	gmxx_tx_ovr_bp.s.en = port_mask;	/* Per port enable backpressure override */
	gmxx_tx_ovr_bp.s.ign_full = port_mask;	/* Ignore the RX FIFO full when computing BP */
	csr_wr(CVMX_GMXX_TX_OVR_BP(interface), gmxx_tx_ovr_bp.u64);
	return 0;
}

/**
 * Disables the sending of flow control (pause) frames on the specified
 * AGL (RGMII) port(s).
 *
 * @param interface Which interface (0 or 1)
 * @param port_mask Mask (4 bits) of which ports on the interface to disable
 *                  backpressure on.
 *                  1 => disable backpressure
 *                  0 => enable backpressure
 *
 * Return: 0 on success
 *         -1 on error
 */
int cvmx_agl_set_backpressure_override(u32 interface, uint32_t port_mask)
{
	union cvmx_agl_gmx_tx_ovr_bp agl_gmx_tx_ovr_bp;
	int port = cvmx_helper_agl_get_port(interface);

	if (port == -1)
		return -1;
	/* Check for valid arguments */
	agl_gmx_tx_ovr_bp.u64 = 0;
	/* Per port enable backpressure override */
	agl_gmx_tx_ovr_bp.s.en = port_mask;
	/* Ignore the RX FIFO full when computing BP */
	agl_gmx_tx_ovr_bp.s.ign_full = port_mask;
	csr_wr(CVMX_GMXX_TX_OVR_BP(port), agl_gmx_tx_ovr_bp.u64);
	return 0;
}

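/*
 * Illustrative sketch (an assumption, not from this file): disabling
 * backpressure on all four ports of GMX interface 0 would be:
 *
 *	cvmx_gmx_set_backpressure_override(0, 0xf);
 *
 * Passing a mask of 0 re-enables backpressure on all ports.
 */
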
/**
 * Auto configure an IPD/PKO port link state and speed. This
 * function basically does the equivalent of:
 * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
 *
 * @param xipd_port IPD/PKO port to auto configure
 *
 * Return: Link state after configure
 */
cvmx_helper_link_info_t cvmx_helper_link_autoconf(int xipd_port)
{
	cvmx_helper_link_info_t link_info;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	int index = cvmx_helper_get_interface_index_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int interface = xi.interface;

	if (interface == -1 || index == -1 || index >= cvmx_helper_ports_on_interface(xiface)) {
		link_info.u64 = 0;
		return link_info;
	}

	link_info = cvmx_helper_link_get(xipd_port);
	if (link_info.u64 == (__cvmx_helper_get_link_info(xiface, index)).u64)
		return link_info;

	if (!link_info.s.link_up)
		cvmx_error_disable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);

	/* If we fail to set the link speed, port_link_info will not change */
	cvmx_helper_link_set(xipd_port, link_info);

	if (link_info.s.link_up)
		cvmx_error_enable_group(CVMX_ERROR_GROUP_ETHERNET, xipd_port);

	return link_info;
}

/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @param xipd_port IPD/PKO port to query
 *
 * Return: Link state
 */
cvmx_helper_link_info_t cvmx_helper_link_get(int xipd_port)
{
	cvmx_helper_link_info_t result;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	int index = cvmx_helper_get_interface_index_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_fdt_sfp_info *sfp_info;

	/*
	 * The default result will be a down link unless the code
	 * below changes it.
	 */
	result.u64 = 0;

	if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
	    index >= cvmx_helper_ports_on_interface(xiface)) {
		return result;
	}

	if (iface_node_ops[xi.node][xi.interface]->link_get)
		result = iface_node_ops[xi.node][xi.interface]->link_get(xipd_port);

	if (xipd_port >= 0) {
		cvmx_helper_update_link_led(xiface, index, result);

		sfp_info = cvmx_helper_cfg_get_sfp_info(xiface, index);

		while (sfp_info) {
			if ((!result.s.link_up || (result.s.link_up && sfp_info->last_mod_abs)))
				cvmx_sfp_check_mod_abs(sfp_info, sfp_info->mod_abs_data);
			sfp_info = sfp_info->next_iface_sfp;
		}
	}

	return result;
}

/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @param xipd_port IPD/PKO port to configure
 * @param link_info The new link state
 *
 * Return: Zero on success, negative on failure
 */
int cvmx_helper_link_set(int xipd_port, cvmx_helper_link_info_t link_info)
{
	int result = -1;
	int xiface = cvmx_helper_get_interface_num(xipd_port);
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index = cvmx_helper_get_interface_index_num(xipd_port);

	if (__cvmx_helper_xiface_is_null(xiface) || index == -1 ||
	    index >= cvmx_helper_ports_on_interface(xiface))
		return -1;

	if (iface_node_ops[xi.node][xi.interface]->link_set)
		result = iface_node_ops[xi.node][xi.interface]->link_set(xipd_port, link_info);

	/*
	 * Set the port_link_info here so that the link status is
	 * updated no matter how cvmx_helper_link_set is called. We
	 * don't change the value if link_set failed.
	 */
	if (result == 0)
		__cvmx_helper_set_link_info(xiface, index, link_info);
	return result;
}

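/*
 * Illustrative sketch (an assumption, not from this file): a periodic
 * link poll for one port reduces to a single call:
 *
 *	cvmx_helper_link_info_t li = cvmx_helper_link_autoconf(xipd_port);
 *
 *	if (li.s.link_up)
 *		; // li.s.speed holds the negotiated speed in Mbps
 */
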
/**
 * Allocate memory from bootmem on a specific node.
 *
 * @param node Node to allocate the memory on
 * @param alloc_size Number of bytes to allocate
 * @param align Alignment of the allocation
 *
 * Return: pointer to the allocated memory, NULL on failure
 */
void *cvmx_helper_mem_alloc(int node, uint64_t alloc_size, uint64_t align)
{
	s64 paddr;

	paddr = cvmx_bootmem_phy_alloc_range(alloc_size, align, cvmx_addr_on_node(node, 0ull),
					     cvmx_addr_on_node(node, 0xffffffffff));
	if (paddr <= 0ll) {
		printf("ERROR: %s failed size %u\n", __func__, (unsigned int)alloc_size);
		return NULL;
	}
	return cvmx_phys_to_ptr(paddr);
}