// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

/* Smallest Round-Robin quantum to use +1 */
#define CVMX_PKO3_RR_QUANTUM_MIN 0x10

static int debug; /* 1 for basic, 2 for detailed trace */

struct cvmx_pko3_dq {
	unsigned dq_count : 6;	/* Number of descriptor queues */
	unsigned dq_base : 10;	/* Descriptor queue start number */
#define CVMX_PKO3_SWIZZLE_IPD 0x0
};
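
/*
 * Layout note (illustrative, not part of the driver): the two
 * bit-fields above pack into 16 bits of one unsigned word, so an entry
 * registered as, say, dq_base=64/dq_count=8 is a single small record,
 * and dq_count == 0 marks an unregistered IPD port.
 * CVMX_PKO3_SWIZZLE_IPD is XORed into the table index; with the value
 * 0x0 used here it is a no-op.
 */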

/*
 * @INTERNAL
 * Descriptor Queue to IPD port mapping table.
 *
 * This pointer is per-core; it holds the virtual address
 * of a global named block which has 2^12 entries for each
 * possible node.
 */
struct cvmx_pko3_dq *__cvmx_pko3_dq_table;

int cvmx_pko3_get_queue_base(int ipd_port)
{
	struct cvmx_pko3_dq *dq_table;
	int ret = -1;
	unsigned int i;
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

	/* set up the global table on first use */
	if (cvmx_unlikely(!__cvmx_pko3_dq_table))
		__cvmx_pko3_dq_table_setup();

	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;

	/* get per-node table */
	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;

	if (cvmx_likely(dq_table[i].dq_count > 0))
		ret = xp.node << 10 | dq_table[i].dq_base;
	else if (debug)
		cvmx_printf("ERROR: %s: no queues for ipd_port=%#x\n",
			    __func__, ipd_port);

	return ret;
}

int cvmx_pko3_get_queue_num(int ipd_port)
{
	struct cvmx_pko3_dq *dq_table;
	int ret = -1;
	unsigned int i;
	struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

	/* set up the global table on first use */
	if (cvmx_unlikely(!__cvmx_pko3_dq_table))
		__cvmx_pko3_dq_table_setup();

	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;

	/* get per-node table */
	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xp.node;

	if (cvmx_likely(dq_table[i].dq_count > 0))
		ret = dq_table[i].dq_count;
	else if (debug)
		debug("ERROR: %s: no queues for ipd_port=%#x\n", __func__,
		      ipd_port);

	return ret;
}
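
/*
 * Usage sketch (illustrative only): picking a transmit DQ for an IPD
 * port. The base already encodes the node in bits 10 and up, and the
 * DQs in the range are typically ordered from highest to lowest
 * scheduling priority:
 *
 *	int dq_base = cvmx_pko3_get_queue_base(ipd_port);
 *	int dq_count = cvmx_pko3_get_queue_num(ipd_port);
 *
 *	if (dq_base < 0 || dq_count <= 0)
 *		return -1;	// no DQs registered for this port
 *	// lowest-priority DQ of the range:
 *	int dq = dq_base + dq_count - 1;
 */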

/**
 * @INTERNAL
 *
 * Initialize port/dq table contents
 */
static void __cvmx_pko3_dq_table_init(void *ptr)
{
	unsigned int size = sizeof(struct cvmx_pko3_dq) *
			    CVMX_PKO3_IPD_NUM_MAX * CVMX_MAX_NODES;

	memset(ptr, 0, size);
}

/**
 * @INTERNAL
 *
 * Find or allocate the global port/DQ map table, which is a named
 * bootmem block containing entries for all possible OCI nodes.
 *
 * The table's global pointer is stored in a core-local variable,
 * so every core will call this function once, on first use.
 */
int __cvmx_pko3_dq_table_setup(void)
{
	void *ptr;

	ptr = cvmx_bootmem_alloc_named_range_once(
		/* size */
		sizeof(struct cvmx_pko3_dq) * CVMX_PKO3_IPD_NUM_MAX *
			CVMX_MAX_NODES,
		/* min_addr, max_addr, align */
		0ull, 0ull, sizeof(struct cvmx_pko3_dq),
		/* name */
		"cvmx_pko3_global_dq_table", __cvmx_pko3_dq_table_init);

	if (debug)
		debug("%s: dq_table_ptr=%p\n", __func__, ptr);

	if (!ptr)
		return -1;

	__cvmx_pko3_dq_table = ptr;
	return 0;
}
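
/*
 * Size note (an estimate, assuming CVMX_PKO3_IPD_NUM_MAX == 2^12 per
 * the comment above, CVMX_MAX_NODES == 4 and a 4-byte struct
 * cvmx_pko3_dq): the named block is 4096 * 4 * 4 = 64 KiB, shared by
 * all cores through the bootmem allocator; only the pointer to it is
 * per-core.
 */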

/*
 * @INTERNAL
 * Register a range of Descriptor Queues with an interface port
 *
 * This function populates the DQ-to-IPD translation table
 * used by the application to retrieve the DQ range (typically ordered
 * by priority) for a given IPD port, which is either a physical port
 * or a channel on a channelized interface (i.e. ILK).
 *
 * @param xiface is the physical interface number
 * @param index is either a physical port on an interface
 *	or a channel of an ILK interface
 * @param dq_base is the first Descriptor Queue number in a consecutive range
 * @param dq_count is the number of consecutive Descriptor Queues feeding
 *	the same channel or port.
 *
 * Only a consecutive range of Descriptor Queues can be associated with any
 * given channel/port, and usually they are ordered from most to least
 * in terms of scheduling priority.
 *
 * Note: this function only populates the node-local translation table.
 * NOTE: This function would be cleaner if it had a single ipd_port argument
 *
 * @returns 0 on success, -1 on failure.
 */
int __cvmx_pko3_ipd_dq_register(int xiface, int index, unsigned int dq_base,
				unsigned int dq_count)
{
	struct cvmx_pko3_dq *dq_table;
	int ipd_port;
	unsigned int i;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	struct cvmx_xport xp;

	if (__cvmx_helper_xiface_is_null(xiface)) {
		ipd_port = cvmx_helper_node_to_ipd_port(xi.node,
							CVMX_PKO3_IPD_PORT_NULL);
	} else {
		int p;

		p = cvmx_helper_get_ipd_port(xiface, index);
		if (p < 0) {
			cvmx_printf("ERROR: %s: xiface %#x has no IPD port\n",
				    __func__, xiface);
			return -1;
		}
		ipd_port = p;
	}

	xp = cvmx_helper_ipd_port_to_xport(ipd_port);

	i = CVMX_PKO3_SWIZZLE_IPD ^ xp.port;

	/* get per-node table */
	if (!__cvmx_pko3_dq_table)
		__cvmx_pko3_dq_table_setup();

	dq_table = __cvmx_pko3_dq_table + CVMX_PKO3_IPD_NUM_MAX * xi.node;

	if (debug)
		debug("%s: ipd_port=%#x ix=%#x dq %u cnt %u\n", __func__,
		      ipd_port, i, dq_base, dq_count);

	/* Check the IPD port has not already been configured */
	if (dq_table[i].dq_count > 0) {
		cvmx_printf("%s: ERROR: IPD %#x already registered\n",
			    __func__, ipd_port);
		return -1;
	}

	/* Store DQ# range in the queue lookup table */
	dq_table[i].dq_base = dq_base;
	dq_table[i].dq_count = dq_count;

	return 0;
}
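
/*
 * Usage sketch (illustrative only; the queue numbers are arbitrary):
 * register eight consecutive DQs starting at 64 for port index 0 of
 * an interface, so that later lookups by IPD port can find them:
 *
 *	if (__cvmx_pko3_ipd_dq_register(xiface, 0, 64, 8) != 0)
 *		return -1;
 */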

/*
 * @INTERNAL
 * Convert a normal CHAN_E (i.e. IPD port) value to the compressed
 * channel form that is used to populate PKO_LUT.
 *
 * Note: This code may be model specific.
 */
static int cvmx_pko3_chan_2_xchan(uint16_t ipd_port)
{
	u16 xchan;
	u8 off;
	static const u8 *xchan_base;
	static const u8 xchan_base_cn78xx[16] = {
		/* IPD 0x000 */ 0x3c0 >> 4, /* LBK */
		/* IPD 0x100 */ 0x380 >> 4, /* DPI */
		/* IPD 0x200 */ 0xfff >> 4, /* not used */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0x000 >> 4, /* ILK0 */
		/* IPD 0x500 */ 0x100 >> 4, /* ILK1 */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x200 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0x240 >> 4, /* BGX1 */
		/* IPD 0xa00 */ 0x280 >> 4, /* BGX2 */
		/* IPD 0xb00 */ 0x2c0 >> 4, /* BGX3 */
		/* IPD 0xc00 */ 0x300 >> 4, /* BGX4 */
		/* IPD 0xd00 */ 0x340 >> 4, /* BGX5 */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
	};
	static const u8 xchan_base_cn73xx[16] = {
		/* IPD 0x000 */ 0x0c0 >> 4, /* LBK */
		/* IPD 0x100 */ 0x100 >> 4, /* DPI */
		/* IPD 0x200 */ 0xfff >> 4, /* not used */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0xfff >> 4, /* not used */
		/* IPD 0x500 */ 0xfff >> 4, /* not used */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0x040 >> 4, /* BGX1 */
		/* IPD 0xa00 */ 0x080 >> 4, /* BGX2 */
		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
	};
	static const u8 xchan_base_cn75xx[16] = {
		/* IPD 0x000 */ 0x040 >> 4, /* LBK */
		/* IPD 0x100 */ 0x080 >> 4, /* DPI */
		/* IPD 0x200 */ 0xeee >> 4, /* SRIO0 noop */
		/* IPD 0x300 */ 0xfff >> 4, /* not used */
		/* IPD 0x400 */ 0xfff >> 4, /* not used */
		/* IPD 0x500 */ 0xfff >> 4, /* not used */
		/* IPD 0x600 */ 0xfff >> 4, /* not used */
		/* IPD 0x700 */ 0xfff >> 4, /* not used */
		/* IPD 0x800 */ 0x000 >> 4, /* BGX0 */
		/* IPD 0x900 */ 0xfff >> 4, /* not used */
		/* IPD 0xa00 */ 0xfff >> 4, /* not used */
		/* IPD 0xb00 */ 0xfff >> 4, /* not used */
		/* IPD 0xc00 */ 0xfff >> 4, /* not used */
		/* IPD 0xd00 */ 0xfff >> 4, /* not used */
		/* IPD 0xe00 */ 0xfff >> 4, /* not used */
		/* IPD 0xf00 */ 0xfff >> 4  /* not used */
	};

	if (OCTEON_IS_MODEL(OCTEON_CN73XX))
		xchan_base = xchan_base_cn73xx;
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		xchan_base = xchan_base_cn75xx;
	if (OCTEON_IS_MODEL(OCTEON_CN78XX))
		xchan_base = xchan_base_cn78xx;

	if (!xchan_base)
		return -1;

	xchan = ipd_port >> 8;

	/* ILKx and DPI have 8-bit logical channels; the others just 6 */
	if (((xchan & 0xfe) == 0x04) || xchan == 0x01)
		off = ipd_port & 0xff;
	else
		off = ipd_port & 0x3f;

	xchan = xchan_base[xchan & 0xf];

	if (xchan == 0xff)
		return -1; /* Invalid IPD_PORT */
	else if (xchan == 0xee)
		return -2; /* LUT not used */
	else
		return (xchan << 4) | off;
}
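
/*
 * Worked example (using xchan_base_cn73xx above): for ipd_port 0x910
 * (BGX1 range), xchan = 0x910 >> 8 = 9 is neither ILK (0x04/0x05) nor
 * DPI (0x01), so off = 0x910 & 0x3f = 0x10; xchan_base[9] =
 * 0x040 >> 4 = 4, and the result is (4 << 4) | 0x10 = 0x50. Unused
 * slots (0xfff >> 4 = 0xff) yield -1, and 0xee entries yield -2.
 */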

/*
 * Map channel number in PKO
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param pq_num specifies the Port Queue (i.e. L1) queue number.
 * @param l2_l3_q_num specifies the L2/L3 queue number.
 * @param channel specifies the channel number to map to the queue.
 *
 * The channel assignment applies to L2 or L3 Shaper Queues depending
 * on the setting of channel credit level.
 *
 * @return none.
 */
void cvmx_pko3_map_channel(unsigned int node, unsigned int pq_num,
			   unsigned int l2_l3_q_num, uint16_t channel)
{
	union cvmx_pko_l3_l2_sqx_channel sqx_channel;
	cvmx_pko_lutx_t lutx;
	int xchan;

	sqx_channel.u64 =
		csr_rd_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num));

	sqx_channel.s.cc_channel = channel;

	csr_wr_node(node, CVMX_PKO_L3_L2_SQX_CHANNEL(l2_l3_q_num),
		    sqx_channel.u64);

	/* Convert CHAN_E into compressed channel */
	xchan = cvmx_pko3_chan_2_xchan(channel);

	if (debug)
		debug("%s: ipd_port=%#x xchan=%#x\n", __func__, channel, xchan);

	if (xchan < 0) {
		if (xchan == -1)
			cvmx_printf("%s: ERROR: channel %#x not recognized\n",
				    __func__, channel);
		return;
	}

	lutx.u64 = 0;
	lutx.s.valid = 1;
	lutx.s.pq_idx = pq_num;
	lutx.s.queue_number = l2_l3_q_num;

	csr_wr_node(node, CVMX_PKO_LUTX(xchan), lutx.u64);

	if (debug)
		debug("%s: channel %#x (compressed=%#x) mapped L2/L3 SQ=%u, PQ=%u\n",
		      __func__, channel, xchan, l2_l3_q_num, pq_num);
}
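
/*
 * Usage sketch (illustrative only; queue and channel numbers are
 * hypothetical): map CHAN_E channel 0x910 to L2/L3 SQ 9 under PQ 1 on
 * node 0, assuming the channel credit level matches the queue level
 * being programmed:
 *
 *	cvmx_pko3_map_channel(0, 1, 9, 0x910);
 */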

/*
 * @INTERNAL
 * This function configures port queue scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param port_queue is the port queue number to be configured.
 * @param mac_num is the number of the MAC that will be tied to this port_queue.
 */
static void cvmx_pko_configure_port_queue(int node, int port_queue, int mac_num)
{
	cvmx_pko_l1_sqx_topology_t pko_l1_topology;
	cvmx_pko_l1_sqx_shape_t pko_l1_shape;
	cvmx_pko_l1_sqx_link_t pko_l1_link;

	pko_l1_topology.u64 = 0;
	pko_l1_topology.s.link = mac_num;
	csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(port_queue),
		    pko_l1_topology.u64);

	pko_l1_shape.u64 = 0;
	pko_l1_shape.s.link = mac_num;
	csr_wr_node(node, CVMX_PKO_L1_SQX_SHAPE(port_queue), pko_l1_shape.u64);

	pko_l1_link.u64 = 0;
	pko_l1_link.s.link = mac_num;
	csr_wr_node(node, CVMX_PKO_L1_SQX_LINK(port_queue), pko_l1_link.u64);
}

/*
 * @INTERNAL
 * This function configures level 2 queue scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 2 queue number to be configured.
 * @param parent_queue is the parent queue at the next level for this L2 queue.
 * @param prio is this queue's priority in the parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l2_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l2_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l2_sqx_topology_t pko_child_topology;
	cvmx_pko_l1_sqx_topology_t pko_parent_topology;

	/* parent topology configuration */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L1_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L1_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.link);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L2_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* child topology configuration */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}
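
/*
 * Note on the register pairing used by the Lx routines above and
 * below (a reading of this code, not a hardware specification): each
 * parent's *_TOPOLOGY register holds the PRIO_ANCHOR and RR_PRIO for
 * its whole group of children, while each child carries its own PRIO
 * and RR_QUANTUM in its *_SCHEDULE register and points back to the
 * parent via its own *_TOPOLOGY[PARENT]. For statically prioritized
 * children, the caller is assumed to keep PRIO consistent with the
 * child's offset from PRIO_ANCHOR, as the loop in
 * cvmx_pko3_sq_config_children() below does.
 */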

/*
 * @INTERNAL
 * This function configures level 3 queue scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 3 queue number to be configured.
 * @param parent_queue is the parent queue at the next level for this L3 queue.
 * @param prio is this queue's priority in the parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l3_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l3_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l3_sqx_topology_t pko_child_topology;
	cvmx_pko_l2_sqx_topology_t pko_parent_topology;

	/* parent topology configuration */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L2_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L2_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L3_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* child topology configuration */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}

/*
 * @INTERNAL
 * This function configures level 4 queue scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 4 queue number to be configured.
 * @param parent_queue is the parent queue at the next level for this L4 queue.
 * @param prio is this queue's priority in the parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l4_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l4_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l4_sqx_topology_t pko_child_topology;
	cvmx_pko_l3_sqx_topology_t pko_parent_topology;

	/* parent topology configuration */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L3_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L4_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* topology configuration */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}

/*
 * @INTERNAL
 * This function configures level 5 queue scheduling and topology parameters
 * in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param queue is the level 5 queue number to be configured.
 * @param parent_queue is the parent queue at the next level for this L5 queue.
 * @param prio is this queue's priority in the parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_l5_queue(int node, int queue, int parent_queue,
					int prio, int rr_quantum,
					int child_base, int child_rr_prio)
{
	cvmx_pko_l5_sqx_schedule_t pko_sq_sched;
	cvmx_pko_l4_sqx_topology_t pko_parent_topology;
	cvmx_pko_l5_sqx_topology_t pko_child_topology;

	/* parent topology configuration */
	pko_parent_topology.u64 =
		csr_rd_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue));
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, CVMX_PKO_L4_SQX_TOPOLOGY(parent_queue),
		    pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L4_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this sq in the parent queue */
	pko_sq_sched.u64 = 0;
	pko_sq_sched.s.prio = prio;
	pko_sq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_L5_SQX_SCHEDULE(queue), pko_sq_sched.u64);

	/* topology configuration */
	pko_child_topology.u64 = 0;
	pko_child_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_L5_SQX_TOPOLOGY(queue),
		    pko_child_topology.u64);
}

/*
 * @INTERNAL
 * This function configures descriptor queue scheduling and topology
 * parameters in hardware.
 *
 * @param node is to specify the node to which this configuration is applied.
 * @param dq is the descriptor queue number to be configured.
 * @param parent_queue is the parent queue at the next level for this DQ.
 * @param prio is this queue's priority in the parent's scheduler.
 * @param rr_quantum is this queue's round robin quantum value.
 * @param child_base is the first child queue number in the static priority children.
 * @param child_rr_prio is the round robin children priority.
 */
static void cvmx_pko_configure_dq(int node, int dq, int parent_queue, int prio,
				  int rr_quantum, int child_base,
				  int child_rr_prio)
{
	cvmx_pko_dqx_schedule_t pko_dq_sched;
	cvmx_pko_dqx_topology_t pko_dq_topology;
	cvmx_pko_l5_sqx_topology_t pko_parent_topology;
	cvmx_pko_dqx_wm_ctl_t pko_dq_wm_ctl;
	unsigned long long parent_topology_reg;
	char lvl;

	if (debug)
		debug("%s: dq %u parent %u child_base %u\n", __func__, dq,
		      parent_queue, child_base);

	if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L5_QUEUES) {
		parent_topology_reg = CVMX_PKO_L5_SQX_TOPOLOGY(parent_queue);
		lvl = 5;
	} else if (__cvmx_pko3_sq_lvl_max() == CVMX_PKO_L3_QUEUES) {
		parent_topology_reg = CVMX_PKO_L3_SQX_TOPOLOGY(parent_queue);
		lvl = 3;
	} else {
		return;
	}

	if (debug)
		debug("%s: parent_topology_reg=%#llx\n", __func__,
		      parent_topology_reg);

	/* parent topology configuration */
	pko_parent_topology.u64 = csr_rd_node(node, parent_topology_reg);
	pko_parent_topology.s.prio_anchor = child_base;
	pko_parent_topology.s.rr_prio = child_rr_prio;
	csr_wr_node(node, parent_topology_reg, pko_parent_topology.u64);

	if (debug > 1)
		debug("CVMX_PKO_L%d_SQX_TOPOLOGY(%u): PRIO_ANCHOR=%u PARENT=%u\n",
		      lvl, parent_queue, pko_parent_topology.s.prio_anchor,
		      pko_parent_topology.s.parent);

	/* scheduler configuration for this dq in the parent queue */
	pko_dq_sched.u64 = 0;
	pko_dq_sched.s.prio = prio;
	pko_dq_sched.s.rr_quantum = rr_quantum;
	csr_wr_node(node, CVMX_PKO_DQX_SCHEDULE(dq), pko_dq_sched.u64);

	/* topology configuration */
	pko_dq_topology.u64 = 0;
	pko_dq_topology.s.parent = parent_queue;
	csr_wr_node(node, CVMX_PKO_DQX_TOPOLOGY(dq), pko_dq_topology.u64);

	/* configure for counting packets, not bytes, at this level */
	pko_dq_wm_ctl.u64 = 0;
	pko_dq_wm_ctl.s.kind = 1;
	pko_dq_wm_ctl.s.enable = 0;
	csr_wr_node(node, CVMX_PKO_DQX_WM_CTL(dq), pko_dq_wm_ctl.u64);

	if (debug > 1) {
		pko_dq_sched.u64 = csr_rd_node(node, CVMX_PKO_DQX_SCHEDULE(dq));
		pko_dq_topology.u64 =
			csr_rd_node(node, CVMX_PKO_DQX_TOPOLOGY(dq));
		debug("CVMX_PKO_DQX_TOPOLOGY(%u)PARENT=%u CVMX_PKO_DQX_SCHEDULE(%u) PRIO=%u Q=%u\n",
		      dq, pko_dq_topology.s.parent, dq, pko_dq_sched.s.prio,
		      pko_dq_sched.s.rr_quantum);
	}
}

/*
 * @INTERNAL
 * The following structure selects the Scheduling Queue configuration
 * routine for each of the supported levels.
 * The initial content of the table is set up in accordance with the
 * specific SoC model and its implemented resources.
 */
struct pko3_cfg_tab_s {
	/* function pointer to configure the given level, last=DQ */
	struct {
		u8 parent_level;
		void (*cfg_sq_func)(int node, int queue, int parent_queue,
				    int prio, int rr_quantum, int child_base,
				    int child_rr_prio);
		//XXX exaggerated size, for debugging
	} lvl[256];
};

static const struct pko3_cfg_tab_s pko3_cn78xx_cfg = {
	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
				   cvmx_pko_configure_l2_queue },
	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
				   cvmx_pko_configure_l3_queue },
	  [CVMX_PKO_L4_QUEUES] = { CVMX_PKO_L3_QUEUES,
				   cvmx_pko_configure_l4_queue },
	  [CVMX_PKO_L5_QUEUES] = { CVMX_PKO_L4_QUEUES,
				   cvmx_pko_configure_l5_queue },
	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L5_QUEUES,
				      cvmx_pko_configure_dq } }
};

static const struct pko3_cfg_tab_s pko3_cn73xx_cfg = {
	{ [CVMX_PKO_L2_QUEUES] = { CVMX_PKO_PORT_QUEUES,
				   cvmx_pko_configure_l2_queue },
	  [CVMX_PKO_L3_QUEUES] = { CVMX_PKO_L2_QUEUES,
				   cvmx_pko_configure_l3_queue },
	  [CVMX_PKO_DESCR_QUEUES] = { CVMX_PKO_L3_QUEUES,
				      cvmx_pko_configure_dq } }
};
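
/*
 * Dispatch sketch (illustrative only): on CN73XX/CNF75XX the table
 * above resolves a descriptor-queue request to cvmx_pko_configure_dq()
 * with an L3 parent, while CN78XX routes it through the full
 * five-level hierarchy. A caller-side view:
 *
 *	const struct pko3_cfg_tab_s *cfg = &pko3_cn73xx_cfg;
 *
 *	cfg->lvl[CVMX_PKO_DESCR_QUEUES].cfg_sq_func(node, dq, l3_queue,
 *						    prio, rr_quantum,
 *						    child_base, rr_prio);
 */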

/*
 * Configure a Port Queue and its child Scheduler Queues
 *
 * Port Queues (a.k.a. L1) are assigned 1-to-1 to MACs.
 * L2 Scheduler Queues are used for specifying channels, and thus there
 * could be multiple L2 SQs attached to a single L1 PQ, either in a
 * fair round-robin scheduling, or with static and/or round-robin priorities.
 *
 * @param node on which to operate
 * @param mac_num is the LMAC number that is associated with the Port Queue
 * @param pq_num is the number of the L1 PQ attached to the MAC
 *
 * @returns 0 on success, -1 on failure.
 */
int cvmx_pko3_pq_config(unsigned int node, unsigned int mac_num,
			unsigned int pq_num)
{
	char b1[10];

	if (debug)
		debug("%s: MAC%u -> %s\n", __func__, mac_num,
		      __cvmx_pko3_sq_str(b1, CVMX_PKO_PORT_QUEUES, pq_num));

	cvmx_pko_configure_port_queue(node, pq_num, mac_num);

	return 0;
}

/*
 * Configure L3 through L5 Scheduler Queues and Descriptor Queues
 *
 * The Scheduler Queues in Levels 3 to 5 and Descriptor Queues are
 * configured one-to-one or many-to-one to a single parent Scheduler
 * Queue. The level of the parent SQ is specified in an argument,
 * as well as the number of children to attach to the specific parent.
 * The children can have fair round-robin or priority-based scheduling
 * when multiple children are assigned to a single parent.
 *
 * @param node on which to operate
 * @param child_level is the level of the child queue
 * @param parent_queue is the number of the parent Scheduler Queue
 * @param child_base is the number of the first child SQ or DQ to assign to
 *	the parent
 * @param child_count is the number of consecutive children to assign
 * @param stat_prio_count is the priority setting for the child SQs
 *
 * If <stat_prio_count> is -1, the Ln children will have an equal Round-Robin
 * relationship with each other. If <stat_prio_count> is 0, all Ln children
 * will be arranged in Weighted-Round-Robin, with the first having the most
 * precedence. If <stat_prio_count> is between 1 and 8, it indicates how
 * many children will have static priority settings (with the first having
 * the most precedence), with the remaining Ln children having WRR scheduling.
 *
 * @returns 0 on success, -1 on failure.
 *
 * Note: this function supports the configuration of the node-local unit only.
 */
int cvmx_pko3_sq_config_children(unsigned int node,
				 enum cvmx_pko3_level_e child_level,
				 unsigned int parent_queue,
				 unsigned int child_base,
				 unsigned int child_count, int stat_prio_count)
{
	enum cvmx_pko3_level_e parent_level;
	unsigned int num_elem = 0;
	unsigned int rr_quantum, rr_count;
	unsigned int child, prio, rr_prio;
	const struct pko3_cfg_tab_s *cfg_tbl = NULL;
	char b1[10], b2[10];

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		num_elem = NUM_ELEMENTS(pko3_cn78xx_cfg.lvl);
		cfg_tbl = &pko3_cn78xx_cfg;
	}
	if (OCTEON_IS_MODEL(OCTEON_CN73XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		num_elem = NUM_ELEMENTS(pko3_cn73xx_cfg.lvl);
		cfg_tbl = &pko3_cn73xx_cfg;
	}

	if (!cfg_tbl || child_level >= num_elem) {
		cvmx_printf("ERROR: %s: model or level %#x invalid\n", __func__,
			    child_level);
		return -1;
	}

	parent_level = cfg_tbl->lvl[child_level].parent_level;

	if (!cfg_tbl->lvl[child_level].cfg_sq_func ||
	    cfg_tbl->lvl[child_level].parent_level == 0) {
		cvmx_printf("ERROR: %s: queue level %#x invalid\n", __func__,
			    child_level);
		return -1;
	}

	/* First static priority is 0 - top precedence */
	prio = 0;

	if (stat_prio_count > (signed int)child_count)
		stat_prio_count = child_count;

	/* Valid PRIO field is 0..9, limit maximum static priorities */
	if (stat_prio_count > 9)
		stat_prio_count = 9;

	/* Special case of a single child */
	if (child_count == 1) {
		rr_count = 0;
		rr_prio = 0xF;
		/* Special case for Fair-RR */
	} else if (stat_prio_count < 0) {
		rr_count = child_count;
		rr_prio = 0;
	} else {
		rr_count = child_count - stat_prio_count;
		rr_prio = stat_prio_count;
	}

	/* Compute highest RR_QUANTUM */
	if (stat_prio_count > 0)
		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN * rr_count;
	else
		rr_quantum = CVMX_PKO3_RR_QUANTUM_MIN;
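
	/*
	 * Worked example (illustrative): child_count=8 with
	 * stat_prio_count=2 yields rr_count=6, rr_prio=2 and a starting
	 * rr_quantum of 0x10 * 6 = 0x60; the two static children get
	 * PRIO 0 and 1, and the six WRR children share PRIO 2 with
	 * quanta 0x60, 0x50, ..., 0x10 assigned by the loop below.
	 */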

	if (debug)
		debug("%s: Parent %s child_base %u rr_pri %u\n", __func__,
		      __cvmx_pko3_sq_str(b1, parent_level, parent_queue),
		      child_base, rr_prio);

	/* Configure each child; the parent topology is updated in the same call */
	for (child = child_base; child < (child_base + child_count); child++) {
		if (debug)
			debug("%s: Child %s of %s prio %u rr_quantum %#x\n",
			      __func__,
			      __cvmx_pko3_sq_str(b1, child_level, child),
			      __cvmx_pko3_sq_str(b2, parent_level,
						 parent_queue),
			      prio, rr_quantum);

		cfg_tbl->lvl[child_level].cfg_sq_func(node, child, parent_queue,
						      prio, rr_quantum,
						      child_base, rr_prio);

		if (prio < rr_prio)
			prio++;
		else if (stat_prio_count > 0)
			rr_quantum -= CVMX_PKO3_RR_QUANTUM_MIN;
	} /* for child */

	return 0;
}
879}