// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Small helper utilities.
 */

#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr-enums.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-ipd.h>
#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pip.h>
#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

/**
 * @INTERNAL
 * These are the interface types needed to convert interface numbers to ipd
 * ports.
 *
 * @param GMII
 *	This type is used for sgmii, rgmii, xaui and rxaui interfaces.
 * @param ILK
 *	This type is used for ilk interfaces.
 * @param SRIO
 *	This type is used for serial RapidIO interfaces.
 * @param NPI
 *	This type is used for npi interfaces.
 * @param LB
 *	This type is used for loopback interfaces.
 * @param INVALID_IF_TYPE
 *	This type indicates the interface hasn't been configured.
 */
enum port_map_if_type { INVALID_IF_TYPE = 0, GMII, ILK, SRIO, NPI, LB };

/**
 * @INTERNAL
 * This structure is used to map interface numbers to ipd ports.
 *
 * @param type
 *	Interface type
 * @param first_ipd_port
 *	First IPD port number assigned to this interface.
 * @param last_ipd_port
 *	Last IPD port number assigned to this interface.
 * @param ipd_port_adj
 *	Different octeon chips require different ipd ports for the
 *	same interface port/mode configuration. This value is used
 *	to account for that difference.
 */
struct ipd_port_map {
	enum port_map_if_type type;
	int first_ipd_port;
	int last_ipd_port;
	int ipd_port_adj;
};

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 68xx.
 */
static const struct ipd_port_map ipd_port_map_68xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x8ff, 0x40 },	/* Interface 0 */
	{ GMII, 0x900, 0x9ff, 0x40 },	/* Interface 1 */
	{ GMII, 0xa00, 0xaff, 0x40 },	/* Interface 2 */
	{ GMII, 0xb00, 0xbff, 0x40 },	/* Interface 3 */
	{ GMII, 0xc00, 0xcff, 0x40 },	/* Interface 4 */
	{ ILK, 0x400, 0x4ff, 0x00 },	/* Interface 5 */
	{ ILK, 0x500, 0x5ff, 0x00 },	/* Interface 6 */
	{ NPI, 0x100, 0x120, 0x00 },	/* Interface 7 */
	{ LB, 0x000, 0x008, 0x00 },	/* Interface 8 */
};

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 78xx.
 *
 * This mapping corresponds to the WQE(CHAN) enumeration in the HRM,
 * Section 11.15 (PKI_CHAN_E) and Section 11.6.
 */
static const struct ipd_port_map ipd_port_map_78xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 },	/* Interface 0 - BGX0 */
	{ GMII, 0x900, 0x93f, 0x00 },	/* Interface 1 - BGX1 */
	{ GMII, 0xa00, 0xa3f, 0x00 },	/* Interface 2 - BGX2 */
	{ GMII, 0xb00, 0xb3f, 0x00 },	/* Interface 3 - BGX3 */
	{ GMII, 0xc00, 0xc3f, 0x00 },	/* Interface 4 - BGX4 */
	{ GMII, 0xd00, 0xd3f, 0x00 },	/* Interface 5 - BGX5 */
	{ ILK, 0x400, 0x4ff, 0x00 },	/* Interface 6 - ILK0 */
	{ ILK, 0x500, 0x5ff, 0x00 },	/* Interface 7 - ILK1 */
	{ NPI, 0x100, 0x13f, 0x00 },	/* Interface 8 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },	/* Interface 9 - LOOPBACK */
};

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 73xx.
 */
static const struct ipd_port_map ipd_port_map_73xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 },	/* Interface 0 - BGX(0,0-3) */
	{ GMII, 0x900, 0x93f, 0x00 },	/* Interface 1 - BGX(1,0-3) */
	{ GMII, 0xa00, 0xa3f, 0x00 },	/* Interface 2 - BGX(2,0-3) */
	{ NPI, 0x100, 0x17f, 0x00 },	/* Interface 3 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },	/* Interface 4 - LOOPBACK */
};

/**
 * @INTERNAL
 * Interface number to ipd port map for the octeon 75xx.
 */
static const struct ipd_port_map ipd_port_map_75xx[CVMX_HELPER_MAX_IFACE] = {
	{ GMII, 0x800, 0x83f, 0x00 },	/* Interface 0 - BGX0 */
	{ SRIO, 0x240, 0x241, 0x00 },	/* Interface 1 - SRIO 0 */
	{ SRIO, 0x242, 0x243, 0x00 },	/* Interface 2 - SRIO 1 */
	{ NPI, 0x100, 0x13f, 0x00 },	/* Interface 3 - DPI */
	{ LB, 0x000, 0x03f, 0x00 },	/* Interface 4 - LOOPBACK */
};

/**
 * Convert an interface mode into a human-readable string
 *
 * @param mode Mode to convert
 *
 * Return: String
 */
const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode)
{
	switch (mode) {
	case CVMX_HELPER_INTERFACE_MODE_DISABLED:
		return "DISABLED";
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
		return "RGMII";
	case CVMX_HELPER_INTERFACE_MODE_GMII:
		return "GMII";
	case CVMX_HELPER_INTERFACE_MODE_SPI:
		return "SPI";
	case CVMX_HELPER_INTERFACE_MODE_PCIE:
		return "PCIE";
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
		return "XAUI";
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		return "RXAUI";
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
		return "SGMII";
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
		return "QSGMII";
	case CVMX_HELPER_INTERFACE_MODE_PICMG:
		return "PICMG";
	case CVMX_HELPER_INTERFACE_MODE_NPI:
		return "NPI";
	case CVMX_HELPER_INTERFACE_MODE_LOOP:
		return "LOOP";
	case CVMX_HELPER_INTERFACE_MODE_SRIO:
		return "SRIO";
	case CVMX_HELPER_INTERFACE_MODE_ILK:
		return "ILK";
	case CVMX_HELPER_INTERFACE_MODE_AGL:
		return "AGL";
	case CVMX_HELPER_INTERFACE_MODE_XLAUI:
		return "XLAUI";
	case CVMX_HELPER_INTERFACE_MODE_XFI:
		return "XFI";
	case CVMX_HELPER_INTERFACE_MODE_40G_KR4:
		return "40G_KR4";
	case CVMX_HELPER_INTERFACE_MODE_10G_KR:
		return "10G_KR";
	case CVMX_HELPER_INTERFACE_MODE_MIXED:
		return "MIXED";
	}
	return "UNKNOWN";
}

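/*
 * Usage sketch (illustrative, not part of this file): print the mode of
 * every interface on the local node, using helpers declared in
 * cvmx-helper.h.
 *
 *	int node = cvmx_get_node_num();
 *	int i, xiface;
 *
 *	for (i = 0; i < cvmx_helper_get_number_of_interfaces(); i++) {
 *		xiface = cvmx_helper_node_interface_to_xiface(node, i);
 *		debug("interface %d: %s\n", i,
 *		      cvmx_helper_interface_mode_to_string(cvmx_helper_interface_get_mode(xiface)));
 *	}
 */
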
/**
 * Debug routine to dump the packet structure to the console
 *
 * @param work Work queue entry containing the packet to dump
 * @return Zero on success, negative on failure
 */
int cvmx_helper_dump_packet(cvmx_wqe_t *work)
{
	u64 count;
	u64 remaining_bytes;
	union cvmx_buf_ptr buffer_ptr;
	cvmx_buf_ptr_pki_t bptr;
	cvmx_wqe_78xx_t *wqe = (void *)work;
	u64 start_of_buffer;
	u8 *data_address;
	u8 *end_of_data;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_pki_dump_wqe(wqe);
		cvmx_wqe_pki_errata_20776(work);
	} else {
		debug("WORD0 = %lx\n", (unsigned long)work->word0.u64);
		debug("WORD1 = %lx\n", (unsigned long)work->word1.u64);
		debug("WORD2 = %lx\n", (unsigned long)work->word2.u64);
		debug("Packet Length:   %u\n", cvmx_wqe_get_len(work));
		debug("    Input Port:  %u\n", cvmx_wqe_get_port(work));
		debug("    QoS:         %u\n", cvmx_wqe_get_qos(work));
		debug("    Buffers:     %u\n", cvmx_wqe_get_bufs(work));
	}

	if (cvmx_wqe_get_bufs(work) == 0) {
		int wqe_pool;

		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
			debug("%s: ERROR: Unexpected bufs==0 in WQE\n", __func__);
			return -1;
		}
		wqe_pool = (int)cvmx_fpa_get_wqe_pool();
		buffer_ptr.u64 = 0;
		buffer_ptr.s.pool = wqe_pool;

		buffer_ptr.s.size = 128;
		buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
		if (cvmx_likely(!work->word2.s.not_IP)) {
			union cvmx_pip_ip_offset pip_ip_offset;

			pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);
			buffer_ptr.s.addr +=
				(pip_ip_offset.s.offset << 3) - work->word2.s.ip_offset;
			buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
		} else {
			/*
			 * WARNING: This code assumes that the packet
			 * is not RAW. If it was, we would use
			 * PIP_GBL_CFG[RAW_SHF] instead of
			 * PIP_GBL_CFG[NIP_SHF].
			 */
			union cvmx_pip_gbl_cfg pip_gbl_cfg;

			pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
			buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
		}
	} else {
		buffer_ptr = work->packet_ptr;
	}

	remaining_bytes = cvmx_wqe_get_len(work);

	while (remaining_bytes) {
		/* native cn78xx buffer format, unless legacy-translated */
		if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) && !wqe->pki_wqe_translated) {
			bptr.u64 = buffer_ptr.u64;
			/* XXX- assumes cache-line aligned buffer */
			start_of_buffer = (bptr.addr >> 7) << 7;
			debug("    Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
			debug("    Buffer Data: %llx\n", (unsigned long long)bptr.addr);
			debug("    Buffer Size: %u\n", bptr.size);
			data_address = (uint8_t *)cvmx_phys_to_ptr(bptr.addr);
			end_of_data = data_address + bptr.size;
		} else {
			start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
			debug("    Buffer Start:%llx\n", (unsigned long long)start_of_buffer);
			debug("    Buffer I   : %u\n", buffer_ptr.s.i);
			debug("    Buffer Back: %u\n", buffer_ptr.s.back);
			debug("    Buffer Pool: %u\n", buffer_ptr.s.pool);
			debug("    Buffer Data: %llx\n", (unsigned long long)buffer_ptr.s.addr);
			debug("    Buffer Size: %u\n", buffer_ptr.s.size);
			data_address = (uint8_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr);
			end_of_data = data_address + buffer_ptr.s.size;
		}

		debug("\t\t");
		count = 0;
		while (data_address < end_of_data) {
			if (remaining_bytes == 0)
				break;

			remaining_bytes--;
			debug("%02x", (unsigned int)*data_address);
			data_address++;
			if (remaining_bytes && count == 7) {
				debug("\n\t\t");
				count = 0;
			} else {
				count++;
			}
		}
		debug("\n");

		if (remaining_bytes) {
			if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
			    !wqe->pki_wqe_translated)
				buffer_ptr.u64 = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
			else
				buffer_ptr.u64 =
					*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
		}
	}
	return 0;
}

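/*
 * Usage sketch (illustrative): dump a freshly received packet. The work
 * request call is assumed to come from cvmx-pow.h and is not part of this
 * file.
 *
 *	cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
 *
 *	if (work)
 *		cvmx_helper_dump_packet(work);
 */
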
/**
 * @INTERNAL
 *
 * Extract NO_WPTR mode from PIP/IPD register
 */
static int __cvmx_ipd_mode_no_wptr(void)
{
	if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
		cvmx_ipd_ctl_status_t ipd_ctl_status;

		ipd_ctl_status.u64 = csr_rd(CVMX_IPD_CTL_STATUS);
		return ipd_ctl_status.s.no_wptr;
	}
	return 0;
}

static cvmx_buf_ptr_t __cvmx_packet_short_ptr[4];
static int8_t __cvmx_wqe_pool = -1;

/**
 * @INTERNAL
 * Prepare packet pointer template for dynamic short
 * packets.
 */
static void cvmx_packet_short_ptr_calculate(void)
{
	unsigned int i, off;
	union cvmx_pip_gbl_cfg pip_gbl_cfg;
	union cvmx_pip_ip_offset pip_ip_offset;

	/* Fill in the common values for all cases */
	for (i = 0; i < 4; i++) {
		if (__cvmx_ipd_mode_no_wptr())
			/* packet pool, set to 0 in hardware */
			__cvmx_wqe_pool = 0;
		else
			/* WQE pool as configured */
			__cvmx_wqe_pool = csr_rd(CVMX_IPD_WQE_FPA_QUEUE) & 7;

		__cvmx_packet_short_ptr[i].s.pool = __cvmx_wqe_pool;
		__cvmx_packet_short_ptr[i].s.size = cvmx_fpa_get_block_size(__cvmx_wqe_pool);
		__cvmx_packet_short_ptr[i].s.size -= 32;
		__cvmx_packet_short_ptr[i].s.addr = 32;
	}

	pip_gbl_cfg.u64 = csr_rd(CVMX_PIP_GBL_CFG);
	pip_ip_offset.u64 = csr_rd(CVMX_PIP_IP_OFFSET);

	/* RAW_FULL: index = 0 */
	i = 0;
	off = pip_gbl_cfg.s.raw_shf;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* NON-IP: index = 1 */
	i = 1;
	off = pip_gbl_cfg.s.nip_shf;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* IPv4: index = 2 */
	i = 2;
	off = (pip_ip_offset.s.offset << 3) + 4;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* IPv6: index = 3 */
	i = 3;
	off = (pip_ip_offset.s.offset << 3) + 0;
	__cvmx_packet_short_ptr[i].s.addr += off;
	__cvmx_packet_short_ptr[i].s.size -= off;
	__cvmx_packet_short_ptr[i].s.back += off >> 7;

	/* For IPv4/IPv6: subtract work->word2.s.ip_offset
	 * from addr if it is smaller than IP_OFFSET[OFFSET]*8,
	 * which is stored in __cvmx_packet_short_ptr[3].s.addr
	 */
}

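/*
 * Worked example (assuming IP_OFFSET[OFFSET] = 4): the IPv4 template
 * (index 2) above gets off = (4 << 3) + 4 = 36, so its addr field becomes
 * 32 + 36 = 68. cvmx_wqe_get_packet_ptr() below subtracts the per-packet
 * word2.s.ip_offset and adds the WQE's physical address to turn this
 * template offset into the actual data pointer.
 */
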
/**
 * Extract packet data buffer pointer from work queue entry.
 *
 * Returns the legacy (Octeon1/Octeon2) buffer pointer structure
 * for the linked buffer list.
 * On CN78XX, the native buffer pointer structure is converted into
 * the legacy format.
 * The legacy buf_ptr is then stored in the WQE, and word0 reserved
 * field is set to indicate that the buffer pointers were translated.
 * If the packet data is only found inside the work queue entry,
 * a standard buffer pointer structure is created for it.
 */
cvmx_buf_ptr_t cvmx_wqe_get_packet_ptr(cvmx_wqe_t *work)
{
	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (void *)work;
		cvmx_buf_ptr_t optr, lptr;
		cvmx_buf_ptr_pki_t nptr;
		unsigned int pool, bufs;
		int node = cvmx_get_node_num();

		/* In case of repeated calls of this function */
		if (wqe->pki_wqe_translated || wqe->word2.software) {
			optr.u64 = wqe->packet_ptr.u64;
			return optr;
		}

		bufs = wqe->word0.bufs;
		pool = wqe->word0.aura;
		nptr.u64 = wqe->packet_ptr.u64;

		optr.u64 = 0;
		optr.s.pool = pool;
		optr.s.addr = nptr.addr;
		if (bufs == 1) {
			optr.s.size = pki_dflt_pool[node].buffer_size -
				      pki_dflt_style[node].parm_cfg.first_skip - 8 -
				      wqe->word0.apad;
		} else {
			optr.s.size = nptr.size;
		}

		/* Calculate the "back" offset */
		if (!nptr.packet_outside_wqe) {
			optr.s.back = (nptr.addr -
				       cvmx_ptr_to_phys(wqe)) >> 7;
		} else {
			optr.s.back =
				(pki_dflt_style[node].parm_cfg.first_skip +
				 8 + wqe->word0.apad) >> 7;
		}
		lptr = optr;

		/* Follow pointer and convert all linked pointers */
		while (bufs > 1) {
			void *vptr;

			vptr = cvmx_phys_to_ptr(lptr.s.addr);

			memcpy(&nptr, vptr - 8, 8);
			/*
			 * Errata (PKI-20776) PKI_BUFLINK_S's are endian-swapped
			 * CN78XX pass 1.x has a bug where the packet pointer
			 * in each segment is written in the opposite
			 * endianness of the configured mode. Fix these here
			 */
			if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
				nptr.u64 = __builtin_bswap64(nptr.u64);
			lptr.u64 = 0;
			lptr.s.pool = pool;
			lptr.s.addr = nptr.addr;
			lptr.s.size = nptr.size;
			lptr.s.back = (pki_dflt_style[0].parm_cfg.later_skip + 8) >>
				      7; /* TBD: not guaranteed !! */

			memcpy(vptr - 8, &lptr, 8);
			bufs--;
		}
		/* Store translated bufptr in WQE, and set indicator */
		wqe->pki_wqe_translated = 1;
		wqe->packet_ptr.u64 = optr.u64;
		return optr;

	} else {
		unsigned int i;
		unsigned int off = 0;
		cvmx_buf_ptr_t bptr;

		if (cvmx_likely(work->word2.s.bufs > 0))
			return work->packet_ptr;

		if (cvmx_unlikely(work->word2.s.software))
			return work->packet_ptr;

		/* first packet, precalculate packet_ptr templates */
		if (cvmx_unlikely(__cvmx_packet_short_ptr[0].u64 == 0))
			cvmx_packet_short_ptr_calculate();

		/* calculate template index */
		i = work->word2.s_cn38xx.not_IP | work->word2.s_cn38xx.rcv_error;
		i = 2 ^ (i << 1);

		/* IPv4/IPv6: Adjust IP offset */
		if (cvmx_likely(i & 2)) {
			i |= work->word2.s.is_v6;
			off = work->word2.s.ip_offset;
		} else {
			/* RAWFULL/RAWSCHED should be handled here */
			i = 1; /* not-IP */
			off = 0;
		}

		/* Get the right template */
		bptr = __cvmx_packet_short_ptr[i];
		bptr.s.addr -= off;
		bptr.s.back = bptr.s.addr >> 7;

		/* Add actual WQE paddr to the template offset */
		bptr.s.addr += cvmx_ptr_to_phys(work);

		/* Adjust word2.bufs so that _free_data() handles it
		 * in the same way as PKO
		 */
		work->word2.s.bufs = 1;

		/* Store the new buffer pointer back into WQE */
		work->packet_ptr = bptr;

		/* Return the synthetic buffer pointer */
		return bptr;
	}
}

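/*
 * Usage sketch (illustrative): walk all packet segments through the
 * legacy-format pointers returned by cvmx_wqe_get_packet_ptr(). As in the
 * code above, the pointer to the next buffer sits 8 bytes before the
 * current buffer's data address.
 *
 *	cvmx_buf_ptr_t ptr = cvmx_wqe_get_packet_ptr(work);
 *	unsigned int bufs = cvmx_wqe_get_bufs(work);
 *
 *	while (bufs--) {
 *		void *data = cvmx_phys_to_ptr(ptr.s.addr);
 *
 *		... consume up to ptr.s.size bytes at data ...
 *
 *		if (bufs)
 *			ptr.u64 = *(uint64_t *)cvmx_phys_to_ptr(ptr.s.addr - 8);
 *	}
 */
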
void cvmx_wqe_free(cvmx_wqe_t *work)
{
	unsigned int bufs, ncl = 1;
	u64 paddr, paddr1;

	if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
		cvmx_wqe_78xx_t *wqe = (void *)work;
		cvmx_fpa3_gaura_t aura;
		cvmx_buf_ptr_pki_t bptr;

		bufs = wqe->word0.bufs;

		if (!wqe->pki_wqe_translated && bufs != 0) {
			/* Handle cn78xx native untranslated WQE */

			bptr = wqe->packet_ptr;

			/* Do nothing - first packet buffer shares WQE buffer */
			if (!bptr.packet_outside_wqe)
				return;
		} else if (cvmx_likely(bufs != 0)) {
			/* Handle translated 78XX WQE */
			paddr = (work->packet_ptr.s.addr & (~0x7full)) -
				(work->packet_ptr.s.back << 7);
			paddr1 = cvmx_ptr_to_phys(work);

			/* do not free WQE if it contains the first data buffer */
			if (paddr == paddr1)
				return;
		}

		/* WQE is separate from packet buffer, free it */
		aura = __cvmx_fpa3_gaura(wqe->word0.aura >> 10, wqe->word0.aura & 0x3ff);

		cvmx_fpa3_free(work, aura, ncl);
	} else {
		/* handle legacy WQE */
		bufs = work->word2.s_cn38xx.bufs;

		if (cvmx_likely(bufs != 0)) {
			/* Check if the first data buffer is inside WQE */
			paddr = (work->packet_ptr.s.addr & (~0x7full)) -
				(work->packet_ptr.s.back << 7);
			paddr1 = cvmx_ptr_to_phys(work);

			/* do not free WQE if it contains the first data buffer */
			if (paddr == paddr1)
				return;
		}

		/* precalculate packet_ptr, WQE pool number */
		if (cvmx_unlikely(__cvmx_wqe_pool < 0))
			cvmx_packet_short_ptr_calculate();
		cvmx_fpa1_free(work, __cvmx_wqe_pool, ncl);
	}
}

/**
 * Free the packet buffers contained in a work queue entry.
 * The work queue entry is also freed if it contains packet data.
 * If, however, the packet starts outside the WQE, the WQE will
 * not be freed. The application should call cvmx_wqe_free()
 * to free the WQE buffer that contains no packet data.
 *
 * @param work Work queue entry with packet to free
 */
587{
588 u64 number_buffers;
589 u64 start_of_buffer;
590 u64 next_buffer_ptr;
591 cvmx_fpa3_gaura_t aura;
592 unsigned int ncl;
593 cvmx_buf_ptr_t buffer_ptr;
594 cvmx_buf_ptr_pki_t bptr;
595 cvmx_wqe_78xx_t *wqe = (void *)work;
596 int o3_pki_wqe = 0;
597
598 number_buffers = cvmx_wqe_get_bufs(work);
599
600 buffer_ptr.u64 = work->packet_ptr.u64;
601
602 /* Zero-out WQE WORD3 so that the WQE is freed by cvmx_wqe_free() */
603 work->packet_ptr.u64 = 0;
604
605 if (number_buffers == 0)
606 return;
607
608 /* Interpret PKI-style bufptr unless it has been translated */
609 if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE) &&
610 !wqe->pki_wqe_translated) {
611 o3_pki_wqe = 1;
612 cvmx_wqe_pki_errata_20776(work);
613 aura = __cvmx_fpa3_gaura(wqe->word0.aura >> 10,
614 wqe->word0.aura & 0x3ff);
615 } else {
616 start_of_buffer = ((buffer_ptr.s.addr >> 7) -
617 buffer_ptr.s.back) << 7;
618 next_buffer_ptr =
619 *(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
620 /*
621 * Since the number of buffers is not zero, we know this is not
622 * a dynamic short packet. We need to check if it is a packet
623 * received with IPD_CTL_STATUS[NO_WPTR]. If this is true,
624 * we need to free all buffers except for the first one.
625 * The caller doesn't expect their WQE pointer to be freed
626 */
627 if (cvmx_ptr_to_phys(work) == start_of_buffer) {
628 buffer_ptr.u64 = next_buffer_ptr;
629 number_buffers--;
630 }
631 }
	while (number_buffers--) {
		if (o3_pki_wqe) {
			bptr.u64 = buffer_ptr.u64;

			ncl = (bptr.size + CVMX_CACHE_LINE_SIZE - 1) /
			      CVMX_CACHE_LINE_SIZE;

			/* XXX- assumes the buffer is cache-line aligned */
			start_of_buffer = (bptr.addr >> 7) << 7;

			/*
			 * Read pointer to next buffer before we free the
			 * current buffer.
			 */
			next_buffer_ptr = *(uint64_t *)cvmx_phys_to_ptr(bptr.addr - 8);
			/* FPA AURA comes from WQE, includes node */
			cvmx_fpa3_free(cvmx_phys_to_ptr(start_of_buffer),
				       aura, ncl);
		} else {
			ncl = (buffer_ptr.s.size + CVMX_CACHE_LINE_SIZE - 1) /
			      CVMX_CACHE_LINE_SIZE +
			      buffer_ptr.s.back;
			/*
			 * Calculate buffer start using "back" offset.
			 * Remember the back pointer is in cache lines,
			 * not 64-bit words.
			 */
			start_of_buffer = ((buffer_ptr.s.addr >> 7) -
					   buffer_ptr.s.back) << 7;
			/*
			 * Read pointer to next buffer before we free
			 * the current buffer.
			 */
			next_buffer_ptr =
				*(uint64_t *)cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
			/* FPA pool comes from buf_ptr itself */
			if (octeon_has_feature(OCTEON_FEATURE_CN78XX_WQE)) {
				aura = cvmx_fpa1_pool_to_fpa3_aura(buffer_ptr.s.pool);
				cvmx_fpa3_free(cvmx_phys_to_ptr(start_of_buffer),
					       aura, ncl);
			} else {
				cvmx_fpa1_free(cvmx_phys_to_ptr(start_of_buffer),
					       buffer_ptr.s.pool, ncl);
			}
		}
		buffer_ptr.u64 = next_buffer_ptr;
	}
}

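/*
 * Usage sketch (illustrative): typical receive-path cleanup. Freeing the
 * packet data first zeroes the WQE's packet pointer, so the following
 * cvmx_wqe_free() releases the WQE buffer itself.
 *
 *	cvmx_helper_free_packet_data(work);
 *	cvmx_wqe_free(work);
 */
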
void cvmx_helper_setup_legacy_red(int pass_thresh, int drop_thresh)
{
	unsigned int node = cvmx_get_node_num();
	int aura, bpid;
	int buf_cnt;
	bool ena_red = 0, ena_drop = 0, ena_bp = 0;

#define FPA_RED_AVG_DLY 1
#define FPA_RED_LVL_DLY 3
#define FPA_QOS_AVRG 0
	/* Trying to make it backward compatible with older chips */

	/* Setting up avg_dly and prb_dly, enable bits */
	if (octeon_has_feature(OCTEON_FEATURE_FPA3)) {
		cvmx_fpa3_config_red_params(node, FPA_QOS_AVRG,
					    FPA_RED_LVL_DLY, FPA_RED_AVG_DLY);
	}

	/* Disable backpressure on queued buffers, which is an aura in 78xx */
	/*
	 * Assumption is that all packets from all interfaces and ports go
	 * into the same poolx/aurax for backward compatibility
	 */
	aura = cvmx_fpa_get_packet_pool();
	buf_cnt = cvmx_fpa_get_packet_pool_buffer_count();
	pass_thresh = buf_cnt - pass_thresh;
	drop_thresh = buf_cnt - drop_thresh;
	/* Map aura to bpid 0 */
	bpid = 0;
	cvmx_pki_write_aura_bpid(node, aura, bpid);
	/* Don't enable back pressure */
	ena_bp = 0;
	/* enable RED */
	ena_red = 1;
	/*
	 * This will enable RED on all interfaces since
	 * they all have packet buffer coming from same aura
	 */
	cvmx_helper_setup_aura_qos(node, aura, ena_red, ena_drop, pass_thresh,
				   drop_thresh, ena_bp, 0);
}

/**
 * Setup Random Early Drop to automatically begin dropping packets.
 *
 * @param pass_thresh
 *	Packets will begin slowly dropping when there are less than
 *	this many packet buffers free in FPA 0.
 * @param drop_thresh
 *	All incoming packets will be dropped when there are less
 *	than this many free packet buffers in FPA 0.
 *
 * Return: Zero on success. Negative on failure
 */
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKI))
		cvmx_helper_setup_legacy_red(pass_thresh, drop_thresh);
	else
		cvmx_ipd_setup_red(pass_thresh, drop_thresh);
	return 0;
}

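/*
 * Usage sketch (illustrative thresholds): start RED when fewer than 1000
 * packet buffers remain free and drop all packets below 500 free buffers.
 *
 *	cvmx_helper_setup_red(1000, 500);
 */
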
/**
 * @INTERNAL
 * Setup the common GMX settings that determine the number of
 * ports. These settings apply to almost all configurations of all
 * chips.
 *
 * @param xiface Interface to configure
 * @param num_ports Number of ports on the interface
 *
 * Return: Zero on success, negative on failure
 */
int __cvmx_helper_setup_gmx(int xiface, int num_ports)
{
	union cvmx_gmxx_tx_prts gmx_tx_prts;
	union cvmx_gmxx_rx_prts gmx_rx_prts;
	union cvmx_pko_reg_gmx_port_mode pko_mode;
	union cvmx_gmxx_txx_thresh gmx_tx_thresh;
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);
	int index;

	/*
	 * The common BGX settings are already done in the appropriate
	 * enable functions, nothing to do here.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_BGX))
		return 0;

	/* Tell GMX the number of TX ports on this interface */
	gmx_tx_prts.u64 = csr_rd(CVMX_GMXX_TX_PRTS(xi.interface));
	gmx_tx_prts.s.prts = num_ports;
	csr_wr(CVMX_GMXX_TX_PRTS(xi.interface), gmx_tx_prts.u64);

	/*
	 * Tell GMX the number of RX ports on this interface. This only applies
	 * to *GMII and XAUI ports.
	 */
	switch (cvmx_helper_interface_get_mode(xiface)) {
	case CVMX_HELPER_INTERFACE_MODE_RGMII:
	case CVMX_HELPER_INTERFACE_MODE_SGMII:
	case CVMX_HELPER_INTERFACE_MODE_QSGMII:
	case CVMX_HELPER_INTERFACE_MODE_GMII:
	case CVMX_HELPER_INTERFACE_MODE_XAUI:
	case CVMX_HELPER_INTERFACE_MODE_RXAUI:
		if (num_ports > 4) {
			debug("%s: Illegal num_ports\n", __func__);
			return -1;
		}

		gmx_rx_prts.u64 = csr_rd(CVMX_GMXX_RX_PRTS(xi.interface));
		gmx_rx_prts.s.prts = num_ports;
		csr_wr(CVMX_GMXX_RX_PRTS(xi.interface), gmx_rx_prts.u64);
		break;

	default:
		break;
	}

	/*
	 * Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, 50XX,
	 * and 68XX.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		/* Tell PKO the number of ports on this interface */
		pko_mode.u64 = csr_rd(CVMX_PKO_REG_GMX_PORT_MODE);
		if (xi.interface == 0) {
			if (num_ports == 1)
				pko_mode.s.mode0 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode0 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode0 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode0 = 1;
			else
				pko_mode.s.mode0 = 0;
		} else {
			if (num_ports == 1)
				pko_mode.s.mode1 = 4;
			else if (num_ports == 2)
				pko_mode.s.mode1 = 3;
			else if (num_ports <= 4)
				pko_mode.s.mode1 = 2;
			else if (num_ports <= 8)
				pko_mode.s.mode1 = 1;
			else
				pko_mode.s.mode1 = 0;
		}
		csr_wr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
	}

	/*
	 * Set GMX to buffer as much data as possible before starting
	 * transmit. This reduces the chances that we have a TX under run
	 * due to memory contention. Any packet that fits entirely in the
	 * GMX FIFO can never have an under run regardless of memory load.
	 */
	gmx_tx_thresh.u64 = csr_rd(CVMX_GMXX_TXX_THRESH(0, xi.interface));
	/* ccn - common cnt numerator */
	int ccn = 0x100;

	/* Choose the max value for the number of ports */
	if (num_ports <= 1)
		gmx_tx_thresh.s.cnt = ccn / 1;
	else if (num_ports == 2)
		gmx_tx_thresh.s.cnt = ccn / 2;
	else
		gmx_tx_thresh.s.cnt = ccn / 4;

	/*
	 * SPI and XAUI can have lots of ports but the GMX hardware
	 * only ever has a max of 4
	 */
	if (num_ports > 4)
		num_ports = 4;
	for (index = 0; index < num_ports; index++)
		csr_wr(CVMX_GMXX_TXX_THRESH(index, xi.interface), gmx_tx_thresh.u64);

	/*
	 * For o68, we need to setup the pipes
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN68XX) && xi.interface < CVMX_HELPER_MAX_GMX) {
		union cvmx_gmxx_txx_pipe config;

		for (index = 0; index < num_ports; index++) {
			config.u64 = 0;

			if (__cvmx_helper_cfg_pko_port_base(xiface, index) >= 0) {
				config.u64 = csr_rd(CVMX_GMXX_TXX_PIPE(index,
								       xi.interface));
				config.s.nump = __cvmx_helper_cfg_pko_port_num(xiface,
									       index);
				config.s.base = __cvmx_helper_cfg_pko_port_base(xiface,
										index);
				csr_wr(CVMX_GMXX_TXX_PIPE(index, xi.interface),
				       config.u64);
			}
		}
	}

	return 0;
}

int cvmx_helper_get_pko_port(int interface, int port)
{
	return cvmx_pko_get_base_pko_port(interface, port);
}

int cvmx_helper_get_ipd_port(int xiface, int index)
{
	struct cvmx_xiface xi = cvmx_helper_xiface_to_node_interface(xiface);

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		const struct ipd_port_map *port_map;
		int ipd_port;

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			port_map = ipd_port_map_68xx;
			ipd_port = 0;
		} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
			port_map = ipd_port_map_78xx;
			ipd_port = cvmx_helper_node_to_ipd_port(xi.node, 0);
		} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
			port_map = ipd_port_map_73xx;
			ipd_port = 0;
		} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
			port_map = ipd_port_map_75xx;
			ipd_port = 0;
		} else {
			return -1;
		}

		ipd_port += port_map[xi.interface].first_ipd_port;
		if (port_map[xi.interface].type == GMII) {
			cvmx_helper_interface_mode_t mode;

			mode = cvmx_helper_interface_get_mode(xiface);
			if (mode == CVMX_HELPER_INTERFACE_MODE_XAUI ||
			    (mode == CVMX_HELPER_INTERFACE_MODE_RXAUI &&
			     OCTEON_IS_MODEL(OCTEON_CN68XX))) {
				ipd_port += port_map[xi.interface].ipd_port_adj;
				return ipd_port;
			} else {
				return ipd_port + (index * 16);
			}
		} else if (port_map[xi.interface].type == ILK) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == NPI) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == SRIO) {
			return ipd_port + index;
		} else if (port_map[xi.interface].type == LB) {
			return ipd_port + index;
		}

		debug("ERROR: %s: interface %u:%u bad mode\n",
		      __func__, xi.node, xi.interface);
		return -1;
	} else if (cvmx_helper_interface_get_mode(xiface) ==
		   CVMX_HELPER_INTERFACE_MODE_AGL) {
		return 24;
	}

	switch (xi.interface) {
	case 0:
		return index;
	case 1:
		return index + 16;
	case 2:
		return index + 32;
	case 3:
		return index + 36;
	case 4:
		return index + 40;
	case 5:
		return index + 42;
	case 6:
		return index + 44;
	case 7:
		return index + 46;
	}
	return -1;
}

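/*
 * Worked example for CN68XX (see ipd_port_map_68xx above): SGMII index 2
 * on interface 0 maps to 0x800 + 2 * 16 = 0x820, while XAUI on interface 0
 * maps to 0x800 + 0x40 = 0x840 because of the ipd_port_adj value.
 */
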
int cvmx_helper_get_pknd(int xiface, int index)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return __cvmx_helper_cfg_pknd(xiface, index);

	return CVMX_INVALID_PKND;
}

int cvmx_helper_get_bpid(int interface, int port)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND))
		return __cvmx_helper_cfg_bpid(interface, port);

	return CVMX_INVALID_BPID;
}

/**
 * Display interface statistics.
 *
 * @param port IPD/PKO port number
 *
 * Return: none
 */
void cvmx_helper_show_stats(int port)
{
	cvmx_pip_port_status_t status;
	cvmx_pko_port_status_t pko_status;

	/* ILK stats */
	if (octeon_has_feature(OCTEON_FEATURE_ILK))
		__cvmx_helper_ilk_show_stats();

	/* PIP stats */
	cvmx_pip_get_port_stats(port, 0, &status);
	debug("port %d: the number of packets - ipd: %d\n", port,
	      (int)status.packets);

	/* PKO stats */
	cvmx_pko_get_port_status(port, 0, &pko_status);
	debug("port %d: the number of packets - pko: %d\n", port,
	      (int)pko_status.packets);

	/* TODO: other stats */
}

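/*
 * Usage sketch (illustrative): dump the counters for the first port of
 * interface 0.
 *
 *	cvmx_helper_show_stats(cvmx_helper_get_ipd_port(0, 0));
 */
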
/**
 * Returns the interface number for an IPD/PKO port number.
 *
 * @param ipd_port IPD/PKO port number
 *
 * Return: Interface number
 */
int cvmx_helper_get_interface_num(int ipd_port)
{
	if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_68xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_78xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return cvmx_helper_node_interface_to_xiface(xp.node, i);
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_73xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		const struct ipd_port_map *port_map;
		int i;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		port_map = ipd_port_map_75xx;
		for (i = 0; i < CVMX_HELPER_MAX_IFACE; i++) {
			if (xp.port >= port_map[i].first_ipd_port &&
			    xp.port <= port_map[i].last_ipd_port)
				return i;
		}
		return -1;
	} else if (OCTEON_IS_MODEL(OCTEON_CN70XX) && ipd_port == 24) {
		return 4;
	}

	if (ipd_port < 16)
		return 0;
	else if (ipd_port < 32)
		return 1;
	else if (ipd_port < 36)
		return 2;
	else if (ipd_port < 40)
		return 3;
	else if (ipd_port < 42)
		return 4;
	else if (ipd_port < 44)
		return 5;
	else if (ipd_port < 46)
		return 6;
	else if (ipd_port < 48)
		return 7;

	debug("%s: Illegal IPD port number %d\n", __func__, ipd_port);
	return -1;
}

/**
 * Returns the interface index number for an IPD/PKO port
 * number.
 *
 * @param ipd_port IPD/PKO port number
 *
 * Return: Interface index number
 */
int cvmx_helper_get_interface_index_num(int ipd_port)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		const struct ipd_port_map *port_map;
		int port;
		enum port_map_if_type type = INVALID_IF_TYPE;
		int i;
		int num_interfaces;

		if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
			port_map = ipd_port_map_68xx;
		} else if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_78xx;
			ipd_port = xp.port;
		} else if (OCTEON_IS_MODEL(OCTEON_CN73XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_73xx;
			ipd_port = xp.port;
		} else if (OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
			struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

			port_map = ipd_port_map_75xx;
			ipd_port = xp.port;
		} else {
			return -1;
		}

		num_interfaces = cvmx_helper_get_number_of_interfaces();

		/* Get the interface type of the ipd port */
		for (i = 0; i < num_interfaces; i++) {
			if (ipd_port >= port_map[i].first_ipd_port &&
			    ipd_port <= port_map[i].last_ipd_port) {
				type = port_map[i].type;
				break;
			}
		}

		/* Convert the ipd port to the interface port */
		switch (type) {
		/* Ethernet interfaces have a channel in the lower 4 bits
		 * that does not discriminate traffic, and is ignored.
		 */
		case GMII:
			port = ipd_port - port_map[i].first_ipd_port;

			/* CN68XX adds 0x40 to IPD_PORT when in XAUI/RXAUI
			 * mode of operation, adjust for that case
			 */
			if (port >= port_map[i].ipd_port_adj)
				port -= port_map[i].ipd_port_adj;

			port >>= 4;
			return port;

		/*
		 * These interfaces do not have physical ports,
		 * but have logical channels instead that separate
		 * traffic into logical streams
		 */
		case ILK:
		case SRIO:
		case NPI:
		case LB:
			port = ipd_port - port_map[i].first_ipd_port;
			return port;

		default:
			printf("ERROR: %s: Illegal IPD port number %#x\n",
			       __func__, ipd_port);
			return -1;
		}
	}
	if (OCTEON_IS_MODEL(OCTEON_CN70XX))
		return ipd_port & 3;
	if (ipd_port < 32)
		return ipd_port & 15;
	else if (ipd_port < 40)
		return ipd_port & 3;
	else if (ipd_port < 48)
		return ipd_port & 1;

	debug("%s: Illegal IPD port number\n", __func__);

	return -1;
}

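/*
 * Worked example, inverting cvmx_helper_get_ipd_port() on CN68XX: IPD port
 * 0x820 falls in interface 0's GMII range, 0x820 - 0x800 = 0x20 is below
 * the 0x40 adjustment, so the index is 0x20 >> 4 = 2. XAUI's 0x840 first
 * has the 0x40 adjustment removed and maps to index 0.
 */
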
/**
 * Prints out a buffer with the address, hex bytes, and ASCII
 *
 * @param addr Start address to print on the left
 * @param[in] buffer array of bytes to print
 * @param count Number of bytes to print
 */
void cvmx_print_buffer_u8(unsigned int addr, const uint8_t *buffer,
			  size_t count)
{
	uint i;

	while (count) {
		unsigned int linelen = count < 16 ? count : 16;

		debug("%08x:", addr);

		for (i = 0; i < linelen; i++)
			debug(" %0*x", 2, buffer[i]);

		while (i++ < 17)
			debug("   ");

		for (i = 0; i < linelen; i++) {
			if (buffer[i] >= 0x20 && buffer[i] < 0x7f)
				debug("%c", buffer[i]);
			else
				debug(".");
		}
		debug("\n");
		addr += linelen;
		buffer += linelen;
		count -= linelen;
	}
}
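
/*
 * Usage sketch (illustrative): hex/ASCII-dump the start of a received
 * packet using the buffer pointer translated by cvmx_wqe_get_packet_ptr().
 *
 *	cvmx_buf_ptr_t ptr = cvmx_wqe_get_packet_ptr(work);
 *	unsigned int len = cvmx_wqe_get_len(work);
 *
 *	cvmx_print_buffer_u8((unsigned int)ptr.s.addr,
 *			     cvmx_phys_to_ptr(ptr.s.addr),
 *			     len < ptr.s.size ? len : ptr.s.size);
 */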