// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2022 Marvell International Ltd.
 *
 */

#include <errno.h>
#include <log.h>
#include <time.h>
#include <linux/delay.h>

#include <mach/cvmx-regs.h>
#include <mach/cvmx-csr.h>
#include <mach/cvmx-bootmem.h>
#include <mach/octeon-model.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-qlm.h>
#include <mach/octeon_qlm.h>
#include <mach/cvmx-pcie.h>
#include <mach/cvmx-coremask.h>

#include <mach/cvmx-agl-defs.h>
#include <mach/cvmx-bgxx-defs.h>
#include <mach/cvmx-ciu-defs.h>
#include <mach/cvmx-gmxx-defs.h>
#include <mach/cvmx-gserx-defs.h>
#include <mach/cvmx-ilk-defs.h>
#include <mach/cvmx-ipd-defs.h>
#include <mach/cvmx-pcsx-defs.h>
#include <mach/cvmx-pcsxx-defs.h>
#include <mach/cvmx-pki-defs.h>
#include <mach/cvmx-pko-defs.h>
#include <mach/cvmx-xcv-defs.h>

#include <mach/cvmx-scratch.h>
#include <mach/cvmx-hwfau.h>
#include <mach/cvmx-fau.h>

#include <mach/cvmx-hwpko.h>
#include <mach/cvmx-ilk.h>
#include <mach/cvmx-pki.h>
#include <mach/cvmx-pko3.h>
#include <mach/cvmx-pko3-queue.h>
#include <mach/cvmx-pko3-resources.h>

#include <mach/cvmx-helper.h>
#include <mach/cvmx-helper-board.h>
#include <mach/cvmx-helper-cfg.h>

#include <mach/cvmx-helper-bgx.h>
#include <mach/cvmx-helper-util.h>
#include <mach/cvmx-helper-pki.h>

/* #undef CVMX_ENABLE_PARAMETER_CHECKING */
/* #define CVMX_ENABLE_PARAMETER_CHECKING 1 */
/* #define __PKO3_NATIVE_PTR */

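/**
 * @INTERNAL
 *
 * Compose a full physical address from a legacy 40-bit address and a
 * node number, which is placed just above address bit 39; e.g. node 1
 * with addr 0x100 yields 0x100_0000_0100.
 */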
static inline u64 cvmx_pko3_legacy_paddr(unsigned int node, u64 addr)
{
	u64 paddr;

	paddr = node;
	paddr = (addr & ((1ull << 40) - 1)) | (paddr << 40);
	return paddr;
}

#if CVMX_ENABLE_PARAMETER_CHECKING
/**
 * @INTERNAL
 *
 * Verify the integrity of a legacy buffer link pointer.
 *
 * Note that the IPD/PIP/PKO hardware sometimes rounds up the
 * buf_ptr->size field of the last buffer in a chain to the next
 * cache-line size, so the sum of the buf_ptr->size fields for a
 * packet may exceed total_bytes by up to 127 bytes.
 *
 * @returns 0 on success, a negative number on error.
 */
static int cvmx_pko3_legacy_bufptr_validate(cvmx_buf_ptr_t buf_ptr,
					    unsigned int gather,
					    unsigned int buffers,
					    unsigned int total_bytes)
{
	unsigned int node = cvmx_get_node_num();
	unsigned int segs = 0, bytes = 0;
	u64 phys_addr;
	cvmx_buf_ptr_t ptr;
	int delta;

	if (buffers == 0) {
		return -1;
	} else if (buffers == 1) {
		delta = buf_ptr.s.size - total_bytes;
		if (delta < 0 || delta > 127)
			return -2;
	} else if (gather) {
		cvmx_buf_ptr_t *vptr;
		/* Validate gather list */
		if (buf_ptr.s.size < buffers)
			return -3;
		phys_addr = cvmx_pko3_legacy_paddr(node, buf_ptr.s.addr);
		vptr = cvmx_phys_to_ptr(phys_addr);
		for (segs = 0; segs < buffers; segs++)
			bytes += vptr[segs].s.size;
		delta = bytes - total_bytes;
		if (delta < 0 || delta > 127)
			return -4;
	} else {
		void *vptr;
		/* Validate linked buffers */
		ptr = buf_ptr;
		for (segs = 0; segs < buffers; segs++) {
			bytes += ptr.s.size;
			phys_addr = cvmx_pko3_legacy_paddr(node, ptr.s.addr);
			vptr = cvmx_phys_to_ptr(phys_addr);
			memcpy(&ptr, vptr - sizeof(u64), sizeof(u64));
		}
		delta = bytes - total_bytes;
		if (delta < 0 || delta > 127)
			return -5;
	}
	return 0;
}
#endif /* CVMX_ENABLE_PARAMETER_CHECKING */

/*
 * @INTERNAL
 *
 * Implementation note:
 * When the packet is certain not to need a jump_buf,
 * it is written directly into cvmseg.
 * When the packet might not fit into cvmseg with all
 * of its descriptors, a jump_buf is allocated a priori, and only the
 * header is placed into cvmseg; all other descriptors are placed
 * into jump_buf, and finally the PKO_SEND_JUMP_S is written to cvmseg.
 * This is because when there are no EXT or TSO descriptors,
 * HDR must come first and JUMP second, and that is all that
 * should go into cvmseg.
 */
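/*
 * Resulting command layouts (a sketch derived from the logic below):
 *
 *   direct (small commands):     with jump buffer:
 *     cvmseg: HDR_S, subdc...      cvmseg:   HDR_S, JUMP_S
 *                                  jump_buf: subdc..., AURA_S
 */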
struct __cvmx_pko3_legacy_desc {
	u64 *cmd_words;		/* PKO command words in CVMSEG */
	u64 *jump_buf_base_ptr;	/* jump buffer, NULL when not in use */
	unsigned short word_count;	/* words used in cmd_words */
	short last_pool;	/* last pool seen, -1 until one is seen */
	u8 port_node;		/* node of the destination DQ */
	u8 aura_node;		/* node the legacy pools live on */
	u8 jump_buf_size;	/* words used in jump_buf */
};

/**
 * @INTERNAL
 *
 * Add a subdescriptor into a command buffer, handling command-buffer
 * overflow by spilling into a JUMP_S buffer allocated from the PKO3
 * internal AURA.
 *
 * @returns the number of command words used so far, or a negative
 * number on error.
 */
static int __cvmx_pko3_cmd_subdc_add(struct __cvmx_pko3_legacy_desc *desc,
				     u64 subdc)
{
	/* SEND_JUMP_S missing on Pass1.X */
	if (desc->word_count >= 15) {
		printf("%s: ERROR: too many segments\n", __func__);
		return -EBADF;
	}

	/* Handle small commands simply */
	if (cvmx_likely(!desc->jump_buf_base_ptr)) {
		desc->cmd_words[desc->word_count] = subdc;
		(desc->word_count)++;
		return desc->word_count;
	}

	/* jump_buf_size is a u8, so cap the spill at 255 words */
	if (cvmx_unlikely(desc->jump_buf_size >= 255))
		return -ENOMEM;

	desc->jump_buf_base_ptr[desc->jump_buf_size++] = subdc;

	return desc->word_count + desc->jump_buf_size;
}

/**
 * @INTERNAL
 *
 * Finalize a command buffer.
 *
 * @returns the total number of command words in the command buffer
 * and jump buffer, or a negative number on error.
 */
static int __cvmx_pko3_cmd_done(struct __cvmx_pko3_legacy_desc *desc)
{
	short pko_aura;
	cvmx_pko_buf_ptr_t jump_s;
	cvmx_pko_send_aura_t aura_s;

	/* no jump buffer, nothing to do */
	if (!desc->jump_buf_base_ptr)
		return desc->word_count;

	/* Account for the SEND_JUMP_S word added below */
	desc->word_count++;

	/* With a jump buffer, the command must be exactly HDR + JUMP */
	if (desc->word_count != 2) {
		printf("ERROR: %s: internal error, word_count=%d\n", __func__,
		       desc->word_count);
		return -EINVAL;
	}

	/* Add SEND_AURA_S at the end of jump_buf */
	pko_aura = __cvmx_pko3_aura_get(desc->port_node);

	aura_s.u64 = 0;
	aura_s.s.aura = pko_aura;
	aura_s.s.offset = 0;
	aura_s.s.alg = AURAALG_NOP;
	aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;

	desc->jump_buf_base_ptr[desc->jump_buf_size++] = aura_s.u64;

	/* Add SEND_JUMP_S to point to jump_buf */
	jump_s.u64 = 0;
	jump_s.s.subdc3 = CVMX_PKO_SENDSUBDC_JUMP;
	jump_s.s.addr = cvmx_ptr_to_phys(desc->jump_buf_base_ptr);
	jump_s.s.i = 1;	/* Free the jump buffer when done */
	jump_s.s.size = desc->jump_buf_size;
	desc->cmd_words[1] = jump_s.u64;

	return desc->word_count + desc->jump_buf_size;
}
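
/*
 * Note: __cvmx_pko3_cmd_done() must run after all subdescriptors have
 * been added and immediately before the command is submitted, because
 * it writes the SEND_JUMP_S into command word 1.
 */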

/**
 * @INTERNAL
 *
 * Handle buffer pools for a legacy PKO transmit operation.
 */
static inline int cvmx_pko3_legacy_pool(struct __cvmx_pko3_legacy_desc *desc,
					int pool)
{
	cvmx_pko_send_aura_t aura_s;
	unsigned int aura;

	if (cvmx_unlikely(desc->last_pool == pool))
		return 0;

	aura = desc->aura_node << 10;	/* LAURA=AURA[0..9] */
	aura |= pool;

	if (cvmx_likely(desc->last_pool < 0)) {
		cvmx_pko_send_hdr_t *hdr_s;

		hdr_s = (void *)&desc->cmd_words[0];
		/* Create AURA from legacy pool (assume LAURA == POOL) */
		hdr_s->s.aura = aura;
		desc->last_pool = pool;
		return 0;
	}

	aura_s.u64 = 0;
	aura_s.s.subdc4 = CVMX_PKO_SENDSUBDC_AURA;
	aura_s.s.offset = 0;
	aura_s.s.alg = AURAALG_NOP;
	aura_s.s.aura = aura;
	desc->last_pool = pool;
	return __cvmx_pko3_cmd_subdc_add(desc, aura_s.u64);
}

/**
 * @INTERNAL
 *
 * Backward compatibility for packet transmission using the legacy PKO
 * command format.
 *
 * NOTE: Only supports output on node-local ports.
 *
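 * Example (an illustrative sketch only, assuming the caller already
 * holds a legacy cvmx_buf_ptr_t "pkt" describing one linear buffer of
 * "len" bytes, and a valid descriptor queue number "dq"):
 *
 *	cvmx_pko_command_word0_t cmd;
 *
 *	cmd.u64 = 0;
 *	cmd.s.segs = 1;
 *	cmd.s.total_bytes = len;
 *	cmd.s.dontfree = 1;
 *	if (cvmx_pko3_legacy_xmit(dq, cmd, pkt, 0, false) != CVMX_PKO_SUCCESS)
 *		printf("transmit failed\n");
 *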
 * TBD: Could embed destination node in extended DQ number.
 */
cvmx_pko_return_value_t
cvmx_pko3_legacy_xmit(unsigned int dq, cvmx_pko_command_word0_t pko_command,
		      cvmx_buf_ptr_t packet, u64 addr, bool tag_sw)
{
	cvmx_pko_query_rtn_t pko_status;
	cvmx_pko_send_hdr_t *hdr_s;
	struct __cvmx_pko3_legacy_desc desc;
	u8 *data_ptr;
	unsigned int node, seg_cnt;
	int res;
	cvmx_buf_ptr_pki_t bptr;

	seg_cnt = pko_command.s.segs;
	desc.cmd_words = cvmx_pko3_cvmseg_addr();

	/* Allocate from local aura, assume all old-pools are local */
	node = cvmx_get_node_num();
	desc.aura_node = node;

	/* Derive destination node from dq: bits <9:0> are the local DQ,
	 * the bits above carry the node number
	 */
	desc.port_node = dq >> 10;
	dq &= (1 << 10) - 1;

	desc.word_count = 1;	/* word 0 is reserved for PKO_SEND_HDR_S */
	desc.last_pool = -1;

	/* For small packets, write descriptors directly to CVMSEG,
	 * but for longer packets use a jump_buf
	 * (CN78XX Pass1.X lacks SEND_JUMP_S, so it must use CVMSEG only)
	 */
	if (seg_cnt < 7 || OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X)) {
		desc.jump_buf_size = 0;
		desc.jump_buf_base_ptr = NULL;
	} else {
		unsigned int pko_aura = __cvmx_pko3_aura_get(desc.port_node);

		cvmx_fpa3_gaura_t aura =
			__cvmx_fpa3_gaura(pko_aura >> 10, pko_aura & 0x3ff);

		/* Allocate from internal AURA, buffer size is 4 KiB */
		desc.jump_buf_base_ptr = cvmx_fpa3_alloc(aura);

		if (!desc.jump_buf_base_ptr)
			return CVMX_PKO_NO_MEMORY;
		desc.jump_buf_size = 0;
	}

	/* Native buffer-pointer for error checking */
	bptr.u64 = packet.u64;

#if CVMX_ENABLE_PARAMETER_CHECKING
	if (seg_cnt == 1 && bptr.size == pko_command.s.total_bytes) {
		/*
		 * Special case for a native buffer pointer:
		 * this is the only case where the native pointer style
		 * can be automatically identified, namely when an entire
		 * packet was placed into a single buffer by the PKI.
		 * The use of native buffers with this function
		 * should be avoided.
		 */
		debug("%s: WARNING: Native buffer-pointer\n", __func__);
	} else {
		/* The buffer ptr is assumed to arrive in legacy format */
		res = cvmx_pko3_legacy_bufptr_validate(
			packet, pko_command.s.gather, pko_command.s.segs,
			pko_command.s.total_bytes);
		if (res < 0) {
			debug("%s: ERROR: Not a valid packet pointer <%d>\n",
			      __func__, res);
			return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
		}
	}
#endif /* CVMX_ENABLE_PARAMETER_CHECKING */

	/* Squash warnings */
	(void)bptr;

	/*** Translate legacy PKO fields into PKO3 PKO_SEND_HDR_S ***/

	/* PKO_SEND_HDR_S is always the first word in the command */
	hdr_s = (void *)&desc.cmd_words[0];
	hdr_s->u64 = 0;

	/* Copy total packet size */
	hdr_s->s.total = pko_command.s.total_bytes;

	/* Endianness */
	hdr_s->s.le = pko_command.s.le;

	/* N2 has the same meaning */
	if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
		hdr_s->s.n2 = 0;	/* L2 allocate everything */
	else
		hdr_s->s.n2 = pko_command.s.n2;

	/* DF bit has the same meaning */
	hdr_s->s.df = pko_command.s.dontfree;

	/* II bit has the same meaning */
	hdr_s->s.ii = pko_command.s.ignore_i;

	/* A non-zero IP header offset requires L3/L4 checksum calculation */
	if (cvmx_unlikely(pko_command.s.ipoffp1 > 0)) {
		u8 ipoff, ip0, l4_proto = 0;

		/* Get data pointer for header inspection below */
		if (pko_command.s.gather) {
			cvmx_buf_ptr_t *p_ptr;
			cvmx_buf_ptr_t blk;

			p_ptr = cvmx_phys_to_ptr(
				cvmx_pko3_legacy_paddr(node, packet.s.addr));
			blk = p_ptr[0];
			data_ptr = cvmx_phys_to_ptr(
				cvmx_pko3_legacy_paddr(node, blk.s.addr));
		} else {
			data_ptr = cvmx_phys_to_ptr(
				cvmx_pko3_legacy_paddr(node, packet.s.addr));
		}

		/* Get IP header offset */
		ipoff = pko_command.s.ipoffp1 - 1;

		/* Parse IP header version and L4 protocol */
		hdr_s->s.l3ptr = ipoff;
		ip0 = data_ptr[ipoff];

		/* IPv4: header length from IHL, protocol at byte 9,
		 * L3 checksum offload
		 */
		if ((ip0 >> 4) == 4) {
			hdr_s->s.l4ptr = hdr_s->s.l3ptr + ((ip0 & 0xf) << 2);
			l4_proto = data_ptr[ipoff + 9];
			hdr_s->s.ckl3 = 1;	/* Only valid for IPv4 */
		}
		/* IPv6: fixed 40-byte header, next-header at byte 6,
		 * no L3 checksum
		 */
		if ((ip0 >> 4) == 6) {
			hdr_s->s.l4ptr = hdr_s->s.l3ptr + 40;
			l4_proto = data_ptr[ipoff + 6];
		}
		/* Set L4 checksum algo based on L4 protocol */
		if (l4_proto == 6)
			hdr_s->s.ckl4 = /* TCP */ 2;
		else if (l4_proto == 17)
			hdr_s->s.ckl4 = /* UDP */ 1;
		else if (l4_proto == 132)
			hdr_s->s.ckl4 = /* SCTP */ 3;
		else
			hdr_s->s.ckl4 = /* Unknown */ 0;
	}

	if (pko_command.s.gather) {
		/* Process legacy gather list */
		cvmx_pko_buf_ptr_t gather_s;
		cvmx_buf_ptr_t *p_ptr;
		cvmx_buf_ptr_t blk;
		unsigned int i;

		/* Get gather list pointer */
		p_ptr = cvmx_phys_to_ptr(
			cvmx_pko3_legacy_paddr(node, packet.s.addr));
		blk = p_ptr[0];
		/* setup data_ptr */
		data_ptr = cvmx_phys_to_ptr(
			cvmx_pko3_legacy_paddr(node, blk.s.addr));

		for (i = 0; i < seg_cnt; i++) {
			/* Insert PKO_SEND_AURA_S if the pool changes */
			if (cvmx_unlikely(cvmx_pko3_legacy_pool(
						  &desc, blk.s.pool) < 0))
				return CVMX_PKO_NO_MEMORY;

			/* Insert PKO_SEND_GATHER_S for the current buffer */
			gather_s.u64 = 0;
			gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
			gather_s.s.size = blk.s.size;
			gather_s.s.i = blk.s.i;
			gather_s.s.addr =
				cvmx_pko3_legacy_paddr(node, blk.s.addr);

			res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
			if (res < 0)
				return CVMX_PKO_NO_MEMORY;

			/* get next bufptr */
			blk = p_ptr[i + 1];
		} /* for i */

		/* Free the original gather-list buffer, honoring the
		 * legacy I/II/DF semantics; the size argument is the
		 * number of cache lines the gather list occupies
		 */
		if ((pko_command.s.ignore_i && !pko_command.s.dontfree) ||
		    packet.s.i == pko_command.s.dontfree)
			cvmx_fpa_free_nosync(p_ptr, packet.s.pool,
					     (i - 1) / 16 + 1);
	} else {
		/* Process legacy linked buffer list */
		cvmx_pko_buf_ptr_t gather_s;
		cvmx_buf_ptr_t blk;
		void *vptr;

		data_ptr = cvmx_phys_to_ptr(
			cvmx_pko3_legacy_paddr(node, packet.s.addr));
		blk = packet;

		/*
		 * Legacy linked buffers are converted into a flat gather
		 * list so that the AURA can optionally be changed to
		 * reflect the POOL number in the legacy pointers.
		 */
		do {
			/* Insert PKO_SEND_AURA_S if pool changes */
			if (cvmx_unlikely(cvmx_pko3_legacy_pool(
						  &desc, blk.s.pool) < 0))
				return CVMX_PKO_NO_MEMORY;

			/* Insert PKO_SEND_GATHER_S for the current buffer */
			gather_s.u64 = 0;
			gather_s.s.subdc3 = CVMX_PKO_SENDSUBDC_GATHER;
			gather_s.s.size = blk.s.size;
			gather_s.s.i = blk.s.i;
			gather_s.s.addr =
				cvmx_pko3_legacy_paddr(node, blk.s.addr);

			res = __cvmx_pko3_cmd_subdc_add(&desc, gather_s.u64);
			if (res < 0)
				return CVMX_PKO_NO_MEMORY;

			/* The next buffer pointer is stored in the u64
			 * immediately preceding the current buffer's data
			 */
			vptr = cvmx_phys_to_ptr(
				cvmx_pko3_legacy_paddr(node, blk.s.addr));
			memcpy(&blk, vptr - sizeof(blk), sizeof(blk));

			/* Decrement segment count */
			seg_cnt--;

		} while (seg_cnt > 0);
	}

	/* The RSP bit indicates the presence of the 3rd legacy command word */
	/* NOTE: the legacy 3rd word may already contain a CN78XX native phys addr */
	if (cvmx_unlikely(pko_command.s.rsp)) {
		/* The PTP bit in word3 is not supported -
		 * it cannot be distinguished from larger phys_addr[42..41]
		 */
		if (pko_command.s.wqp) {
			/* <addr> is an SSO WQE */
			cvmx_wqe_word1_t *wqe_p;
			cvmx_pko_send_work_t work_s;

			work_s.u64 = 0;
			work_s.s.subdc4 = CVMX_PKO_SENDSUBDC_WORK;
			work_s.s.addr = addr;
			/* Assume the WQE is in legacy format too */
			wqe_p = cvmx_phys_to_ptr(addr + sizeof(u64));
			work_s.s.grp = wqe_p->cn38xx.grp;
			work_s.s.tt = wqe_p->tag_type;

			res = __cvmx_pko3_cmd_subdc_add(&desc, work_s.u64);
		} else {
			cvmx_pko_send_mem_t mem_s;
			/* MEMALG_SET broken on Pass1 */
			if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_0)) {
				debug("%s: ERROR: PKO byte-clear not supported\n",
				      __func__);
			}
			/* <addr> is the physical address of the byte to clear */
			mem_s.u64 = 0;
			mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
			mem_s.s.addr = addr;
			mem_s.s.dsz = MEMDSZ_B8;
			mem_s.s.alg = MEMALG_SET;
			mem_s.s.offset = 0;

			res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
		}
		if (res < 0)
			return CVMX_PKO_NO_MEMORY;
	}

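	/* Legacy PKO decremented a FAU counter when a packet left the
	 * queue. Here that behavior appears to be emulated (an assumption
	 * based on __cvmx_fau_sw_addr below) with a PKO_SEND_MEM_S
	 * subdescriptor that subtracts either the packet length
	 * (MEMALG_SUBLEN) or one (MEMALG_SUB, when subone is set) from
	 * the software FAU location.
	 */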
	/* FAU counter binding reg0 */
	if (pko_command.s.reg0) {
		cvmx_pko_send_mem_t mem_s;

		debug("%s: Legacy FAU commands: reg0=%#x sz0=%#x\n", __func__,
		      pko_command.s.reg0, pko_command.s.size0);
		mem_s.u64 = 0;
		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
		mem_s.s.addr = cvmx_ptr_to_phys(
			CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg0)));
		if (cvmx_likely(pko_command.s.size0 == CVMX_FAU_OP_SIZE_64))
			mem_s.s.dsz = MEMDSZ_B64;
		else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_32)
			mem_s.s.dsz = MEMDSZ_B32;
		else if (pko_command.s.size0 == CVMX_FAU_OP_SIZE_16)
			mem_s.s.dsz = MEMDSZ_B16;
		else
			mem_s.s.dsz = MEMDSZ_B8;

		if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
			debug("%s: ERROR: 8/16 bit decrement unsupported\n",
			      __func__);

		mem_s.s.offset = pko_command.s.subone0;
		if (mem_s.s.offset)
			mem_s.s.alg = MEMALG_SUB;
		else
			mem_s.s.alg = MEMALG_SUBLEN;

		res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
		if (res < 0)
			return CVMX_PKO_NO_MEMORY;
	}

	/* FAU counter binding reg1 */
	if (cvmx_unlikely(pko_command.s.reg1)) {
		cvmx_pko_send_mem_t mem_s;

		debug("%s: Legacy FAU commands: reg1=%#x sz1=%#x\n", __func__,
		      pko_command.s.reg1, pko_command.s.size1);
		mem_s.u64 = 0;
		mem_s.s.subdc4 = CVMX_PKO_SENDSUBDC_MEM;
		mem_s.s.addr = cvmx_ptr_to_phys(
			CASTPTR(void, __cvmx_fau_sw_addr(pko_command.s.reg1)));
		if (cvmx_likely(pko_command.s.size1 == CVMX_FAU_OP_SIZE_64))
			mem_s.s.dsz = MEMDSZ_B64;
		else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_32)
			mem_s.s.dsz = MEMDSZ_B32;
		else if (pko_command.s.size1 == CVMX_FAU_OP_SIZE_16)
			mem_s.s.dsz = MEMDSZ_B16;
		else
			mem_s.s.dsz = MEMDSZ_B8;

		if (mem_s.s.dsz == MEMDSZ_B16 || mem_s.s.dsz == MEMDSZ_B8)
			printf("%s: ERROR: 8/16 bit decrement unsupported\n",
			       __func__);

		mem_s.s.offset = pko_command.s.subone1;
		if (mem_s.s.offset)
			mem_s.s.alg = MEMALG_SUB;
		else
			mem_s.s.alg = MEMALG_SUBLEN;

		res = __cvmx_pko3_cmd_subdc_add(&desc, mem_s.u64);
		if (res < 0)
			return CVMX_PKO_NO_MEMORY;
	}

	/* These PKO_HDR_S fields are not used: */
	/* hdr_s->s.ds has no legacy equivalent, remains 0 */
	/* hdr_s->s.format has no legacy equivalent, remains 0 */

	/*** Finalize command buffer ***/
	res = __cvmx_pko3_cmd_done(&desc);
	if (res < 0)
		return CVMX_PKO_NO_MEMORY;

	/*** Send the PKO3 command into the Descriptor Queue ***/
	pko_status =
		__cvmx_pko3_lmtdma(desc.port_node, dq, desc.word_count, tag_sw);

	/*** Map PKO3 result codes to legacy return values ***/
	if (cvmx_likely(pko_status.s.dqstatus == PKO_DQSTATUS_PASS))
		return CVMX_PKO_SUCCESS;

	debug("%s: ERROR: failed to enqueue: %s\n", __func__,
	      pko_dqstatus_error(pko_status.s.dqstatus));

	if (pko_status.s.dqstatus == PKO_DQSTATUS_ALREADY)
		return CVMX_PKO_PORT_ALREADY_SETUP;
	if (pko_status.s.dqstatus == PKO_DQSTATUS_NOFPABUF ||
	    pko_status.s.dqstatus == PKO_DQSTATUS_NOPKOBUF)
		return CVMX_PKO_NO_MEMORY;
	if (pko_status.s.dqstatus == PKO_DQSTATUS_NOTCREATED)
		return CVMX_PKO_INVALID_QUEUE;
	if (pko_status.s.dqstatus == PKO_DQSTATUS_BADSTATE)
		return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
	if (pko_status.s.dqstatus == PKO_DQSTATUS_SENDPKTDROP)
		return CVMX_PKO_INVALID_PORT;

	return CVMX_PKO_INVALID_PORT;
}