/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020 Marvell International Ltd.
 *
 * Interface to the hardware Packet Input Processing unit.
 */

#ifndef __CVMX_PIP_H__
#define __CVMX_PIP_H__

#include "cvmx-wqe.h"
#include "cvmx-pki.h"
#include "cvmx-helper-pki.h"

#include "cvmx-helper.h"
#include "cvmx-helper-util.h"
#include "cvmx-pki-resources.h"

#define CVMX_PIP_NUM_INPUT_PORTS 46
#define CVMX_PIP_NUM_WATCHERS	 8

/*
 * Encodes the different error and exception codes
 */
typedef enum {
	CVMX_PIP_L4_NO_ERR = 0ull,
	/* 1 = TCP (UDP) packet not long enough to cover TCP (UDP) header */
	CVMX_PIP_L4_MAL_ERR = 1ull,
	/* 2 = TCP/UDP checksum failure */
	CVMX_PIP_CHK_ERR = 2ull,
	/* 3 = TCP/UDP length check (TCP/UDP length does not match IP length) */
	CVMX_PIP_L4_LENGTH_ERR = 3ull,
	/* 4 = illegal TCP/UDP port (either source or dest port is zero) */
	CVMX_PIP_BAD_PRT_ERR = 4ull,
	/* 8 = TCP flags = FIN only */
	CVMX_PIP_TCP_FLG8_ERR = 8ull,
	/* 9 = TCP flags = 0 */
	CVMX_PIP_TCP_FLG9_ERR = 9ull,
	/* 10 = TCP flags = FIN+RST+* */
	CVMX_PIP_TCP_FLG10_ERR = 10ull,
	/* 11 = TCP flags = SYN+URG+* */
	CVMX_PIP_TCP_FLG11_ERR = 11ull,
	/* 12 = TCP flags = SYN+RST+* */
	CVMX_PIP_TCP_FLG12_ERR = 12ull,
	/* 13 = TCP flags = SYN+FIN+* */
	CVMX_PIP_TCP_FLG13_ERR = 13ull
} cvmx_pip_l4_err_t;

typedef enum {
	CVMX_PIP_IP_NO_ERR = 0ull,
	/* 1 = not IPv4 or IPv6 */
	CVMX_PIP_NOT_IP = 1ull,
	/* 2 = IPv4 header checksum violation */
	CVMX_PIP_IPV4_HDR_CHK = 2ull,
	/* 3 = malformed (packet not long enough to cover IP hdr) */
	CVMX_PIP_IP_MAL_HDR = 3ull,
	/* 4 = malformed (packet not long enough to cover len in IP hdr) */
	CVMX_PIP_IP_MAL_PKT = 4ull,
	/* 5 = TTL / hop count equal zero */
	CVMX_PIP_TTL_HOP = 5ull,
	/* 6 = IPv4 options / IPv6 early extension headers */
	CVMX_PIP_OPTS = 6ull
} cvmx_pip_ip_exc_t;

/**
 * NOTES
 * late collision (data received before collision)
 * late collisions cannot be detected by the receiver
 * they would appear as JAM bits which would appear as bad FCS
 * or carrier extend error which is CVMX_PIP_EXTEND_ERR
 */
typedef enum {
	/* No error */
	CVMX_PIP_RX_NO_ERR = 0ull,
	/* RGM+SPI 1 = partially received packet (buffering/bandwidth not adequate) */
	CVMX_PIP_PARTIAL_ERR = 1ull,
	/* RGM+SPI 2 = receive packet too large and truncated */
	CVMX_PIP_JABBER_ERR = 2ull,
	/* RGM 3 = max frame error (pkt len > max frame len) (with FCS error) */
	CVMX_PIP_OVER_FCS_ERR = 3ull,
	/* RGM+SPI 4 = max frame error (pkt len > max frame len) */
	CVMX_PIP_OVER_ERR = 4ull,
	/* RGM 5 = nibble error (data not byte multiple - 100M and 10M only) */
	CVMX_PIP_ALIGN_ERR = 5ull,
	/* RGM 6 = min frame error (pkt len < min frame len) (with FCS error) */
	CVMX_PIP_UNDER_FCS_ERR = 6ull,
	/* RGM 7 = FCS error */
	CVMX_PIP_GMX_FCS_ERR = 7ull,
	/* RGM+SPI 8 = min frame error (pkt len < min frame len) */
	CVMX_PIP_UNDER_ERR = 8ull,
	/* RGM 9 = Frame carrier extend error */
	CVMX_PIP_EXTEND_ERR = 9ull,
	/* XAUI 9 = Packet was terminated with an idle cycle */
	CVMX_PIP_TERMINATE_ERR = 9ull,
	/* RGM 10 = length mismatch (len did not match len in L2 length/type) */
	CVMX_PIP_LENGTH_ERR = 10ull,
	/* RGM 11 = Frame error (some or all data bits marked err) */
	CVMX_PIP_DAT_ERR = 11ull,
	/* SPI 11 = DIP4 error */
	CVMX_PIP_DIP_ERR = 11ull,
	/* RGM 12 = packet was not large enough to pass the skipper - no inspection could occur */
	CVMX_PIP_SKIP_ERR = 12ull,
	/* RGM 13 = studder error (data not repeated - 100M and 10M only) */
	CVMX_PIP_NIBBLE_ERR = 13ull,
	/* RGM+SPI 16 = FCS error */
	CVMX_PIP_PIP_FCS = 16L,
	/* RGM+SPI+PCI 17 = packet was not large enough to pass the skipper - no inspection could occur */
	CVMX_PIP_PIP_SKIP_ERR = 17L,
	/* RGM+SPI+PCI 18 = malformed l2 (packet not long enough to cover L2 hdr) */
	CVMX_PIP_PIP_L2_MAL_HDR = 18L,
	/* SGMII 47 = PUNY error (packet was 4B or less when FCS stripping is enabled) */
	CVMX_PIP_PUNY_ERR = 47L
	/* NOTES
	 * xx = late collision (data received before collision)
	 *      late collisions cannot be detected by the receiver
	 *      they would appear as JAM bits which would appear as bad FCS
	 *      or carrier extend error which is CVMX_PIP_EXTEND_ERR
	 */
} cvmx_pip_rcv_err_t;

/**
 * This defines the err_code field errors in the work Q entry
 */
typedef union {
	cvmx_pip_l4_err_t l4_err;
	cvmx_pip_ip_exc_t ip_exc;
	cvmx_pip_rcv_err_t rcv_err;
} cvmx_pip_err_t;
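
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * classifying a receive error code that has already been extracted from a
 * work-queue entry into a cvmx_pip_err_t. How the 8-bit err_code is pulled
 * out of the WQE depends on the receive opcode and is not shown here.
 *
 *	static inline int example_is_fcs_error(cvmx_pip_err_t err)
 *	{
 *		return err.rcv_err == CVMX_PIP_GMX_FCS_ERR ||
 *		       err.rcv_err == CVMX_PIP_PIP_FCS;
 *	}
 */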

/**
 * Status statistics for a port
 */
typedef struct {
	u64 dropped_octets;
	u64 dropped_packets;
	u64 pci_raw_packets;
	u64 octets;
	u64 packets;
	u64 multicast_packets;
	u64 broadcast_packets;
	u64 len_64_packets;
	u64 len_65_127_packets;
	u64 len_128_255_packets;
	u64 len_256_511_packets;
	u64 len_512_1023_packets;
	u64 len_1024_1518_packets;
	u64 len_1519_max_packets;
	u64 fcs_align_err_packets;
	u64 runt_packets;
	u64 runt_crc_packets;
	u64 oversize_packets;
	u64 oversize_crc_packets;
	u64 inb_packets;
	u64 inb_octets;
	u64 inb_errors;
	u64 mcast_l2_red_packets;
	u64 bcast_l2_red_packets;
	u64 mcast_l3_red_packets;
	u64 bcast_l3_red_packets;
} cvmx_pip_port_status_t;

/**
 * Definition of the PIP custom header that can be prepended
 * to a packet by external hardware.
 */
typedef union {
	u64 u64;
	struct {
		u64 rawfull : 1;
		u64 reserved0 : 5;
		cvmx_pip_port_parse_mode_t parse_mode : 2;
		u64 reserved1 : 1;
		u64 skip_len : 7;
		u64 grpext : 2;
		u64 nqos : 1;
		u64 ngrp : 1;
		u64 ntt : 1;
		u64 ntag : 1;
		u64 qos : 3;
		u64 grp : 4;
		u64 rs : 1;
		cvmx_pow_tag_type_t tag_type : 2;
		u64 tag : 32;
	} s;
} cvmx_pip_pkt_inst_hdr_t;
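
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * building a PKT_INST_HDR that overrides the QoS level, SSO group and tag
 * type for a RAW packet. CVMX_POW_TAG_TYPE_ORDERED is assumed to be the
 * ordered tag-type constant from the POW/SSO headers.
 *
 *	cvmx_pip_pkt_inst_hdr_t hdr;
 *
 *	hdr.u64 = 0;
 *	hdr.s.nqos = 1;					// take QoS from this header
 *	hdr.s.qos = 3;
 *	hdr.s.ngrp = 1;					// take the SSO group from this header
 *	hdr.s.grp = 5;
 *	hdr.s.ntt = 1;					// take the tag type from this header
 *	hdr.s.tag_type = CVMX_POW_TAG_TYPE_ORDERED;
 */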

enum cvmx_pki_pcam_match {
	CVMX_PKI_PCAM_MATCH_IP,
	CVMX_PKI_PCAM_MATCH_IPV4,
	CVMX_PKI_PCAM_MATCH_IPV6,
	CVMX_PKI_PCAM_MATCH_TCP
};

/* CSR typedefs have been moved to cvmx-pip-defs.h */
static inline int cvmx_pip_config_watcher(int index, int type, u16 match, u16 mask, int grp,
					  int qos)
{
	if (index >= CVMX_PIP_NUM_WATCHERS) {
		debug("ERROR: pip watcher %d is > than supported\n", index);
		return -1;
	}
	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		/* Store in software for now; program the entry only when the watcher is enabled */
		if (type == CVMX_PIP_QOS_WATCH_PROTNH) {
			qos_watcher[index].field = CVMX_PKI_PCAM_TERM_L3_FLAGS;
			qos_watcher[index].data = (u32)(match << 16);
			qos_watcher[index].data_mask = (u32)(mask << 16);
			qos_watcher[index].advance = 0;
		} else if (type == CVMX_PIP_QOS_WATCH_TCP) {
			qos_watcher[index].field = CVMX_PKI_PCAM_TERM_L4_PORT;
			qos_watcher[index].data = 0x060000;
			qos_watcher[index].data |= (u32)match;
			qos_watcher[index].data_mask = (u32)(mask);
			qos_watcher[index].advance = 0;
		} else if (type == CVMX_PIP_QOS_WATCH_UDP) {
			qos_watcher[index].field = CVMX_PKI_PCAM_TERM_L4_PORT;
			qos_watcher[index].data = 0x110000;
			qos_watcher[index].data |= (u32)match;
			qos_watcher[index].data_mask = (u32)(mask);
			qos_watcher[index].advance = 0;
		} else if (type == 0x4 /*CVMX_PIP_QOS_WATCH_ETHERTYPE*/) {
			qos_watcher[index].field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
			if (match == 0x8100) {
				debug("ERROR: default vlan entry already exists, can't set watcher\n");
				return -1;
			}
			qos_watcher[index].data = (u32)(match << 16);
			qos_watcher[index].data_mask = (u32)(mask << 16);
			qos_watcher[index].advance = 4;
		} else {
			debug("ERROR: Unsupported watcher type %d\n", type);
			return -1;
		}
		if (grp >= 32) {
			debug("ERROR: grp %d out of range for backward compat 78xx\n", grp);
			return -1;
		}
		qos_watcher[index].sso_grp = (u8)(grp << 3 | qos);
		qos_watcher[index].configured = 1;
	} else {
		/* Implement it later */
	}
	return 0;
}
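
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * steering TCP port 80 traffic into SSO group 2 with QoS 1 using watcher 0.
 * On PKI chips the entry is only written to hardware once the watcher is
 * enabled through the per-port watcher masks (see cvmx_pip_config_port()
 * below).
 *
 *	if (cvmx_pip_config_watcher(0, CVMX_PIP_QOS_WATCH_TCP, 80, 0xffff, 2, 1))
 *		debug("Failed to configure PIP watcher 0\n");
 */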

static inline int __cvmx_pip_set_tag_type(int node, int style, int tag_type, int field)
{
	struct cvmx_pki_style_config style_cfg;
	int style_num;
	int pcam_offset;
	int bank;
	struct cvmx_pki_pcam_input pcam_input;
	struct cvmx_pki_pcam_action pcam_action;

	/* All other style parameters remain the same except the tag type */
	cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &style_cfg);
	style_cfg.parm_cfg.tag_type = (enum cvmx_sso_tag_type)tag_type;
	style_num = cvmx_pki_style_alloc(node, -1);
	if (style_num < 0) {
		debug("ERROR: style not available to set tag type\n");
		return -1;
	}
	cvmx_pki_write_style_config(node, style_num, CVMX_PKI_CLUSTER_ALL, &style_cfg);
	memset(&pcam_input, 0, sizeof(pcam_input));
	memset(&pcam_action, 0, sizeof(pcam_action));
	pcam_input.style = style;
	pcam_input.style_mask = 0xff;
	if (field == CVMX_PKI_PCAM_MATCH_IP) {
		pcam_input.field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
		pcam_input.field_mask = 0xff;
		pcam_input.data = 0x08000000;
		pcam_input.data_mask = 0xffff0000;
		pcam_action.pointer_advance = 4;
		/* legacy will write to all clusters */
		bank = 0;
		pcam_offset = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,
							CVMX_PKI_CLUSTER_ALL);
		if (pcam_offset < 0) {
			debug("ERROR: pcam entry not available to enable qos watcher\n");
			cvmx_pki_style_free(node, style_num);
			return -1;
		}
		pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
		pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;
		pcam_action.style_add = (u8)(style_num - style);
		cvmx_pki_pcam_write_entry(node, pcam_offset, CVMX_PKI_CLUSTER_ALL, pcam_input,
					  pcam_action);
		field = CVMX_PKI_PCAM_MATCH_IPV6;
	}
	if (field == CVMX_PKI_PCAM_MATCH_IPV4) {
		pcam_input.field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
		pcam_input.field_mask = 0xff;
		pcam_input.data = 0x08000000;
		pcam_input.data_mask = 0xffff0000;
		pcam_action.pointer_advance = 4;
	} else if (field == CVMX_PKI_PCAM_MATCH_IPV6) {
		pcam_input.field = CVMX_PKI_PCAM_TERM_ETHTYPE0;
		pcam_input.field_mask = 0xff;
		pcam_input.data = 0x86dd0000;
		pcam_input.data_mask = 0xffff0000;
		pcam_action.pointer_advance = 4;
	} else if (field == CVMX_PKI_PCAM_MATCH_TCP) {
		pcam_input.field = CVMX_PKI_PCAM_TERM_L4_PORT;
		pcam_input.field_mask = 0xff;
		pcam_input.data = 0x60000;
		pcam_input.data_mask = 0xff0000;
		pcam_action.pointer_advance = 0;
	}
	pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
	pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;
	pcam_action.style_add = (u8)(style_num - style);
	bank = pcam_input.field & 0x01;
	pcam_offset = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,
						CVMX_PKI_CLUSTER_ALL);
	if (pcam_offset < 0) {
		debug("ERROR: pcam entry not available to enable qos watcher\n");
		cvmx_pki_style_free(node, style_num);
		return -1;
	}
	cvmx_pki_pcam_write_entry(node, pcam_offset, CVMX_PKI_CLUSTER_ALL, pcam_input, pcam_action);
	return style_num;
}

/* Only for legacy internal use */
static inline int __cvmx_pip_enable_watcher_78xx(int node, int index, int style)
{
	struct cvmx_pki_style_config style_cfg;
	struct cvmx_pki_qpg_config qpg_cfg;
	struct cvmx_pki_pcam_input pcam_input;
	struct cvmx_pki_pcam_action pcam_action;
	int style_num;
	int qpg_offset;
	int pcam_offset;
	int bank;

	if (!qos_watcher[index].configured) {
		debug("ERROR: qos watcher %d should be configured before enable\n", index);
		return -1;
	}
	/* All other style parameters remain the same except grp, qos and qpg base */
	cvmx_pki_read_style_config(node, style, CVMX_PKI_CLUSTER_ALL, &style_cfg);
	cvmx_pki_read_qpg_entry(node, style_cfg.parm_cfg.qpg_base, &qpg_cfg);
	qpg_cfg.qpg_base = CVMX_PKI_FIND_AVAL_ENTRY;
	qpg_cfg.grp_ok = qos_watcher[index].sso_grp;
	qpg_cfg.grp_bad = qos_watcher[index].sso_grp;
	qpg_offset = cvmx_helper_pki_set_qpg_entry(node, &qpg_cfg);
	if (qpg_offset == -1) {
		debug("Warning: no new qpg entry available to enable watcher\n");
		return -1;
	}
	/*
	 * Try to reserve the style; if it is not configured already,
	 * reserve and configure it.
	 */
	style_cfg.parm_cfg.qpg_base = qpg_offset;
	style_num = cvmx_pki_style_alloc(node, -1);
	if (style_num < 0) {
		debug("ERROR: style not available to enable qos watcher\n");
		cvmx_pki_qpg_entry_free(node, qpg_offset, 1);
		return -1;
	}
	cvmx_pki_write_style_config(node, style_num, CVMX_PKI_CLUSTER_ALL, &style_cfg);
	/* legacy will write to all clusters */
	bank = qos_watcher[index].field & 0x01;
	pcam_offset = cvmx_pki_pcam_entry_alloc(node, CVMX_PKI_FIND_AVAL_ENTRY, bank,
						CVMX_PKI_CLUSTER_ALL);
	if (pcam_offset < 0) {
		debug("ERROR: pcam entry not available to enable qos watcher\n");
		cvmx_pki_style_free(node, style_num);
		cvmx_pki_qpg_entry_free(node, qpg_offset, 1);
		return -1;
	}
	memset(&pcam_input, 0, sizeof(pcam_input));
	memset(&pcam_action, 0, sizeof(pcam_action));
	pcam_input.style = style;
	pcam_input.style_mask = 0xff;
	pcam_input.field = qos_watcher[index].field;
	pcam_input.field_mask = 0xff;
	pcam_input.data = qos_watcher[index].data;
	pcam_input.data_mask = qos_watcher[index].data_mask;
	pcam_action.parse_mode_chg = CVMX_PKI_PARSE_NO_CHG;
	pcam_action.layer_type_set = CVMX_PKI_LTYPE_E_NONE;
	pcam_action.style_add = (u8)(style_num - style);
	pcam_action.pointer_advance = qos_watcher[index].advance;
	cvmx_pki_pcam_write_entry(node, pcam_offset, CVMX_PKI_CLUSTER_ALL, pcam_input, pcam_action);
	return 0;
}

/**
 * Configure an ethernet input port
 *
 * @param ipd_port Port number to configure
 * @param port_cfg Port hardware configuration
 * @param port_tag_cfg Port POW tagging configuration
 */
static inline void cvmx_pip_config_port(u64 ipd_port, cvmx_pip_prt_cfgx_t port_cfg,
					cvmx_pip_prt_tagx_t port_tag_cfg)
{
	struct cvmx_pki_qpg_config qpg_cfg;
	int qpg_offset;
	u8 tcp_tag = 0xff;
	u8 ip_tag = 0xaa;
	int style, nstyle, n4style, n6style;

	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		struct cvmx_pki_port_config pki_prt_cfg;
		struct cvmx_xport xp = cvmx_helper_ipd_port_to_xport(ipd_port);

		cvmx_pki_get_port_config(ipd_port, &pki_prt_cfg);
		style = pki_prt_cfg.pkind_cfg.initial_style;
		if (port_cfg.s.ih_pri || port_cfg.s.vlan_len || port_cfg.s.pad_len)
			debug("Warning: 78xx: use different config for this option\n");
		pki_prt_cfg.style_cfg.parm_cfg.minmax_sel = port_cfg.s.len_chk_sel;
		pki_prt_cfg.style_cfg.parm_cfg.lenerr_en = port_cfg.s.lenerr_en;
		pki_prt_cfg.style_cfg.parm_cfg.maxerr_en = port_cfg.s.maxerr_en;
		pki_prt_cfg.style_cfg.parm_cfg.minerr_en = port_cfg.s.minerr_en;
		pki_prt_cfg.style_cfg.parm_cfg.fcs_chk = port_cfg.s.crc_en;
		if (port_cfg.s.grp_wat || port_cfg.s.qos_wat || port_cfg.s.grp_wat_47 ||
		    port_cfg.s.qos_wat_47) {
			u8 group_mask = (u8)(port_cfg.s.grp_wat | (u8)(port_cfg.s.grp_wat_47 << 4));
			u8 qos_mask = (u8)(port_cfg.s.qos_wat | (u8)(port_cfg.s.qos_wat_47 << 4));
			int i;

			for (i = 0; i < CVMX_PIP_NUM_WATCHERS; i++) {
				if ((group_mask & (1 << i)) || (qos_mask & (1 << i)))
					__cvmx_pip_enable_watcher_78xx(xp.node, i, style);
			}
		}
		if (port_tag_cfg.s.tag_mode) {
			if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
				cvmx_printf("Warning: mask tag is not supported in 78xx pass1\n");
			/* need to implement mask tag for 78xx */
		}
		if (port_cfg.s.tag_inc)
			debug("Warning: 78xx uses a different method for tag generation\n");
		pki_prt_cfg.style_cfg.parm_cfg.rawdrp = port_cfg.s.rawdrp;
		pki_prt_cfg.pkind_cfg.parse_en.inst_hdr = port_cfg.s.inst_hdr;
		if (port_cfg.s.hg_qos)
			pki_prt_cfg.style_cfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_HIGIG;
		else if (port_cfg.s.qos_vlan)
			pki_prt_cfg.style_cfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_VLAN;
		else if (port_cfg.s.qos_diff)
			pki_prt_cfg.style_cfg.parm_cfg.qpg_qos = CVMX_PKI_QPG_QOS_DIFFSERV;
		if (port_cfg.s.qos_vod)
			debug("Warning: 78xx needs pcam entries installed to achieve qos_vod\n");
		if (port_cfg.s.qos) {
			cvmx_pki_read_qpg_entry(xp.node, pki_prt_cfg.style_cfg.parm_cfg.qpg_base,
						&qpg_cfg);
			qpg_cfg.qpg_base = CVMX_PKI_FIND_AVAL_ENTRY;
			qpg_cfg.grp_ok |= port_cfg.s.qos;
			qpg_cfg.grp_bad |= port_cfg.s.qos;
			qpg_offset = cvmx_helper_pki_set_qpg_entry(xp.node, &qpg_cfg);
			if (qpg_offset == -1)
				debug("Warning: no new qpg entry available, will not modify qos\n");
			else
				pki_prt_cfg.style_cfg.parm_cfg.qpg_base = qpg_offset;
		}
		if (port_tag_cfg.s.grp != pki_dflt_sso_grp[xp.node].group) {
			cvmx_pki_read_qpg_entry(xp.node, pki_prt_cfg.style_cfg.parm_cfg.qpg_base,
						&qpg_cfg);
			qpg_cfg.qpg_base = CVMX_PKI_FIND_AVAL_ENTRY;
			qpg_cfg.grp_ok |= (u8)(port_tag_cfg.s.grp << 3);
			qpg_cfg.grp_bad |= (u8)(port_tag_cfg.s.grp << 3);
			qpg_offset = cvmx_helper_pki_set_qpg_entry(xp.node, &qpg_cfg);
			if (qpg_offset == -1)
				debug("Warning: no new qpg entry available, will not modify group\n");
			else
				pki_prt_cfg.style_cfg.parm_cfg.qpg_base = qpg_offset;
		}
		pki_prt_cfg.pkind_cfg.parse_en.dsa_en = port_cfg.s.dsa_en;
		pki_prt_cfg.pkind_cfg.parse_en.hg_en = port_cfg.s.higig_en;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_c_src =
			port_tag_cfg.s.ip6_src_flag | port_tag_cfg.s.ip4_src_flag;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_c_dst =
			port_tag_cfg.s.ip6_dst_flag | port_tag_cfg.s.ip4_dst_flag;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.ip_prot_nexthdr =
			port_tag_cfg.s.ip6_nxth_flag | port_tag_cfg.s.ip4_pctl_flag;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_d_src =
			port_tag_cfg.s.ip6_sprt_flag | port_tag_cfg.s.ip4_sprt_flag;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.layer_d_dst =
			port_tag_cfg.s.ip6_dprt_flag | port_tag_cfg.s.ip4_dprt_flag;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.input_port = port_tag_cfg.s.inc_prt_flag;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.first_vlan = port_tag_cfg.s.inc_vlan;
		pki_prt_cfg.style_cfg.tag_cfg.tag_fields.second_vlan = port_tag_cfg.s.inc_vs;

		if (port_tag_cfg.s.tcp6_tag_type == port_tag_cfg.s.tcp4_tag_type)
			tcp_tag = port_tag_cfg.s.tcp6_tag_type;
		if (port_tag_cfg.s.ip6_tag_type == port_tag_cfg.s.ip4_tag_type)
			ip_tag = port_tag_cfg.s.ip6_tag_type;
		pki_prt_cfg.style_cfg.parm_cfg.tag_type =
			(enum cvmx_sso_tag_type)port_tag_cfg.s.non_tag_type;
		if (tcp_tag == ip_tag && tcp_tag == port_tag_cfg.s.non_tag_type)
			pki_prt_cfg.style_cfg.parm_cfg.tag_type = (enum cvmx_sso_tag_type)tcp_tag;
		else if (tcp_tag == ip_tag) {
			/* allocate and copy style */
			/* modify tag type */
			/* pcam entry for ip6 && ip4 match */
			/* default is non tag type */
			__cvmx_pip_set_tag_type(xp.node, style, ip_tag, CVMX_PKI_PCAM_MATCH_IP);
		} else if (ip_tag == port_tag_cfg.s.non_tag_type) {
			/* allocate and copy style */
			/* modify tag type */
			/* pcam entry for tcp6 & tcp4 match */
			/* default is non tag type */
			__cvmx_pip_set_tag_type(xp.node, style, tcp_tag, CVMX_PKI_PCAM_MATCH_TCP);
		} else {
			if (ip_tag != 0xaa) {
				nstyle = __cvmx_pip_set_tag_type(xp.node, style, ip_tag,
								 CVMX_PKI_PCAM_MATCH_IP);
				if (tcp_tag != 0xff)
					__cvmx_pip_set_tag_type(xp.node, nstyle, tcp_tag,
								CVMX_PKI_PCAM_MATCH_TCP);
				else {
					n4style = __cvmx_pip_set_tag_type(xp.node, nstyle, ip_tag,
									  CVMX_PKI_PCAM_MATCH_IPV4);
					__cvmx_pip_set_tag_type(xp.node, n4style,
								port_tag_cfg.s.tcp4_tag_type,
								CVMX_PKI_PCAM_MATCH_TCP);
					n6style = __cvmx_pip_set_tag_type(xp.node, nstyle, ip_tag,
									  CVMX_PKI_PCAM_MATCH_IPV6);
					__cvmx_pip_set_tag_type(xp.node, n6style,
								port_tag_cfg.s.tcp6_tag_type,
								CVMX_PKI_PCAM_MATCH_TCP);
				}
			} else {
				n4style = __cvmx_pip_set_tag_type(xp.node, style,
								  port_tag_cfg.s.ip4_tag_type,
								  CVMX_PKI_PCAM_MATCH_IPV4);
				n6style = __cvmx_pip_set_tag_type(xp.node, style,
								  port_tag_cfg.s.ip6_tag_type,
								  CVMX_PKI_PCAM_MATCH_IPV6);
				if (tcp_tag != 0xff) {
					__cvmx_pip_set_tag_type(xp.node, n4style, tcp_tag,
								CVMX_PKI_PCAM_MATCH_TCP);
					__cvmx_pip_set_tag_type(xp.node, n6style, tcp_tag,
								CVMX_PKI_PCAM_MATCH_TCP);
				} else {
					__cvmx_pip_set_tag_type(xp.node, n4style,
								port_tag_cfg.s.tcp4_tag_type,
								CVMX_PKI_PCAM_MATCH_TCP);
					__cvmx_pip_set_tag_type(xp.node, n6style,
								port_tag_cfg.s.tcp6_tag_type,
								CVMX_PKI_PCAM_MATCH_TCP);
				}
			}
		}
		pki_prt_cfg.style_cfg.parm_cfg.qpg_dis_padd = !port_tag_cfg.s.portadd_en;

		if (port_cfg.s.mode == 0x1)
			pki_prt_cfg.pkind_cfg.initial_parse_mode = CVMX_PKI_PARSE_LA_TO_LG;
		else if (port_cfg.s.mode == 0x2)
			pki_prt_cfg.pkind_cfg.initial_parse_mode = CVMX_PKI_PARSE_LC_TO_LG;
		else
			pki_prt_cfg.pkind_cfg.initial_parse_mode = CVMX_PKI_PARSE_NOTHING;
		/* This is only for backward compatibility, not all the parameters are supported in 78xx */
		cvmx_pki_set_port_config(ipd_port, &pki_prt_cfg);
	} else {
		if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
			int interface, index, pknd;

			interface = cvmx_helper_get_interface_num(ipd_port);
			index = cvmx_helper_get_interface_index_num(ipd_port);
			pknd = cvmx_helper_get_pknd(interface, index);

			ipd_port = pknd; /* overload port_num with pknd */
		}
		csr_wr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
		csr_wr(CVMX_PIP_PRT_TAGX(ipd_port), port_tag_cfg.u64);
	}
}
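
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * enabling FCS checking on a port while keeping its other settings.
 * 'ipd_port' is assumed to hold the port number. The read-modify-write of
 * the PIP CSRs only applies to pre-PKI chips; on PKI (78xx-style) chips the
 * structures would normally be filled in from scratch before calling
 * cvmx_pip_config_port().
 *
 *	cvmx_pip_prt_cfgx_t prt_cfg;
 *	cvmx_pip_prt_tagx_t prt_tag;
 *
 *	prt_cfg.u64 = csr_rd(CVMX_PIP_PRT_CFGX(ipd_port));
 *	prt_tag.u64 = csr_rd(CVMX_PIP_PRT_TAGX(ipd_port));
 *	prt_cfg.s.crc_en = 1;
 *	cvmx_pip_config_port(ipd_port, prt_cfg, prt_tag);
 */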

/**
 * Configure the VLAN priority to QoS queue mapping.
 *
 * @param vlan_priority
 *               VLAN priority (0-7)
 * @param qos    QoS queue for packets matching this watcher
 */
static inline void cvmx_pip_config_vlan_qos(u64 vlan_priority, u64 qos)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
		cvmx_pip_qos_vlanx_t pip_qos_vlanx;

		pip_qos_vlanx.u64 = 0;
		pip_qos_vlanx.s.qos = qos;
		csr_wr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
	}
}
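
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * a simple inverse mapping of the eight VLAN priorities onto QoS queues,
 * so that priority 7 lands in queue 0 and priority 0 in queue 7.
 *
 *	u64 prio;
 *
 *	for (prio = 0; prio < 8; prio++)
 *		cvmx_pip_config_vlan_qos(prio, 7 - prio);
 */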

/**
 * Configure the Diffserv to QoS queue mapping.
 *
 * @param diffserv Diffserv field value (0-63)
 * @param qos      QoS queue for packets matching this watcher
 */
static inline void cvmx_pip_config_diffserv_qos(u64 diffserv, u64 qos)
{
	if (!octeon_has_feature(OCTEON_FEATURE_PKND)) {
		cvmx_pip_qos_diffx_t pip_qos_diffx;

		pip_qos_diffx.u64 = 0;
		pip_qos_diffx.s.qos = qos;
		csr_wr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
	}
}
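
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * mapping the Expedited Forwarding DSCP (46) to queue 0 and every other
 * DSCP to queue 7.
 *
 *	u64 dscp;
 *
 *	for (dscp = 0; dscp < 64; dscp++)
 *		cvmx_pip_config_diffserv_qos(dscp, dscp == 46 ? 0 : 7);
 */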

/**
 * Get the status counters for a port for older non PKI chips.
 *
 * @param port_num Port number (ipd_port) to get statistics for.
 * @param clear    Set to 1 to clear the counters after they are read
 * @param status   Where to put the results.
 */
static inline void cvmx_pip_get_port_stats(u64 port_num, u64 clear, cvmx_pip_port_status_t *status)
{
	cvmx_pip_stat_ctl_t pip_stat_ctl;
	cvmx_pip_stat0_prtx_t stat0;
	cvmx_pip_stat1_prtx_t stat1;
	cvmx_pip_stat2_prtx_t stat2;
	cvmx_pip_stat3_prtx_t stat3;
	cvmx_pip_stat4_prtx_t stat4;
	cvmx_pip_stat5_prtx_t stat5;
	cvmx_pip_stat6_prtx_t stat6;
	cvmx_pip_stat7_prtx_t stat7;
	cvmx_pip_stat8_prtx_t stat8;
	cvmx_pip_stat9_prtx_t stat9;
	cvmx_pip_stat10_x_t stat10;
	cvmx_pip_stat11_x_t stat11;
	cvmx_pip_stat_inb_pktsx_t pip_stat_inb_pktsx;
	cvmx_pip_stat_inb_octsx_t pip_stat_inb_octsx;
	cvmx_pip_stat_inb_errsx_t pip_stat_inb_errsx;
	int interface = cvmx_helper_get_interface_num(port_num);
	int index = cvmx_helper_get_interface_index_num(port_num);

	pip_stat_ctl.u64 = 0;
	pip_stat_ctl.s.rdclr = clear;
	csr_wr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);

	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		int pknd = cvmx_helper_get_pknd(interface, index);
		/*
		 * PIP_STAT_CTL[MODE] 0 means pkind.
		 */
		stat0.u64 = csr_rd(CVMX_PIP_STAT0_X(pknd));
		stat1.u64 = csr_rd(CVMX_PIP_STAT1_X(pknd));
		stat2.u64 = csr_rd(CVMX_PIP_STAT2_X(pknd));
		stat3.u64 = csr_rd(CVMX_PIP_STAT3_X(pknd));
		stat4.u64 = csr_rd(CVMX_PIP_STAT4_X(pknd));
		stat5.u64 = csr_rd(CVMX_PIP_STAT5_X(pknd));
		stat6.u64 = csr_rd(CVMX_PIP_STAT6_X(pknd));
		stat7.u64 = csr_rd(CVMX_PIP_STAT7_X(pknd));
		stat8.u64 = csr_rd(CVMX_PIP_STAT8_X(pknd));
		stat9.u64 = csr_rd(CVMX_PIP_STAT9_X(pknd));
		stat10.u64 = csr_rd(CVMX_PIP_STAT10_X(pknd));
		stat11.u64 = csr_rd(CVMX_PIP_STAT11_X(pknd));
	} else {
		if (port_num >= 40) {
			stat0.u64 = csr_rd(CVMX_PIP_XSTAT0_PRTX(port_num));
			stat1.u64 = csr_rd(CVMX_PIP_XSTAT1_PRTX(port_num));
			stat2.u64 = csr_rd(CVMX_PIP_XSTAT2_PRTX(port_num));
			stat3.u64 = csr_rd(CVMX_PIP_XSTAT3_PRTX(port_num));
			stat4.u64 = csr_rd(CVMX_PIP_XSTAT4_PRTX(port_num));
			stat5.u64 = csr_rd(CVMX_PIP_XSTAT5_PRTX(port_num));
			stat6.u64 = csr_rd(CVMX_PIP_XSTAT6_PRTX(port_num));
			stat7.u64 = csr_rd(CVMX_PIP_XSTAT7_PRTX(port_num));
			stat8.u64 = csr_rd(CVMX_PIP_XSTAT8_PRTX(port_num));
			stat9.u64 = csr_rd(CVMX_PIP_XSTAT9_PRTX(port_num));
			if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
				stat10.u64 = csr_rd(CVMX_PIP_XSTAT10_PRTX(port_num));
				stat11.u64 = csr_rd(CVMX_PIP_XSTAT11_PRTX(port_num));
			}
		} else {
			stat0.u64 = csr_rd(CVMX_PIP_STAT0_PRTX(port_num));
			stat1.u64 = csr_rd(CVMX_PIP_STAT1_PRTX(port_num));
			stat2.u64 = csr_rd(CVMX_PIP_STAT2_PRTX(port_num));
			stat3.u64 = csr_rd(CVMX_PIP_STAT3_PRTX(port_num));
			stat4.u64 = csr_rd(CVMX_PIP_STAT4_PRTX(port_num));
			stat5.u64 = csr_rd(CVMX_PIP_STAT5_PRTX(port_num));
			stat6.u64 = csr_rd(CVMX_PIP_STAT6_PRTX(port_num));
			stat7.u64 = csr_rd(CVMX_PIP_STAT7_PRTX(port_num));
			stat8.u64 = csr_rd(CVMX_PIP_STAT8_PRTX(port_num));
			stat9.u64 = csr_rd(CVMX_PIP_STAT9_PRTX(port_num));
			if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
				stat10.u64 = csr_rd(CVMX_PIP_STAT10_PRTX(port_num));
				stat11.u64 = csr_rd(CVMX_PIP_STAT11_PRTX(port_num));
			}
		}
	}
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		int pknd = cvmx_helper_get_pknd(interface, index);

		pip_stat_inb_pktsx.u64 = csr_rd(CVMX_PIP_STAT_INB_PKTS_PKNDX(pknd));
		pip_stat_inb_octsx.u64 = csr_rd(CVMX_PIP_STAT_INB_OCTS_PKNDX(pknd));
		pip_stat_inb_errsx.u64 = csr_rd(CVMX_PIP_STAT_INB_ERRS_PKNDX(pknd));
	} else {
		pip_stat_inb_pktsx.u64 = csr_rd(CVMX_PIP_STAT_INB_PKTSX(port_num));
		pip_stat_inb_octsx.u64 = csr_rd(CVMX_PIP_STAT_INB_OCTSX(port_num));
		pip_stat_inb_errsx.u64 = csr_rd(CVMX_PIP_STAT_INB_ERRSX(port_num));
	}

	status->dropped_octets = stat0.s.drp_octs;
	status->dropped_packets = stat0.s.drp_pkts;
	status->octets = stat1.s.octs;
	status->pci_raw_packets = stat2.s.raw;
	status->packets = stat2.s.pkts;
	status->multicast_packets = stat3.s.mcst;
	status->broadcast_packets = stat3.s.bcst;
	status->len_64_packets = stat4.s.h64;
	status->len_65_127_packets = stat4.s.h65to127;
	status->len_128_255_packets = stat5.s.h128to255;
	status->len_256_511_packets = stat5.s.h256to511;
	status->len_512_1023_packets = stat6.s.h512to1023;
	status->len_1024_1518_packets = stat6.s.h1024to1518;
	status->len_1519_max_packets = stat7.s.h1519;
	status->fcs_align_err_packets = stat7.s.fcs;
	status->runt_packets = stat8.s.undersz;
	status->runt_crc_packets = stat8.s.frag;
	status->oversize_packets = stat9.s.oversz;
	status->oversize_crc_packets = stat9.s.jabber;
	if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		status->mcast_l2_red_packets = stat10.s.mcast;
		status->bcast_l2_red_packets = stat10.s.bcast;
		status->mcast_l3_red_packets = stat11.s.mcast;
		status->bcast_l3_red_packets = stat11.s.bcast;
	}
	status->inb_packets = pip_stat_inb_pktsx.s.pkts;
	status->inb_octets = pip_stat_inb_octsx.s.octs;
	status->inb_errors = pip_stat_inb_errsx.s.errs;
}

/**
 * Get the status counters for a port.
 *
 * @param port_num Port number (ipd_port) to get statistics for.
 * @param clear    Set to 1 to clear the counters after they are read
 * @param status   Where to put the results.
 */
static inline void cvmx_pip_get_port_status(u64 port_num, u64 clear, cvmx_pip_port_status_t *status)
{
	if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
		unsigned int node = cvmx_get_node_num();

		cvmx_pki_get_port_stats(node, port_num, (struct cvmx_pki_port_stats *)status);
	} else {
		cvmx_pip_get_port_stats(port_num, clear, status);
	}
}
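
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * dumping a few of the per-port counters without clearing them. The casts
 * assume u64 maps to unsigned long long on this platform.
 *
 *	cvmx_pip_port_status_t st;
 *
 *	cvmx_pip_get_port_status(ipd_port, 0, &st);
 *	debug("port %d: %llu pkts, %llu octets, %llu dropped\n", (int)ipd_port,
 *	      (unsigned long long)st.packets, (unsigned long long)st.octets,
 *	      (unsigned long long)st.dropped_packets);
 */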

/**
 * Configure the hardware CRC engine
 *
 * @param interface Interface to configure (0 or 1)
 * @param invert_result
 *                  Invert the result of the CRC
 * @param reflect   Reflect
 * @param initialization_vector
 *                  CRC initialization vector
 */
static inline void cvmx_pip_config_crc(u64 interface, u64 invert_result, u64 reflect,
				       u32 initialization_vector)
{
	/* Only CN38XX & CN58XX */
}

/**
 * Clear all bits in a tag mask. This should be called on
 * startup before any calls to cvmx_pip_tag_mask_set. Each bit
 * set in the final mask represents a byte used in the packet for
 * tag generation.
 *
 * @param mask_index Which tag mask to clear (0..3)
 */
static inline void cvmx_pip_tag_mask_clear(u64 mask_index)
{
	u64 index;
	cvmx_pip_tag_incx_t pip_tag_incx;

	pip_tag_incx.u64 = 0;
	pip_tag_incx.s.en = 0;
	for (index = mask_index * 16; index < (mask_index + 1) * 16; index++)
		csr_wr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
}

/**
 * Sets a range of bits in the tag mask. The tag mask is used
 * when the cvmx_pip_port_tag_cfg_t tag_mode is non zero.
 * There are four separate masks that can be configured.
 *
 * @param mask_index Which tag mask to modify (0..3)
 * @param offset Offset into the bitmask to set bits at. Use the GCC macro
 *               offsetof() to determine the offsets into packet headers.
 *               For example, offsetof(ethhdr, protocol) returns the offset
 *               of the ethernet protocol field. The bitmask selects which
 *               bytes to include in the tag, with bit offset X selecting the
 *               byte at offset X from the beginning of the packet data.
 * @param len    Number of bytes to include. Usually this is the sizeof()
 *               the field.
 */
static inline void cvmx_pip_tag_mask_set(u64 mask_index, u64 offset, u64 len)
{
	while (len--) {
		cvmx_pip_tag_incx_t pip_tag_incx;
		u64 index = mask_index * 16 + offset / 8;

		pip_tag_incx.u64 = csr_rd(CVMX_PIP_TAG_INCX(index));
		pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
		csr_wr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
		offset++;
	}
}
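
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * building tag mask 0 so that only the IPv4 source and destination
 * addresses contribute to the tag. The byte offsets (26 and 30) are
 * assumptions for a plain Ethernet + IPv4 frame with no VLAN tag.
 *
 *	cvmx_pip_tag_mask_clear(0);
 *	cvmx_pip_tag_mask_set(0, 26, 4);	// IPv4 source address
 *	cvmx_pip_tag_mask_set(0, 30, 4);	// IPv4 destination address
 */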

/**
 * Set the byte count for the max-sized and min-sized frame checks.
 *
 * @param interface Which interface to set the limit
 * @param max_size  Byte count for the max-size frame check
 */
static inline void cvmx_pip_set_frame_check(int interface, u32 max_size)
{
	cvmx_pip_frm_len_chkx_t frm_len;

	/* If max_size is passed as 0, reset it to the default value. */
	if (max_size < 1536)
		max_size = 1536;

	/*
	 * On CN68XX the frame check is enabled per pkind and
	 * PIP_PRT_CFG[len_chk_sel] selects which set of
	 * MAXLEN/MINLEN to use.
	 */
	if (octeon_has_feature(OCTEON_FEATURE_PKND)) {
		int port;
		int num_ports = cvmx_helper_ports_on_interface(interface);

		for (port = 0; port < num_ports; port++) {
			if (octeon_has_feature(OCTEON_FEATURE_PKI)) {
				int ipd_port;

				ipd_port = cvmx_helper_get_ipd_port(interface, port);
				cvmx_pki_set_max_frm_len(ipd_port, max_size);
			} else {
				int pknd;
				int sel;
				cvmx_pip_prt_cfgx_t config;

				pknd = cvmx_helper_get_pknd(interface, port);
				config.u64 = csr_rd(CVMX_PIP_PRT_CFGX(pknd));
				sel = config.s.len_chk_sel;
				frm_len.u64 = csr_rd(CVMX_PIP_FRM_LEN_CHKX(sel));
				frm_len.s.maxlen = max_size;
				csr_wr(CVMX_PIP_FRM_LEN_CHKX(sel), frm_len.u64);
			}
		}
	}
	/*
	 * On cn6xxx and cn7xxx models, PIP_FRM_LEN_CHK0 applies to
	 * all incoming traffic.
	 */
	else if (OCTEON_IS_OCTEON2() || OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		frm_len.u64 = csr_rd(CVMX_PIP_FRM_LEN_CHKX(0));
		frm_len.s.maxlen = max_size;
		csr_wr(CVMX_PIP_FRM_LEN_CHKX(0), frm_len.u64);
	}
}
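
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * allowing jumbo frames of up to 9216 bytes on every interface. The
 * interface count helper is assumed to come from the cvmx-helper API
 * included above.
 *
 *	int iface;
 *
 *	for (iface = 0; iface < cvmx_helper_get_number_of_interfaces(); iface++)
 *		cvmx_pip_set_frame_check(iface, 9216);
 */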

/**
 * Initialize Bit Select Extractor config. There are 8 bit positions and valids
 * to be used when using the corresponding extractor.
 *
 * @param bit Bit Select Extractor to use
 * @param pos Which position to update
 * @param val The value to update the position with
 */
static inline void cvmx_pip_set_bsel_pos(int bit, int pos, int val)
{
	cvmx_pip_bsel_ext_posx_t bsel_pos;

	/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
	if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
		return;

	if (bit < 0 || bit > 3) {
		debug("ERROR: cvmx_pip_set_bsel_pos: Invalid Bit-Select Extractor (%d) passed\n",
		      bit);
		return;
	}

	bsel_pos.u64 = csr_rd(CVMX_PIP_BSEL_EXT_POSX(bit));
	switch (pos) {
	case 0:
		bsel_pos.s.pos0_val = 1;
		bsel_pos.s.pos0 = val & 0x7f;
		break;
	case 1:
		bsel_pos.s.pos1_val = 1;
		bsel_pos.s.pos1 = val & 0x7f;
		break;
	case 2:
		bsel_pos.s.pos2_val = 1;
		bsel_pos.s.pos2 = val & 0x7f;
		break;
	case 3:
		bsel_pos.s.pos3_val = 1;
		bsel_pos.s.pos3 = val & 0x7f;
		break;
	case 4:
		bsel_pos.s.pos4_val = 1;
		bsel_pos.s.pos4 = val & 0x7f;
		break;
	case 5:
		bsel_pos.s.pos5_val = 1;
		bsel_pos.s.pos5 = val & 0x7f;
		break;
	case 6:
		bsel_pos.s.pos6_val = 1;
		bsel_pos.s.pos6 = val & 0x7f;
		break;
	case 7:
		bsel_pos.s.pos7_val = 1;
		bsel_pos.s.pos7 = val & 0x7f;
		break;
	default:
		debug("Warning: cvmx_pip_set_bsel_pos: Invalid pos(%d)\n", pos);
		break;
	}
	csr_wr(CVMX_PIP_BSEL_EXT_POSX(bit), bsel_pos.u64);
}

/**
 * Initialize offset and skip values to use by bit select extractor.
 *
 * @param bit    Bit Select Extractor to use
 * @param offset Offset to add to extractor mem addr to get final address
 *               to lookup table.
 * @param skip   Number of bytes to skip from start of packet 0-64
 */
static inline void cvmx_pip_bsel_config(int bit, int offset, int skip)
{
	cvmx_pip_bsel_ext_cfgx_t bsel_cfg;

	/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
	if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
		return;

	bsel_cfg.u64 = csr_rd(CVMX_PIP_BSEL_EXT_CFGX(bit));
	bsel_cfg.s.offset = offset;
	bsel_cfg.s.skip = skip;
	csr_wr(CVMX_PIP_BSEL_EXT_CFGX(bit), bsel_cfg.u64);
}
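
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * programming bit-select extractor 0 to sample one bit from each of the
 * first four bytes after a 14-byte L2 header, with no extra offset into
 * the lookup table. The sampled bit positions are arbitrary here.
 *
 *	int pos;
 *
 *	for (pos = 0; pos < 4; pos++)
 *		cvmx_pip_set_bsel_pos(0, pos, pos * 8);
 *	cvmx_pip_bsel_config(0, 0, 14);
 */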

/**
 * Get the entry for the Bit Select Extractor Table.
 * @param work pointer to work queue entry
 * Return: Index of the Bit Select Extractor Table
 */
static inline int cvmx_pip_get_bsel_table_index(cvmx_wqe_t *work)
{
	int bit = cvmx_wqe_get_port(work) & 0x3;
	/* Get the Bit select table index. */
	int index = 0;
	int y;
	cvmx_pip_bsel_ext_cfgx_t bsel_cfg;
	cvmx_pip_bsel_ext_posx_t bsel_pos;

	/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
	if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
		return -1;

	bsel_cfg.u64 = csr_rd(CVMX_PIP_BSEL_EXT_CFGX(bit));
	bsel_pos.u64 = csr_rd(CVMX_PIP_BSEL_EXT_POSX(bit));

	for (y = 0; y < 8; y++) {
		char *ptr = (char *)cvmx_phys_to_ptr(work->packet_ptr.s.addr);
		int bit_loc = 0;
		int bit_val;

		ptr += bsel_cfg.s.skip;
		switch (y) {
		case 0:
			ptr += (bsel_pos.s.pos0 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos0 & 0x3);
			break;
		case 1:
			ptr += (bsel_pos.s.pos1 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos1 & 0x3);
			break;
		case 2:
			ptr += (bsel_pos.s.pos2 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos2 & 0x3);
			break;
		case 3:
			ptr += (bsel_pos.s.pos3 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos3 & 0x3);
			break;
		case 4:
			ptr += (bsel_pos.s.pos4 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos4 & 0x3);
			break;
		case 5:
			ptr += (bsel_pos.s.pos5 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos5 & 0x3);
			break;
		case 6:
			ptr += (bsel_pos.s.pos6 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos6 & 0x3);
			break;
		case 7:
			ptr += (bsel_pos.s.pos7 >> 3);
			bit_loc = 7 - (bsel_pos.s.pos7 & 0x3);
			break;
		}
		bit_val = (*ptr >> bit_loc) & 1;
		index |= bit_val << y;
	}
	index += bsel_cfg.s.offset;
	index &= 0x1ff;
	return index;
}

static inline int cvmx_pip_get_bsel_qos(cvmx_wqe_t *work)
{
	int index = cvmx_pip_get_bsel_table_index(work);
	cvmx_pip_bsel_tbl_entx_t bsel_tbl;

	/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
	if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
		return -1;

	bsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));

	return bsel_tbl.s.qos;
}

static inline int cvmx_pip_get_bsel_grp(cvmx_wqe_t *work)
{
	int index = cvmx_pip_get_bsel_table_index(work);
	cvmx_pip_bsel_tbl_entx_t bsel_tbl;

	/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
	if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
		return -1;

	bsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));

	return bsel_tbl.s.grp;
}

static inline int cvmx_pip_get_bsel_tt(cvmx_wqe_t *work)
{
	int index = cvmx_pip_get_bsel_table_index(work);
	cvmx_pip_bsel_tbl_entx_t bsel_tbl;

	/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
	if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
		return -1;

	bsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));

	return bsel_tbl.s.tt;
}

static inline int cvmx_pip_get_bsel_tag(cvmx_wqe_t *work)
{
	int index = cvmx_pip_get_bsel_table_index(work);
	int port = cvmx_wqe_get_port(work);
	int bit = port & 0x3;
	int upper_tag = 0;
	cvmx_pip_bsel_tbl_entx_t bsel_tbl;
	cvmx_pip_bsel_ext_cfgx_t bsel_cfg;
	cvmx_pip_prt_tagx_t prt_tag;

	/* The bit select extractor is available in CN61XX and CN68XX pass2.0 onwards. */
	if (!octeon_has_feature(OCTEON_FEATURE_BIT_EXTRACTOR))
		return -1;

	bsel_tbl.u64 = csr_rd(CVMX_PIP_BSEL_TBL_ENTX(index));
	bsel_cfg.u64 = csr_rd(CVMX_PIP_BSEL_EXT_CFGX(bit));

	prt_tag.u64 = csr_rd(CVMX_PIP_PRT_TAGX(port));
	if (prt_tag.s.inc_prt_flag == 0)
		upper_tag = bsel_cfg.s.upper_tag;
	return bsel_tbl.s.tag | ((bsel_cfg.s.tag << 8) & 0xff00) | ((upper_tag << 16) & 0xffff0000);
}
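
/*
 * Editor's illustrative sketch (not part of the original SDK sources):
 * using the extractor lookups above to re-classify a received work-queue
 * entry in software. cvmx_wqe_set_qos() and cvmx_wqe_set_grp() are assumed
 * to be the usual WQE accessors from cvmx-wqe.h.
 *
 *	int qos = cvmx_pip_get_bsel_qos(work);
 *	int grp = cvmx_pip_get_bsel_grp(work);
 *
 *	if (qos >= 0 && grp >= 0) {
 *		cvmx_wqe_set_qos(work, qos);
 *		cvmx_wqe_set_grp(work, grp);
 *	}
 */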

#endif /* __CVMX_PIP_H__ */