--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1857,6 +1857,12 @@ static void mtk_tx_set_dma_desc_v3(struc
 
 	trace_printk("[%s] skb_shinfo(skb)->nr_frags=%x HNAT_SKB_CB2(skb)->magic=%x txd4=%x<-----\n",
 		     __func__, skb_shinfo(skb)->nr_frags, HNAT_SKB_CB2(skb)->magic, data);
+
+	/* forward to eip197 if this packet is going to be encrypted */
+	if (unlikely(skb_hnat_cdrt(skb) && is_magic_tag_valid(skb))) {
+		data &= ~(TX_DMA_TPORT_MASK << TX_DMA_TPORT_SHIFT);
+		data |= (EIP197_QDMA_TPORT & TX_DMA_TPORT_MASK) << TX_DMA_TPORT_SHIFT;
+	}
 #endif
 	WRITE_ONCE(desc->txd4, data);
 
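Note: the txd4 update above can equivalently be expressed with the kernel's FIELD_PREP()/GENMASK() helpers from <linux/bitfield.h>. A minimal sketch, assuming a GENMASK-style field definition that this driver does not actually use (it keeps explicit shift/mask pairs):

#include <linux/bitfield.h>

#define TX_DMA_TPORT		GENMASK(3, 0)	/* same bits as mask 0xf at shift 0 */

static inline u32 mtk_txd4_set_tport(u32 txd4, u32 tport)
{
	txd4 &= ~TX_DMA_TPORT;				/* clear the previous target port */
	return txd4 | FIELD_PREP(TX_DMA_TPORT, tport);	/* e.g. EIP197_QDMA_TPORT */
}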
@@ -1880,6 +1886,17 @@ static void mtk_tx_set_dma_desc_v3(struc
 
 	WRITE_ONCE(desc->txd7, 0);
 	WRITE_ONCE(desc->txd8, 0);
+
+#if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
+	if (unlikely(skb_hnat_cdrt(skb) && is_magic_tag_valid(skb))) {
+		/* carry cdrt index for encryption */
+		data = (skb_hnat_cdrt(skb) & TX_DMA_CDRT_MASK) << TX_DMA_CDRT_SHIFT;
+		WRITE_ONCE(desc->txd8, data);
+		skb_hnat_magic_tag(skb) = 0;
+	} else {
+		WRITE_ONCE(desc->txd8, 0);
+	}
+#endif
 }
 
 static void mtk_tx_set_dma_desc(struct sk_buff *skb, struct net_device *dev, void *txd,
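Taken together, the two hunks above steer an skb that carries a CDRT index to the inline crypto engine: txd4 selects the EIP-197 QDMA target port and txd8 carries the CDRT index itself. A worked example using the defines this patch adds to mtk_eth_soc.h, for an illustrative index of 5:

	u32 txd4 = 0, txd8 = 0;

	/* target port nibble: 3 == EIP197_QDMA_TPORT */
	txd4 &= ~(TX_DMA_TPORT_MASK << TX_DMA_TPORT_SHIFT);
	txd4 |= (EIP197_QDMA_TPORT & TX_DMA_TPORT_MASK) << TX_DMA_TPORT_SHIFT;	/* txd4 = 0x3 */

	/* CDRT index byte in txd8, consumed by the EIP-197 */
	txd8 |= (5 & TX_DMA_CDRT_MASK) << TX_DMA_CDRT_SHIFT;			/* txd8 = 0x05 */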
@@ -2307,6 +2324,7 @@ static int mtk_poll_rx(struct napi_struc
 
 		skb_hnat_alg(skb) = 0;
 		skb_hnat_filled(skb) = 0;
+		skb_hnat_set_cdrt(skb, 0);
 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
 		skb_hnat_set_tops(skb, 0);
 		skb_hnat_set_is_decap(skb, 0);
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -1078,6 +1078,9 @@ static unsigned int hnat_ipv4_get_nextho
 		return 0;
 	}
 
+	if (!skb_hnat_cdrt(skb) && dst && dst_xfrm(dst))
+		return 0;
+
 	rcu_read_lock_bh();
 	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
 	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
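The new check relies on dst_xfrm() from <net/dst.h>, which returns the route's attached xfrm state only when IPsec applies to the packet. A packet that still carries an xfrm dst but no CDRT index has not been claimed by the crypto engine, so next-hop resolution (and hence binding) is skipped. A minimal sketch of that predicate as a standalone helper (hypothetical name, not in the patch):

static bool hnat_skb_awaits_sw_ipsec(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	/* xfrm transform attached, but no crypto DMA record assigned yet */
	return dst && dst_xfrm(dst) && !skb_hnat_cdrt(skb);
}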
@@ -1302,6 +1305,9 @@ static inline void hnat_fill_offload_eng
 		 */
 		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
 		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
+	} else if (skb_hnat_cdrt(skb)) {
+		entry->ipv4_hnapt.tport_id = NR_EIP197_QDMA_TPORT;
+		entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
 	} else {
 		return;
 	}
@@ -1311,6 +1317,79 @@ static inline void hnat_fill_offload_eng
 #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
 }
 
+int hnat_bind_crypto_entry(struct sk_buff *skb, const struct net_device *dev) {
+	struct foe_entry *foe;
+	struct foe_entry entry = { 0 };
+	struct ethhdr *eth = eth_hdr(skb);
+	u32 gmac = NR_DISCARD;
+	struct mtk_mac *mac = netdev_priv(dev);
+
+	if (skb_hnat_tops(skb) && mtk_tnl_encap_offload)
+		mtk_tnl_encap_offload(skb);
+
+	foe = &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)];
+
+	hnat_get_filled_unbind_entry(skb, &entry);
+	entry.bfib1.cah = 1;
+	entry.bfib1.time_stamp = (hnat_priv->data->version == MTK_HNAT_V2 ||
+				  hnat_priv->data->version == MTK_HNAT_V3) ?
+		readl(hnat_priv->fe_base + 0x0010) & (0xFF) :
+		readl(hnat_priv->fe_base + 0x0010) & (0x7FFF);
+	entry.ipv4_hnapt.iblk2.port_ag =
+		(hnat_priv->data->version == MTK_HNAT_V2 ||
+		 hnat_priv->data->version == MTK_HNAT_V3) ? 0xf : 0x3f;
+
+	if (IS_LAN(dev)) {
+		if (IS_BOND_MODE)
+			gmac = ((skb_hnat_entry(skb) >> 1) % hnat_priv->gmac_num) ?
+				NR_GMAC2_PORT : NR_GMAC1_PORT;
+		else
+			gmac = NR_GMAC1_PORT;
+	} else if (IS_LAN2(dev)) {
+		gmac = (mac->id == MTK_GMAC2_ID) ? NR_GMAC2_PORT : NR_GMAC3_PORT;
+	} else if (IS_WAN(dev)) {
+		if (IS_GMAC1_MODE)
+			gmac = NR_GMAC1_PORT;
+		else
+			gmac = (mac->id == MTK_GMAC2_ID) ? NR_GMAC2_PORT : NR_GMAC3_PORT;
+	} else {
+		pr_notice("Unknown case of dp, iif=%x --> %s\n", skb_hnat_iface(skb), dev->name);
+		return -1;
+	}
+
+	entry.ipv4_hnapt.iblk2.mibf = 1;
+	entry.ipv4_hnapt.iblk2.dp = gmac;
+	entry.ipv4_hnapt.iblk2.port_mg =
+		(hnat_priv->data->version == MTK_HNAT_V1_1) ? 0x3f : 0;
+	entry.bfib1.ttl = 1;
+	entry.bfib1.state = BIND;
+
+	hnat_fill_offload_engine_entry(skb, &entry, dev);
+
+	if (!skb_hnat_tops(skb)) {
+		entry.ipv4_hnapt.dmac_hi = swab32(*((u32 *)eth->h_dest));
+		entry.ipv4_hnapt.dmac_lo = swab16(*((u16 *)&eth->h_dest[4]));
+		entry.ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+		entry.ipv4_hnapt.smac_lo = swab16(*((u16 *)&eth->h_source[4]));
+	}
+
+	wmb();
+
+	if (entry_hnat_is_bound(foe))
+		return 0;
+
+	memcpy(foe, &entry, sizeof(entry));
+
+	if (hnat_priv->data->per_flow_accounting &&
+	    skb_hnat_entry(skb) < hnat_priv->foe_etry_num &&
+	    skb_hnat_ppe(skb) < CFG_PPE_NUM)
+		memset(&hnat_priv->acct[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
+		       0, sizeof(struct mib_entry));
+
+	return 0;
+}
+EXPORT_SYMBOL(hnat_bind_crypto_entry);
+
 static unsigned int skb_to_hnat_info(struct sk_buff *skb,
 				     const struct net_device *dev,
 				     struct foe_entry *foe,
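hnat_bind_crypto_entry() is exported for the inline-crypto driver, which is not part of this patch. A sketch of a plausible caller, with hypothetical names: once the EIP-197 driver has set up a CDR record for an outbound flow, it stamps the record index into the skb's HNAT descriptor and asks HNAT to bind the FoE entry so the rest of the flow bypasses the software xfrm path.

static int example_crypto_bind_flow(struct sk_buff *skb, struct net_device *dev,
				    u32 cdrt_idx)
{
	if (!is_magic_tag_valid(skb))
		return -EINVAL;

	skb_hnat_set_cdrt(skb, cdrt_idx);	/* later carried into txd8 on TX */

	return hnat_bind_crypto_entry(skb, dev);
}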
@@ -2360,6 +2439,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
 
 	skb_hnat_alg(skb) = 0;
 	skb_hnat_set_tops(skb, 0);
+	skb_hnat_set_cdrt(skb, 0);
 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
 
 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
@@ -2446,7 +2526,8 @@ static unsigned int mtk_hnat_accel_type(
 	 * is from local_out which is also filtered in sanity check.
 	 */
 	dst = skb_dst(skb);
-	if (dst && dst_xfrm(dst))
+	if (dst && dst_xfrm(dst) &&
+	    (!mtk_crypto_offloadable || !mtk_crypto_offloadable(skb)))
 		return 0;
 
 	ct = nf_ct_get(skb, &ctinfo);
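mtk_hnat_accel_type() now lets xfrm traffic through only when a registered mtk_crypto_offloadable() callback claims the flow. A sketch of what such a callback might check, with a purely hypothetical policy (the real driver's criteria are not part of this patch):

static bool example_crypto_offloadable(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *xs = dst ? dst_xfrm(dst) : NULL;

	/* hypothetical policy: only ESP in tunnel mode is taken over */
	return xs && xs->id.proto == IPPROTO_ESP &&
	       xs->props.mode == XFRM_MODE_TUNNEL;
}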
@@ -2850,6 +2931,14 @@ static unsigned int mtk_hnat_nf_post_rou
 		}
 	}
 
+	/* protocols other than IPv4 TCP are not supported for crypto offload yet */
+	if (skb_hnat_is_decrypt(skb) &&
+	    (ntohs(skb->protocol) != ETH_P_IP ||
+	     ip_hdr(skb)->protocol != IPPROTO_TCP)) {
+		skb_hnat_alg(skb) = 1;
+		return 0;
+	}
+
 	if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
 		is_virt_dev = true;
 
@@ -3159,7 +3248,10 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
 	if (iph->protocol == IPPROTO_IPV6) {
 		entry->udib1.pkt_type = IPV6_6RD;
 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
-	} else if (!skb_hnat_tops(skb)) {
+	} else if (is_magic_tag_valid(skb) &&
+		   (skb_hnat_cdrt(skb) || skb_hnat_tops(skb))) {
+		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+	} else {
 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
 	}
 
--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -46,7 +46,8 @@ struct hnat_desc {
 	u32 amsdu : 1;
 	u32 tops : 6;
 	u32 is_decap : 1;
-	u32 resv3 : 12;
+	u32 cdrt : 8;
+	u32 resv3 : 4;
 	u32 magic_tag_protect : 16;
 } __packed;
 #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
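The eight cdrt bits come out of the former 12-bit reserved field (now cdrt:8 + resv3:4), so struct hnat_desc keeps its size, and the width matches the TX_DMA_CDRT_MASK (0xff) added to mtk_eth_soc.h. An illustrative compile-time guard, assuming both definitions are visible in one translation unit (the patch itself adds no such check):

static_assert(TX_DMA_CDRT_MASK == 0xff,
	      "hnat_desc.cdrt (8 bits) must match the txd8 CDRT mask");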
@@ -99,12 +100,16 @@ struct hnat_desc {
 #define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
 #define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
 #define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
+#define skb_hnat_cdrt(skb) (((struct hnat_desc *)((skb)->head))->cdrt)
+#define skb_hnat_set_cdrt(skb, cdrt) ((skb_hnat_cdrt(skb)) = (cdrt))
 #else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
 #define skb_hnat_tops(skb) (0)
 #define skb_hnat_is_decap(skb) (0)
 #define skb_hnat_is_encap(skb) (0)
 #define skb_hnat_set_tops(skb, tops)
 #define skb_hnat_set_is_decap(skb, is_decap)
+#define skb_hnat_cdrt(skb) (0)
+#define skb_hnat_set_cdrt(skb, cdrt)
 #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
 #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
 #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
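Because the non-V3 fallbacks make skb_hnat_cdrt() a constant 0 and skb_hnat_set_cdrt() a no-op, callers need no CONFIG_MEDIATEK_NETSYS_V3 guards of their own; the crypto branches simply compile away on older NETSYS generations. A small sketch of a guard-free caller (hypothetical helper, not in the patch):

static inline void example_mark_for_crypto(struct sk_buff *skb, u32 cdrt_idx)
{
	skb_hnat_set_cdrt(skb, cdrt_idx);	/* expands to nothing before NETSYS V3 */

	if (skb_hnat_cdrt(skb))			/* constant 0 -> branch is dead code */
		skb_hnat_alg(skb) = 0;		/* keep the flow eligible for HNAT */
}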
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
@@ -49,6 +49,8 @@ int (*mtk_tnl_decap_offload)(struct sk_b
 EXPORT_SYMBOL(mtk_tnl_decap_offload);
 bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
 EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
+bool (*mtk_crypto_offloadable)(struct sk_buff *skb) = NULL;
+EXPORT_SYMBOL(mtk_crypto_offloadable);
 
 static void hnat_sma_build_entry(struct timer_list *t)
 {
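mtk_crypto_offloadable is a plain exported function pointer, mirroring the existing mtk_tnl_* hooks, so the crypto module assigns it at probe time and clears it before unloading. A sketch of that hand-off, reusing the hypothetical example_crypto_offloadable() callback sketched earlier:

static int example_crypto_probe(struct platform_device *pdev)
{
	mtk_crypto_offloadable = example_crypto_offloadable;
	return 0;
}

static int example_crypto_remove(struct platform_device *pdev)
{
	mtk_crypto_offloadable = NULL;
	return 0;
}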
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -1140,6 +1140,8 @@ enum FoeIpAct {
 #define NR_WDMA1_PORT 9
 #define NR_WDMA2_PORT 13
 #define NR_GMAC3_PORT 15
+#define NR_EIP197_TPORT 2
+#define NR_EIP197_QDMA_TPORT 3
 #define NR_TDMA_TPORT 4
 #define NR_TDMA_QDMA_TPORT 5
 #define LAN_DEV_NAME hnat_priv->lan
@@ -1292,6 +1294,8 @@ extern int qos_toggle;
 extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
 extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
 extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
+extern bool (*mtk_crypto_offloadable)(struct sk_buff *skb);
+extern int hnat_bind_crypto_entry(struct sk_buff *skb, const struct net_device *dev);
 
 int ext_if_add(struct extdev_entry *ext_entry);
 int ext_if_del(struct extdev_entry *ext_entry);
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -558,6 +558,10 @@
 
 #define MTK_QDMA_GMAC2_QID	8
 
+/* QDMA V2 descriptor txd8 */
+#define TX_DMA_CDRT_SHIFT          0
+#define TX_DMA_CDRT_MASK           0xff
+
 /* QDMA V2 descriptor txd6 */
 #define TX_DMA_INS_VLAN_V2         BIT(16)
 
@@ -567,6 +571,9 @@
 #define TX_DMA_SPTAG_V3            BIT(27)
 
 /* QDMA V2 descriptor txd4 */
+#define EIP197_QDMA_TPORT          3
+#define TX_DMA_TPORT_SHIFT         0
+#define TX_DMA_TPORT_MASK          0xf
 #define TX_DMA_FPORT_SHIFT_V2      8
 #define TX_DMA_FPORT_MASK_V2       0xf
 #define TX_DMA_SWC_V2              BIT(30)
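EIP197_QDMA_TPORT here (QDMA descriptor side) and NR_EIP197_QDMA_TPORT in hnat.h (PPE/FoE side) both name target port 3 and have to stay in agreement so that driver-built descriptors and hardware-bound FoE entries steer traffic to the same engine. An illustrative guard, assuming both headers are included in one translation unit (the patch adds no such check):

static_assert(EIP197_QDMA_TPORT == NR_EIP197_QDMA_TPORT,
	      "QDMA and PPE must steer crypto traffic to the same target port");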