--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
@@ -1091,6 +1091,8 @@ enum FoeIpAct {
 #define NR_EIP197_QDMA_TPORT 3
 #define NR_TDMA_TPORT 4
 #define NR_TDMA_QDMA_TPORT 5
+#define NR_TDMA_EIP197_TPORT 8
+#define NR_TDMA_EIP197_QDMA_TPORT 9
 #define LAN_DEV_NAME hnat_priv->lan
 #define LAN2_DEV_NAME hnat_priv->lan2
 #define IS_WAN(dev) \
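
The two new transmit ports above (8 and 9) cover flows that need both TOPS tunnel offload and EIP197 inline crypto on the TDMA path. Below is a minimal standalone sketch of the selection order implied by the hnat_fill_offload_engine_entry() hunk further down; the helper name and the zero fallback for the plain QDMA path are illustrative, not taken from the driver.

#include <stdio.h>

#define NR_EIP197_QDMA_TPORT      3
#define NR_TDMA_QDMA_TPORT        5
#define NR_TDMA_EIP197_QDMA_TPORT 9

/* Illustrative only: mirrors the tport choice in hnat_fill_offload_engine_entry(),
 * with 0 standing in for whatever the driver's default QDMA tport really is.
 */
static int pick_qdma_tport(int tops_encap, int cdrt_encrypt)
{
	if (tops_encap)
		return cdrt_encrypt ? NR_TDMA_EIP197_QDMA_TPORT
				    : NR_TDMA_QDMA_TPORT;
	if (cdrt_encrypt)
		return NR_EIP197_QDMA_TPORT;
	return 0;
}

int main(void)
{
	printf("tunnel+crypto: tport %d\n", pick_qdma_tport(1, 1));
	printf("tunnel only:   tport %d\n", pick_qdma_tport(1, 0));
	printf("crypto only:   tport %d\n", pick_qdma_tport(0, 1));
	return 0;
}
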
--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
@@ -1099,7 +1099,8 @@ static unsigned int hnat_ipv4_get_nextho
 	 * outer header, we must update its outer mac header pointer
 	 * before filling outer mac or it may screw up inner mac
 	 */
-	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) || skb_hnat_cdrt(skb)) {
+	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+	    || (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))) {
 		skb_push(skb, sizeof(struct ethhdr));
 		skb_reset_mac_header(skb);
 	}
@@ -1107,7 +1108,8 @@ static unsigned int hnat_ipv4_get_nextho
 	memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
 	memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
 
-	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) || skb_hnat_cdrt(skb))
+	if ((skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
+	    || (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)))
 		skb_pull(skb, sizeof(struct ethhdr));
 
 	rcu_read_unlock_bh();
@@ -1255,6 +1257,38 @@ static inline void hnat_get_filled_unbin
 	entry->bfib1.ps = 0;
 }
 
+/*
+ * Check whether the offload engine has finished preparing this skb.
+ * Return 0 if the packet does not involve an offload engine,
+ * a positive value if the offload engine data is fully prepared,
+ * or a negative value if the data is still being constructed.
+ */
+static inline int hnat_offload_engine_done(struct sk_buff *skb,
+					   struct flow_offload_hw_path *hw_path)
+{
+	struct dst_entry *dst = skb_dst(skb);
+
+	if ((skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL))) {
+		/* tunnel encap'ed */
+		if (dst && dst_xfrm(dst))
+			/*
+			 * skb is not ready to bind since it still needs
+			 * to be encrypted
+			 */
+			return -1;
+
+		/* nothing further needs to be done for this skb */
+		return 1;
+	}
+
+	if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb) && dst && !dst_xfrm(dst))
+		/* crypto encryption is done */
+		return 1;
+
+	/* no need for tunnel encapsulation or crypto encryption */
+	return 0;
+}
+
 static inline void hnat_qos_tnl(u32 id, const struct net_device *dev)
 {
 	u32 cfg;
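
A plain-C restatement of the tri-state contract documented above, as consumed later by skb_to_hnat_info(); the boolean parameters stand in for skb_hnat_tops(), the FLOW_OFFLOAD_PATH_TNL flag, skb_dst()/dst_xfrm() and the cdrt/is_encrypt fields, and the function below is only a sketch, not driver code.

#include <stdio.h>

/*
 * Illustrative mapping:
 *   tops_post_encap ~ skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
 *   has_xfrm_dst    ~ skb_dst(skb) && dst_xfrm(skb_dst(skb))
 *   cdrt_encrypt    ~ skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)
 */
static int offload_engine_done(int tops_post_encap, int has_xfrm_dst,
			       int cdrt_encrypt, int has_dst)
{
	if (tops_post_encap)
		return has_xfrm_dst ? -1 : 1;	/* -1: still waiting for encryption */
	if (cdrt_encrypt && has_dst && !has_xfrm_dst)
		return 1;			/* encryption done, ready to bind */
	return 0;				/* no offload engine involved */
}

int main(void)
{
	printf("encap'ed, awaiting encrypt: %d\n", offload_engine_done(1, 1, 0, 1));
	printf("encap'ed, nothing pending : %d\n", offload_engine_done(1, 0, 0, 1));
	printf("crypto finished           : %d\n", offload_engine_done(0, 0, 1, 1));
	printf("plain traffic             : %d\n", offload_engine_done(0, 0, 0, 1));
	return 0;
}
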
@@ -1299,9 +1333,15 @@ static inline void hnat_fill_offload_eng
 		 * we fill in hnat tport and tops_entry for tunnel encapsulation
 		 * offloading
 		 */
-		entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
+		if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
+			entry->ipv4_hnapt.tport_id = NR_TDMA_EIP197_QDMA_TPORT;
+			entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
+		} else {
+			entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
+		}
 		entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
-	} else if (skb_hnat_cdrt(skb)) {
+
+	} else if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)) {
 		entry->ipv4_hnapt.tport_id = NR_EIP197_QDMA_TPORT;
 		entry->ipv4_hnapt.cdrt_id = skb_hnat_cdrt(skb);
 	} else {
@@ -1333,6 +1373,7 @@ static unsigned int skb_to_hnat_info(str
 	u32 port_id = 0;
 	u32 payload_len = 0;
 	int mape = 0;
+	int ret;
 
 	ct = nf_ct_get(skb, &ctinfo);
 
@@ -1349,10 +1390,12 @@ static unsigned int skb_to_hnat_info(str
 	if (whnat && is_hnat_pre_filled(foe))
 		return 0;
 
-	if ((skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL))
-	    || (skb_hnat_cdrt(skb) && skb_dst(skb) && !dst_xfrm(skb_dst(skb)))) {
+	ret = hnat_offload_engine_done(skb, hw_path);
+	if (ret == 1) {
 		hnat_get_filled_unbind_entry(skb, &entry);
 		goto hnat_entry_bind;
+	} else if (ret == -1) {
+		return 0;
 	}
 
 	entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
@@ -1751,7 +1794,8 @@ static unsigned int skb_to_hnat_info(str
 	entry = ppe_fill_L2_info(eth, entry, hw_path);
 
 	if ((skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
-	    || (!skb_hnat_cdrt(skb) && skb_dst(skb) && dst_xfrm(skb_dst(skb))))
+	    || (!skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb)
+		&& skb_dst(skb) && dst_xfrm(skb_dst(skb))))
 		goto hnat_entry_skip_bind;
 
 hnat_entry_bind:
@@ -1957,7 +2001,7 @@ hnat_entry_bind:
 
 #if defined(CONFIG_MEDIATEK_NETSYS_V3)
 	hnat_fill_offload_engine_entry(skb, &entry, dev);
-	if (skb_hnat_cdrt(skb))
+	if (skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))
 		entry = ppe_fill_L2_info(eth, entry, hw_path);
 #endif
 
@@ -2237,6 +2281,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
 	skb_hnat_alg(skb) = 0;
 	skb_hnat_set_tops(skb, 0);
 	skb_hnat_set_cdrt(skb, 0);
+	skb_hnat_set_is_decrypt(skb, 0);
 	skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
 
 	if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
@@ -3017,7 +3062,8 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
 		entry->udib1.pkt_type = IPV6_6RD;
 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
 	} else if (is_magic_tag_valid(skb)
-		   && (skb_hnat_cdrt(skb) || skb_hnat_tops(skb))) {
+		   && ((skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb))
+		       || skb_hnat_tops(skb))) {
 		hnat_set_head_frags(state, skb, 0, hnat_set_alg);
 	} else {
 		hnat_set_head_frags(state, skb, 1, hnat_set_alg);
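
Throughout this file the bare skb_hnat_cdrt() test is tightened to skb_hnat_cdrt() && skb_hnat_is_encrypt(), so packets that were merely decrypted on RX no longer take the encrypt-side paths. A minimal sketch of that shared predicate follows, assuming the helper name and boolean stand-ins below (they are not part of the driver).

#include <assert.h>

/* Hypothetical helper: mirrors the condition now guarding the outer-MAC
 * push/pull in hnat_ipv4_get_nexthop() and the local-out hook above.
 */
static int offload_touches_outer_mac(int tops, int is_encap,
				     int cdrt, int is_encrypt)
{
	return (tops && is_encap) || (cdrt && is_encrypt);
}

int main(void)
{
	assert(offload_touches_outer_mac(1, 1, 0, 0));  /* tunnel encap */
	assert(offload_touches_outer_mac(0, 0, 5, 1));  /* outbound, to be encrypted */
	assert(!offload_touches_outer_mac(0, 0, 5, 0)); /* inbound, already decrypted */
	return 0;
}
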
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2324,10 +2324,11 @@ static int mtk_poll_rx(struct napi_struc
 
 		skb_hnat_alg(skb) = 0;
 		skb_hnat_filled(skb) = 0;
-		skb_hnat_set_cdrt(skb, 0);
+		skb_hnat_set_cdrt(skb, RX_DMA_GET_CDRT(trxd.rxd7));
 		skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
 		skb_hnat_set_tops(skb, 0);
 		skb_hnat_set_is_decap(skb, 0);
+		skb_hnat_set_is_decrypt(skb, (skb_hnat_cdrt(skb) ? 1 : 0));
 
 		if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
 			trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -640,6 +640,9 @@
 #define RX_DMA_GET_AGG_CNT_V2(_x) (((_x) >> 16) & 0xff)
 #define RX_DMA_GET_TOPS_CRSN(_x) (((_x) >> 24) & 0xff)
 
+/* PDMA V2 descriptor rxd7 */
+#define RX_DMA_GET_CDRT(_x) (((_x) >> 8) & 0xff)
+
 /* PHY Polling and SMI Master Control registers */
 #define MTK_PPSC 0x10000
 #define PPSC_MDC_CFG GENMASK(29, 24)
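
RX_DMA_GET_CDRT() exposes bits 15:8 of rxd7, and mtk_poll_rx() above now seeds skb_hnat_cdrt() from it and flags the skb as decrypted whenever the value is non-zero. A self-contained sketch of the same extraction; the sample descriptor word is made up.

#include <stdio.h>
#include <stdint.h>

/* Same extraction as the new RX_DMA_GET_CDRT() macro: bits 15:8 of rxd7 */
#define RX_DMA_GET_CDRT(_x) (((_x) >> 8) & 0xff)

int main(void)
{
	uint32_t rxd7 = 0x00002a00;	/* hypothetical descriptor word */
	uint32_t cdrt = RX_DMA_GET_CDRT(rxd7);

	/* non-zero CDRT index means the crypto engine handled the packet on RX */
	printf("cdrt = %u, is_decrypt = %d\n", cdrt, cdrt ? 1 : 0);
	return 0;
}
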
--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
@@ -47,7 +47,8 @@ struct hnat_desc {
 	u32 tops : 6;
 	u32 is_decap : 1;
 	u32 cdrt : 8;
-	u32 resv3 : 4;
+	u32 is_decrypt : 1;
+	u32 resv3 : 3;
 	u32 magic_tag_protect : 16;
 } __packed;
 #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
@@ -101,7 +102,10 @@ struct hnat_desc {
 #define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
 #define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
 #define skb_hnat_cdrt(skb) (((struct hnat_desc *)((skb)->head))->cdrt)
+#define skb_hnat_is_decrypt(skb) (((struct hnat_desc *)((skb)->head))->is_decrypt)
+#define skb_hnat_is_encrypt(skb) (!skb_hnat_is_decrypt(skb))
 #define skb_hnat_set_cdrt(skb, cdrt) ((skb_hnat_cdrt(skb)) = (cdrt))
+#define skb_hnat_set_is_decrypt(skb, is_dec) ((skb_hnat_is_decrypt(skb)) = is_dec)
 #else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
 #define skb_hnat_tops(skb) (0)
 #define skb_hnat_is_decap(skb) (0)
@@ -109,7 +113,10 @@ struct hnat_desc {
 #define skb_hnat_set_tops(skb, tops)
 #define skb_hnat_set_is_decap(skb, is_decap)
 #define skb_hnat_cdrt(skb) (0)
+#define skb_hnat_is_decrypt(skb) (0)
+#define skb_hnat_is_encrypt(skb) (0)
 #define skb_hnat_set_cdrt(skb, cdrt)
+#define skb_hnat_set_is_decrypt(skb, is_dec)
 #endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
 #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
 #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
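
On non-NETSYS_V3 builds the new accessors expand to 0, so the added skb_hnat_cdrt(skb) && skb_hnat_is_encrypt(skb) checks compile away. Below is a self-contained user-space sketch of the bit-field plus accessor pattern, with the descriptor trimmed to just the fields this patch touches (names otherwise illustrative).

#include <stdio.h>
#include <stdint.h>

/* Trimmed-down stand-in for struct hnat_desc: only the fields this patch
 * touches, not the full descriptor layout.
 */
struct hnat_desc_demo {
	uint32_t cdrt : 8;
	uint32_t is_decrypt : 1;
	uint32_t resv3 : 3;
};

#define demo_is_encrypt(d) (!(d)->is_decrypt)

int main(void)
{
	struct hnat_desc_demo rx = { .cdrt = 42, .is_decrypt = 1 };	/* decrypted on RX */
	struct hnat_desc_demo tx = { .cdrt = 42, .is_decrypt = 0 };	/* queued for encryption */

	printf("rx: cdrt=%u encrypt=%d\n", rx.cdrt, demo_is_encrypt(&rx));
	printf("tx: cdrt=%u encrypt=%d\n", tx.cdrt, demo_is_encrypt(&tx));
	return 0;
}
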