From a21ecf0e033807b976967286e6c392f48ee2049f Mon Sep 17 00:00:00 2001
From: Era Mayflower <mayflowerera@gmail.com>
Date: Mon, 9 Mar 2020 19:47:01 +0000
Subject: macsec: Support XPN frame handling - IEEE 802.1AEbw

Support frame handling for extended packet number cipher suites
(IEEE 802.1AEbw). This does not include the netlink patches that are
still needed.

 * Added xpn boolean field to `struct macsec_secy`.
 * Added ssci field to `struct macsec_tx_sa` (802.1AE figure 10-5).
 * Added ssci field to `struct macsec_rx_sa` (802.1AE figure 10-5).
 * Added salt field to `struct macsec_key` (802.1AE 10.7 NOTE 1).
 * Created pn_t type for easy access to lower and upper halves.
 * Created salt_t type for easy access to the "ssci" and "pn" parts.
 * Created `macsec_fill_iv_xpn` function to create the IV in XPN mode.
 * Added support for PN recovery and the preliminary replay check in XPN mode.

In addition, according to IEEE 802.1AEbw figure 10-5, the PN of an
incoming frame can be 0 when an XPN cipher suite is used, so the
function `macsec_validate_skb` was fixed to fail on PN=0 only if XPN
is off.
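
To make the PN=0 case concrete: with XPN the SecTAG still carries only
the low 32 bits of the 64-bit PN, so a zero on the wire is legitimate
whenever the full counter crosses a multiple of 2^32. A minimal
userspace sketch with purely illustrative values (not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The SecTAG carries only the low 32 bits of the 64-bit XPN PN,
	 * so hdr_pn == 0 is legal whenever the full counter is a nonzero
	 * multiple of 2^32. */
	uint64_t full_pn = 0x300000000ull;   /* illustrative 64-bit PN */
	uint32_t hdr_pn = (uint32_t)full_pn; /* what goes on the wire */

	printf("hdr_pn = %u, full pn nonzero: %d\n", hdr_pn, full_pn != 0);
	return 0;
}
```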

Signed-off-by: Era Mayflower <mayflowerera@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/macsec.c | 130 +++++++++++++++++++++++++++++++++++++--------------
 include/net/macsec.h | 45 ++++++++++++++++--
 2 files changed, 136 insertions(+), 39 deletions(-)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 6ec6fc191a6e4..6c71e250cccb0 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -19,6 +19,7 @@
 #include <net/gro_cells.h>
 #include <net/macsec.h>
 #include <linux/phy.h>
+#include <linux/byteorder/generic.h>

 #include <uapi/linux/if_macsec.h>

@@ -68,6 +69,16 @@ struct macsec_eth_header {
 sc; \
 sc = rtnl_dereference(sc->next))

+#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))
+
+struct gcm_iv_xpn {
+ union {
+ u8 short_secure_channel_id[4];
+ ssci_t ssci;
+ };
+ __be64 pn;
+} __packed;
+
 struct gcm_iv {
 union {
 u8 secure_channel_id[8];
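
The pn_same_half() macro added above compares only bit 31 of its two
arguments, i.e. it reports whether both PNs fall in the same half of
the 32-bit sequence space. A standalone userspace sketch of the same
macro (illustrative, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

/* Same expression as the kernel macro: two PNs are in the same half
 * of the 32-bit space exactly when their top bits agree. */
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

int main(void)
{
	/* Both below 2^31: same (lower) half. */
	printf("%d\n", pn_same_half(5u, 10u));                  /* 1 */
	/* Last lower-half PN vs. first upper-half PN: different halves. */
	printf("%d\n", pn_same_half(0x7fffffffu, 0x80000000u)); /* 0 */
	return 0;
}
```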
@@ -372,8 +383,8 @@ static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
 return __macsec_get_ops(macsec->offload, macsec, ctx);
 }

-/* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
-static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
+/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
+static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
 {
 struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
 int len = skb->len - 2 * ETH_ALEN;
@@ -398,8 +409,8 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
 if (h->unused)
 return false;

- /* rx.pn != 0 (figure 10-5) */
- if (!h->packet_number)
+ /* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
+ if (!h->packet_number && !xpn)
 return false;

 /* length check, f) g) h) i) */
@@ -411,6 +422,15 @@ static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
 #define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
 #define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

+static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
+ salt_t salt)
+{
+ struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;
+
+ gcm_iv->ssci = ssci ^ salt.ssci;
+ gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
+}
+
 static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
 {
 struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;
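
For reference, macsec_fill_iv_xpn() above builds the 96-bit GCM IV by
XORing the SSCI and the big-endian 64-bit PN with the per-SA 12-byte
salt (IEEE 802.1AEbw). A self-contained userspace model of that layout;
the helper names and all values are illustrative, and byte order is
open-coded instead of using the kernel's cpu_to_be64():

```c
#include <stdint.h>
#include <stdio.h>

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

static void put_be64(uint8_t *p, uint64_t v)
{
	put_be32(p, (uint32_t)(v >> 32));
	put_be32(p + 4, (uint32_t)v);
}

/* Model of the XPN IV: bytes 0-3 are the SSCI, bytes 4-11 the 64-bit
 * PN, and the whole 12 bytes are XORed with the salt. */
static void fill_iv_xpn(uint8_t iv[12], uint32_t ssci, uint64_t pn,
			const uint8_t salt[12])
{
	int i;

	put_be32(iv, ssci);
	put_be64(iv + 4, pn);
	for (i = 0; i < 12; i++)
		iv[i] ^= salt[i];
}

int main(void)
{
	const uint8_t salt[12] = { 0xe6, 0x30, 0xe8, 0x1a, 0x48, 0xde,
				   0x86, 0xa2, 0x1c, 0x66, 0xfa, 0x6d };
	uint8_t iv[12];
	int i;

	fill_iv_xpn(iv, 0x7a30c118u, 0xb0df459c2a6b59e2ull, salt);
	for (i = 0; i < 12; i++)
		printf("%02x", iv[i]);
	printf("\n");
	return 0;
}
```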
@@ -446,14 +466,19 @@ void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
 }
 EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

-static u32 tx_sa_update_pn(struct macsec_tx_sa *tx_sa, struct macsec_secy *secy)
+static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
+ struct macsec_secy *secy)
 {
- u32 pn;
+ pn_t pn;

 spin_lock_bh(&tx_sa->lock);
- pn = tx_sa->next_pn;

- tx_sa->next_pn++;
+ pn = tx_sa->next_pn_halves;
+ if (secy->xpn)
+ tx_sa->next_pn++;
+ else
+ tx_sa->next_pn_halves.lower++;
+
 if (tx_sa->next_pn == 0)
 __macsec_pn_wrapped(secy, tx_sa);
 spin_unlock_bh(&tx_sa->lock);
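
The split increment above is the heart of the tx-path change: with XPN
the full 64-bit next_pn advances, so the exhaustion check
(next_pn == 0) fires only after 2^64 packets, while without XPN only
the lower 32 bits move and wrap as before. A userspace model of the
two behaviours; the little-endian half ordering is assumed here for
brevity, while the kernel's pn_t covers both endiannesses:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's pn_t union (little-endian). */
union pn {
	struct {
		uint32_t lower;
		uint32_t upper;
	};
	uint64_t full64;
};

int main(void)
{
	union pn start = { .full64 = 0xffffffffull }; /* lower half about to wrap */
	union pn classic = start, xpn = start;

	/* Non-XPN: only the lower 32 bits advance; with upper == 0 the
	 * wrap leaves full64 == 0, which marks the SA as exhausted. */
	classic.lower++;
	printf("classic: full64=%#llx exhausted=%d\n",
	       (unsigned long long)classic.full64, classic.full64 == 0);

	/* XPN: the full 64-bit counter advances and carries into upper. */
	xpn.full64++;
	printf("xpn:     full64=%#llx\n", (unsigned long long)xpn.full64);
	return 0;
}
```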
@@ -568,7 +593,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 struct macsec_tx_sa *tx_sa;
 struct macsec_dev *macsec = macsec_priv(dev);
 bool sci_present;
- u32 pn;
+ pn_t pn;

 secy = &macsec->secy;
 tx_sc = &secy->tx_sc;
@@ -610,12 +635,12 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 memmove(hh, eth, 2 * ETH_ALEN);

 pn = tx_sa_update_pn(tx_sa, secy);
- if (pn == 0) {
+ if (pn.full64 == 0) {
 macsec_txsa_put(tx_sa);
 kfree_skb(skb);
 return ERR_PTR(-ENOLINK);
 }
- macsec_fill_sectag(hh, secy, pn, sci_present);
+ macsec_fill_sectag(hh, secy, pn.lower, sci_present);
 macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

 skb_put(skb, secy->icv_len);
@@ -646,7 +671,10 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
 return ERR_PTR(-ENOMEM);
 }

- macsec_fill_iv(iv, secy->sci, pn);
+ if (secy->xpn)
+ macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
+ else
+ macsec_fill_iv(iv, secy->sci, pn.lower);

 sg_init_table(sg, ret);
 ret = skb_to_sgvec(skb, sg, 0, skb->len);
@@ -698,13 +726,14 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
 u32 lowest_pn = 0;

 spin_lock(&rx_sa->lock);
- if (rx_sa->next_pn >= secy->replay_window)
- lowest_pn = rx_sa->next_pn - secy->replay_window;
+ if (rx_sa->next_pn_halves.lower >= secy->replay_window)
+ lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

 /* Now perform replay protection check again
 * (see IEEE 802.1AE-2006 figure 10-5)
 */
- if (secy->replay_protect && pn < lowest_pn) {
+ if (secy->replay_protect && pn < lowest_pn &&
+ (!secy->xpn || pn_same_half(pn, lowest_pn))) {
 spin_unlock(&rx_sa->lock);
 u64_stats_update_begin(&rxsc_stats->syncp);
 rxsc_stats->stats.InPktsLate++;
@@ -753,8 +782,15 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
 }
 u64_stats_update_end(&rxsc_stats->syncp);

- if (pn >= rx_sa->next_pn)
- rx_sa->next_pn = pn + 1;
+ // Instead of "pn >=" - to support pn overflow in xpn
+ if (pn + 1 > rx_sa->next_pn_halves.lower) {
+ rx_sa->next_pn_halves.lower = pn + 1;
+ } else if (secy->xpn &&
+ !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
+ rx_sa->next_pn_halves.upper++;
+ rx_sa->next_pn_halves.lower = pn + 1;
+ }
+
 spin_unlock(&rx_sa->lock);
 }

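The update above deliberately replaces the old `pn >=` test: under XPN
a received PN that is numerically behind next_pn but in the other half
of the 32-bit space means the lower half wrapped, so the upper half
must be carried. A standalone sketch of that rule; advance_next_pn is
an illustrative name, and little-endian half ordering is assumed:

```c
#include <stdint.h>
#include <stdio.h>

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

union pn {
	struct { uint32_t lower; uint32_t upper; }; /* little-endian host */
	uint64_t full64;
};

/* Mirror of the post-decrypt update: advance next_pn past the received
 * pn, bumping the upper half when the lower 32 bits wrapped. */
static void advance_next_pn(union pn *next, uint32_t pn, int xpn)
{
	if (pn + 1 > next->lower) {
		next->lower = pn + 1;
	} else if (xpn && !pn_same_half(pn, next->lower)) {
		next->upper++;
		next->lower = pn + 1;
	}
}

int main(void)
{
	union pn next = { .lower = 0xfffffff0u, .upper = 2 };

	advance_next_pn(&next, 5u, 1); /* wrapped: upper 2 -> 3, lower = 6 */
	printf("next_pn = %#llx\n", (unsigned long long)next.full64);
	return 0;
}
```
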
@@ -841,6 +877,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 unsigned char *iv;
 struct aead_request *req;
 struct macsec_eth_header *hdr;
+ u32 hdr_pn;
 u16 icv_len = secy->icv_len;

 macsec_skb_cb(skb)->valid = false;
@@ -860,7 +897,21 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
 }

 hdr = (struct macsec_eth_header *)skb->data;
- macsec_fill_iv(iv, sci, ntohl(hdr->packet_number));
+ hdr_pn = ntohl(hdr->packet_number);
+
+ if (secy->xpn) {
+ pn_t recovered_pn = rx_sa->next_pn_halves;
+
+ recovered_pn.lower = hdr_pn;
+ if (hdr_pn < rx_sa->next_pn_halves.lower &&
+ !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
+ recovered_pn.upper++;
+
+ macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
+ rx_sa->key.salt);
+ } else {
+ macsec_fill_iv(iv, sci, hdr_pn);
+ }

 sg_init_table(sg, ret);
 ret = skb_to_sgvec(skb, sg, 0, skb->len);
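
This hunk is the XPN PN recovery (IEEE 802.1AEbw 10.6): the SecTAG
carries only the low 32 bits, so the receiver rebuilds the most likely
64-bit PN from its own next expected PN, bumping the upper half when
the header PN appears to come from after a lower-half wrap. A userspace
sketch of the same rule (recover_pn is an illustrative name,
little-endian half ordering assumed):

```c
#include <stdint.h>
#include <stdio.h>

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

union pn {
	struct { uint32_t lower; uint32_t upper; }; /* little-endian host */
	uint64_t full64;
};

/* Mirror of the recovery: take the upper half from next_pn, the lower
 * half from the header, and carry if the lower half wrapped between. */
static uint64_t recover_pn(union pn next_pn, uint32_t hdr_pn)
{
	union pn recovered = next_pn;

	recovered.lower = hdr_pn;
	if (hdr_pn < next_pn.lower && !pn_same_half(hdr_pn, next_pn.lower))
		recovered.upper++;
	return recovered.full64;
}

int main(void)
{
	union pn next = { .lower = 0xfffffffau, .upper = 7 };

	/* No wrap: the current upper half is kept. */
	printf("%#llx\n", (unsigned long long)recover_pn(next, 0xfffffffbu));
	/* hdr_pn restarted in the lower half: upper becomes 8. */
	printf("%#llx\n", (unsigned long long)recover_pn(next, 2u));
	return 0;
}
```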
@@ -1001,7 +1052,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 struct macsec_rxh_data *rxd;
 struct macsec_dev *macsec;
 sci_t sci;
- u32 pn;
+ u32 hdr_pn;
 bool cbit;
 struct pcpu_rx_sc_stats *rxsc_stats;
 struct pcpu_secy_stats *secy_stats;
@@ -1072,7 +1123,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 secy_stats = this_cpu_ptr(macsec->stats);
 rxsc_stats = this_cpu_ptr(rx_sc->stats);

- if (!macsec_validate_skb(skb, secy->icv_len)) {
+ if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
 u64_stats_update_begin(&secy_stats->syncp);
 secy_stats->stats.InPktsBadTag++;
 u64_stats_update_end(&secy_stats->syncp);
@@ -1104,13 +1155,16 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 }

 /* First, PN check to avoid decrypting obviously wrong packets */
- pn = ntohl(hdr->packet_number);
+ hdr_pn = ntohl(hdr->packet_number);
 if (secy->replay_protect) {
 bool late;

 spin_lock(&rx_sa->lock);
- late = rx_sa->next_pn >= secy->replay_window &&
- pn < (rx_sa->next_pn - secy->replay_window);
+ late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
+ hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);
+
+ if (secy->xpn)
+ late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
 spin_unlock(&rx_sa->lock);

 if (late) {
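
Note how the early replay check is relaxed under XPN: a header PN far
below next_pn may really be ahead of it after a 32-bit wrap, so the
packet is only declared late when both PNs sit in the same half, and
the wrapped case is left to the recovery and post-decrypt check. A
small standalone model (is_late is an illustrative name):

```c
#include <stdint.h>
#include <stdio.h>

#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

/* Model of the pre-decrypt check: without XPN, anything below
 * next_pn - replay_window is late; with XPN, a PN in the other half
 * may be a post-wrap packet, so it gets the benefit of the doubt. */
static int is_late(uint32_t next_pn_lower, uint32_t replay_window,
		   uint32_t hdr_pn, int xpn)
{
	int late = next_pn_lower >= replay_window &&
		   hdr_pn < (next_pn_lower - replay_window);

	if (xpn)
		late = late && pn_same_half(next_pn_lower, hdr_pn);
	return late;
}

int main(void)
{
	/* hdr_pn = 3 looks ancient next to next_pn = 0xfffffff0 ... */
	printf("no xpn: %d\n", is_late(0xfffffff0u, 64, 3, 0)); /* late */
	/* ... but under XPN it is a plausible post-wrap PN. */
	printf("xpn:    %d\n", is_late(0xfffffff0u, 64, 3, 1)); /* not late */
	return 0;
}
```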
@@ -1139,7 +1193,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 return RX_HANDLER_CONSUMED;
 }

- if (!macsec_post_decrypt(skb, secy, pn))
+ if (!macsec_post_decrypt(skb, secy, hdr_pn))
 goto drop;

 deliver:
@@ -1666,7 +1720,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)

 if (tb_sa[MACSEC_SA_ATTR_PN]) {
 spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
 spin_unlock_bh(&rx_sa->lock);
 }

@@ -1873,7 +1927,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 }

 spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
 spin_unlock_bh(&tx_sa->lock);

 if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
@@ -2137,9 +2191,11 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 u8 assoc_num;
 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
 bool was_operational, was_active;
- u32 prev_pn = 0;
+ pn_t prev_pn;
 int ret = 0;

+ prev_pn.full64 = 0;
+
 if (!attrs[MACSEC_ATTR_IFINDEX])
 return -EINVAL;

@@ -2159,8 +2215,8 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)

 if (tb_sa[MACSEC_SA_ATTR_PN]) {
 spin_lock_bh(&tx_sa->lock);
- prev_pn = tx_sa->next_pn;
- tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ prev_pn = tx_sa->next_pn_halves;
+ tx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
 spin_unlock_bh(&tx_sa->lock);
 }

@@ -2198,7 +2254,7 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 cleanup:
 if (tb_sa[MACSEC_SA_ATTR_PN]) {
 spin_lock_bh(&tx_sa->lock);
- tx_sa->next_pn = prev_pn;
+ tx_sa->next_pn_halves = prev_pn;
 spin_unlock_bh(&tx_sa->lock);
 }
 tx_sa->active = was_active;
@@ -2218,9 +2274,11 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
 struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
 bool was_active;
- u32 prev_pn = 0;
+ pn_t prev_pn;
 int ret = 0;

+ prev_pn.full64 = 0;
+
 if (!attrs[MACSEC_ATTR_IFINDEX])
 return -EINVAL;

@@ -2243,8 +2301,8 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)

 if (tb_sa[MACSEC_SA_ATTR_PN]) {
 spin_lock_bh(&rx_sa->lock);
- prev_pn = rx_sa->next_pn;
- rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
+ prev_pn = rx_sa->next_pn_halves;
+ rx_sa->next_pn_halves.lower = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
 spin_unlock_bh(&rx_sa->lock);
 }

@@ -2277,7 +2335,7 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 cleanup:
 if (tb_sa[MACSEC_SA_ATTR_PN]) {
 spin_lock_bh(&rx_sa->lock);
- rx_sa->next_pn = prev_pn;
+ rx_sa->next_pn_halves = prev_pn;
 spin_unlock_bh(&rx_sa->lock);
 }
 rx_sa->active = was_active;
@@ -2796,7 +2854,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 }

 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
- nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
+ nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn_halves.lower) ||
 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
 nla_nest_cancel(skb, txsa_nest);
@@ -2900,7 +2958,7 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 nla_nest_end(skb, attr);

 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
- nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
+ nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn_halves.lower) ||
 nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
 nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
 nla_nest_cancel(skb, rxsa_nest);
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 92e43db8b5667..43cd54e178770 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -11,18 +11,45 @@
 #include <uapi/linux/if_link.h>
 #include <uapi/linux/if_macsec.h>

+#define MACSEC_SALT_LEN 12
+#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+
 typedef u64 __bitwise sci_t;
+typedef u32 __bitwise ssci_t;

-#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+typedef union salt {
+ struct {
+ u32 ssci;
+ u64 pn;
+ } __packed;
+ u8 bytes[MACSEC_SALT_LEN];
+} __packed salt_t;
+
+typedef union pn {
+ struct {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u32 lower;
+ u32 upper;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u32 upper;
+ u32 lower;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ };
+ u64 full64;
+} pn_t;

 /**
 * struct macsec_key - SA key
 * @id: user-provided key identifier
 * @tfm: crypto struct, key storage
+ * @salt: salt used to generate IV in XPN cipher suites
 */
 struct macsec_key {
 u8 id[MACSEC_KEYID_LEN];
 struct crypto_aead *tfm;
+ salt_t salt;
 };

 struct macsec_rx_sc_stats {
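
The salt_t union above is what lets macsec_fill_iv_xpn() XOR the salt
word-wise: bytes 0-3 overlay the ssci part, bytes 4-11 the pn part,
and the packed attribute keeps the whole union at exactly
MACSEC_SALT_LEN bytes. A userspace check of that layout, with
GCC/Clang's __attribute__((packed)) standing in for the kernel's
__packed (the printed word values depend on host byte order; only the
4/8-byte split and the total size matter):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MACSEC_SALT_LEN 12

/* Userspace copy of the salt_t layout. */
typedef union salt {
	struct {
		uint32_t ssci;
		uint64_t pn;
	} __attribute__((packed));
	uint8_t bytes[MACSEC_SALT_LEN];
} __attribute__((packed)) salt_t;

int main(void)
{
	const uint8_t raw[MACSEC_SALT_LEN] =
		{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };
	salt_t salt;

	memcpy(salt.bytes, raw, sizeof(raw));
	printf("sizeof(salt_t) = %zu\n", sizeof(salt_t)); /* 12 */
	printf("ssci word = %#x\n", salt.ssci);           /* bytes 0-3 */
	printf("pn word   = %#llx\n", (unsigned long long)salt.pn); /* bytes 4-11 */
	return 0;
}
```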
@@ -64,12 +91,17 @@ struct macsec_tx_sc_stats {
 * @next_pn: packet number expected for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
+ * @ssci: short secure channel identifier
 * @stats: per-SA stats
 */
 struct macsec_rx_sa {
 struct macsec_key key;
+ ssci_t ssci;
 spinlock_t lock;
- u32 next_pn;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
 refcount_t refcnt;
 bool active;
 struct macsec_rx_sa_stats __percpu *stats;
@@ -110,12 +142,17 @@ struct macsec_rx_sc {
 * @next_pn: packet number to use for the next packet
 * @lock: protects next_pn manipulations
 * @key: key structure
+ * @ssci: short secure channel identifier
 * @stats: per-SA stats
 */
 struct macsec_tx_sa {
 struct macsec_key key;
+ ssci_t ssci;
 spinlock_t lock;
- u32 next_pn;
+ union {
+ pn_t next_pn_halves;
+ u64 next_pn;
+ };
 refcount_t refcnt;
 bool active;
 struct macsec_tx_sa_stats __percpu *stats;
@@ -152,6 +189,7 @@ struct macsec_tx_sc {
 * @key_len: length of keys used by the cipher suite
 * @icv_len: length of ICV used by the cipher suite
 * @validate_frames: validation mode
+ * @xpn: enable XPN for this SecY
 * @operational: MAC_Operational flag
 * @protect_frames: enable protection for this SecY
 * @replay_protect: enable packet number checks on receive
@@ -166,6 +204,7 @@ struct macsec_secy {
 u16 key_len;
 u16 icv_len;
 enum macsec_validation_type validate_frames;
+ bool xpn;
 bool operational;
 bool protect_frames;
 bool replay_protect;
--