From 3cf3227a21d1fb020fe26128e60321bd2151e922 Mon Sep 17 00:00:00 2001
From: Antoine Tenart <antoine.tenart@bootlin.com>
Date: Mon, 13 Jan 2020 23:31:43 +0100
Subject: net: macsec: hardware offloading infrastructure

This patch introduces the MACsec hardware offloading infrastructure.

The main idea here is to re-use the logic and data structures of the
software MACsec implementation. This avoids duplicating definitions and
structures that store the same kind of information. It also allows a
unified genetlink interface to be used for both MACsec implementations
(so that the same userspace tool, `ip macsec`, is used with the same
arguments). For now, MACsec offloading cannot be disabled on an
interface that supports it.

The MACsec configuration is passed to device drivers supporting it
through macsec_ops, which are called from the MACsec genl helpers. Those
helpers call the macsec ops of PHY and Ethernet drivers in two steps: a
preparation one and a commit one. The first step is allowed to fail and
should be used to check whether a given configuration is compatible with
the features provided by a MACsec engine, while the second step is not
allowed to fail and should only be used to enable the given MACsec
configuration. Two extra calls are made: when a virtual MACsec interface
is created and when it is deleted, so that the hardware driver can stay
in sync.
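
As an illustration of this contract, a minimal driver-side sketch could
look like the snippet below. It is not taken from any existing driver:
the foo_* names and FOO_MAX_AN are hypothetical, and only the macsec_ops
callbacks and macsec_context fields used by this patch are assumed.

static int foo_mdo_add_rxsa(struct macsec_context *ctx)
{
	struct phy_device *phydev = ctx->phydev;

	if (ctx->prepare) {
		/* Preparation step: only check that the request can be
		 * handled; failing here aborts the operation before any
		 * state was touched.
		 */
		if (ctx->sa.assoc_num >= FOO_MAX_AN)
			return -EOPNOTSUPP;
		return 0;
	}

	/* Commit step: the configuration was validated above, so this
	 * call is not expected to fail.
	 */
	foo_hw_program_rx_sa(phydev, ctx->sa.assoc_num, ctx->sa.rx_sa,
			     ctx->sa.key);
	return 0;
}

static const struct macsec_ops foo_macsec_ops = {
	.mdo_add_rxsa	= foo_mdo_add_rxsa,
	/* ... the other mdo_* callbacks follow the same pattern ... */
};

A PHY driver exposing such ops would set phydev->macsec_ops to this
structure, which is what macsec_check_offload() below looks for.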

The Rx and Tx handlers are modified to take into account the special
case where the MACsec transformation happens in hardware, whether in a
PHY or in a MAC, as the packets seen by the networking stack on both the
physical and the MACsec virtual interface are exactly the same. This
leads to some limitations: the hardware and software implementations
can't be used on the same physical interface, as the policies would be
impossible to fulfill (such as strict validation of the frames). Also,
only a single virtual MACsec interface can be offloaded on a physical
port supporting hardware offloading, as it would otherwise be impossible
to tell onto which interface a given ingress packet should go.

Another limitation for now is that counters and statistics are not
reported back from the hardware to the software MACsec implementation.
This isn't an issue when using offloaded MACsec transformations, but
such reporting should be added in the future so that the MACsec state
can be shown to the user (which would also help debugging).

Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/macsec.c | 453 +++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 441 insertions(+), 12 deletions(-)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index a336eee018f0b..36b0416381bf1 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -11,12 +11,14 @@
 #include <linux/module.h>
 #include <crypto/aead.h>
 #include <linux/etherdevice.h>
+#include <linux/netdevice.h>
 #include <linux/rtnetlink.h>
 #include <linux/refcount.h>
 #include <net/genetlink.h>
 #include <net/sock.h>
 #include <net/gro_cells.h>
 #include <net/macsec.h>
+#include <linux/phy.h>

 #include <uapi/linux/if_macsec.h>

@@ -98,6 +100,7 @@ struct pcpu_secy_stats {
  * @real_dev: pointer to underlying netdevice
  * @stats: MACsec device stats
  * @secys: linked list of SecY's on the underlying device
+ * @offload: status of offloading on the MACsec device
  */
 struct macsec_dev {
 	struct macsec_secy secy;
@@ -105,6 +108,7 @@ struct macsec_dev {
 	struct pcpu_secy_stats __percpu *stats;
 	struct list_head secys;
 	struct gro_cells gro_cells;
+	enum macsec_offload offload;
 };

 /**
@@ -318,6 +322,56 @@ static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
 		h->short_length = data_len;
 }

+/* Checks if a MACsec interface is being offloaded to a hardware engine */
+static bool macsec_is_offloaded(struct macsec_dev *macsec)
+{
+	if (macsec->offload == MACSEC_OFFLOAD_PHY)
+		return true;
+
+	return false;
+}
+
+/* Checks if underlying layers implement MACsec offloading functions. */
+static bool macsec_check_offload(enum macsec_offload offload,
+				 struct macsec_dev *macsec)
+{
+	if (!macsec || !macsec->real_dev)
+		return false;
+
+	if (offload == MACSEC_OFFLOAD_PHY)
+		return macsec->real_dev->phydev &&
+		       macsec->real_dev->phydev->macsec_ops;
+
+	return false;
+}
+
+static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
+						 struct macsec_dev *macsec,
+						 struct macsec_context *ctx)
+{
+	if (ctx) {
+		memset(ctx, 0, sizeof(*ctx));
+		ctx->offload = offload;
+
+		if (offload == MACSEC_OFFLOAD_PHY)
+			ctx->phydev = macsec->real_dev->phydev;
+	}
+
+	return macsec->real_dev->phydev->macsec_ops;
+}
+
+/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
+ * context device reference if provided.
+ */
+static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
+					       struct macsec_context *ctx)
+{
+	if (!macsec_check_offload(macsec->offload, macsec))
+		return NULL;
+
+	return __macsec_get_ops(macsec->offload, macsec, ctx);
+}
+
 /* validate MACsec packet according to IEEE 802.1AE-2006 9.12 */
 static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len)
 {
@@ -867,8 +921,10 @@ static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
 	return NULL;
 }

-static void handle_not_macsec(struct sk_buff *skb)
+static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
 {
+	/* Deliver to the uncontrolled port by default */
+	enum rx_handler_result ret = RX_HANDLER_PASS;
 	struct macsec_rxh_data *rxd;
 	struct macsec_dev *macsec;

@@ -883,7 +939,8 @@ static void handle_not_macsec(struct sk_buff *skb)
 		struct sk_buff *nskb;
 		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

-		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
+		if (!macsec_is_offloaded(macsec) &&
+		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
 			u64_stats_update_begin(&secy_stats->syncp);
 			secy_stats->stats.InPktsNoTag++;
 			u64_stats_update_end(&secy_stats->syncp);
@@ -902,9 +959,17 @@ static void handle_not_macsec(struct sk_buff *skb)
 			secy_stats->stats.InPktsUntagged++;
 			u64_stats_update_end(&secy_stats->syncp);
 		}
+
+		if (netif_running(macsec->secy.netdev) &&
+		    macsec_is_offloaded(macsec)) {
+			ret = RX_HANDLER_EXACT;
+			goto out;
+		}
 	}

+out:
 	rcu_read_unlock();
+	return ret;
 }

 static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
@@ -929,12 +994,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
 		goto drop_direct;

 	hdr = macsec_ethhdr(skb);
-	if (hdr->eth.h_proto != htons(ETH_P_MACSEC)) {
-		handle_not_macsec(skb);
-
-		/* and deliver to the uncontrolled port */
-		return RX_HANDLER_PASS;
-	}
+	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
+		return handle_not_macsec(skb);

 	skb = skb_unshare(skb, GFP_ATOMIC);
 	*pskb = skb;
@@ -1440,6 +1501,40 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
				 .len = MACSEC_MAX_KEY_LEN, },
 };

+/* Offloads an operation to a device driver */
+static int macsec_offload(int (* const func)(struct macsec_context *),
+			  struct macsec_context *ctx)
+{
+	int ret;
+
+	if (unlikely(!func))
+		return 0;
+
+	if (ctx->offload == MACSEC_OFFLOAD_PHY)
+		mutex_lock(&ctx->phydev->lock);
+
+	/* Phase I: prepare. The driver should fail here if there are going to be
+	 * issues in the commit phase.
+	 */
+	ctx->prepare = true;
+	ret = (*func)(ctx);
+	if (ret)
+		goto phy_unlock;
+
+	/* Phase II: commit. This step cannot fail. */
+	ctx->prepare = false;
+	ret = (*func)(ctx);
+	/* This should never happen: commit is not allowed to fail */
+	if (unlikely(ret))
+		WARN(1, "MACsec offloading commit failed (%d)\n", ret);
+
+phy_unlock:
+	if (ctx->offload == MACSEC_OFFLOAD_PHY)
+		mutex_unlock(&ctx->phydev->lock);
+
+	return ret;
+}
+
 static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
 {
 	if (!attrs[MACSEC_ATTR_SA_CONFIG])
@@ -1555,13 +1650,40 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
 		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

-	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
 	rx_sa->sc = rx_sc;
+
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			err = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.sa.assoc_num = assoc_num;
+		ctx.sa.rx_sa = rx_sa;
+		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+		       MACSEC_KEYID_LEN);
+
+		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
+		if (err)
+			goto cleanup;
+	}
+
+	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
 	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	kfree(rx_sa);
+	rtnl_unlock();
+	return err;
 }

 static bool validate_add_rxsc(struct nlattr **attrs)
@@ -1584,6 +1706,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 	struct nlattr **attrs = info->attrs;
 	struct macsec_rx_sc *rx_sc;
 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+	bool was_active;
+	int ret;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
@@ -1609,12 +1733,35 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
 		return PTR_ERR(rx_sc);
 	}

+	was_active = rx_sc->active;
 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
 		rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.rx_sc = rx_sc;
+
+		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	rx_sc->active = was_active;
+	rtnl_unlock();
+	return ret;
 }

 static bool validate_add_txsa(struct nlattr **attrs)
@@ -1651,6 +1798,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 	struct macsec_tx_sa *tx_sa;
 	unsigned char assoc_num;
 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+	bool was_operational;
 	int err;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
@@ -1701,8 +1849,6 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 		return err;
 	}

-	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
-
 	spin_lock_bh(&tx_sa->lock);
 	tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
 	spin_unlock_bh(&tx_sa->lock);
@@ -1710,14 +1856,43 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
 		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

+	was_operational = secy->operational;
 	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
 		secy->operational = true;

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			err = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.sa.assoc_num = assoc_num;
+		ctx.sa.tx_sa = tx_sa;
+		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+		       MACSEC_KEYID_LEN);
+
+		err = macsec_offload(ops->mdo_add_txsa, &ctx);
+		if (err)
+			goto cleanup;
+	}
+
+	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
 	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);

 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	secy->operational = was_operational;
+	kfree(tx_sa);
+	rtnl_unlock();
+	return err;
 }

 static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -1730,6 +1905,7 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
 	u8 assoc_num;
 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+	int ret;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
@@ -1753,12 +1929,35 @@ static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
 		return -EBUSY;
 	}

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.sa.assoc_num = assoc_num;
+		ctx.sa.rx_sa = rx_sa;
+
+		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
 	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
 	clear_rx_sa(rx_sa);

 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	rtnl_unlock();
+	return ret;
 }

 static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -1769,6 +1968,7 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
 	struct macsec_rx_sc *rx_sc;
 	sci_t sci;
 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+	int ret;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
@@ -1795,10 +1995,31 @@ static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
 		return -ENODEV;
 	}

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.rx_sc = rx_sc;
+		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
 	free_rx_sc(rx_sc);
 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	rtnl_unlock();
+	return ret;
 }

 static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
@@ -1810,6 +2031,7 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
 	struct macsec_tx_sa *tx_sa;
 	u8 assoc_num;
 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+	int ret;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
@@ -1830,12 +2052,35 @@ static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
 		return -EBUSY;
 	}

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.sa.assoc_num = assoc_num;
+		ctx.sa.tx_sa = tx_sa;
+
+		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
 	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
 	clear_tx_sa(tx_sa);

 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	rtnl_unlock();
+	return ret;
 }

 static bool validate_upd_sa(struct nlattr **attrs)
@@ -1868,6 +2113,9 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
 	struct macsec_tx_sa *tx_sa;
 	u8 assoc_num;
 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+	bool was_operational, was_active;
+	u32 prev_pn = 0;
+	int ret = 0;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
@@ -1888,19 +2136,52 @@ static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)

 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
 		spin_lock_bh(&tx_sa->lock);
+		prev_pn = tx_sa->next_pn;
 		tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
 		spin_unlock_bh(&tx_sa->lock);
 	}

+	was_active = tx_sa->active;
 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
 		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

+	was_operational = secy->operational;
 	if (assoc_num == tx_sc->encoding_sa)
 		secy->operational = tx_sa->active;

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.sa.assoc_num = assoc_num;
+		ctx.sa.tx_sa = tx_sa;
+
+		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+		spin_lock_bh(&tx_sa->lock);
+		tx_sa->next_pn = prev_pn;
+		spin_unlock_bh(&tx_sa->lock);
+	}
+	tx_sa->active = was_active;
+	secy->operational = was_operational;
+	rtnl_unlock();
+	return ret;
 }

 static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
@@ -1913,6 +2194,9 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
 	u8 assoc_num;
 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
 	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
+	bool was_active;
+	u32 prev_pn = 0;
+	int ret = 0;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
@@ -1936,15 +2220,46 @@ static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)

 	if (tb_sa[MACSEC_SA_ATTR_PN]) {
 		spin_lock_bh(&rx_sa->lock);
+		prev_pn = rx_sa->next_pn;
 		rx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
 		spin_unlock_bh(&rx_sa->lock);
 	}

+	was_active = rx_sa->active;
 	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
 		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.sa.assoc_num = assoc_num;
+		ctx.sa.rx_sa = rx_sa;
+
+		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
 	rtnl_unlock();
 	return 0;
+
+cleanup:
+	if (tb_sa[MACSEC_SA_ATTR_PN]) {
+		spin_lock_bh(&rx_sa->lock);
+		rx_sa->next_pn = prev_pn;
+		spin_unlock_bh(&rx_sa->lock);
+	}
+	rx_sa->active = was_active;
+	rtnl_unlock();
+	return ret;
 }

 static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
@@ -1954,6 +2269,9 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
 	struct macsec_secy *secy;
 	struct macsec_rx_sc *rx_sc;
 	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
+	unsigned int prev_n_rx_sc;
+	bool was_active;
+	int ret;

 	if (!attrs[MACSEC_ATTR_IFINDEX])
 		return -EINVAL;
@@ -1971,6 +2289,8 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
 		return PTR_ERR(rx_sc);
 	}

+	was_active = rx_sc->active;
+	prev_n_rx_sc = secy->n_rx_sc;
 	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
 		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

@@ -1980,9 +2300,33 @@ static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
 		rx_sc->active = new;
 	}

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.rx_sc = rx_sc;
+
+		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
 	rtnl_unlock();

 	return 0;
+
+cleanup:
+	secy->n_rx_sc = prev_n_rx_sc;
+	rx_sc->active = was_active;
+	rtnl_unlock();
+	return ret;
 }

 static int copy_tx_sa_stats(struct sk_buff *skb,
@@ -2550,6 +2894,11 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 	struct pcpu_secy_stats *secy_stats;
 	int ret, len;

+	if (macsec_is_offloaded(netdev_priv(dev))) {
+		skb->dev = macsec->real_dev;
+		return dev_queue_xmit(skb);
+	}
+
 	/* 10.5 */
 	if (!secy->protect_frames) {
 		secy_stats = this_cpu_ptr(macsec->stats);
@@ -2663,6 +3012,22 @@ static int macsec_dev_open(struct net_device *dev)
 		goto clear_allmulti;
 	}

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			err = -EOPNOTSUPP;
+			goto clear_allmulti;
+		}
+
+		err = macsec_offload(ops->mdo_dev_open, &ctx);
+		if (err)
+			goto clear_allmulti;
+	}
+
 	if (netif_carrier_ok(real_dev))
 		netif_carrier_on(dev);

@@ -2683,6 +3048,16 @@ static int macsec_dev_stop(struct net_device *dev)

 	netif_carrier_off(dev);

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops)
+			macsec_offload(ops->mdo_dev_stop, &ctx);
+	}
+
 	dev_mc_unsync(real_dev, dev);
 	dev_uc_unsync(real_dev, dev);

@@ -2914,6 +3289,11 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
 {
+	struct macsec_dev *macsec = macsec_priv(dev);
+	struct macsec_tx_sa tx_sc;
+	struct macsec_secy secy;
+	int ret;
+
 	if (!data)
 		return 0;

@@ -2923,7 +3303,41 @@ static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
	    data[IFLA_MACSEC_PORT])
 		return -EINVAL;

-	return macsec_changelink_common(dev, data);
+	/* Keep a copy of unmodified secy and tx_sc, in case the offload
+	 * propagation fails, to revert macsec_changelink_common.
+	 */
+	memcpy(&secy, &macsec->secy, sizeof(secy));
+	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
+
+	ret = macsec_changelink_common(dev, data);
+	if (ret)
+		return ret;
+
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+		int ret;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (!ops) {
+			ret = -EOPNOTSUPP;
+			goto cleanup;
+		}
+
+		ctx.secy = &macsec->secy;
+		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
+		if (ret)
+			goto cleanup;
+	}
+
+	return 0;
+
+cleanup:
+	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
+	memcpy(&macsec->secy, &secy, sizeof(secy));
+
+	return ret;
 }

 static void macsec_del_dev(struct macsec_dev *macsec)
@@ -2966,6 +3380,18 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
 	struct net_device *real_dev = macsec->real_dev;
 	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);

+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(netdev_priv(dev), &ctx);
+		if (ops) {
+			ctx.secy = &macsec->secy;
+			macsec_offload(ops->mdo_del_secy, &ctx);
+		}
+	}
+
 	macsec_common_dellink(dev, head);

 	if (list_empty(&rxd->secys)) {
@@ -3077,6 +3503,9 @@ static int macsec_newlink(struct net *net, struct net_device *dev,

 	macsec->real_dev = real_dev;

+	/* MACsec offloading is off by default */
+	macsec->offload = MACSEC_OFFLOAD_OFF;
+
 	if (data && data[IFLA_MACSEC_ICV_LEN])
 		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
 	dev->mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
--