From dcb780fb279514f268826f2e9f4df3bc75610703 Mon Sep 17 00:00:00 2001
From: Antoine Tenart <antoine.tenart@bootlin.com>
Date: Mon, 13 Jan 2020 23:31:44 +0100
Subject: net: macsec: add nla support for changing the offloading selection

MACsec offloading to underlying hardware devices is disabled by default
(the software implementation is used). This patch adds support for
changing this setting through the MACsec netlink interface. Many checks
are done when enabling offloading on a given MACsec interface as there
are limitations (it must be supported by the hardware, only a single
interface can be offloaded on a given physical device at a time, rules
can't be moved for now).

Signed-off-by: Antoine Tenart <antoine.tenart@bootlin.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/macsec.c | 145 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 include/uapi/linux/if_macsec.h | 11 ++++
 2 files changed, 153 insertions(+), 3 deletions(-)

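As an illustration of the new command, a user-space tool could request PHY
offloading for a MACsec interface with a small libnl-genl-3 program such as
the sketch below. The device name "macsec0", the bare-bones error handling
and the use of linux/if_link.h for enum macsec_offload are assumptions of
this example, not part of the patch:

/* Minimal sketch: request PHY offloading on "macsec0" through the macsec
 * generic netlink family, using libnl-genl-3.
 */
#include <stdio.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/if_link.h>	/* enum macsec_offload (assumed location) */
#include <linux/if_macsec.h>	/* MACSEC_GENL_NAME, MACSEC_CMD_UPD_OFFLOAD */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *nest;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, MACSEC_GENL_NAME);

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    MACSEC_CMD_UPD_OFFLOAD, MACSEC_GENL_VERSION);

	/* Which MACsec netdevice to reconfigure. */
	nla_put_u32(msg, MACSEC_ATTR_IFINDEX, if_nametoindex("macsec0"));

	/* Nested MACSEC_ATTR_OFFLOAD attribute carrying the requested mode. */
	nest = nla_nest_start(msg, MACSEC_ATTR_OFFLOAD);
	nla_put_u8(msg, MACSEC_OFFLOAD_ATTR_TYPE, MACSEC_OFFLOAD_PHY);
	nla_nest_end(msg, nest);

	err = nl_send_auto(sk, msg);
	if (err >= 0)
		err = nl_wait_for_ack(sk);
	if (err < 0)
		fprintf(stderr, "offload update failed: %s\n", nl_geterror(err));

	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0;
}

With the checks added below, such a request is rejected with -EOPNOTSUPP when
the underlying device cannot offload the SecY, and with -EBUSY when the
interface is up, already has SAs configured, or its physical device is
already offloading another MACsec interface.
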
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 36b0416381bf1..e515919e8687f 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1484,6 +1484,7 @@ static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
 	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
 	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
 	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
+	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
 };
 
 static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
@@ -1501,6 +1502,10 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
 				 .len = MACSEC_MAX_KEY_LEN, },
 };
 
+static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
+	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
+};
+
 /* Offloads an operation to a device driver */
 static int macsec_offload(int (* const func)(struct macsec_context *),
 			  struct macsec_context *ctx)
@@ -2329,6 +2334,126 @@ cleanup:
 	return ret;
 }
 
+static bool macsec_is_configured(struct macsec_dev *macsec)
+{
+	struct macsec_secy *secy = &macsec->secy;
+	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
+	int i;
+
+	if (secy->n_rx_sc > 0)
+		return true;
+
+	for (i = 0; i < MACSEC_NUM_AN; i++)
+		if (tx_sc->sa[i])
+			return true;
+
+	return false;
+}
+
+static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
+	enum macsec_offload offload, prev_offload;
+	int (*func)(struct macsec_context *ctx);
+	struct nlattr **attrs = info->attrs;
+	struct net_device *dev, *loop_dev;
+	const struct macsec_ops *ops;
+	struct macsec_context ctx;
+	struct macsec_dev *macsec;
+	struct net *loop_net;
+	int ret;
+
+	if (!attrs[MACSEC_ATTR_IFINDEX])
+		return -EINVAL;
+
+	if (!attrs[MACSEC_ATTR_OFFLOAD])
+		return -EINVAL;
+
+	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
+					attrs[MACSEC_ATTR_OFFLOAD],
+					macsec_genl_offload_policy, NULL))
+		return -EINVAL;
+
+	dev = get_dev_from_nl(genl_info_net(info), attrs);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+	macsec = macsec_priv(dev);
+
+	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
+	if (macsec->offload == offload)
+		return 0;
+
+	/* Check if the offloading mode is supported by the underlying layers */
+	if (offload != MACSEC_OFFLOAD_OFF &&
+	    !macsec_check_offload(offload, macsec))
+		return -EOPNOTSUPP;
+
+	if (offload == MACSEC_OFFLOAD_OFF)
+		goto skip_limitation;
+
+	/* Check the physical interface isn't offloading another interface
+	 * first.
+	 */
+	for_each_net(loop_net) {
+		for_each_netdev(loop_net, loop_dev) {
+			struct macsec_dev *priv;
+
+			if (!netif_is_macsec(loop_dev))
+				continue;
+
+			priv = macsec_priv(loop_dev);
+
+			if (priv->real_dev == macsec->real_dev &&
+			    priv->offload != MACSEC_OFFLOAD_OFF)
+				return -EBUSY;
+		}
+	}
+
+skip_limitation:
+	/* Check if the net device is busy. */
+	if (netif_running(dev))
+		return -EBUSY;
+
+	rtnl_lock();
+
+	prev_offload = macsec->offload;
+	macsec->offload = offload;
+
+	/* Check if the device already has rules configured: we do not support
+	 * rules migration.
+	 */
+	if (macsec_is_configured(macsec)) {
+		ret = -EBUSY;
+		goto rollback;
+	}
+
+	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
+			       macsec, &ctx);
+	if (!ops) {
+		ret = -EOPNOTSUPP;
+		goto rollback;
+	}
+
+	if (prev_offload == MACSEC_OFFLOAD_OFF)
+		func = ops->mdo_add_secy;
+	else
+		func = ops->mdo_del_secy;
+
+	ctx.secy = &macsec->secy;
+	ret = macsec_offload(func, &ctx);
+	if (ret)
+		goto rollback;
+
+	rtnl_unlock();
+	return 0;
+
+rollback:
+	macsec->offload = prev_offload;
+
+	rtnl_unlock();
+	return ret;
+}
+
 static int copy_tx_sa_stats(struct sk_buff *skb,
 			    struct macsec_tx_sa_stats __percpu *pstats)
 {
@@ -2590,12 +2715,13 @@ static noinline_for_stack int
 dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	  struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct macsec_rx_sc *rx_sc;
+	struct macsec_dev *macsec = netdev_priv(dev);
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 	struct nlattr *txsa_list, *rxsc_list;
-	int i, j;
-	void *hdr;
+	struct macsec_rx_sc *rx_sc;
 	struct nlattr *attr;
+	void *hdr;
+	int i, j;
 
 	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
 			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
@@ -2607,6 +2733,13 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
 		goto nla_put_failure;
 
+	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
+	if (!attr)
+		goto nla_put_failure;
+	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
+		goto nla_put_failure;
+	nla_nest_end(skb, attr);
+
 	if (nla_put_secy(secy, skb))
 		goto nla_put_failure;
 
@@ -2872,6 +3005,12 @@ static const struct genl_ops macsec_genl_ops[] = {
 		.doit = macsec_upd_rxsa,
 		.flags = GENL_ADMIN_PERM,
 	},
+	{
+		.cmd = MACSEC_CMD_UPD_OFFLOAD,
+		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+		.doit = macsec_upd_offload,
+		.flags = GENL_ADMIN_PERM,
+	},
 };
 
 static struct genl_family macsec_fam __ro_after_init = {
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 98e4d5d7c45ca..1d63c43c38cca 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -45,6 +45,7 @@ enum macsec_attrs {
 	MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */
 	MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */
 	MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */
+	MACSEC_ATTR_OFFLOAD, /* config, nested, macsec_offload_attrs */
 	__MACSEC_ATTR_END,
 	NUM_MACSEC_ATTR = __MACSEC_ATTR_END,
 	MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1,
@@ -97,6 +98,15 @@ enum macsec_sa_attrs {
 	MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
 };
 
+enum macsec_offload_attrs {
+	MACSEC_OFFLOAD_ATTR_UNSPEC,
+	MACSEC_OFFLOAD_ATTR_TYPE, /* config/dump, u8 0..2 */
+	MACSEC_OFFLOAD_ATTR_PAD,
+	__MACSEC_OFFLOAD_ATTR_END,
+	NUM_MACSEC_OFFLOAD_ATTR = __MACSEC_OFFLOAD_ATTR_END,
+	MACSEC_OFFLOAD_ATTR_MAX = __MACSEC_OFFLOAD_ATTR_END - 1,
+};
+
 enum macsec_nl_commands {
 	MACSEC_CMD_GET_TXSC,
 	MACSEC_CMD_ADD_RXSC,
@@ -108,6 +118,7 @@ enum macsec_nl_commands {
 	MACSEC_CMD_ADD_RXSA,
 	MACSEC_CMD_DEL_RXSA,
 	MACSEC_CMD_UPD_RXSA,
+	MACSEC_CMD_UPD_OFFLOAD,
 };
 
 /* u64 per-RXSC stats */
-- 