From b62c3624500a7e1cc081e75973299c1f7901a438 Mon Sep 17 00:00:00 2001
From: Dmitry Bogdanov <dbogdanov@marvell.com>
Date: Wed, 25 Mar 2020 15:52:37 +0300
Subject: net: macsec: add support for getting offloaded stats

When HW offloading is enabled, offloaded stats should be used, because
s/w stats are wrong and out of sync with the HW in this case.

Signed-off-by: Dmitry Bogdanov <dbogdanov@marvell.com>
Signed-off-by: Mark Starovoytov <mstarovoitov@marvell.com>
Signed-off-by: Igor Russkikh <irusskikh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/macsec.c | 321 ++++++++++++++++++++++++++++++++++-----------------
 include/net/macsec.h | 24 ++++
 2 files changed, 237 insertions(+), 108 deletions(-)

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c7ad7c6f1d1ec..b00a078d13ffe 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -88,17 +88,6 @@ struct gcm_iv {
 	__be32 pn;
 };
 
-struct macsec_dev_stats {
-	__u64 OutPktsUntagged;
-	__u64 InPktsUntagged;
-	__u64 OutPktsTooLong;
-	__u64 InPktsNoTag;
-	__u64 InPktsBadTag;
-	__u64 InPktsUnknownSCI;
-	__u64 InPktsNoSCI;
-	__u64 InPktsOverrun;
-};
-
 #define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT
 
 struct pcpu_secy_stats {
@@ -2653,207 +2642,309 @@ rollback:
 	return ret;
 }
 
-static int copy_tx_sa_stats(struct sk_buff *skb,
-			    struct macsec_tx_sa_stats __percpu *pstats)
+static void get_tx_sa_stats(struct net_device *dev, int an,
+			    struct macsec_tx_sa *tx_sa,
+			    struct macsec_tx_sa_stats *sum)
 {
-	struct macsec_tx_sa_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.sa.assoc_num = an;
+			ctx.sa.tx_sa = tx_sa;
+			ctx.stats.tx_sa_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
-		const struct macsec_tx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+		const struct macsec_tx_sa_stats *stats =
+			per_cpu_ptr(tx_sa->stats, cpu);
 
-		sum.OutPktsProtected += stats->OutPktsProtected;
-		sum.OutPktsEncrypted += stats->OutPktsEncrypted;
+		sum->OutPktsProtected += stats->OutPktsProtected;
+		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
 	}
+}
 
-	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted))
+static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
+{
+	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
+			sum->OutPktsProtected) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+			sum->OutPktsEncrypted))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_rx_sa_stats(struct sk_buff *skb,
-		 struct macsec_rx_sa_stats __percpu *pstats)
+static void get_rx_sa_stats(struct net_device *dev,
+			    struct macsec_rx_sc *rx_sc, int an,
+			    struct macsec_rx_sa *rx_sa,
+			    struct macsec_rx_sa_stats *sum)
 {
-	struct macsec_rx_sa_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.sa.assoc_num = an;
+			ctx.sa.rx_sa = rx_sa;
+			ctx.stats.rx_sa_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			ctx.rx_sc = rx_sc;
+			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
-		const struct macsec_rx_sa_stats *stats = per_cpu_ptr(pstats, cpu);
+		const struct macsec_rx_sa_stats *stats =
+			per_cpu_ptr(rx_sa->stats, cpu);
 
-		sum.InPktsOK += stats->InPktsOK;
-		sum.InPktsInvalid += stats->InPktsInvalid;
-		sum.InPktsNotValid += stats->InPktsNotValid;
-		sum.InPktsNotUsingSA += stats->InPktsNotUsingSA;
-		sum.InPktsUnusedSA += stats->InPktsUnusedSA;
+		sum->InPktsOK += stats->InPktsOK;
+		sum->InPktsInvalid += stats->InPktsInvalid;
+		sum->InPktsNotValid += stats->InPktsNotValid;
+		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
+		sum->InPktsUnusedSA += stats->InPktsUnusedSA;
 	}
+}
 
-	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
-	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+static int copy_rx_sa_stats(struct sk_buff *skb,
+			    struct macsec_rx_sa_stats *sum)
+{
+	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
+			sum->InPktsInvalid) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
+			sum->InPktsNotValid) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+			sum->InPktsNotUsingSA) ||
+	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
+			sum->InPktsUnusedSA))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_rx_sc_stats(struct sk_buff *skb, struct pcpu_rx_sc_stats __percpu *pstats)
+static void get_rx_sc_stats(struct net_device *dev,
+			    struct macsec_rx_sc *rx_sc,
+			    struct macsec_rx_sc_stats *sum)
 {
-	struct macsec_rx_sc_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.stats.rx_sc_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			ctx.rx_sc = rx_sc;
+			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		const struct pcpu_rx_sc_stats *stats;
 		struct macsec_rx_sc_stats tmp;
 		unsigned int start;
 
-		stats = per_cpu_ptr(pstats, cpu);
+		stats = per_cpu_ptr(rx_sc->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			memcpy(&tmp, &stats->stats, sizeof(tmp));
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
-		sum.InOctetsValidated += tmp.InOctetsValidated;
-		sum.InOctetsDecrypted += tmp.InOctetsDecrypted;
-		sum.InPktsUnchecked += tmp.InPktsUnchecked;
-		sum.InPktsDelayed += tmp.InPktsDelayed;
-		sum.InPktsOK += tmp.InPktsOK;
-		sum.InPktsInvalid += tmp.InPktsInvalid;
-		sum.InPktsLate += tmp.InPktsLate;
-		sum.InPktsNotValid += tmp.InPktsNotValid;
-		sum.InPktsNotUsingSA += tmp.InPktsNotUsingSA;
-		sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
+		sum->InOctetsValidated += tmp.InOctetsValidated;
+		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
+		sum->InPktsUnchecked += tmp.InPktsUnchecked;
+		sum->InPktsDelayed += tmp.InPktsDelayed;
+		sum->InPktsOK += tmp.InPktsOK;
+		sum->InPktsInvalid += tmp.InPktsInvalid;
+		sum->InPktsLate += tmp.InPktsLate;
+		sum->InPktsNotValid += tmp.InPktsNotValid;
+		sum->InPktsNotUsingSA += tmp.InPktsNotUsingSA;
+		sum->InPktsUnusedSA += tmp.InPktsUnusedSA;
 	}
+}
 
+static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
+{
 	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
-			      sum.InOctetsValidated,
+			      sum->InOctetsValidated,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
-			      sum.InOctetsDecrypted,
+			      sum->InOctetsDecrypted,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
-			      sum.InPktsUnchecked,
+			      sum->InPktsUnchecked,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
-			      sum.InPktsDelayed,
+			      sum->InPktsDelayed,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
-			      sum.InPktsOK,
+			      sum->InPktsOK,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
-			      sum.InPktsInvalid,
+			      sum->InPktsInvalid,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
-			      sum.InPktsLate,
+			      sum->InPktsLate,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
-			      sum.InPktsNotValid,
+			      sum->InPktsNotValid,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
-			      sum.InPktsNotUsingSA,
+			      sum->InPktsNotUsingSA,
 			      MACSEC_RXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
-			      sum.InPktsUnusedSA,
+			      sum->InPktsUnusedSA,
 			      MACSEC_RXSC_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_tx_sc_stats(struct sk_buff *skb, struct pcpu_tx_sc_stats __percpu *pstats)
+static void get_tx_sc_stats(struct net_device *dev,
+			    struct macsec_tx_sc_stats *sum)
 {
-	struct macsec_tx_sc_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.stats.tx_sc_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		const struct pcpu_tx_sc_stats *stats;
 		struct macsec_tx_sc_stats tmp;
 		unsigned int start;
 
-		stats = per_cpu_ptr(pstats, cpu);
+		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			memcpy(&tmp, &stats->stats, sizeof(tmp));
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
-		sum.OutPktsProtected += tmp.OutPktsProtected;
-		sum.OutPktsEncrypted += tmp.OutPktsEncrypted;
-		sum.OutOctetsProtected += tmp.OutOctetsProtected;
-		sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
+		sum->OutPktsProtected += tmp.OutPktsProtected;
+		sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
+		sum->OutOctetsProtected += tmp.OutOctetsProtected;
+		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
 	}
+}
 
+static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
+{
 	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
-			      sum.OutPktsProtected,
+			      sum->OutPktsProtected,
 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
-			      sum.OutPktsEncrypted,
+			      sum->OutPktsEncrypted,
 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
-			      sum.OutOctetsProtected,
+			      sum->OutOctetsProtected,
 			      MACSEC_TXSC_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
-			      sum.OutOctetsEncrypted,
+			      sum->OutOctetsEncrypted,
 			      MACSEC_TXSC_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
 	return 0;
 }
 
-static noinline_for_stack int
-copy_secy_stats(struct sk_buff *skb, struct pcpu_secy_stats __percpu *pstats)
+static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
 {
-	struct macsec_dev_stats sum = {0, };
+	struct macsec_dev *macsec = macsec_priv(dev);
 	int cpu;
 
+	/* If h/w offloading is available, propagate to the device */
+	if (macsec_is_offloaded(macsec)) {
+		const struct macsec_ops *ops;
+		struct macsec_context ctx;
+
+		ops = macsec_get_ops(macsec, &ctx);
+		if (ops) {
+			ctx.stats.dev_stats = sum;
+			ctx.secy = &macsec_priv(dev)->secy;
+			macsec_offload(ops->mdo_get_dev_stats, &ctx);
+		}
+		return;
+	}
+
 	for_each_possible_cpu(cpu) {
 		const struct pcpu_secy_stats *stats;
 		struct macsec_dev_stats tmp;
 		unsigned int start;
 
-		stats = per_cpu_ptr(pstats, cpu);
+		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
 		do {
 			start = u64_stats_fetch_begin_irq(&stats->syncp);
 			memcpy(&tmp, &stats->stats, sizeof(tmp));
 		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
 
-		sum.OutPktsUntagged += tmp.OutPktsUntagged;
-		sum.InPktsUntagged += tmp.InPktsUntagged;
-		sum.OutPktsTooLong += tmp.OutPktsTooLong;
-		sum.InPktsNoTag += tmp.InPktsNoTag;
-		sum.InPktsBadTag += tmp.InPktsBadTag;
-		sum.InPktsUnknownSCI += tmp.InPktsUnknownSCI;
-		sum.InPktsNoSCI += tmp.InPktsNoSCI;
-		sum.InPktsOverrun += tmp.InPktsOverrun;
+		sum->OutPktsUntagged += tmp.OutPktsUntagged;
+		sum->InPktsUntagged += tmp.InPktsUntagged;
+		sum->OutPktsTooLong += tmp.OutPktsTooLong;
+		sum->InPktsNoTag += tmp.InPktsNoTag;
+		sum->InPktsBadTag += tmp.InPktsBadTag;
+		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
+		sum->InPktsNoSCI += tmp.InPktsNoSCI;
+		sum->InPktsOverrun += tmp.InPktsOverrun;
 	}
+}
 
+static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
+{
 	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
-			      sum.OutPktsUntagged,
+			      sum->OutPktsUntagged,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
-			      sum.InPktsUntagged,
+			      sum->InPktsUntagged,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
-			      sum.OutPktsTooLong,
+			      sum->OutPktsTooLong,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
-			      sum.InPktsNoTag,
+			      sum->InPktsNoTag,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
-			      sum.InPktsBadTag,
+			      sum->InPktsBadTag,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
-			      sum.InPktsUnknownSCI,
+			      sum->InPktsUnknownSCI,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
-			      sum.InPktsNoSCI,
+			      sum->InPktsNoSCI,
 			      MACSEC_SECY_STATS_ATTR_PAD) ||
 	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
-			      sum.InPktsOverrun,
+			      sum->InPktsOverrun,
 			      MACSEC_SECY_STATS_ATTR_PAD))
 		return -EMSGSIZE;
 
@@ -2914,7 +3005,12 @@ static noinline_for_stack int
 dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	  struct sk_buff *skb, struct netlink_callback *cb)
 {
+	struct macsec_tx_sc_stats tx_sc_stats = {0, };
+	struct macsec_tx_sa_stats tx_sa_stats = {0, };
+	struct macsec_rx_sc_stats rx_sc_stats = {0, };
+	struct macsec_rx_sa_stats rx_sa_stats = {0, };
 	struct macsec_dev *macsec = netdev_priv(dev);
+	struct macsec_dev_stats dev_stats = {0, };
 	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
 	struct nlattr *txsa_list, *rxsc_list;
 	struct macsec_rx_sc *rx_sc;
@@ -2945,7 +3041,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
 	if (!attr)
 		goto nla_put_failure;
-	if (copy_tx_sc_stats(skb, tx_sc->stats)) {
+
+	get_tx_sc_stats(dev, &tx_sc_stats);
+	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
 		nla_nest_cancel(skb, attr);
 		goto nla_put_failure;
 	}
@@ -2954,7 +3052,8 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
 	if (!attr)
 		goto nla_put_failure;
-	if (copy_secy_stats(skb, macsec_priv(dev)->stats)) {
+	get_secy_stats(dev, &dev_stats);
+	if (copy_secy_stats(skb, &dev_stats)) {
 		nla_nest_cancel(skb, attr);
 		goto nla_put_failure;
 	}
@@ -2978,6 +3077,22 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 			goto nla_put_failure;
 		}
 
+		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
+		if (!attr) {
+			nla_nest_cancel(skb, txsa_nest);
+			nla_nest_cancel(skb, txsa_list);
+			goto nla_put_failure;
+		}
+		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
+		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
+		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
+			nla_nest_cancel(skb, attr);
+			nla_nest_cancel(skb, txsa_nest);
+			nla_nest_cancel(skb, txsa_list);
+			goto nla_put_failure;
+		}
+		nla_nest_end(skb, attr);
+
 		if (secy->xpn) {
 			pn = tx_sa->next_pn;
 			pn_len = MACSEC_XPN_PN_LEN;
@@ -2996,20 +3111,6 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 			goto nla_put_failure;
 		}
 
-		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
-		if (!attr) {
-			nla_nest_cancel(skb, txsa_nest);
-			nla_nest_cancel(skb, txsa_list);
-			goto nla_put_failure;
-		}
-		if (copy_tx_sa_stats(skb, tx_sa->stats)) {
-			nla_nest_cancel(skb, attr);
-			nla_nest_cancel(skb, txsa_nest);
-			nla_nest_cancel(skb, txsa_list);
-			goto nla_put_failure;
-		}
-		nla_nest_end(skb, attr);
-
 		nla_nest_end(skb, txsa_nest);
 	}
 	nla_nest_end(skb, txsa_list);
@@ -3043,7 +3144,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 			nla_nest_cancel(skb, rxsc_list);
 			goto nla_put_failure;
 		}
-		if (copy_rx_sc_stats(skb, rx_sc->stats)) {
+		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
+		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
+		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
 			nla_nest_cancel(skb, attr);
 			nla_nest_cancel(skb, rxsc_nest);
 			nla_nest_cancel(skb, rxsc_list);
@@ -3084,7 +3187,9 @@ dump_secy(struct macsec_secy *secy, struct net_device *dev,
 				nla_nest_cancel(skb, rxsc_list);
 				goto nla_put_failure;
 			}
-			if (copy_rx_sa_stats(skb, rx_sa->stats)) {
+			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
+			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
+			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
 				nla_nest_cancel(skb, attr);
 				nla_nest_cancel(skb, rxsa_list);
 				nla_nest_cancel(skb, rxsc_nest);
diff --git a/include/net/macsec.h b/include/net/macsec.h
index 71de2c863df70..52874cdfe2260 100644
--- a/include/net/macsec.h
+++ b/include/net/macsec.h
@@ -88,6 +88,17 @@ struct macsec_tx_sc_stats {
 	__u64 OutOctetsEncrypted;
 };
 
+struct macsec_dev_stats {
+	__u64 OutPktsUntagged;
+	__u64 InPktsUntagged;
+	__u64 OutPktsTooLong;
+	__u64 InPktsNoTag;
+	__u64 InPktsBadTag;
+	__u64 InPktsUnknownSCI;
+	__u64 InPktsNoSCI;
+	__u64 InPktsOverrun;
+};
+
 /**
  * struct macsec_rx_sa - receive secure association
  * @active:
@@ -236,6 +247,13 @@ struct macsec_context {
 			struct macsec_tx_sa *tx_sa;
 		};
 	} sa;
+	union {
+		struct macsec_tx_sc_stats *tx_sc_stats;
+		struct macsec_tx_sa_stats *tx_sa_stats;
+		struct macsec_rx_sc_stats *rx_sc_stats;
+		struct macsec_rx_sa_stats *rx_sa_stats;
+		struct macsec_dev_stats *dev_stats;
+	} stats;
 
 	u8 prepare:1;
 };
@@ -262,6 +280,12 @@ struct macsec_ops {
 	int (*mdo_add_txsa)(struct macsec_context *ctx);
 	int (*mdo_upd_txsa)(struct macsec_context *ctx);
 	int (*mdo_del_txsa)(struct macsec_context *ctx);
+	/* Statistics */
+	int (*mdo_get_dev_stats)(struct macsec_context *ctx);
+	int (*mdo_get_tx_sc_stats)(struct macsec_context *ctx);
+	int (*mdo_get_tx_sa_stats)(struct macsec_context *ctx);
+	int (*mdo_get_rx_sc_stats)(struct macsec_context *ctx);
+	int (*mdo_get_rx_sa_stats)(struct macsec_context *ctx);
 };
 
 void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
--
cgit 1.2.3-1.el7