blob: 1a5c255aed68948b55988f867a4890d760610ac7 [file] [log] [blame]
developerbc6a6a32022-07-19 11:03:07 +08001diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
2index 4f53794..dc5d050 100644
3--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
4+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
5@@ -3944,7 +3944,8 @@ static int mtk_probe(struct platform_device *pdev)
6 for (i = 0; i < eth->ppe_num; i++) {
7 eth->ppe[i] = mtk_ppe_init(eth,
8 eth->base + MTK_ETH_PPE_BASE + i * 0x400,
9- 2, eth->soc->hash_way, i);
10+ 2, eth->soc->hash_way, i,
11+ eth->soc->has_accounting);
12 if (!eth->ppe[i]) {
13 err = -ENOMEM;
14 goto err_free_dev;
15@@ -4057,6 +4058,7 @@ static const struct mtk_soc_data mt2701_data = {
16 .required_clks = MT7623_CLKS_BITMAP,
17 .required_pctl = true,
18 .has_sram = false,
19+ .has_accounting = false,
20 .hash_way = 2,
21 .offload_version = 2,
22 .txrx = {
23@@ -4073,6 +4075,7 @@ static const struct mtk_soc_data mt7621_data = {
24 .required_clks = MT7621_CLKS_BITMAP,
25 .required_pctl = false,
26 .has_sram = false,
27+ .has_accounting = false,
28 .hash_way = 2,
29 .offload_version = 2,
30 .txrx = {
31@@ -4090,6 +4093,7 @@ static const struct mtk_soc_data mt7622_data = {
32 .required_clks = MT7622_CLKS_BITMAP,
33 .required_pctl = false,
34 .has_sram = false,
35+ .has_accounting = true,
36 .hash_way = 2,
37 .offload_version = 2,
38 .txrx = {
39@@ -4106,6 +4110,7 @@ static const struct mtk_soc_data mt7623_data = {
40 .required_clks = MT7623_CLKS_BITMAP,
41 .required_pctl = true,
42 .has_sram = false,
43+ .has_accounting = false,
44 .hash_way = 2,
45 .offload_version = 2,
46 .txrx = {
47@@ -4123,6 +4128,7 @@ static const struct mtk_soc_data mt7629_data = {
48 .required_clks = MT7629_CLKS_BITMAP,
49 .required_pctl = false,
50 .has_sram = false,
51+ .has_accounting = true,
52 .txrx = {
53 .txd_size = sizeof(struct mtk_tx_dma),
54 .rxd_size = sizeof(struct mtk_rx_dma),
55@@ -4138,6 +4144,7 @@ static const struct mtk_soc_data mt7986_data = {
56 .required_clks = MT7986_CLKS_BITMAP,
57 .required_pctl = false,
58 .has_sram = true,
59+ .has_accounting = true,
60 .hash_way = 4,
61 .offload_version = 2,
62 .txrx = {
63@@ -4155,6 +4162,7 @@ static const struct mtk_soc_data mt7981_data = {
64 .required_clks = MT7981_CLKS_BITMAP,
65 .required_pctl = false,
66 .has_sram = true,
67+ .has_accounting = true,
68 .hash_way = 4,
69 .offload_version = 2,
70 .txrx = {
71@@ -4171,6 +4179,7 @@ static const struct mtk_soc_data rt5350_data = {
72 .required_clks = MT7628_CLKS_BITMAP,
73 .required_pctl = false,
74 .has_sram = false,
75+ .has_accounting = false,
76 .txrx = {
77 .txd_size = sizeof(struct mtk_tx_dma),
78 .rxd_size = sizeof(struct mtk_rx_dma),
79diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
80index f659633..5e16fa8 100644
81--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
82+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
83@@ -1213,6 +1213,7 @@ struct mtk_soc_data {
84 u8 offload_version;
85 netdev_features_t hw_features;
86 bool has_sram;
87+ bool has_accounting;
88 struct {
89 u32 txd_size;
90 u32 rxd_size;
91diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
92index 918aa22..8c036cd 100755
93--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
94+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
95@@ -74,6 +74,46 @@ static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
96 return ret;
97 }
98
99+static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
100+{
101+ int ret;
102+ u32 val;
103+
104+ ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
105+ !(val & MTK_PPE_MIB_SER_CR_ST),
106+ 20, MTK_PPE_WAIT_TIMEOUT_US);
107+
108+ if (ret)
109+ dev_err(ppe->dev, "MIB table busy\n");
110+
111+ return ret;
112+}
113+
114+int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
115+{
116+ u32 val, cnt_r0, cnt_r1, cnt_r2;
117+ u32 byte_cnt_low, byte_cnt_high, pkt_cnt_low, pkt_cnt_high;
118+
119+ val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
120+ ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
121+
122+ if (mtk_ppe_mib_wait_busy(ppe))
123+ return -ETIMEDOUT;
124+
125+ cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
126+ cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
127+ cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
128+
129+ byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
130+ byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
131+ pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
132+ pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
133+ *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
134+ *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
135+
136+ return 0;
137+}
138+
139 static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
140 {
141 ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
142@@ -412,6 +452,14 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
143 MTK_FOE_STATE_INVALID);
144 dma_wmb();
145+
146+ if (ppe->accounting) {
147+ struct mtk_foe_accounting *acct;
148+
149+ acct = ppe->acct_table + entry->hash * sizeof(*acct);
150+ acct->packets = 0;
151+ acct->bytes = 0;
152+ }
153 }
154 entry->hash = 0xffff;
155
156 if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
157@@ -513,6 +560,16 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
158 wmb();
159 hwe->ib1 = entry->ib1;
160
161+ if (ppe->accounting) {
162+ int type;
163+
164+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
165+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
166+ hwe->ipv6.ib2 |= MTK_FOE_IB2_MIB_CNT;
167+ else
168+ hwe->ipv4.ib2 |= MTK_FOE_IB2_MIB_CNT;
169+ }
170+
171 dma_wmb();
172
173 mtk_ppe_cache_clear(ppe);
174@@ -618,8 +675,6 @@ void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
175 }
176
177 if (found || !mtk_flow_entry_match(entry, hwe)) {
178- if (entry->hash != 0xffff)
179- entry->hash = 0xffff;
180 continue;
181 }
182
183@@ -676,12 +731,40 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
184 return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
185 }
186
187-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version, int way, int id)
188+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, struct mtk_foe_accounting *diff)
189+{
190+ struct mtk_foe_accounting *acct;
191+ int size = sizeof(struct mtk_foe_accounting);
192+ u64 bytes, packets;
193+
194+ if (!ppe->accounting)
195+ return NULL;
196+
197+ if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
198+ return NULL;
199+
200+ acct = ppe->acct_table + index * size;
201+
202+ acct->bytes += bytes;
203+ acct->packets += packets;
204+
205+ if (diff) {
206+ diff->bytes = bytes;
207+ diff->packets = packets;
208+ }
209+
210+ return acct;
211+}
212+
213+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version, int way, int id,
214+ int accounting)
215 {
216 struct device *dev = eth->dev;
217 struct mtk_foe_entry *foe;
218+ struct mtk_mib_entry *mib;
219 struct mtk_ppe *ppe;
220 struct hlist_head *flow;
221+ struct mtk_foe_accounting *acct;
222
223 ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
224 if (!ppe)
225@@ -698,6 +781,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int versio
226 ppe->version = version;
227 ppe->way = way;
228 ppe->id = id;
229+ ppe->accounting = accounting;
230
231 foe = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*foe),
232 &ppe->foe_phys, GFP_KERNEL);
233@@ -713,6 +797,24 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int versio
234
235 ppe->foe_flow = flow;
236
237+ if (accounting) {
238+ mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
239+ &ppe->mib_phys, GFP_KERNEL);
240+ if (!mib)
241+ return NULL;
242+
243+ memset(mib, 0, MTK_PPE_ENTRIES * sizeof(*mib));
244+
245+ ppe->mib_table = mib;
246+
247+ acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
248+ GFP_KERNEL);
249+ if (!acct)
250+ return NULL;
251+
252+ ppe->acct_table = acct;
253+ }
254+
255 return ppe;
256 }
257
258@@ -811,6 +949,13 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
259 ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
260 ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
261
262+ if (ppe->accounting && ppe->mib_phys) {
263+ ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
264+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN, MTK_PPE_MIB_CFG_EN);
265+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR, MTK_PPE_MIB_CFG_RD_CLR);
266+ ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN, MTK_PPE_MIB_CACHE_CTL_EN);
267+ }
268+
269 return 0;
270 }
271
272diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
273index 3d6928c..8076e5d 100644
274--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
275+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
276@@ -270,6 +270,20 @@ struct mtk_flow_entry {
277 unsigned long cookie;
278 };
279
280+struct mtk_mib_entry {
281+ u32 byt_cnt_l;
282+ u16 byt_cnt_h;
283+ u32 pkt_cnt_l;
284+ u8 pkt_cnt_h;
285+ u8 _rsv0;
286+ u32 _rsv1;
287+} __packed;
288+
289+struct mtk_foe_accounting {
290+ u64 bytes;
291+ u64 packets;
292+};
293+
294 struct mtk_ppe {
295 struct mtk_eth *eth;
296 struct device *dev;
297@@ -277,10 +291,14 @@ struct mtk_ppe {
298 int version;
299 int id;
300 int way;
301+ int accounting;
302
303 struct mtk_foe_entry *foe_table;
304 dma_addr_t foe_phys;
305
306+ struct mtk_mib_entry *mib_table;
307+ dma_addr_t mib_phys;
308+
309 u16 foe_check_time[MTK_PPE_ENTRIES];
310 struct hlist_head *foe_flow;
311
312@@ -289,7 +307,8 @@ struct mtk_ppe {
313 void *acct_table;
314 };
315
316-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version, int way, int id);
317+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version, int way, int id,
318+ int accounting);
319 int mtk_ppe_start(struct mtk_ppe *ppe);
320 int mtk_ppe_stop(struct mtk_ppe *ppe);
321
322@@ -340,5 +359,6 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
323 int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
324 void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
325 int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
326+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, struct mtk_foe_accounting *diff);
327
328 #endif
329diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
330index f4ebe59..d713e2e 100644
331--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
332+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
333@@ -81,6 +81,7 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, struct mtk_ppe *ppe, bool bind)
334 struct mtk_foe_entry *entry = &ppe->foe_table[i];
335 struct mtk_foe_mac_info *l2;
336 struct mtk_flow_addr_info ai = {};
337+ struct mtk_foe_accounting *acct;
338 unsigned char h_source[ETH_ALEN];
339 unsigned char h_dest[ETH_ALEN];
340 int type, state;
341@@ -94,6 +95,8 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, struct mtk_ppe *ppe, bool bind)
342 if (bind && state != MTK_FOE_STATE_BIND)
343 continue;
344
345+ acct = mtk_foe_entry_get_mib(ppe, i, NULL);
346+
347 type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
348 seq_printf(m, "%05x %s %7s", i,
349 mtk_foe_entry_state_str(state),
350@@ -154,9 +157,12 @@ mtk_ppe_debugfs_foe_show(struct seq_file *m, struct mtk_ppe *ppe, bool bind)
351 *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
352
353 seq_printf(m, " eth=%pM->%pM etype=%04x"
354- " vlan=%d,%d ib1=%08x ib2=%08x\n",
355+ " vlan=%d,%d ib1=%08x ib2=%08x"
356+ " packets=%llu bytes=%llu\n",
357 h_source, h_dest, ntohs(l2->etype),
358- l2->vlan1, l2->vlan2, entry->ib1, ib2);
359+ l2->vlan1, l2->vlan2, entry->ib1, ib2,
360+ acct ? acct->packets : 0, acct ? acct->bytes : 0
361+ );
362 }
363
364 return 0;
365diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
366index 2f7d76d..f258539 100755
367--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
368+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
369@@ -504,6 +504,7 @@ static int
370 mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
371 {
372 struct mtk_flow_entry *entry;
373+ struct mtk_foe_accounting diff;
374 u32 idle;
375 int i;
376
377@@ -516,6 +517,12 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
378 idle = mtk_foe_entry_idle_time(eth->ppe[i], entry);
379 f->stats.lastused = jiffies - idle * HZ;
380
381+ if (entry->hash != 0xFFFF &&
382+ mtk_foe_entry_get_mib(eth->ppe[i], entry->hash, &diff)) {
383+ f->stats.pkts += diff.packets;
384+ f->stats.bytes += diff.bytes;
385+ }
386+
387 return 0;
388 }
389
390diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
391index d319f18..9eb7a0d 100644
392--- a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
393+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
394@@ -145,6 +146,20 @@ enum {
395
396 #define MTK_PPE_MIB_TB_BASE 0x338
397
398+#define MTK_PPE_MIB_SER_CR 0x33C
399+#define MTK_PPE_MIB_SER_CR_ST BIT(16)
400+#define MTK_PPE_MIB_SER_CR_ADDR GENMASK(13, 0)
401+
402+#define MTK_PPE_MIB_SER_R0 0x340
403+#define MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW GENMASK(31, 0)
404+
405+#define MTK_PPE_MIB_SER_R1 0x344
406+#define MTK_PPE_MIB_SER_R1_PKT_CNT_LOW GENMASK(31, 16)
407+#define MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH GENMASK(15, 0)
408+
409+#define MTK_PPE_MIB_SER_R2 0x348
410+#define MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH GENMASK(23, 0)
411+
412 #define MTK_PPE_MIB_CACHE_CTL 0x350
413 #define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
414 #define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
415diff --git a/net/netfilter/xt_FLOWOFFLOAD.c b/net/netfilter/xt_FLOWOFFLOAD.c
416index 8547f4a..c175e4d 100644
417--- a/net/netfilter/xt_FLOWOFFLOAD.c
418+++ b/net/netfilter/xt_FLOWOFFLOAD.c
419@@ -700,12 +781,12 @@ static int __init xt_flowoffload_tg_init(void)
420 if (ret)
421 goto cleanup;
422
423- flowtable[1].ft.flags = NF_FLOWTABLE_HW_OFFLOAD;
424+ flowtable[1].ft.flags = NF_FLOWTABLE_HW_OFFLOAD | NF_FLOWTABLE_COUNTER;
425
426 ret = xt_register_target(&offload_tg_reg);
427 if (ret)
428 goto cleanup2;
429
430 return 0;
431
432 cleanup2: