From 9e21d6fa97e93cb2ba9b923810666ddaf7a981ee Mon Sep 17 00:00:00 2001
From: Bc-bocun Chen <bc-bocun.chen@mediatek.com>
Date: Mon, 18 Sep 2023 11:11:03 +0800
Subject: [PATCH 13/22] flow-offload-add-mtkhnat-qdma-qos
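
Add MTK HNAT QDMA hardware-QoS support to the flow-offload path:

- expose the QDMA TX scheduler and TX queue rate-control registers
  through new debugfs files (mtk_qdma_debugfs.c);
- add a per-port-per-queue (PPPQ) mode that binds each DSA port to the
  TX queue of the same index (qid 0-5, scheduler 0);
- pass the conntrack entry through flow_cls_offload so the QDMA queue
  of an offloaded flow can be selected from ct->mark.

The knobs live under the existing mtk_ppe debugfs directory. A usage
sketch (paths assume debugfs is mounted at /sys/kernel/debug; the
formats follow the parsers in mtk_qdma_debugfs.c):

  # 0 = disable, 1 = HQoS mode, 2 = per-port-per-queue mode
  echo 2 > /sys/kernel/debug/mtk_ppe/qos_toggle

  # scheduler N: "<enable> <sp|wrr> <max_rate>"
  echo 1 sp 1000000 > /sys/kernel/debug/mtk_ppe/qdma_sch0

  # queue N: "<sch> <min_en> <min_rate> <max_en> <max_rate> <weight> <resv>"
  echo 0 1 10000 1 2500000 4 4 > /sys/kernel/debug/mtk_ppe/qdma_txq0

  cat /sys/kernel/debug/mtk_ppe/qdma_sch0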
---
drivers/net/ethernet/mediatek/Makefile | 2 +-
drivers/net/ethernet/mediatek/mtk_eth_soc.c | 10 +
drivers/net/ethernet/mediatek/mtk_eth_soc.h | 48 ++-
drivers/net/ethernet/mediatek/mtk_ppe.c | 48 +-
drivers/net/ethernet/mediatek/mtk_ppe.h | 4 +
.../net/ethernet/mediatek/mtk_ppe_offload.c | 28 +-
.../net/ethernet/mediatek/mtk_qdma_debugfs.c | 439 ++++++++++++++++++
include/net/flow_offload.h | 1 +
net/netfilter/nf_flow_table_offload.c | 4 +-
9 files changed, 593 insertions(+), 6 deletions(-)
create mode 100644 drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index fdbb90f..c7d2296 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_usxgmii.o mtk_eth_path.o mtk_eth_dbg.o mtk_eth_reset.o \
- mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+ mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o mtk_qdma_debugfs.o
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o
ifdef CONFIG_DEBUG_FS
mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 9d8ce07..0f6613b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -5266,6 +5266,8 @@ static int mtk_probe(struct platform_device *pdev)
}
mtk_ppe_debugfs_init(eth);
+
+ mtk_qdma_debugfs_init(eth);
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -5371,6 +5373,7 @@ static const struct mtk_soc_data mt2701_data = {
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
},
};
@@ -5391,6 +5394,7 @@ static const struct mtk_soc_data mt7621_data = {
.rxd_size = sizeof(struct mtk_rx_dma),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
},
};
@@ -5412,6 +5416,7 @@ static const struct mtk_soc_data mt7622_data = {
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
},
};
@@ -5432,6 +5437,7 @@ static const struct mtk_soc_data mt7623_data = {
.rx_dma_l4_valid = RX_DMA_L4_VALID,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 2,
},
};
@@ -5472,6 +5478,7 @@ static const struct mtk_soc_data mt7986_data = {
.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+ .qdma_tx_sch = 4,
},
};
@@ -5493,6 +5500,7 @@ static const struct mtk_soc_data mt7981_data = {
.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+ .qdma_tx_sch = 4,
},
};
@@ -5511,6 +5519,7 @@ static const struct mtk_soc_data mt7988_data = {
.rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT_V2,
+ .qdma_tx_sch = 4,
},
};
@@ -5529,6 +5538,7 @@ static const struct mtk_soc_data rt5350_data = {
.rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = MTK_TX_DMA_BUF_SHIFT,
+ .qdma_tx_sch = 4,
},
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 101c233..7ea380e 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -458,6 +471,9 @@
#define FC_THRES_DROP_EN (7 << 16)
#define FC_THRES_MIN 0x4444
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_2SCH_BASE (QDMA_BASE + 0x214)
+
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_STATUS (QDMA_BASE + 0x218)
#if defined(CONFIG_MEDIATEK_NETSYS_RX_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
@@ -494,6 +510,11 @@
/* QDMA Interrupt Mask Register */
#define MTK_QDMA_HRED2 (QDMA_BASE + 0x244)
+/* QDMA TX Queue MIB Interface Register */
+#define MTK_QTX_MIB_IF (QDMA_BASE + 0x2bc)
+#define MTK_MIB_ON_QTX_CFG BIT(31)
+#define MTK_VQTX_MIB_EN BIT(28)
+
/* QDMA TX Forward CPU Pointer Register */
#define MTK_QTX_CTX_PTR (QDMA_BASE +0x300)
@@ -521,6 +542,14 @@
/* QDMA FQ Free Page Buffer Length Register */
#define MTK_QDMA_FQ_BLEN (QDMA_BASE +0x32c)
+/* QDMA TX Scheduler Rate Control Register (schedulers are packed in
+ * pairs: even IDs in bits [15:0], odd IDs in bits [31:16])
+ */
+#define MTK_QDMA_TX_4SCH_BASE(x) (QDMA_BASE + 0x398 + (((x) >> 1) * 0x4))
+#define MTK_QDMA_TX_SCH_MASK GENMASK(15, 0)
+#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
+#define MTK_QDMA_TX_SCH_RATE_EN BIT(11)
+#define MTK_QDMA_TX_SCH_RATE_MAN GENMASK(10, 4)
+#define MTK_QDMA_TX_SCH_RATE_EXP GENMASK(3, 0)
+
/* WDMA Registers */
#define MTK_WDMA_CTX_PTR(x) (WDMA_BASE(x) + 0x8)
#define MTK_WDMA_DTX_PTR(x) (WDMA_BASE(x) + 0xC)
@@ -1690,6 +1719,7 @@ struct mtk_soc_data {
u32 rx_dma_l4_valid;
u32 dma_max_len;
u32 dma_len_offset;
+ u32 qdma_tx_sch;
} txrx;
};
@@ -1879,6 +1909,7 @@ struct mtk_eth {
spinlock_t syscfg0_lock;
struct timer_list mtk_dma_monitor_timer;
+ u8 qos_toggle;
u8 ppe_num;
struct mtk_ppe *ppe[MTK_MAX_PPE_NUM];
struct rhashtable flow_table;
@@ -1936,6 +1967,36 @@ extern const struct of_device_id of_mtk_match[];
extern u32 mtk_hwlro_stats_ebl;
extern u32 dbg_show_level;
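+/* The unbind source-port field in FOE IB1 only exists on NETSYS v2;
+ * elsewhere these helpers are a no-op and read back 0.
+ */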
+static inline void mtk_set_ib1_sp(struct mtk_eth *eth, struct mtk_foe_entry *foe, u32 val)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ foe->ib1 |= FIELD_PREP(MTK_FOE_IB1_UNBIND_SRC_PORT, val);
+#endif
+}
+
+static inline u32 mtk_get_ib1_sp(struct mtk_eth *eth, struct mtk_foe_entry *foe)
+{
+#if defined(CONFIG_MEDIATEK_NETSYS_V2)
+ return FIELD_GET(MTK_FOE_IB1_UNBIND_SRC_PORT, foe->ib1);
+#else
+ return 0;
+#endif
+}
+
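+/* A flow qualifies for the per-port-per-queue path when it egresses
+ * through a DSA switch port (0-4), or through port 5 if it originally
+ * entered from WDMA (Wi-Fi to Ethernet traffic).
+ */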
+static inline int
+mtk_ppe_check_pppq_path(struct mtk_eth *eth, struct mtk_foe_entry *foe, int dsa_port)
+{
+ u32 sp = mtk_get_ib1_sp(eth, foe);
+
+ if ((dsa_port >= 0 && dsa_port <= 4) ||
+ (dsa_port == 5 && (sp == PSE_WDMA0_PORT ||
+ sp == PSE_WDMA1_PORT ||
+ sp == PSE_WDMA2_PORT)))
+ return 1;
+
+ return 0;
+}
+
/* read the hardware status register */
void mtk_stats_update_mac(struct mtk_mac *mac);
@@ -1969,4 +2028,6 @@ void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
u32 mtk_rss_indr_table(struct mtk_rss_params *rss_params, int index);
int mtk_ppe_debugfs_init(struct mtk_eth *eth);
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth);
#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index f55a95c..6d6c1e4 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -128,7 +128,7 @@ static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
enable * MTK_PPE_CACHE_CTL_EN);
}
-static u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e)
+u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e)
{
u32 hv1, hv2, hv3;
u32 hash;
@@ -420,12 +420,58 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
return 0;
}
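+/* Steer the FOE entry to QDMA TX queue @qid and enable PSE QoS on it. */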
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid)
+{
+ u32 *ib2 = mtk_foe_entry_ib2(entry);
+
+ *ib2 &= ~MTK_FOE_IB2_QID;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, qid);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS;
+
+ return 0;
+}
static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry)
{
return !(entry->ib1 & MTK_FOE_IB1_STATIC) &&
FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND;
}
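+/* Entries match when the UDP flag agrees and the tuple words between
+ * ib1 and ib2 (or ipv6._rsv) are identical.
+ */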
+bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
+ return !memcmp(&entry->data, &data->data, len - 4);
+}
+
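+/* Recover the source port for @entry: probe the hash bucket for an
+ * already-installed entry with a matching tuple and inherit its IB1
+ * source-port field (0 if no match is found).
+ */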
+int mtk_foe_entry_set_sp(struct mtk_ppe *ppe, struct mtk_foe_entry *entry)
+{
+ struct mtk_foe_entry *hwe;
+ u32 hash, sp = 0;
+ int i;
+
+ hash = mtk_ppe_hash_entry(ppe, entry);
+ for (i = 0; i < ppe->way; i++) {
+ hwe = &ppe->foe_table[hash + i];
+ if (mtk_foe_entry_match(hwe, entry)) {
+ sp = mtk_get_ib1_sp(ppe->eth, hwe);
+ break;
+ }
+ }
+
+ mtk_set_ib1_sp(ppe->eth, entry, sp);
+
+ return 0;
+}
+
static bool
mtk_flow_entry_match(struct mtk_flow_entry *entry, struct mtk_foe_entry *data)
{
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 86288b0..5ab864f 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -403,9 +403,13 @@ int mtk_foe_entry_set_vlan(struct mtk_foe_entry *entry, int vid);
int mtk_foe_entry_set_pppoe(struct mtk_foe_entry *entry, int sid);
int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
int bss, int wcid);
+int mtk_foe_entry_set_qid(struct mtk_foe_entry *entry, int qid);
+bool mtk_foe_entry_match(struct mtk_foe_entry *entry, struct mtk_foe_entry *data);
+int mtk_foe_entry_set_sp(struct mtk_ppe *ppe, struct mtk_foe_entry *entry);
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index, struct mtk_foe_accounting *diff);
+u32 mtk_ppe_hash_entry(struct mtk_ppe *ppe, struct mtk_foe_entry *e);
#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index b80f72d..3bc50a4 100755
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -9,6 +9,8 @@
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_flow_table.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"
@@ -183,7 +185,7 @@ mtk_flow_get_dsa_port(struct net_device **dev)
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
- struct net_device *dev, const u8 *dest_mac,
+ struct net_device *dev, struct nf_conn *ct, const u8 *dest_mac,
int *wed_index)
{
struct mtk_wdma_info info = {};
@@ -209,6 +211,9 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
if (dsa_port >= 0)
mtk_foe_entry_set_dsa(foe, dsa_port);
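+ /* In PPPQ mode, bind each DSA port to the TX queue with the same
+ * index.
+ */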
+ if (eth->qos_toggle == 2 && mtk_ppe_check_pppq_path(eth, foe, dsa_port))
+ mtk_foe_entry_set_qid(foe, dsa_port & MTK_QDMA_TX_MASK);
+
if (dev == eth->netdev[0])
pse_port = PSE_GDM1_PORT;
else if (dev == eth->netdev[1])
@@ -217,6 +222,23 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
return -EOPNOTSUPP;
out:
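+ /* Queue selection from the conntrack mark: the low 16 bits hold the
+ * qid used for most flows, the high 16 bits the qid for flows that
+ * leave through netdev[1] (typically the WAN uplink; an
+ * interpretation, not spelled out in this patch).
+ */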
+ if (eth->qos_toggle == 1 || (ct->mark & MTK_QDMA_TX_MASK) >= 6) {
+ u8 qos_ul_toggle;
+
+ if (eth->qos_toggle == 2)
+ qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 6 ? 1 : 0;
+ else
+ qos_ul_toggle = ((ct->mark >> 16) & MTK_QDMA_TX_MASK) >= 1 ? 1 : 0;
+
+ if (qos_ul_toggle == 1) {
+ if (dev == eth->netdev[1])
+ mtk_foe_entry_set_qid(foe, (ct->mark >> 16) & MTK_QDMA_TX_MASK);
+ else
+ mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+ } else
+ mtk_foe_entry_set_qid(foe, ct->mark & MTK_QDMA_TX_MASK);
+ }
+
mtk_foe_entry_set_pse_port(foe, pse_port);
return 0;
@@ -447,7 +469,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
if (data.pppoe.num == 1)
mtk_foe_entry_set_pppoe(&foe, data.pppoe.sid);
- err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ mtk_foe_entry_set_sp(eth->ppe[ppe_index], &foe);
+
+ err = mtk_flow_set_output_device(eth, &foe, odev, f->flow->ct, data.eth.h_dest,
&wed_index);
if (err)
return err;
diff --git a/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
new file mode 100644
index 0000000..3a7c585
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_qdma_debugfs.c
@@ -0,0 +1,439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Henry Yen <henry.yen@mediatek.com>
+ *         Bo-Cun Chen <bc-bocun.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include "mtk_eth_soc.h"
+
+#define MAX_PPPQ_PORT_NUM 6 /* PPPQ uses qid 0-5, one per port */
+
+static struct mtk_eth *_eth;
+
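+/* Enable/disable the shaper of queue @id on the currently selected QTX
+ * page. Rates are encoded as mantissa * 10^exponent, so the enable
+ * defaults below are min = 1*10^4 and max = 25*10^5 rate units (Kbps
+ * would give 10 Mbps / 2.5 Gbps, but the unit is an assumption, not
+ * taken from this patch).
+ */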
+static void mtk_qdma_qos_shaper_ebl(struct mtk_eth *eth, u32 id, u32 enable)
+{
+ u32 val;
+
+ if (enable) {
+ val = MTK_QTX_SCH_MIN_RATE_EN | MTK_QTX_SCH_MAX_RATE_EN;
+ val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 25) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 4);
+
+ writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+ } else {
+ writel(0, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+ }
+}
+
+static void mtk_qdma_qos_disable(struct mtk_eth *eth)
+{
+ u32 id, val;
+
+ for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+ mtk_qdma_qos_shaper_ebl(eth, id, 0);
+
+ writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+ FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+ eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ }
+
+ val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+ }
+}
+
+static void mtk_qdma_qos_pppq_enable(struct mtk_eth *eth)
+{
+ u32 id, val;
+
+ for (id = 0; id < MAX_PPPQ_PORT_NUM; id++) {
+ mtk_qdma_qos_shaper_ebl(eth, id, 1);
+
+ writel(FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, 4) |
+ FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, 4),
+ eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ }
+
+ val = (MTK_QDMA_TX_SCH_MAX_WFQ) | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ for (id = 0; id < eth->soc->txrx.qdma_tx_sch; id += 2) {
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ writel(val, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ writel(val, eth->base + MTK_QDMA_TX_2SCH_BASE);
+ }
+}
+
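+/* qos_toggle accepts '0' (disable HQoS), '1' (HQoS mode) or '2'
+ * (per-port-per-queue mode on queues 0-5).
+ */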
+static ssize_t mtk_qdma_debugfs_write_qos(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ struct seq_file *m = file->private_data;
+ struct mtk_eth *eth = m->private;
+ char buf[8];
+ int len = count;
+
+ if (len >= sizeof(buf) || copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ if (buf[0] == '0') {
+ pr_info("HQoS is going to be disabled !\n");
+ eth->qos_toggle = 0;
+ mtk_qdma_qos_disable(eth);
+ } else if (buf[0] == '1') {
+ pr_info("HQoS mode is going to be enabled !\n");
+ eth->qos_toggle = 1;
+ } else if (buf[0] == '2') {
+ pr_info("Per-port-per-queue mode is going to be enabled !\n");
+ pr_info("PPPQ use qid 0~5 (scheduler 0).\n");
+ eth->qos_toggle = 2;
+ mtk_qdma_qos_pppq_enable(eth);
+ }
+
+ return len;
+}
+
+static int mtk_qdma_debugfs_read_qos(struct seq_file *m, void *private)
+{
+ struct mtk_eth *eth = m->private;
+
+ if (eth->qos_toggle == 0)
+ seq_puts(m, "HQoS is disabled now!\n");
+ else if (eth->qos_toggle == 1)
+ seq_puts(m, "HQoS is enabled now!\n");
+ else if (eth->qos_toggle == 2)
+ seq_puts(m, "Per-port-per-queue mode is enabled!\n");
+
+ return 0;
+}
+
+static int mtk_qdma_debugfs_open_qos(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtk_qdma_debugfs_read_qos,
+ inode->i_private);
+}
+
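+/* Dump one scheduler: enable bit, SP/WRR mode, decoded max rate and the
+ * TX queues currently assigned to it.
+ */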
+static ssize_t mtk_qdma_debugfs_read_qos_sched(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int enable, scheduling, max_rate, exp, scheduler, i;
+ ssize_t ret_cnt;
+ u32 val;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ val = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ val = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+ if (id & 0x1)
+ val >>= 16;
+
+ val &= MTK_QDMA_TX_SCH_MASK;
+ enable = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EN, val);
+ scheduling = FIELD_GET(MTK_QDMA_TX_SCH_MAX_WFQ, val);
+ max_rate = FIELD_GET(MTK_QDMA_TX_SCH_RATE_MAN, val);
+ exp = FIELD_GET(MTK_QDMA_TX_SCH_RATE_EXP, val);
+ while (exp--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "EN\tScheduling\tMAX\tQueue#\n%d\t%s%16d\t", enable,
+ (scheduling == 1) ? "WRR" : "SP", max_rate);
+
+ for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+ val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+ val |= FIELD_PREP(MTK_QTX_CFG_PAGE, i / MTK_QTX_PER_PAGE);
+ writel(val, eth->base + MTK_QDMA_PAGE);
+
+ val = readl(eth->base + MTK_QTX_SCH(i % MTK_QTX_PER_PAGE));
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL_V2, val);
+ else
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL, val);
+ if (id == scheduler)
+ len += scnprintf(buf + len, buf_len - len, "%d ", i);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
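+/* Input format: "<enable> <sp|wrr> <max_rate>", e.g. "1 sp 1000000". */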
+static ssize_t mtk_qdma_debugfs_write_qos_sched(struct file *file, const char __user *buf,
+ size_t length, loff_t *offset)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char line[64] = {0}, scheduling[32];
+ int enable, rate, exp = 0, shift = 0;
+ u32 sch, val = 0;
+
+ if (length >= sizeof(line))
+ return -EINVAL;
+
+ if (copy_from_user(line, buf, length))
+ return -EFAULT;
+
+ if (sscanf(line, "%d %s %d", &enable, scheduling, &rate) != 3)
+ return -EFAULT;
+
+ while (rate > 127) {
+ rate /= 10;
+ exp++;
+ }
+
+ if (enable)
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EN, 1);
+ if (strcmp(scheduling, "sp") != 0)
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_MAX_WFQ, 1);
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_MAN, rate);
+ val |= FIELD_PREP(MTK_QDMA_TX_SCH_RATE_EXP, exp);
+
+ if (id & 0x1)
+ shift = 16;
+
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ sch = readl(eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ sch = readl(eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+ sch &= ~(MTK_QDMA_TX_SCH_MASK << shift);
+ sch |= val << shift;
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ writel(sch, eth->base + MTK_QDMA_TX_4SCH_BASE(id));
+ else
+ writel(sch, eth->base + MTK_QDMA_TX_2SCH_BASE);
+
+ *offset += length;
+
+ return length;
+}
+
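+/* Dump one TX queue: scheduler binding, reserved buffer counts, MIB
+ * packet/drop counters and the min/max shaper settings.
+ */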
+static ssize_t mtk_qdma_debugfs_read_qos_queue(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char *buf;
+ unsigned int len = 0, buf_len = 1500;
+ int min_rate_en, min_rate, min_rate_exp;
+ int max_rate_en, max_weight, max_rate, max_rate_exp;
+ u32 qtx_sch, qtx_cfg, scheduler, val;
+ ssize_t ret_cnt;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+ val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+ writel(val, eth->base + MTK_QDMA_PAGE);
+
+ qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL_V2, qtx_sch);
+ else
+ scheduler = FIELD_GET(MTK_QTX_SCH_TX_SEL, qtx_sch);
+
+ min_rate_en = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EN, qtx_sch);
+ min_rate = FIELD_GET(MTK_QTX_SCH_MIN_RATE_MAN, qtx_sch);
+ min_rate_exp = FIELD_GET(MTK_QTX_SCH_MIN_RATE_EXP, qtx_sch);
+ max_rate_en = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EN, qtx_sch);
+ max_weight = FIELD_GET(MTK_QTX_SCH_MAX_RATE_WEIGHT, qtx_sch);
+ max_rate = FIELD_GET(MTK_QTX_SCH_MAX_RATE_MAN, qtx_sch);
+ max_rate_exp = FIELD_GET(MTK_QTX_SCH_MAX_RATE_EXP, qtx_sch);
+ while (min_rate_exp--)
+ min_rate *= 10;
+
+ while (max_rate_exp--)
+ max_rate *= 10;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "scheduler: %d\nhw resv: %d\nsw resv: %d\n", scheduler,
+ (qtx_cfg >> 8) & 0xff, qtx_cfg & 0xff);
+
+ /* Switch the MIB to debug mode so that QTX_CFG/QTX_SCH read back
+ * as per-queue packet and drop counters.
+ */
+ val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_MIB_ON_QTX_CFG;
+ val |= MTK_MIB_ON_QTX_CFG;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ val = readl(eth->base + MTK_QTX_MIB_IF) & ~MTK_VQTX_MIB_EN;
+ val |= MTK_VQTX_MIB_EN;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ qtx_cfg = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ qtx_sch = readl(eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+ len += scnprintf(buf + len, buf_len - len,
+ "packet count: %u\n", qtx_cfg);
+ len += scnprintf(buf + len, buf_len - len,
+ "packet drop: %u\n\n", qtx_sch);
+
+ /* Recover to normal mode */
+ val = readl(eth->base + MTK_QTX_MIB_IF);
+ val &= ~MTK_MIB_ON_QTX_CFG;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ val = readl(eth->base + MTK_QTX_MIB_IF);
+ val &= ~MTK_VQTX_MIB_EN;
+ writel(val, eth->base + MTK_QTX_MIB_IF);
+
+ len += scnprintf(buf + len, buf_len - len,
+ " EN RATE WEIGHT\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "----------------------------\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "max%5d%9d%9d\n", max_rate_en, max_rate, max_weight);
+ len += scnprintf(buf + len, buf_len - len,
+ "min%5d%9d -\n", min_rate_en, min_rate);
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ return ret_cnt;
+}
+
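+/* Input format: "<scheduler> <min_en> <min_rate> <max_en> <max_rate>
+ * <weight> <resv>", e.g. "0 1 10000 1 2500000 4 4".
+ */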
+static ssize_t mtk_qdma_debugfs_write_qos_queue(struct file *file, const char __user *buf,
+ size_t length, loff_t *offset)
+{
+ struct mtk_eth *eth = _eth;
+ long id = (long)file->private_data;
+ char line[64] = {0};
+ int max_enable, max_rate, max_exp = 0;
+ int min_enable, min_rate, min_exp = 0;
+ int scheduler, weight, resv;
+ u32 val;
+
+ if (length >= sizeof(line))
+ return -EINVAL;
+
+ if (copy_from_user(line, buf, length))
+ return -EFAULT;
+
+ if (sscanf(line, "%d %d %d %d %d %d %d", &scheduler, &min_enable, &min_rate,
+ &max_enable, &max_rate, &weight, &resv) != 7)
+ return -EINVAL;
+
+ while (max_rate > 127) {
+ max_rate /= 10;
+ max_exp++;
+ }
+
+ while (min_rate > 127) {
+ min_rate /= 10;
+ min_exp++;
+ }
+
+ val = readl(eth->base + MTK_QDMA_PAGE) & ~MTK_QTX_CFG_PAGE;
+ val |= FIELD_PREP(MTK_QTX_CFG_PAGE, id / MTK_QTX_PER_PAGE);
+ writel(val, eth->base + MTK_QDMA_PAGE);
+
+ if (eth->soc->txrx.qdma_tx_sch == 4)
+ val = FIELD_PREP(MTK_QTX_SCH_TX_SEL_V2, scheduler);
+ else
+ val = FIELD_PREP(MTK_QTX_SCH_TX_SEL, scheduler);
+ if (min_enable)
+ val |= MTK_QTX_SCH_MIN_RATE_EN;
+ val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, min_rate);
+ val |= FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, min_exp);
+ if (max_enable)
+ val |= MTK_QTX_SCH_MAX_RATE_EN;
+ val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, weight);
+ val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, max_rate);
+ val |= FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, max_exp);
+ writel(val, eth->base + MTK_QTX_SCH(id % MTK_QTX_PER_PAGE));
+
+ val = readl(eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+ val &= ~(MTK_QTX_CFG_HW_RESV_CNT_OFFSET | MTK_QTX_CFG_SW_RESV_CNT_OFFSET);
+ val |= FIELD_PREP(MTK_QTX_CFG_HW_RESV_CNT_OFFSET, resv);
+ val |= FIELD_PREP(MTK_QTX_CFG_SW_RESV_CNT_OFFSET, resv);
+ writel(val, eth->base + MTK_QTX_CFG(id % MTK_QTX_PER_PAGE));
+
+ *offset += length;
+
+ return length;
+}
+
+int mtk_qdma_debugfs_init(struct mtk_eth *eth)
+{
+ static const struct file_operations fops_qos = {
+ .open = mtk_qdma_debugfs_open_qos,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = mtk_qdma_debugfs_write_qos,
+ .release = single_release,
+ };
+
+ static const struct file_operations fops_qos_sched = {
+ .open = simple_open,
+ .read = mtk_qdma_debugfs_read_qos_sched,
+ .write = mtk_qdma_debugfs_write_qos_sched,
+ .llseek = default_llseek,
+ };
+
+ static const struct file_operations fops_qos_queue = {
+ .open = simple_open,
+ .read = mtk_qdma_debugfs_read_qos_queue,
+ .write = mtk_qdma_debugfs_write_qos_queue,
+ .llseek = default_llseek,
+ };
+
+ struct dentry *root;
+ long i;
+ char name[16];
+
+ _eth = eth;
+
+ root = debugfs_lookup("mtk_ppe", NULL);
+ if (!root)
+ return -ENOENT;
+
+ debugfs_create_file("qos_toggle", S_IRUGO, root, eth, &fops_qos);
+
+ for (i = 0; i < eth->soc->txrx.qdma_tx_sch; i++) {
+ snprintf(name, sizeof(name), "qdma_sch%ld", i);
+ debugfs_create_file(name, 0644, root, (void *)i,
+ &fops_qos_sched);
+ }
+
+ for (i = 0; i < MTK_QDMA_TX_NUM; i++) {
+ snprintf(name, sizeof(name), "qdma_txq%ld", i);
+ debugfs_create_file(name, 0644, root, (void *)i,
+ &fops_qos_queue);
+ }
+
+ return 0;
+}
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 59b8736..c4eb45c 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -365,6 +365,7 @@ struct flow_cls_offload {
struct flow_cls_common_offload common;
enum flow_cls_command command;
unsigned long cookie;
+ struct flow_offload *flow;
struct flow_rule *rule;
struct flow_stats stats;
u32 classid;
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 50f2f2e..ba34572 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -810,11 +810,13 @@ static int nf_flow_offload_alloc(const struct flow_offload_work *offload,
}
static void nf_flow_offload_init(struct flow_cls_offload *cls_flow,
+ struct flow_offload *flow,
__be16 proto, int priority,
enum flow_cls_command cmd,
const struct flow_offload_tuple *tuple,
struct netlink_ext_ack *extack)
{
+ cls_flow->flow = flow;
cls_flow->common.protocol = proto;
cls_flow->common.prio = priority;
cls_flow->common.extack = extack;
@@ -836,7 +838,7 @@ static int nf_flow_offload_tuple(struct nf_flowtable *flowtable,
__be16 proto = ETH_P_ALL;
int err, i = 0;
- nf_flow_offload_init(&cls_flow, proto, priority, cmd,
+ nf_flow_offload_init(&cls_flow, flow, proto, priority, cmd,
&flow->tuplehash[dir].tuple, &extack);
if (cmd == FLOW_CLS_REPLACE)
cls_flow.rule = flow_rule->rule;
--
2.18.0