// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2023 Mediatek Inc. All Rights Reserved.
 *
 * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include "pce/cdrt.h"
#include "pce/cls.h"
#include "pce/dipfilter.h"
#include "pce/internal.h"
#include "pce/netsys.h"
#include "pce/pce.h"

struct netsys_hw {
	void __iomem *base;
	spinlock_t lock;
	u32 fe_mem_limit[__FE_MEM_TYPE_MAX];
};

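/*
 * Driver-wide netsys state: the MMIO base mapped in mtk_pce_netsys_init(),
 * a lock serializing GLO_MEM accesses, and the per-type index limits used
 * to validate FE_MEM requests in mtk_pce_fe_mem_msg_send().
 */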
static struct netsys_hw netsys = {
	.fe_mem_limit = {
		[FE_MEM_TYPE_TS_CONFIG] = FE_MEM_TS_CONFIG_MAX_INDEX,
		[FE_MEM_TYPE_DIPFILTER] = FE_MEM_DIPFILTER_MAX_IDX,
		[FE_MEM_TYPE_CLS] = FE_MEM_CLS_MAX_INDEX,
		[FE_MEM_TYPE_CDRT] = FE_MEM_CDRT_MAX_INDEX,
	},
};

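/*
 * PPE helpers: translate a PSE PPE port into its register base within the
 * netsys window, then delegate to the generic netsys accessors. Unknown
 * ports are ignored on write and read back as 0.
 */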
void mtk_pce_ppe_rmw(enum pse_port ppe, u32 reg, u32 mask, u32 val)
{
	if (ppe == PSE_PORT_PPE0)
		mtk_pce_netsys_rmw(PPE0_BASE + reg, mask, val);
	else if (ppe == PSE_PORT_PPE1)
		mtk_pce_netsys_rmw(PPE1_BASE + reg, mask, val);
	else if (ppe == PSE_PORT_PPE2)
		mtk_pce_netsys_rmw(PPE2_BASE + reg, mask, val);
}

u32 mtk_pce_ppe_read(enum pse_port ppe, u32 reg)
{
	if (ppe == PSE_PORT_PPE0)
		return mtk_pce_netsys_read(PPE0_BASE + reg);
	else if (ppe == PSE_PORT_PPE1)
		return mtk_pce_netsys_read(PPE1_BASE + reg);
	else if (ppe == PSE_PORT_PPE2)
		return mtk_pce_netsys_read(PPE2_BASE + reg);

	return 0;
}

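/* Raw accessors for the netsys (fe_mem) register window. */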
void mtk_pce_netsys_write(u32 reg, u32 val)
{
	writel(val, netsys.base + reg);
}

void mtk_pce_netsys_setbits(u32 reg, u32 mask)
{
	writel(readl(netsys.base + reg) | mask, netsys.base + reg);
}

void mtk_pce_netsys_clrbits(u32 reg, u32 mask)
{
	writel(readl(netsys.base + reg) & (~mask), netsys.base + reg);
}

void mtk_pce_netsys_rmw(u32 reg, u32 mask, u32 val)
{
	writel((readl(netsys.base + reg) & (~mask)) | val, netsys.base + reg);
}

u32 mtk_pce_netsys_read(u32 reg)
{
	return readl(netsys.base + reg);
}

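/*
 * FE_MEM entries are programmed indirectly through the GLO_MEM window:
 * select the entry index and memory type in GLO_MEM_CTRL, stage the raw
 * words in the GLO_MEM_DATA registers, then issue a read/write command
 * and poll GLO_MEM_CTRL until the hardware clears the command field.
 */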
static inline void mtk_pce_fe_mem_config(enum fe_mem_type type, u32 idx)
{
	/* select memory index to program */
	mtk_pce_netsys_rmw(GLO_MEM_CTRL,
			   GLO_MEM_CTRL_ADDR_MASK,
			   FIELD_PREP(GLO_MEM_CTRL_ADDR_MASK, idx));

	/* select memory type to program */
	mtk_pce_netsys_rmw(GLO_MEM_CTRL,
			   GLO_MEM_CTRL_INDEX_MASK,
			   FIELD_PREP(GLO_MEM_CTRL_INDEX_MASK, type));
}

static inline void mtk_pce_fe_mem_start(enum fe_mem_cmd cmd)
{
	/* trigger start */
	mtk_pce_netsys_rmw(GLO_MEM_CTRL,
			   GLO_MEM_CTRL_CMD_MASK,
			   FIELD_PREP(GLO_MEM_CTRL_CMD_MASK, cmd));
}

static inline void mtk_pce_fe_mem_wait_transfer_done(void)
{
	while (FIELD_GET(GLO_MEM_CTRL_CMD_MASK, mtk_pce_netsys_read(GLO_MEM_CTRL)))
		udelay(10);
}

static int mtk_pce_fe_mem_write(enum fe_mem_type type, u32 *raw_data, u32 idx)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&netsys.lock, flags);

	mtk_pce_fe_mem_config(type, idx);

	/* place data */
	for (i = 0; i < FE_MEM_DATA_WLEN; i++)
		mtk_pce_netsys_write(GLO_MEM_DATA_IDX(i), raw_data[i]);

	mtk_pce_fe_mem_start(FE_MEM_CMD_WRITE);

	mtk_pce_fe_mem_wait_transfer_done();

	spin_unlock_irqrestore(&netsys.lock, flags);

	return 0;
}

static int mtk_pce_fe_mem_read(enum fe_mem_type type, u32 *raw_data, u32 idx)
{
	unsigned long flags;
	u32 i;

	spin_lock_irqsave(&netsys.lock, flags);

	mtk_pce_fe_mem_config(type, idx);

	mtk_pce_fe_mem_start(FE_MEM_CMD_READ);

	mtk_pce_fe_mem_wait_transfer_done();

	/* read data out */
	for (i = 0; i < FE_MEM_DATA_WLEN; i++)
		raw_data[i] = mtk_pce_netsys_read(GLO_MEM_DATA_IDX(i));

	spin_unlock_irqrestore(&netsys.lock, flags);

	return 0;
}

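/*
 * mtk_pce_fe_mem_msg_send() - validate an FE_MEM message and perform the
 * requested read or write on the selected memory type and entry index.
 *
 * Illustrative sketch (assuming msg->raw is the FE_MEM_DATA_WLEN-word
 * payload array used by the helpers above):
 *
 *	struct fe_mem_msg msg = {
 *		.cmd = FE_MEM_CMD_WRITE,
 *		.type = FE_MEM_TYPE_CLS,
 *		.index = 0,
 *	};
 *
 * Fill msg.raw[0..FE_MEM_DATA_WLEN - 1] with the entry image, then call
 * mtk_pce_fe_mem_msg_send(&msg); a negative errno is returned for an
 * invalid command, type, or out-of-range index.
 */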
int mtk_pce_fe_mem_msg_send(struct fe_mem_msg *msg)
{
	if (unlikely(!msg))
		return -EINVAL;

	if (msg->cmd >= __FE_MEM_CMD_MAX) {
		PCE_ERR("invalid fe_mem_cmd: %u\n", msg->cmd);
		return -EPERM;
	}

	if (msg->type >= __FE_MEM_TYPE_MAX) {
		PCE_ERR("invalid fe_mem_type: %u\n", msg->type);
		return -EPERM;
	}

	if (msg->index >= netsys.fe_mem_limit[msg->type]) {
		PCE_ERR("invalid FE_MEM index: %u, type: %u, max: %u\n",
			msg->index, msg->type, netsys.fe_mem_limit[msg->type]);
		return -EPERM;
	}

	switch (msg->cmd) {
	case FE_MEM_CMD_WRITE:
		return mtk_pce_fe_mem_write(msg->type, msg->raw, msg->index);
	case FE_MEM_CMD_READ:
		return mtk_pce_fe_mem_read(msg->type, msg->raw, msg->index);
	default:
		break;
	}

	return 0;
}
EXPORT_SYMBOL(mtk_pce_fe_mem_msg_send);

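/*
 * Resolve the "fe_mem" phandle on the PCE device node, map the referenced
 * register range and initialize the lock protecting GLO_MEM accesses.
 */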
int mtk_pce_netsys_init(struct platform_device *pdev)
{
	struct device_node *fe_mem;
	struct resource res;
	int ret = 0;

	fe_mem = of_parse_phandle(pdev->dev.of_node, "fe_mem", 0);
	if (!fe_mem) {
		PCE_ERR("cannot find fe_mem node\n");
		return -ENODEV;
	}

	if (of_address_to_resource(fe_mem, 0, &res)) {
		ret = -ENXIO;
		goto out;
	}

	/* map fe_mem register range */
	netsys.base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
	if (!netsys.base) {
		ret = -ENOMEM;
		goto out;
	}

	spin_lock_init(&netsys.lock);

out:
	/* drop the node reference on all paths, not only on success */
	of_node_put(fe_mem);

	return ret;
}

void mtk_pce_netsys_deinit(struct platform_device *pdev)
{
	/* nothing to deinit right now */
}

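/*
 * Enable/disable the PCE sub-blocks (CLS, DIP filter, CDRT). Disable runs
 * in the reverse order of enable.
 */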
int mtk_pce_enable(void)
{
	mtk_pce_cls_enable();

	mtk_pce_dipfilter_enable();

	mtk_pce_cdrt_enable();

	return 0;
}
EXPORT_SYMBOL(mtk_pce_enable);

void mtk_pce_disable(void)
{
	mtk_pce_cdrt_disable();

	mtk_pce_dipfilter_disable();

	mtk_pce_cls_disable();
}
EXPORT_SYMBOL(mtk_pce_disable);