/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014-2016 Sean Wang <sean.wang@mediatek.com>
 * Copyright (C) 2016-2017 John Crispin <blogic@openwrt.org>
 */

#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>

#include "nf_hnat_mtk.h"
#include "hnat.h"

struct mtk_hnat *hnat_priv;
static struct socket *_hnat_roam_sock;
static struct work_struct _hnat_roam_work;

int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
EXPORT_SYMBOL(ra_sw_nat_hook_rx);
int (*ra_sw_nat_hook_tx)(struct sk_buff *skb, int gmac_no) = NULL;
EXPORT_SYMBOL(ra_sw_nat_hook_tx);

int (*ppe_del_entry_by_mac)(unsigned char *mac) = NULL;
EXPORT_SYMBOL(ppe_del_entry_by_mac);

void (*ppe_dev_register_hook)(struct net_device *dev) = NULL;
EXPORT_SYMBOL(ppe_dev_register_hook);
void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
EXPORT_SYMBOL(ppe_dev_unregister_hook);

static void hnat_sma_build_entry(struct timer_list *t)
{
	int i;

	for (i = 0; i < CFG_PPE_NUM; i++)
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_FWD_CPU_BUILD_ENTRY);
}

void hnat_cache_ebl(int enable)
{
	int i;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_X_MODE, 1);
		cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_X_MODE, 0);
		cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
	}
}

static void hnat_reset_timestamp(struct timer_list *t)
{
	struct foe_entry *entry;
	int hash_index;

	hnat_cache_ebl(0);
	cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, TCP_AGE, 0);
	cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, UDP_AGE, 0);
	writel(0, hnat_priv->fe_base + 0x0010);

	for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
		entry = hnat_priv->foe_table_cpu[0] + hash_index;
		if (entry->bfib1.state == BIND)
			entry->bfib1.time_stamp =
				readl(hnat_priv->fe_base + 0x0010) & (0xFFFF);
	}

	cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, TCP_AGE, 1);
	cr_set_field(hnat_priv->ppe_base[0] + PPE_TB_CFG, UDP_AGE, 1);
	hnat_cache_ebl(1);

	mod_timer(&hnat_priv->hnat_reset_timestamp_timer, jiffies + 14400 * HZ);
}

static void cr_set_bits(void __iomem *reg, u32 bs)
{
	u32 val = readl(reg);

	val |= bs;
	writel(val, reg);
}

static void cr_clr_bits(void __iomem *reg, u32 bs)
{
	u32 val = readl(reg);

	val &= ~bs;
	writel(val, reg);
}

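/* Write @val into the register bit-field described by the contiguous mask
 * @field: the current register value is read, the field is cleared, and
 * @val is shifted up to the field's least significant bit (ffs(field) - 1).
 * Illustrative example with a hypothetical mask: cr_set_field(reg, 0x00f0, 3)
 * writes 3 into bits 7..4 and leaves all other bits untouched.
 */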
void cr_set_field(void __iomem *reg, u32 field, u32 val)
{
	unsigned int tv = readl(reg);

	tv &= ~field;
	tv |= ((val) << (ffs((unsigned int)field) - 1));
	writel(tv, reg);
}

/* boundary entries can't be used to accelerate data flow */
static void exclude_boundary_entry(struct foe_entry *foe_table_cpu)
{
	int entry_base = 0;
	int bad_entry, i, j;
	struct foe_entry *foe_entry;
	/* these entries are boundaries, repeating every 128 entries */
	int boundary_entry_offset[8] = { 12, 25, 38, 51, 76, 89, 102, 115 };

	if (!foe_table_cpu)
		return;

	for (i = 0; entry_base < hnat_priv->foe_etry_num; i++) {
		/* mark boundary entries as static so they are never bound */
		for (j = 0; j < 8; j++) {
			bad_entry = entry_base + boundary_entry_offset[j];
			foe_entry = &foe_table_cpu[bad_entry];
			foe_entry->udib1.sta = 1;
		}
		entry_base = (i + 1) * 128;
	}
}

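/* Steer traffic received on GMAC @id into a PPE (enable != 0) or back to the
 * CPU via PDMA (enable == 0). On NETSYS v2/v3 builds with three PPEs, GMAC2
 * and GMAC3 additionally select PPE1 and PPE2 respectively. When disabling,
 * the forward target is only rewritten if it currently points at a PPE.
 */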
void set_gmac_ppe_fwd(int id, int enable)
{
	void __iomem *reg;
	u32 val;

	reg = hnat_priv->fe_base +
	      ((id == NR_GMAC1_PORT) ? GDMA1_FWD_CFG :
	       (id == NR_GMAC2_PORT) ? GDMA2_FWD_CFG : GDMA3_FWD_CFG);

	if (enable) {
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
		if (CFG_PPE_NUM == 3 && id == NR_GMAC3_PORT)
			cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE2);
		else if (CFG_PPE_NUM == 3 && id == NR_GMAC2_PORT)
			cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE1);
#endif
		cr_set_bits(reg, BITS_GDM_ALL_FRC_P_PPE);

		return;
	}

	/* disabled */
	val = readl(reg);
#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	if ((CFG_PPE_NUM == 3 &&
	     ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE1 ||
	      (val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE2)))
		cr_set_field(reg, GDM_ALL_FRC_MASK,
			     BITS_GDM_ALL_FRC_P_CPU_PDMA);
#endif

	if ((val & GDM_ALL_FRC_MASK) == BITS_GDM_ALL_FRC_P_PPE)
		cr_set_field(reg, GDM_ALL_FRC_MASK,
			     BITS_GDM_ALL_FRC_P_CPU_PDMA);
}

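/* Compare @mac against both the source and destination MAC kept in a FOE
 * entry. The hardware table stores each address split across a 32-bit "hi"
 * and a 16-bit "lo" word in swapped byte order, hence the swab32()/swab16()
 * before comparing with the raw 6-byte buffer.
 */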
static int entry_mac_cmp(struct foe_entry *entry, u8 *mac)
{
	int ret = 0;

	if (IS_IPV4_GRP(entry)) {
		if (((swab32(entry->ipv4_hnapt.dmac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv4_hnapt.dmac_lo) == *(u16 *)&mac[4])) ||
		    ((swab32(entry->ipv4_hnapt.smac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv4_hnapt.smac_lo) == *(u16 *)&mac[4])))
			ret = 1;
	} else {
		if (((swab32(entry->ipv6_5t_route.dmac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv6_5t_route.dmac_lo) == *(u16 *)&mac[4])) ||
		    ((swab32(entry->ipv6_5t_route.smac_hi) == *(u32 *)mac) &&
		     (swab16(entry->ipv6_5t_route.smac_lo) == *(u16 *)&mac[4])))
			ret = 1;
	}

	if (ret && debug_level >= 2)
		pr_info("mac=%pM\n", mac);

	return ret;
}

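/* Walk every PPE's FOE table and clear all bound entries whose source or
 * destination MAC matches @mac, flushing the PPE cache after each hit.
 * Returns the number of entries deleted. Also exported to other modules
 * through the ppe_del_entry_by_mac pointer set in hnat_enable_hook().
 */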
int entry_delete_by_mac(u8 *mac)
{
	struct foe_entry *entry = NULL;
	int index, i, ret = 0;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		entry = hnat_priv->foe_table_cpu[i];
		/* bound the walk by the actual table size, which may be
		 * smaller than DEF_ETRY_NUM if the allocation was shrunk
		 */
		for (index = 0; index < hnat_priv->foe_etry_num; entry++, index++) {
			if (entry->bfib1.state == BIND && entry_mac_cmp(entry, mac)) {
				memset(entry, 0, sizeof(*entry));
				hnat_cache_ebl(1);
				if (debug_level >= 2)
					pr_info("delete entry idx = %d\n", index);
				ret++;
			}
		}
	}

	if (!ret && debug_level >= 2)
		pr_info("entry not found\n");

	return ret;
}

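/* Work item that drains one netlink message from the kernel RTNETLINK socket.
 * Only RTM_NEWNEIGH notifications for the bridge FDB (PF_BRIDGE) are handled:
 * the NDA_LLADDR attribute carries the MAC of a neighbour that was learned or
 * moved port (e.g. a Wi-Fi client roaming), so any accelerated flows bound to
 * that MAC are deleted and will be rebuilt on the new path. The work re-queues
 * itself to wait for the next message.
 */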
static void hnat_roam_handler(struct work_struct *work)
{
	struct kvec iov;
	struct msghdr msg;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	struct nlattr *nla;
	u8 rcv_buf[512];
	int len;

	if (!_hnat_roam_sock)
		return;

	iov.iov_base = rcv_buf;
	iov.iov_len = sizeof(rcv_buf);
	memset(&msg, 0, sizeof(msg));
	msg.msg_namelen = sizeof(struct sockaddr_nl);

	len = kernel_recvmsg(_hnat_roam_sock, &msg, &iov, 1, iov.iov_len, 0);
	if (len <= 0)
		goto out;

	nlh = (struct nlmsghdr *)rcv_buf;
	if (!NLMSG_OK(nlh, len) || nlh->nlmsg_type != RTM_NEWNEIGH)
		goto out;

	len = nlh->nlmsg_len - NLMSG_HDRLEN;
	ndm = (struct ndmsg *)NLMSG_DATA(nlh);
	if (ndm->ndm_family != PF_BRIDGE)
		goto out;

	nla = (struct nlattr *)((u8 *)ndm + sizeof(struct ndmsg));
	len -= NLMSG_LENGTH(sizeof(struct ndmsg));
	while (nla_ok(nla, len)) {
		if (nla_type(nla) == NDA_LLADDR)
			entry_delete_by_mac(nla_data(nla));
		nla = nla_next(nla, &len);
	}

out:
	schedule_work(&_hnat_roam_work);
}

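/* Open a kernel-side NETLINK_ROUTE socket and join the RTNLGRP_NEIGH
 * multicast group so neighbour/FDB updates reach the roaming work item
 * above. The fixed nl_pid of 65534 is simply the port id this driver binds
 * to; it is assumed not to collide with user-space netlink sockets.
 */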
static int hnat_roaming_enable(void)
{
	struct socket *sock = NULL;
	struct sockaddr_nl addr;
	int ret;

	INIT_WORK(&_hnat_roam_work, hnat_roam_handler);

	ret = sock_create_kern(&init_net, AF_NETLINK, SOCK_RAW, NETLINK_ROUTE, &sock);
	if (ret < 0)
		goto out;

	_hnat_roam_sock = sock;

	addr.nl_family = AF_NETLINK;
	addr.nl_pad = 0;
	addr.nl_pid = 65534;
	addr.nl_groups = 1 << (RTNLGRP_NEIGH - 1);
	ret = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
	if (ret < 0)
		goto out;

	schedule_work(&_hnat_roam_work);
	pr_info("hnat roaming work enable\n");

	return 0;
out:
	if (sock)
		sock_release(sock);

	return ret;
}

static void hnat_roaming_disable(void)
{
	if (_hnat_roam_sock)
		sock_release(_hnat_roam_sock);
	_hnat_roam_sock = NULL;
	pr_info("hnat roaming work disable\n");
}

static int hnat_hw_init(u32 ppe_id)
{
	if (ppe_id >= CFG_PPE_NUM)
		return -EINVAL;

	/* setup hashing */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ETRY_NUM, hnat_priv->etry_num_cfg);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, HASH_MODE, HASH_MODE_1);
	writel(HASH_SEED_KEY, hnat_priv->ppe_base[ppe_id] + PPE_HASH_SEED);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, XMODE, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TB_ENTRY_SIZE,
		     (hnat_priv->data->version == MTK_HNAT_V3) ? ENTRY_128B :
		     (hnat_priv->data->version == MTK_HNAT_V2) ? ENTRY_96B :
		     ENTRY_80B);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SMA, SMA_FWD_CPU_BUILD_ENTRY);

	/* set ip proto */
	writel(0xFFFFFFFF, hnat_priv->ppe_base[ppe_id] + PPE_IP_PROT_CHK);

	/* setup caching */
	hnat_cache_ebl(1);

	/* enable FOE */
	cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
		    BIT_IPV4_NAT_EN | BIT_IPV4_NAPT_EN |
		    BIT_IPV4_NAT_FRAG_EN | BIT_IPV4_HASH_GREK |
		    BIT_IPV4_DSL_EN | BIT_IPV6_6RD_EN |
		    BIT_IPV6_3T_ROUTE_EN | BIT_IPV6_5T_ROUTE_EN);

	if (hnat_priv->data->version == MTK_HNAT_V2 ||
	    hnat_priv->data->version == MTK_HNAT_V3)
		cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
			    BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);

	if (hnat_priv->data->version == MTK_HNAT_V3)
		cr_set_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
			    BIT_IPV6_NAT_EN | BIT_IPV6_NAPT_EN |
			    BIT_CS0_RM_ALL_IP6_IP_EN);

	/* setup FOE aging */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, NTU_AGE, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UNBD_AGE, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_UNB_AGE, UNB_MNP, 1000);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_UNB_AGE, UNB_DLTA, 3);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TCP_AGE, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UDP_AGE, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, FIN_AGE, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_0, UDP_DLTA, 12);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_0, NTU_DLTA, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_1, FIN_DLTA, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BND_AGE_1, TCP_DLTA, 7);

	/* setup FOE keep-alive */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, NTU_KA, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 0);
	mdelay(10);

	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SCAN_MODE, 2);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 3);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TICK_SEL, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, KA_T, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, TCP_KA, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_KA, UDP_KA, 1);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, NTU_KA, 1);

	/* setup FOE rate limit */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_0, QURT_LMT, 16383);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_0, HALF_LMT, 16383);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BIND_LMT_1, FULL_LMT, 16383);
	/* setup binding threshold as 30 packets per second */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_BNDR, BIND_RATE, 0x1E);

	/* setup FOE cf gen */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, PPE_EN, 1);
	writel(0, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); /* pdma */
	/* writel(0x55555555, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); */ /* qdma */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, TTL0_DRP, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1);

	if (hnat_priv->data->version == MTK_HNAT_V2 ||
	    hnat_priv->data->version == MTK_HNAT_V3) {
		writel(0xcb777, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT1);
		writel(0x7f, hnat_priv->ppe_base[ppe_id] + PPE_SBW_CTRL);
	}

	if (hnat_priv->data->version == MTK_HNAT_V3) {
		cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_SB_FIFO_DBG,
			     SB_MED_FULL_DRP_EN, 1);
	}

	/* enable ppe mib counter */
	if (hnat_priv->data->per_flow_accounting) {
		cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CFG, MIB_EN, 1);
		cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CFG, MIB_READ_CLEAR, 1);
		cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_MIB_CAH_CTRL, MIB_CAH_EN, 1);
	}

	hnat_priv->g_ppdev = dev_get_by_name(&init_net, hnat_priv->ppd);
	hnat_priv->g_wandev = dev_get_by_name(&init_net, hnat_priv->wan);

	dev_info(hnat_priv->dev, "PPE%d hwnat start\n", ppe_id);

	return 0;
}

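/* Allocate the FOE (and optionally MIB) tables for one PPE and hand their
 * DMA addresses to the hardware. The table is first tried at DEF_ETRY_NUM
 * entries and halved on every failed dma_alloc_coherent() until it fits;
 * the resulting size index is kept in etry_num_cfg for hnat_hw_init().
 */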
static int hnat_start(u32 ppe_id)
{
	u32 foe_table_sz;
	u32 foe_mib_tb_sz;
	int etry_num_cfg;

	if (ppe_id >= CFG_PPE_NUM)
		return -EINVAL;

	/* map the FOE table */
	for (etry_num_cfg = DEF_ETRY_NUM_CFG; etry_num_cfg >= 0;
	     etry_num_cfg--, hnat_priv->foe_etry_num /= 2) {
		foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
		hnat_priv->foe_table_cpu[ppe_id] = dma_alloc_coherent(
			hnat_priv->dev, foe_table_sz,
			&hnat_priv->foe_table_dev[ppe_id], GFP_KERNEL);

		if (hnat_priv->foe_table_cpu[ppe_id])
			break;
	}

	if (!hnat_priv->foe_table_cpu[ppe_id])
		return -1;
	dev_info(hnat_priv->dev, "PPE%d entry number = %d\n",
		 ppe_id, hnat_priv->foe_etry_num);

	writel(hnat_priv->foe_table_dev[ppe_id], hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
	memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);

	if (hnat_priv->data->version == MTK_HNAT_V1_1)
		exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);

	if (hnat_priv->data->per_flow_accounting) {
		foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
		hnat_priv->foe_mib_cpu[ppe_id] =
			dma_alloc_coherent(hnat_priv->dev, foe_mib_tb_sz,
					   &hnat_priv->foe_mib_dev[ppe_id], GFP_KERNEL);
		if (!hnat_priv->foe_mib_cpu[ppe_id])
			return -1;
		writel(hnat_priv->foe_mib_dev[ppe_id],
		       hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
		memset(hnat_priv->foe_mib_cpu[ppe_id], 0, foe_mib_tb_sz);

		hnat_priv->acct[ppe_id] =
			kzalloc(hnat_priv->foe_etry_num * sizeof(struct hnat_accounting),
				GFP_KERNEL);
		if (!hnat_priv->acct[ppe_id])
			return -1;
	}

	hnat_priv->etry_num_cfg = etry_num_cfg;
	hnat_hw_init(ppe_id);

	return 0;
}

static int ppe_busy_wait(u32 ppe_id)
{
	unsigned long t_start = jiffies;
	u32 r = 0;

	if (ppe_id >= CFG_PPE_NUM)
		return -EINVAL;

	while (1) {
		r = readl(hnat_priv->ppe_base[ppe_id] + 0x0);
		if (!(r & BIT(31)))
			return 0;
		if (time_after(jiffies, t_start + HZ))
			break;
		mdelay(10);
	}

	dev_notice(hnat_priv->dev, "ppe:%s timeout\n", __func__);

	return -1;
}

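/* Undo hnat_start() for one PPE: route GMAC traffic back to the DMA engine,
 * invalidate every FOE entry, flush and disable the cache, disable the PPE
 * and its aging/keep-alive logic, and release the FOE/MIB tables.
 */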
static void hnat_stop(u32 ppe_id)
{
	u32 foe_table_sz;
	u32 foe_mib_tb_sz;
	struct foe_entry *entry, *end;

	if (ppe_id >= CFG_PPE_NUM)
		return;

	/* send all traffic back to the DMA engine */
	set_gmac_ppe_fwd(NR_GMAC1_PORT, 0);
	set_gmac_ppe_fwd(NR_GMAC2_PORT, 0);
	set_gmac_ppe_fwd(NR_GMAC3_PORT, 0);

	dev_info(hnat_priv->dev, "hwnat stop\n");

	if (hnat_priv->foe_table_cpu[ppe_id]) {
		entry = hnat_priv->foe_table_cpu[ppe_id];
		end = hnat_priv->foe_table_cpu[ppe_id] + hnat_priv->foe_etry_num;
		while (entry < end) {
			entry->bfib1.state = INVALID;
			entry++;
		}
	}
	/* disable caching */
	hnat_cache_ebl(0);

	/* the cache flush has to happen before the PPE is disabled */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, PPE_EN, 0);

	/* disable scan mode and keep-alive */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, SCAN_MODE, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, KA_CFG, 0);

	ppe_busy_wait(ppe_id);

	/* disable FOE */
	cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
		    BIT_IPV4_NAPT_EN | BIT_IPV4_NAT_EN | BIT_IPV4_NAT_FRAG_EN |
		    BIT_IPV6_HASH_GREK | BIT_IPV4_DSL_EN |
		    BIT_IPV6_6RD_EN | BIT_IPV6_3T_ROUTE_EN |
		    BIT_IPV6_5T_ROUTE_EN | BIT_FUC_FOE | BIT_FMC_FOE);

	if (hnat_priv->data->version == MTK_HNAT_V2 ||
	    hnat_priv->data->version == MTK_HNAT_V3)
		cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
			    BIT_IPV4_MAPE_EN | BIT_IPV4_MAPT_EN);

	if (hnat_priv->data->version == MTK_HNAT_V3)
		cr_clr_bits(hnat_priv->ppe_base[ppe_id] + PPE_FLOW_CFG,
			    BIT_IPV6_NAT_EN | BIT_IPV6_NAPT_EN |
			    BIT_CS0_RM_ALL_IP6_IP_EN);

	/* disable FOE aging */
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, NTU_AGE, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UNBD_AGE, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, TCP_AGE, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, UDP_AGE, 0);
	cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_TB_CFG, FIN_AGE, 0);

	/* free the FOE table */
	foe_table_sz = hnat_priv->foe_etry_num * sizeof(struct foe_entry);
	if (hnat_priv->foe_table_cpu[ppe_id])
		dma_free_coherent(hnat_priv->dev, foe_table_sz,
				  hnat_priv->foe_table_cpu[ppe_id],
				  hnat_priv->foe_table_dev[ppe_id]);
	hnat_priv->foe_table_cpu[ppe_id] = NULL;
	writel(0, hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);

	if (hnat_priv->data->per_flow_accounting) {
		foe_mib_tb_sz = hnat_priv->foe_etry_num * sizeof(struct mib_entry);
		if (hnat_priv->foe_mib_cpu[ppe_id])
			dma_free_coherent(hnat_priv->dev, foe_mib_tb_sz,
					  hnat_priv->foe_mib_cpu[ppe_id],
					  hnat_priv->foe_mib_dev[ppe_id]);
		writel(0, hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
		kfree(hnat_priv->acct[ppe_id]);
	}
}

static void hnat_release_netdev(void)
{
	int i;
	struct extdev_entry *ext_entry;

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		if (ext_entry->dev)
			dev_put(ext_entry->dev);
		ext_if_del(ext_entry);
		kfree(ext_entry);
	}

	if (hnat_priv->g_ppdev)
		dev_put(hnat_priv->g_ppdev);

	if (hnat_priv->g_wandev)
		dev_put(hnat_priv->g_wandev);
}

static struct notifier_block nf_hnat_netdevice_nb __read_mostly = {
	.notifier_call = nf_hnat_netdevice_event,
};

static struct notifier_block nf_hnat_netevent_nb __read_mostly = {
	.notifier_call = nf_hnat_netevent_handler,
};

int hnat_enable_hook(void)
{
	/* register hook functions used by the WHNAT module */
	if (hnat_priv->data->whnat) {
		ra_sw_nat_hook_rx =
			(hnat_priv->data->version == MTK_HNAT_V2 ||
			 hnat_priv->data->version == MTK_HNAT_V3) ?
			mtk_sw_nat_hook_rx : NULL;
		ra_sw_nat_hook_tx = mtk_sw_nat_hook_tx;
		ppe_dev_register_hook = mtk_ppe_dev_register_hook;
		ppe_dev_unregister_hook = mtk_ppe_dev_unregister_hook;
	}

	if (hnat_register_nf_hooks())
		return -1;

	ppe_del_entry_by_mac = entry_delete_by_mac;
	hook_toggle = 1;

	return 0;
}

int hnat_disable_hook(void)
{
	int i, hash_index;
	struct foe_entry *entry;

	ra_sw_nat_hook_tx = NULL;
	ra_sw_nat_hook_rx = NULL;
	hnat_unregister_nf_hooks();

	for (i = 0; i < CFG_PPE_NUM; i++) {
		cr_set_field(hnat_priv->ppe_base[i] + PPE_TB_CFG,
			     SMA, SMA_ONLY_FWD_CPU);

		for (hash_index = 0; hash_index < hnat_priv->foe_etry_num; hash_index++) {
			entry = hnat_priv->foe_table_cpu[i] + hash_index;
			if (entry->bfib1.state == BIND) {
				entry->ipv4_hnapt.udib1.state = INVALID;
				entry->ipv4_hnapt.udib1.time_stamp =
					readl(hnat_priv->fe_base + 0x0010) & 0xFF;
			}
		}
	}

	/* clear the HWNAT cache */
	hnat_cache_ebl(1);

	mod_timer(&hnat_priv->hnat_sma_build_entry_timer, jiffies + 3 * HZ);
	ppe_del_entry_by_mac = NULL;
	hook_toggle = 0;

	return 0;
}

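/* Re-initialize every PPE without re-allocating anything, e.g. after the
 * frame engine has been reset: the already-allocated FOE/MIB tables are
 * cleared and re-pointed at the hardware, hnat_hw_init() is re-run, and
 * GMAC-to-PPE forwarding is turned back on. The netevent notifier is
 * temporarily unregistered while the tables are rebuilt.
 */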
int hnat_warm_init(void)
{
	u32 foe_table_sz, foe_mib_tb_sz, ppe_id = 0;

	unregister_netevent_notifier(&nf_hnat_netevent_nb);

	for (ppe_id = 0; ppe_id < CFG_PPE_NUM; ppe_id++) {
		foe_table_sz =
			hnat_priv->foe_etry_num * sizeof(struct foe_entry);
		writel(hnat_priv->foe_table_dev[ppe_id],
		       hnat_priv->ppe_base[ppe_id] + PPE_TB_BASE);
		memset(hnat_priv->foe_table_cpu[ppe_id], 0, foe_table_sz);

		if (hnat_priv->data->version == MTK_HNAT_V1_1)
			exclude_boundary_entry(hnat_priv->foe_table_cpu[ppe_id]);

		if (hnat_priv->data->per_flow_accounting) {
			foe_mib_tb_sz =
				hnat_priv->foe_etry_num * sizeof(struct mib_entry);
			writel(hnat_priv->foe_mib_dev[ppe_id],
			       hnat_priv->ppe_base[ppe_id] + PPE_MIB_TB_BASE);
			memset(hnat_priv->foe_mib_cpu[ppe_id], 0,
			       foe_mib_tb_sz);
		}

		hnat_hw_init(ppe_id);
	}

	set_gmac_ppe_fwd(NR_GMAC1_PORT, 1);
	set_gmac_ppe_fwd(NR_GMAC2_PORT, 1);
	set_gmac_ppe_fwd(NR_GMAC3_PORT, 1);
	register_netevent_notifier(&nf_hnat_netevent_nb);

	return 0;
}

static struct packet_type mtk_pack_type __read_mostly = {
	.type = HQOS_MAGIC_TAG,
	.func = mtk_hqos_ptype_cb,
};

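/* Platform driver probe: read the interface names, GMAC/PPE counts and DSA
 * WAN port from the device tree, map the frame engine and PPE register
 * ranges, start every PPE, install the netfilter hooks and netdev/netevent
 * notifiers, and hook up the optional HQoS packet type and roaming worker.
 */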
static int hnat_probe(struct platform_device *pdev)
{
	int i;
	int err = 0;
	int index = 0;
	struct resource *res;
	const char *name;
	struct device_node *np;
	unsigned int val;
	struct property *prop;
	struct extdev_entry *ext_entry;
	const struct of_device_id *match;

	hnat_priv = devm_kzalloc(&pdev->dev, sizeof(struct mtk_hnat), GFP_KERNEL);
	if (!hnat_priv)
		return -ENOMEM;

	hnat_priv->foe_etry_num = DEF_ETRY_NUM;

	match = of_match_device(of_hnat_match, &pdev->dev);
	if (unlikely(!match))
		return -EINVAL;

	hnat_priv->data = (struct mtk_hnat_data *)match->data;

	hnat_priv->dev = &pdev->dev;
	np = hnat_priv->dev->of_node;

	err = of_property_read_string(np, "mtketh-wan", &name);
	if (err < 0)
		return -EINVAL;

	strncpy(hnat_priv->wan, (char *)name, IFNAMSIZ - 1);
	dev_info(&pdev->dev, "wan = %s\n", hnat_priv->wan);

	err = of_property_read_string(np, "mtketh-lan", &name);
	if (err < 0)
		strncpy(hnat_priv->lan, "eth0", IFNAMSIZ);
	else
		strncpy(hnat_priv->lan, (char *)name, IFNAMSIZ - 1);
	dev_info(&pdev->dev, "lan = %s\n", hnat_priv->lan);

	err = of_property_read_string(np, "mtketh-lan2", &name);
	if (err < 0)
		strncpy(hnat_priv->lan2, "eth2", IFNAMSIZ);
	else
		strncpy(hnat_priv->lan2, (char *)name, IFNAMSIZ - 1);
	dev_info(&pdev->dev, "lan2 = %s\n", hnat_priv->lan2);

	err = of_property_read_string(np, "mtketh-ppd", &name);
	if (err < 0)
		strncpy(hnat_priv->ppd, "eth0", IFNAMSIZ);
	else
		strncpy(hnat_priv->ppd, (char *)name, IFNAMSIZ - 1);
	dev_info(&pdev->dev, "ppd = %s\n", hnat_priv->ppd);

	/* get the total number of GMACs handled by hnat */
	err = of_property_read_u32_index(np, "mtketh-max-gmac", 0, &val);
	if (err < 0)
		return -EINVAL;

	hnat_priv->gmac_num = val;
	dev_info(&pdev->dev, "gmac num = %d\n", hnat_priv->gmac_num);

	err = of_property_read_u32_index(np, "mtkdsa-wan-port", 0, &val);
	if (err < 0) {
		hnat_priv->wan_dsa_port = NONE_DSA_PORT;
	} else {
		hnat_priv->wan_dsa_port = val;
		dev_info(&pdev->dev, "wan dsa port = %d\n", hnat_priv->wan_dsa_port);
	}

	err = of_property_read_u32_index(np, "mtketh-ppe-num", 0, &val);
	if (err < 0)
		hnat_priv->ppe_num = 1;
	else
		hnat_priv->ppe_num = val;

	dev_info(&pdev->dev, "ppe num = %d\n", hnat_priv->ppe_num);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENOENT;

	hnat_priv->fe_base = devm_ioremap_nocache(&pdev->dev, res->start,
						  res->end - res->start + 1);
	if (!hnat_priv->fe_base)
		return -EADDRNOTAVAIL;

#if defined(CONFIG_MEDIATEK_NETSYS_V2) || defined(CONFIG_MEDIATEK_NETSYS_V3)
	hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0x2200;

	if (CFG_PPE_NUM > 1)
		hnat_priv->ppe_base[1] = hnat_priv->fe_base + 0x2600;

	if (CFG_PPE_NUM > 2)
		hnat_priv->ppe_base[2] = hnat_priv->fe_base + 0x2e00;
#else
	hnat_priv->ppe_base[0] = hnat_priv->fe_base + 0xe00;
#endif

	err = hnat_init_debugfs(hnat_priv);
	if (err)
		return err;

	prop = of_find_property(np, "ext-devices", NULL);
	for (name = of_prop_next_string(prop, NULL); name;
	     name = of_prop_next_string(prop, name), index++) {
		ext_entry = kzalloc(sizeof(*ext_entry), GFP_KERNEL);
		if (!ext_entry) {
			err = -ENOMEM;
			goto err_out1;
		}
		strncpy(ext_entry->name, (char *)name, IFNAMSIZ - 1);
		ext_if_add(ext_entry);
	}

	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		dev_info(&pdev->dev, "ext devices = %s\n", ext_entry->name);
	}

	hnat_priv->lvid = 1;
	hnat_priv->wvid = 2;

	for (i = 0; i < CFG_PPE_NUM; i++) {
		err = hnat_start(i);
		if (err)
			goto err_out;
	}

	if (hnat_priv->data->whnat) {
		err = whnat_adjust_nf_hooks();
		if (err)
			goto err_out;
	}

	err = hnat_enable_hook();
	if (err)
		goto err_out;

	register_netdevice_notifier(&nf_hnat_netdevice_nb);
	register_netevent_notifier(&nf_hnat_netevent_nb);

	if (hnat_priv->data->mcast) {
		for (i = 0; i < CFG_PPE_NUM; i++)
			hnat_mcast_enable(i);
	}

	timer_setup(&hnat_priv->hnat_sma_build_entry_timer, hnat_sma_build_entry, 0);
	if (hnat_priv->data->version == MTK_HNAT_V1_3) {
		timer_setup(&hnat_priv->hnat_reset_timestamp_timer, hnat_reset_timestamp, 0);
		hnat_priv->hnat_reset_timestamp_timer.expires = jiffies;
		add_timer(&hnat_priv->hnat_reset_timestamp_timer);
	}

	if (IS_HQOS_MODE && IS_GMAC1_MODE)
		dev_add_pack(&mtk_pack_type);

	err = hnat_roaming_enable();
	if (err)
		pr_info("hnat roaming work failed to start\n");

	INIT_LIST_HEAD(&hnat_priv->xlat.map_list);

	return 0;

err_out:
	for (i = 0; i < CFG_PPE_NUM; i++)
		hnat_stop(i);
err_out1:
	hnat_deinit_debugfs(hnat_priv);
	for (i = 0; i < MAX_EXT_DEVS && hnat_priv->ext_if[i]; i++) {
		ext_entry = hnat_priv->ext_if[i];
		ext_if_del(ext_entry);
		kfree(ext_entry);
	}
	return err;
}

static int hnat_remove(struct platform_device *pdev)
{
	int i;

	hnat_roaming_disable();
	unregister_netdevice_notifier(&nf_hnat_netdevice_nb);
	unregister_netevent_notifier(&nf_hnat_netevent_nb);
	hnat_disable_hook();

	if (hnat_priv->data->mcast)
		hnat_mcast_disable();

	for (i = 0; i < CFG_PPE_NUM; i++)
		hnat_stop(i);

	hnat_deinit_debugfs(hnat_priv);
	hnat_release_netdev();
	del_timer_sync(&hnat_priv->hnat_sma_build_entry_timer);
	if (hnat_priv->data->version == MTK_HNAT_V1_3)
		del_timer_sync(&hnat_priv->hnat_reset_timestamp_timer);

	if (IS_HQOS_MODE && IS_GMAC1_MODE)
		dev_remove_pack(&mtk_pack_type);

	return 0;
}

static const struct mtk_hnat_data hnat_data_v1 = {
	.num_of_sch = 2,
	.whnat = false,
	.per_flow_accounting = false,
	.mcast = false,
	.version = MTK_HNAT_V1_1,
};

static const struct mtk_hnat_data hnat_data_v2 = {
	.num_of_sch = 2,
	.whnat = true,
	.per_flow_accounting = true,
	.mcast = false,
	.version = MTK_HNAT_V1_2,
};

static const struct mtk_hnat_data hnat_data_v3 = {
	.num_of_sch = 4,
	.whnat = false,
	.per_flow_accounting = false,
	.mcast = false,
	.version = MTK_HNAT_V1_3,
};

static const struct mtk_hnat_data hnat_data_v4 = {
	.num_of_sch = 4,
	.whnat = true,
	.per_flow_accounting = true,
	.mcast = false,
	.version = MTK_HNAT_V2,
};

static const struct mtk_hnat_data hnat_data_v5 = {
	.num_of_sch = 4,
	.whnat = true,
	.per_flow_accounting = true,
	.mcast = false,
	.version = MTK_HNAT_V3,
};

const struct of_device_id of_hnat_match[] = {
	{ .compatible = "mediatek,mtk-hnat", .data = &hnat_data_v3 },
	{ .compatible = "mediatek,mtk-hnat_v1", .data = &hnat_data_v1 },
	{ .compatible = "mediatek,mtk-hnat_v2", .data = &hnat_data_v2 },
	{ .compatible = "mediatek,mtk-hnat_v3", .data = &hnat_data_v3 },
	{ .compatible = "mediatek,mtk-hnat_v4", .data = &hnat_data_v4 },
	{ .compatible = "mediatek,mtk-hnat_v5", .data = &hnat_data_v5 },
	{},
};
MODULE_DEVICE_TABLE(of, of_hnat_match);

static struct platform_driver hnat_driver = {
	.probe = hnat_probe,
	.remove = hnat_remove,
	.driver = {
		.name = "mediatek_soc_hnat",
		.of_match_table = of_hnat_match,
	},
};

module_platform_driver(hnat_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Mediatek Hardware NAT");