[][openwrt][mt7988][tops][TOPS Alpha release]
[Description]
Add the alpha version of the TOPS (Tunnel Offload Processor System) and
tops-tool packages.
The TOPS package supports tunnel protocol HW offload. The offload tunnel
protocols supported in the Alpha version are L2oGRE and L2TPv2.
Note that TOPS only guarantees offload for TCP inner packets; UDP inner
packet flows are still unstable.
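For reference, a typical L2oGRE setup whose flows TOPS would offload looks
like the following (interface names and addresses are placeholders only, not
part of this change):
  ip link add gre1 type gretap local 192.168.1.1 remote 192.168.2.1
  ip link set gre1 up
  ip link set gre1 master br-lan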
The tops-tool package provides several debug features for TOPS, such as
logging and core dump collection.
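As an illustration (the path follows the init script added below), the tool
is normally started by procd and waits for coredump data, but it can also be
run manually:
  tops-tool save_dump /log/tops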
[Release-log]
N/A
Change-Id: Iab6e4a89bebbe42c967f28e0c9e9c0611673f354
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7852683
diff --git a/feed/tops-tool/Makefile b/feed/tops-tool/Makefile
new file mode 100644
index 0000000..621c4cd
--- /dev/null
+++ b/feed/tops-tool/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 Mediatek Inc. All Rights Reserved.
+# Author: Alvin Kuo <alvin.kuo@mediatek.com>
+#
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=tops-tool
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/package-defaults.mk
+
+define Package/tops-tool
+ TITLE:=Mediatek Tunnel Offload Processor System User Tool
+ SECTION:=MTK Properties
+ CATEGORY:=MTK Properties
+ DEFAULT:=y
+ SUBMENU:=Applications
+ DEPENDS:=kmod-tops
+endef
+
+define Package/tops-tool/description
+ Mediatek Tunnel Offload Processor System User Tool
+endef
+
+TARGET_CFLAGS += \
+ -I$(PKG_BUILD_DIR)/inc
+
+define Build/Compile
+ $(MAKE) -C $(PKG_BUILD_DIR) \
+ CC="$(TARGET_CC)" \
+ CFLAGS="$(TARGET_CFLAGS) -Wall -Wextra" \
+ LDFLAGS="$(TARGET_LDFLAGS)"
+endef
+
+define Package/tops-tool/install
+ $(INSTALL_DIR) $(1)/usr/sbin
+ $(INSTALL_BIN) $(PKG_BUILD_DIR)/tops-tool $(1)/usr/sbin/
+
+ $(INSTALL_DIR) $(1)/etc/init.d
+ $(INSTALL_BIN) ./files/tops-tool.init $(1)/etc/init.d/tops-tool
+endef
+
+$(eval $(call BuildPackage,tops-tool))
diff --git a/feed/tops-tool/files/tops-tool.init b/feed/tops-tool/files/tops-tool.init
new file mode 100644
index 0000000..6c4e612
--- /dev/null
+++ b/feed/tops-tool/files/tops-tool.init
@@ -0,0 +1,19 @@
+#!/bin/sh /etc/rc.common
+
+# the TOPS driver's start priority is 41
+START=42
+
+USE_PROCD=1
+NAME=tops-tool
+PROG=/usr/sbin/tops-tool
+
+start_service() {
+ procd_open_instance
+ procd_set_param command "${PROG}" save_dump /log/tops
+ procd_set_param respawn
+ procd_close_instance
+}
+
+stop_service() {
+ service_stop "${PROG}"
+}
diff --git a/feed/tops-tool/src/Makefile b/feed/tops-tool/src/Makefile
new file mode 100644
index 0000000..b125bdb
--- /dev/null
+++ b/feed/tops-tool/src/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+#
+# Author: Alvin Kuo <Alvin.Kuo@mediatek.com>
+#
+
+PROJECT := tops-tool
+OBJECTS := tops-tool.o dump.o
+
+all: $(PROJECT)
+
+$(PROJECT): $(OBJECTS) Makefile
+ $(CC) $(CFLAGS) $(LDFLAGS) $(OBJECTS) -o $@
+
+%.o: %.c %.h Makefile
+ $(CC) $(CFLAGS) -c $< -o $@
+
+.PHONY : clean
+clean:
+ rm -f $(PROJECT) *.o
diff --git a/feed/tops-tool/src/dump.c b/feed/tops-tool/src/dump.c
new file mode 100644
index 0000000..d3a8bf1
--- /dev/null
+++ b/feed/tops-tool/src/dump.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <Alvin.Kuo@mediatek.com>
+ */
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <time.h>
+#include <poll.h>
+
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "dump.h"
+
+static int time_to_str(time_t *time_sec, char *time_str, unsigned int time_str_size)
+{
+ struct tm *ptm;
+ int ret;
+
+ ptm = gmtime(time_sec);
+ if (!ptm)
+ return -1;
+
+ ret = strftime(time_str, time_str_size, "%Y%m%d%H%M%S", ptm);
+ if (!ret)
+ return -2;
+
+ return 0;
+}
+
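+/*
+ * Layout of each dump file written below (derived from the writes in this
+ * function): a struct dump_info header at offset 0, then the fragment payload
+ * at dd_hdr->data_offset bytes past that header. Fragments of one dump share
+ * a file; when last_frag is set, the file size must equal
+ * sizeof(struct dump_info) + info.size.
+ */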
+static int save_dump_data(char *dump_root_dir,
+ struct dump_data_header *dd_hdr,
+ char *dd)
+{
+ size_t dump_file_size = dd_hdr->info.size + sizeof(struct dump_info);
+ char dump_time_str[32];
+ struct stat st = { 0 };
+ char *dump_file = NULL;
+ char *dump_dir = NULL;
+ int ret = 0;
+ int fd;
+
+ ret = time_to_str((time_t *)&dd_hdr->info.dump_time_sec,
+ dump_time_str, sizeof(dump_time_str));
+ if (ret < 0) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("time_to_str(%lu) fail(%d)\n"),
+ dd_hdr->info.dump_time_sec, ret);
+ ret = -1;
+ goto out;
+ }
+
+ dump_dir = malloc(strlen(dump_root_dir) + 1 +
+ strlen(dump_time_str) + 1);
+ if (!dump_dir) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ sprintf(dump_dir, "%s/%s", dump_root_dir, dump_time_str);
+
+ dump_file = malloc(strlen(dump_dir) + 1 +
+ strlen(dd_hdr->info.name) + 1);
+ if (!dump_file) {
+ ret = -ENOMEM;
+ goto free_dump_dir;
+ }
+ sprintf(dump_file, "%s/%s", dump_dir, dd_hdr->info.name);
+
+ if (stat(dump_dir, &st)) {
+ ret = mkdir(dump_dir, 0775);
+ if (ret) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("mkdir(%s) fail(%s)\n"),
+ dump_dir, strerror(errno));
+ ret = -1;
+ goto free_dump_file;
+ }
+
+ /* TODO: only keep latest three dump directories */
+ }
+
+ fd = open(dump_file, O_WRONLY | O_CREAT, 0664);
+ if (fd < 0) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("open(%s) fail(%s)\n"),
+ dump_file, strerror(errno));
+ ret = -1;
+ goto free_dump_file;
+ }
+
+ /* write the dump information at the beginning of the file */
+ lseek(fd, 0, SEEK_SET);
+ write(fd, &dd_hdr->info, sizeof(struct dump_info));
+
+ /* write the dump data starting at the data offset within the file */
+ lseek(fd, dd_hdr->data_offset, SEEK_CUR);
+ write(fd, dd, dd_hdr->data_len);
+
+ close(fd);
+
+ if (dd_hdr->last_frag) {
+ stat(dump_file, &st);
+ if ((size_t)st.st_size != dump_file_size) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("file(%s) size %zu != %zu\n"),
+ dump_file, st.st_size, dump_file_size);
+ ret = -1;
+ goto free_dump_file;
+ }
+ }
+
+free_dump_file:
+ free(dump_file);
+ dump_file = NULL;
+
+free_dump_dir:
+ free(dump_dir);
+ dump_dir = NULL;
+
+out:
+ return ret;
+}
+
+static int read_retry(int fd, void *buf, int len)
+{
+ int out_len = 0;
+ int ret;
+
+ while (len > 0) {
+ ret = read(fd, buf, len);
+ if (ret < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ continue;
+
+ return -1;
+ }
+
+ if (!ret)
+ return 0;
+
+ out_len += ret;
+ len -= ret;
+ buf += ret;
+ }
+
+ return out_len;
+}
+
+static int mkdir_p(char *path, mode_t mode)
+{
+ struct stat st = { 0 };
+ char *cpy_path = NULL;
+ char *cur_path = NULL;
+ char *tmp_path = NULL;
+ int ret = 0;
+ char *dir;
+
+ cpy_path = malloc(strlen(path) + 1);
+ if (!cpy_path) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ strcpy(cpy_path, path);
+
+ cur_path = malloc(strlen(path) + 1);
+ if (!cur_path) {
+ ret = -ENOMEM;
+ goto free_cpy_path;
+ }
+ memset(cur_path, 0, strlen(path) + 1);
+
+ for (dir = strtok(cpy_path, "/");
+ dir != NULL;
+ dir = strtok(NULL, "/")) {
+ /* keep current path */
+ tmp_path = malloc(strlen(cur_path) + 1);
+ if (!tmp_path) {
+ ret = -ENOMEM;
+ goto free_cur_path;
+ }
+ strcpy(tmp_path, cur_path);
+
+ /* append directory in current path */
+ sprintf(cur_path, "%s/%s", tmp_path, dir);
+
+ free(tmp_path);
+ tmp_path = NULL;
+
+ if (stat(cur_path, &st)) {
+ ret = mkdir(cur_path, mode);
+ if (ret) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("mkdir(%s) fail(%s)\n"),
+ cur_path, strerror(errno));
+ goto free_cur_path;
+ }
+ }
+ }
+
+free_cur_path:
+ free(cur_path);
+ cur_path = NULL;
+
+free_cpy_path:
+ free(cpy_path);
+ cpy_path = NULL;
+
+out:
+ return ret;
+}
+
+int tops_save_dump_data(char *dump_root_dir)
+{
+ struct stat st = { 0 };
+ int ret = 0;
+ int fd;
+
+ if (!dump_root_dir) {
+ ret = -1;
+ goto out;
+ }
+
+ /* reserve 256 bytes for saving name of dump directory and dump file */
+ if (strlen(dump_root_dir) > (PATH_MAX - 256)) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("dump_root_dir(%s) length %zu > %u\n"),
+ dump_root_dir, strlen(dump_root_dir), PATH_MAX - 256);
+ return -1;
+ }
+
+ if (stat(dump_root_dir, &st)) {
+ ret = mkdir_p(dump_root_dir, 0775);
+ if (ret) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("mkdir_p(%s) fail(%d)\n"),
+ dump_root_dir, ret);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ fd = open(DUMP_DATA_PATH, O_RDONLY);
+ if (fd < 0) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("open(%s) fail(%s)\n"),
+ DUMP_DATA_PATH, strerror(errno));
+ ret = -1;
+ goto out;
+ }
+
+ while (1) {
+ char dd[RELAY_DUMP_SUBBUF_SIZE - sizeof(struct dump_data_header)];
+ struct dump_data_header dd_hdr;
+ struct pollfd pfd = {
+ .fd = fd,
+ .events = POLLIN | POLLHUP | POLLERR,
+ };
+
+ poll(&pfd, 1, -1);
+
+ ret = read_retry(fd, &dd_hdr, sizeof(struct dump_data_header));
+ if (ret < 0) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("read dd_hdr fail(%d)\n"), ret);
+ ret = -1;
+ break;
+ }
+
+ if (!ret)
+ continue;
+
+ if (dd_hdr.data_len == 0) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("read empty data\n"));
+ continue;
+ }
+
+ if (dd_hdr.data_len > sizeof(dd)) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("data length %u > %lu\n"),
+ dd_hdr.data_len, sizeof(dd));
+ ret = -1;
+ break;
+ }
+
+ ret = read_retry(fd, dd, dd_hdr.data_len);
+ if (ret < 0) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("read dd fail(%d)\n"), ret);
+ ret = -1;
+ break;
+ }
+
+ if ((uint32_t)ret != dd_hdr.data_len) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("read dd length %u != %u\n"),
+ (uint32_t)ret, dd_hdr.data_len);
+ ret = -1;
+ break;
+ }
+
+ ret = save_dump_data(dump_root_dir, &dd_hdr, dd);
+ if (ret) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("save_dump_data(%s) fail(%d)\n"),
+ dump_root_dir, ret);
+ break;
+ }
+ }
+
+ close(fd);
+
+out:
+ return ret;
+}
diff --git a/feed/tops-tool/src/inc/dump.h b/feed/tops-tool/src/inc/dump.h
new file mode 100644
index 0000000..0d331c9
--- /dev/null
+++ b/feed/tops-tool/src/inc/dump.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <Alvin.Kuo@mediatek.com>
+ */
+
+#ifndef __DUMP_H__
+#define __DUMP_H__
+
+#include <sys/types.h>
+
+#define DUMP_INFO_NAME_MAX_LEN 32
+#define RELAY_DUMP_SUBBUF_SIZE 2048
+#define DUMP_DATA_PATH "/sys/kernel/debug/tops/dump_data"
+
+#define DUMP_LOG_FMT(FMT) "[TOPS_TOOL] [%s]: " FMT, __func__
+
+struct dump_info {
+ char name[DUMP_INFO_NAME_MAX_LEN];
+ uint64_t dump_time_sec;
+ uint32_t start_addr;
+ uint32_t size;
+ uint32_t dump_rsn;
+#define DUMP_RSN_NULL (0x0000)
+#define DUMP_RSN_WDT_TIMEOUT_CORE0 (0x0001)
+#define DUMP_RSN_WDT_TIMEOUT_CORE1 (0x0002)
+#define DUMP_RSN_WDT_TIMEOUT_CORE2 (0x0004)
+#define DUMP_RSN_WDT_TIMEOUT_CORE3 (0x0008)
+#define DUMP_RSN_WDT_TIMEOUT_COREM (0x0010)
+#define DUMP_RSN_FE_RESET (0x0020)
+};
+
+struct dump_data_header {
+ struct dump_info info;
+ uint32_t data_offset;
+ uint32_t data_len;
+ uint8_t last_frag;
+};
+
+int tops_save_dump_data(char *dump_dir);
+
+#endif /* __DUMP_H__ */
diff --git a/feed/tops-tool/src/tops-tool.c b/feed/tops-tool/src/tops-tool.c
new file mode 100644
index 0000000..32ab579
--- /dev/null
+++ b/feed/tops-tool/src/tops-tool.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <Alvin.Kuo@mediatek.com>
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+
+#include "dump.h"
+
+static void print_usage(void)
+{
+ printf("Usage:\n");
+ printf(" tops-tool [CMD] [DUMP_DIR]\n");
+ printf(" [CMD] are:\n");
+ printf(" save_dump save dump data as file in directory [DUMP_DIR]\n");
+ printf(" [DUMP_DIR] is directory of dump file\n");
+}
+
+static int verify_parameters(int argc,
+ char *argv[])
+{
+ char *cmd;
+
+ if (argc < 2) {
+ fprintf(stderr, DUMP_LOG_FMT("missing cmd\n"));
+ return -EINVAL;
+ }
+
+ cmd = argv[1];
+ if (!strncmp(cmd, "save_dump", 9)) {
+ if (argc < 3) {
+ fprintf(stderr, DUMP_LOG_FMT("too few parameters\n"));
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret = 0;
+ char *cmd;
+
+ ret = verify_parameters(argc, argv);
+ if (ret) {
+ print_usage();
+ goto error;
+ }
+
+ cmd = argv[1];
+ if (!strncmp(cmd, "save_dump", 9)) {
+ ret = tops_save_dump_data(argv[2]);
+ if (ret) {
+ fprintf(stderr,
+ DUMP_LOG_FMT("cmd %s: save dump data fail(%d)\n"),
+ cmd, ret);
+ goto error;
+ }
+ } else {
+ fprintf(stderr, DUMP_LOG_FMT("unsupported cmd %s\n"), cmd);
+ ret = -EINVAL;
+ goto error;
+ }
+
+error:
+ return ret;
+}
diff --git a/package-21.02/kernel/tops/Config-protocols.in b/package-21.02/kernel/tops/Config-protocols.in
new file mode 100644
index 0000000..ea06d87
--- /dev/null
+++ b/package-21.02/kernel/tops/Config-protocols.in
@@ -0,0 +1,36 @@
+menu "TOPS Offload Tunnel Protocols Configuration"
+
+config MTK_TOPS_GRE
+ bool
+ default n
+
+config MTK_TOPS_GRETAP
+ bool "Mediatek TOPS L2oGRE HW Offload"
+ default y
+ select MTK_TOPS_GRE
+ select PACKAGE_kmod-gre
+ help
+ Select y to enable L2oGRE HW offload by the Tunnel Offload Processor System.
+
+config MTK_TOPS_PPTP
+ bool "Mediatek TOPS PPTP HW Offload"
+ default y
+ select PACKAGE_ppp-mod-pptp
+ select PACKAGE_pptpd
+ help
+ Select y to enable PPTP HW offload by the Tunnel Offload Processor System.
+
+config MTK_TOPS_L2TP
+ bool
+ default n
+
+config MTK_TOPS_UDP_L2TP_DATA
+ bool "Mediatek TOPS UDP L2TP Data HW Offload"
+ default y
+ select MTK_TOPS_L2TP
+ select PACKAGE_kmod-l2tp
+ select PACKAGE_xl2tpd
+ help
+ Select y to enable UDP L2TP data offload by the Tunnel Offload Processor System.
+
+endmenu
diff --git a/package-21.02/kernel/tops/Config.in b/package-21.02/kernel/tops/Config.in
new file mode 100644
index 0000000..b76fc0b
--- /dev/null
+++ b/package-21.02/kernel/tops/Config.in
@@ -0,0 +1,53 @@
+menu "TOPS configuration"
+ depends on PACKAGE_kmod-tops
+
+source "Config-protocols.in"
+
+choice
+ prompt "TOPS Tunnel Count"
+ default TOPS_TNL_32
+ help
+ Determine the number of TOPS tunnels.
+
+config TOPS_TNL_32
+ bool "32 TOPS Tunnel"
+
+endchoice
+
+choice
+ prompt "TOPS Firmware Target"
+ default MTK_TOPS_FIRMWARE_RELEASE
+ help
+ Select the TOPS firmware target: either the release or the latest firmware build.
+
+config MTK_TOPS_FIRMWARE_RELEASE
+ bool "MTK TOPS Firmware Release Build"
+ select PACKAGE_tops-rebb-fw-release
+
+config MTK_TOPS_FIRMWARE_LATEST
+ bool "MTK TOPS Firmware Latest Build"
+ select PACKAGE_tops-rebb-fw
+
+endchoice
+
+config MTK_TOPS_SECURE_FW
+ bool "TOPS Secure Firmware Load"
+ default n
+ help
+ Enable TOPS secure firmware load
+
+config TOPS_TNL_NUM
+ int
+ default 32 if TOPS_TNL_32
+ help
+ Configuration for TOPS tunnel count. This value should be
+ 2 ^ TOPS_TNL_MAP_BIT.
+
+config TOPS_TNL_MAP_BIT
+ int
+ default 5 if TOPS_TNL_32
+ help
+ Configuration for the TOPS tunnel map bit. This value should be the
+ base-2 logarithm of TOPS_TNL_NUM.
+
+endmenu
diff --git a/package-21.02/kernel/tops/Makefile b/package-21.02/kernel/tops/Makefile
new file mode 100644
index 0000000..c84565e
--- /dev/null
+++ b/package-21.02/kernel/tops/Makefile
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (C) 2023 Mediatek Inc. All Rights Reserved.
+# Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+#
+
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+PKG_NAME:=tops
+PKG_RELEASE:=1
+
+include $(INCLUDE_DIR)/package.mk
+include $(INCLUDE_DIR)/package-defaults.mk
+
+EXTRA_KCONFIG+= \
+ CONFIG_MTK_TOPS_SUPPORT=m \
+ CONFIG_MTK_TOPS_GRE=$(CONFIG_MTK_TOPS_GRE) \
+ CONFIG_MTK_TOPS_GRETAP=$(CONFIG_MTK_TOPS_GRETAP) \
+ CONFIG_MTK_TOPS_L2TP=$(CONFIG_MTK_TOPS_L2TP) \
+ CONFIG_MTK_TOPS_UDP_L2TP_DATA=$(CONFIG_MTK_TOPS_UDP_L2TP_DATA) \
+ CONFIG_MTK_TOPS_SECURE_FW=$(CONFIG_MTK_TOPS_SECURE_FW)
+
+EXTRA_CFLAGS+= \
+ $(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=m,%,$(filter %=m,$(EXTRA_KCONFIG)))) \
+ $(patsubst CONFIG_%, -DCONFIG_%=1, $(patsubst %=y,%,$(filter %=y,$(EXTRA_KCONFIG))))
+
+EXTRA_CFLAGS+= \
+ -I$(LINUX_DIR)/drivers/net/ethernet/mediatek/ \
+ -I$(LINUX_DIR)/drivers/dma/ \
+ -I$(KERNEL_BUILD_DIR)/pce/inc/ \
+ -DCONFIG_TOPS_TNL_NUM=$(CONFIG_TOPS_TNL_NUM) \
+ -DCONFIG_TOPS_TNL_MAP_BIT=$(CONFIG_TOPS_TNL_MAP_BIT) \
+ -Wall -Werror
+
+define Build/Prepare
+ mkdir -p $(PKG_BUILD_DIR)
+ $(CP) ./firmware $(PKG_BUILD_DIR)/firmware
+ $(CP) ./src/* $(PKG_BUILD_DIR)/
+endef
+
+define Package/tops-rebb-fw-release
+ TITLE:=Mediatek Tunnel Offload Processor System ReBB Firmware
+ SECTION:=firmware
+ CATEGORY:=Firmware
+ DEPENDS:=@MTK_TOPS_FIRMWARE_RELEASE
+endef
+
+define Package/tops-rebb-fw-release/description
+ Support for the Mediatek Tunnel Offload Processor System ReBB firmware.
+ The firmware offloads and accelerates the APMCU's tunnel protocol traffic.
+ Available offload tunnels include L2oGRE and L2TP.
+endef
+
+define Package/tops-rebb-fw-release/install
+ $(INSTALL_DIR) $(1)/lib/firmware/mediatek
+ $(CP) \
+ $(PKG_BUILD_DIR)/firmware/rebb/mt7988_mgmt/tops-mgmt.img \
+ $(PKG_BUILD_DIR)/firmware/rebb/mt7988_offload/tops-offload.img \
+ $(1)/lib/firmware/mediatek
+endef
+
+define KernelPackage/tops
+ CATEGORY:=MTK Properties
+ SUBMENU:=Drivers
+ TITLE:= MTK Tunnel Offload Processor System Driver
+ FILES+=$(PKG_BUILD_DIR)/tops.ko
+ KCONFIG:=
+ DEFAULT:=y
+ DEPENDS:= \
+ @TARGET_mediatek_mt7988 \
+ kmod-mediatek_hnat \
+ +kmod-pce \
+ +@KERNEL_RELAY
+endef
+
+define KernelPackage/tops/description
+ Support for the MTK Tunnel Offload Processor System. This system reduces
+ the APMCU's tunnel protocol processing overhead and improves tunnel
+ protocol throughput.
+endef
+
+define KernelPackage/tops/config
+ source "$(SOURCE)/Config.in"
+endef
+
+define KernelPackage/tops-autoload
+ CATEGORY:=MTK Properties
+ SUBMENU:=Drivers
+ TITLE:= MTK Tunnel Offload Processor System Auto Load
+ AUTOLOAD:=$(call AutoLoad,51,tops)
+ KCONFIG:=
+ DEPENDS:= \
+ kmod-tops \
+ +kmod-pce-autoload
+endef
+
+define KernelPackage/tops-autoload/description
+ Support for automatically loading the MTK Tunnel Offload Processor System
+ driver during the system boot process.
+endef
+
+define Build/Compile
+ $(MAKE) -C "$(LINUX_DIR)" \
+ $(KERNEL_MAKE_FLAGS) \
+ M="$(PKG_BUILD_DIR)" \
+ EXTRA_CFLAGS="$(EXTRA_CFLAGS)" \
+ $(EXTRA_KCONFIG) \
+ modules
+endef
+
+$(eval $(call BuildPackage,tops-rebb-fw-release))
+$(eval $(call KernelPackage,tops))
+$(eval $(call KernelPackage,tops-autoload))
diff --git a/package-21.02/kernel/tops/firmware/rebb/mt7988_mgmt/tops-mgmt.img b/package-21.02/kernel/tops/firmware/rebb/mt7988_mgmt/tops-mgmt.img
new file mode 100644
index 0000000..798d044
--- /dev/null
+++ b/package-21.02/kernel/tops/firmware/rebb/mt7988_mgmt/tops-mgmt.img
Binary files differ
diff --git a/package-21.02/kernel/tops/firmware/rebb/mt7988_offload/tops-offload.img b/package-21.02/kernel/tops/firmware/rebb/mt7988_offload/tops-offload.img
new file mode 100644
index 0000000..f7678d2
--- /dev/null
+++ b/package-21.02/kernel/tops/firmware/rebb/mt7988_offload/tops-offload.img
Binary files differ
diff --git a/package-21.02/kernel/tops/src/Kconfig b/package-21.02/kernel/tops/src/Kconfig
new file mode 100644
index 0000000..eafa48e
--- /dev/null
+++ b/package-21.02/kernel/tops/src/Kconfig
@@ -0,0 +1,158 @@
+config MTK_TOPS_SUPPORT
+ bool "Mediatek Tunnel Offload Processor System Support"
+ help
+ Support for the Mediatek Tunnel Offload Processor System, which
+ offloads tunnel protocols such as GRE, VxLAN, L2TP, PPTP, etc. from the
+ host CPU. The TOPS system cooperates with the Mediatek HNAT HW and
+ Mediatek PCE HW to offload specific tunnel protocols.
+
+config MTK_TOPS_GRE
+ depends on MTK_TOPS_SUPPORT
+ bool
+ help
+ Support for GRE offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading GRE-related encapsulation
+ and decapsulation to the NPU.
+
+config MTK_TOPS_GRETAP
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_GRE
+ bool "Mediatek TOPS L2oGRE Offload Support"
+ help
+ Support for L2oGRE offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading L2oGRE encapsulation and
+ decapsulation to the NPU.
+
+config MTK_TOPS_PPTP
+ depends on MTK_TOPS_SUPPORT
+ bool "Mediatek TOPS PPTP Offload Support"
+ help
+ Support for PPTP offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading PPTP encapsulation and
+ decapsulation to the NPU.
+
+config MTK_TOPS_L2TP
+ depends on MTK_TOPS_SUPPORT
+ bool
+ help
+ Support for L2TP offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading L2TP-related encapsulation
+ and decapsulation to the NPU.
+
+config MTK_TOPS_IP_L2TP
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_L2TP
+ bool "Mediatek TOPS IP L2TP Offload Support"
+ help
+ Support for IP L2TP offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading IP L2TP encapsulation and
+ decapsulation to the NPU.
+
+config MTK_TOPS_UDP_L2TP_CTRL
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_L2TP
+ bool "Mediatek TOPS UDP L2TP Control Offload Support"
+ help
+ Support for UDP L2TP control offload to the Mediatek Network Processing
+ Unit. Alleviates the host CPU's load by offloading UDP L2TP control
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_UDP_L2TP_DATA
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_L2TP
+ bool "Mediatek TOPS UDP L2TP Data Offload Support"
+ help
+ Support for UDP L2TP data offload to the Mediatek Network Processing
+ Unit. Alleviates the host CPU's load by offloading UDP L2TP data
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_VXLAN
+ depends on MTK_TOPS_SUPPORT
+ bool "Mediatek TOPS VxLAN Offload Support"
+ help
+ Support for VxLAN offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading VxLAN encapsulation and
+ decapsulation to the NPU.
+
+config MTK_TOPS_NATT
+ depends on MTK_TOPS_SUPPORT
+ bool "Mediatek TOPS NAT Traversal Offload Support"
+ help
+ Support for NATT offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading NATT encapsulation and
+ decapsulation to the NPU.
+
+config MTK_TOPS_CAPWAP
+ depends on MTK_TOPS_SUPPORT
+ bool
+ help
+ Support for CAPWAP offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading CAPWAP-related
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_CAPWAP_CTRL
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_CAPWAP
+ bool "Mediatek TOPS CAPWAP Control Offload Support"
+ help
+ Support for CAPWAP control offload to the Mediatek Network Processing
+ Unit. Alleviates the host CPU's load by offloading CAPWAP control
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_CAPWAP_DATA
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_CAPWAP
+ bool "Mediatek TOPS CAPWAP Data Offload Support"
+ help
+ Support for CAPWAP data offload to the Mediatek Network Processing
+ Unit. Alleviates the host CPU's load by offloading CAPWAP data
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_CAPWAP_DTLS
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_CAPWAP
+ bool "Mediatek TOPS CAPWAP DTLS Offload Support"
+ help
+ Support for CAPWAP DTLS offload to the Mediatek Network Processing
+ Unit. Alleviates the host CPU's load by offloading CAPWAP DTLS
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_IPSEC
+ depends on MTK_TOPS_SUPPORT
+ bool
+ help
+ Support for IPsec offload to the Mediatek Network Processing Unit.
+ Alleviates the host CPU's load by offloading IPsec-related
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_IPSEC_ESP
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_IPSEC
+ bool "Mediatek TOPS IPSec ESP Offload Support"
+ help
+ Support for IPsec ESP offload to the Mediatek Network Processing
+ Unit. Alleviates the host CPU's load by offloading IPsec ESP
+ encapsulation and decapsulation to the NPU.
+
+config MTK_TOPS_IPSEC_AH
+ depends on MTK_TOPS_SUPPORT
+ select MTK_TOPS_IPSEC
+ bool "Mediatek TOPS IPSec AH Offload Support"
+ help
+ Support for IPsec AH offload to the Mediatek Network Processing
+ Unit. Alleviates the host CPU's load by offloading IPsec AH
+ encapsulation and decapsulation to the NPU.
+
+config TOPS_TNL_NUM
+ int "Mediatek TOPS Tunnel Count"
+ depends on MTK_TOPS_SUPPORT
+ help
+ Configuration for tunnel count for Tunnel Offload Processing
+ System. This value should be 2 ^ TOPS_TNL_MAP_BIT.
+
+config TOPS_TNL_MAP_BIT
+ int "Mediatek TOPS Tunnel Map Bit"
+ depends on MTK_TOPS_SUPPORT
+ help
+ Configuration for tunnel map bit for Tunnel Offload Processing
+ System. This value is log of TOPS_TNL_NUM.
diff --git a/package-21.02/kernel/tops/src/Makefile b/package-21.02/kernel/tops/src/Makefile
new file mode 100644
index 0000000..820ebb4
--- /dev/null
+++ b/package-21.02/kernel/tops/src/Makefile
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+#
+# Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+#
+
+obj-$(CONFIG_MTK_TOPS_SUPPORT) += tops.o
+
+ccflags-y += -I$(src)/inc
+ccflags-y += -I$(src)/protocol/inc
+
+tops-y += ctrl.o
+tops-y += firmware.o
+tops-y += init.o
+tops-y += hpdma.o
+tops-y += hwspinlock.o
+tops-y += mbox.o
+tops-y += mcu.o
+tops-y += netsys.o
+tops-y += net-event.o
+tops-y += tnl_offload.o
+tops-y += ser.o
+tops-y += tdma.o
+tops-y += trm-fs.o
+tops-y += trm-mcu.o
+tops-y += trm.o
+tops-y += wdt.o
+
+tops-$(CONFIG_MTK_TOPS_GRETAP) += protocol/gre/gretap.o
+tops-$(CONFIG_MTK_TOPS_UDP_L2TP_DATA) += protocol/l2tp/udp_l2tp_data.o
+
+include $(wildcard $(src)/*.mk)
diff --git a/package-21.02/kernel/tops/src/ctrl.c b/package-21.02/kernel/tops/src/ctrl.c
new file mode 100644
index 0000000..e425e68
--- /dev/null
+++ b/package-21.02/kernel/tops/src/ctrl.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/device.h>
+
+#include "firmware.h"
+#include "hpdma.h"
+#include "internal.h"
+#include "mcu.h"
+#include "netsys.h"
+#include "trm.h"
+#include "tunnel.h"
+#include "wdt.h"
+
+static const char *tops_role_name[__TOPS_ROLE_TYPE_MAX] = {
+ [TOPS_ROLE_TYPE_MGMT] = "tops-mgmt",
+ [TOPS_ROLE_TYPE_CLUSTER] = "tops-offload",
+};
+
+static ssize_t mtk_tops_fw_info_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ enum tops_role_type rtype;
+ struct tm tm = {0};
+ const char *value;
+ const char *prop;
+ int len = 0;
+ u32 nattr;
+ u32 i;
+
+ for (rtype = TOPS_ROLE_TYPE_MGMT; rtype < __TOPS_ROLE_TYPE_MAX; rtype++) {
+ mtk_tops_fw_get_built_date(rtype, &tm);
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s FW information:\n", tops_role_name[rtype]);
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Git revision:\t%llx\n",
+ mtk_tops_fw_get_git_commit_id(rtype));
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "Build date:\t%04ld/%02d/%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ nattr = mtk_tops_fw_attr_get_num(rtype);
+
+ for (i = 0; i < nattr; i++) {
+ prop = mtk_tops_fw_attr_get_property(rtype, i);
+ if (!prop)
+ continue;
+
+ value = mtk_tops_fw_attr_get_value(rtype, prop);
+
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s:\t%s\n", prop, value);
+ }
+ len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+ }
+
+ return len;
+}
+
+static int mtk_tops_ctrl_fetch_port(const char *buf, int *ofs, u16 *port)
+{
+ int nchar = 0;
+ int ret;
+ u16 p = 0;
+
+ ret = sscanf(buf + *ofs, "%hu %n", &p, &nchar);
+ if (ret != 1)
+ return -EPERM;
+
+ *port = htons(p);
+
+ *ofs += nchar;
+
+ return nchar;
+}
+
+static int mtk_tops_ctrl_fetch_ip(const char *buf, int *ofs, u32 *ip)
+{
+ int nchar = 0;
+ int ret = 0;
+ u8 tmp[4];
+
+ ret = sscanf(buf + *ofs, "%hhu.%hhu.%hhu.%hhu %n",
+ &tmp[3], &tmp[2], &tmp[1], &tmp[0], &nchar);
+ if (ret != 4)
+ return -EPERM;
+
+ *ip = tmp[0] | tmp[1] << 8 | tmp[2] << 16 | tmp[3] << 24;
+
+ *ofs += nchar;
+
+ return nchar;
+}
+
+static int mtk_tops_ctrl_fetch_mac(const char *buf, int *ofs, u8 *mac)
+{
+ int nchar = 0;
+ int ret = 0;
+
+ ret = sscanf(buf + *ofs, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %n",
+ &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5], &nchar);
+ if (ret != 6)
+ return -EPERM;
+
+ *ofs += nchar;
+
+ return 0;
+}
+
+static int mtk_tops_ctrl_add_tnl(const char *buf)
+{
+ struct tops_tnl_params tnl_params = {0};
+ struct tops_tnl_info *tnl_info;
+ struct tops_tnl_type *tnl_type;
+ char tnl_type_name[21] = {0};
+ int ofs = 0;
+ int ret = 0;
+
+ ret = sscanf(buf, "%20s %n", tnl_type_name, &ofs);
+ if (ret != 1)
+ return -EPERM;
+
+ tnl_type = mtk_tops_tnl_type_get_by_name(tnl_type_name);
+ if (unlikely(!tnl_type || !tnl_type->tnl_debug_param_setup))
+ return -ENODEV;
+
+ ret = mtk_tops_ctrl_fetch_mac(buf, &ofs, tnl_params.daddr);
+ if (ret < 0)
+ return ret;
+
+ ret = mtk_tops_ctrl_fetch_mac(buf, &ofs, tnl_params.saddr);
+ if (ret < 0)
+ return ret;
+
+ ret = mtk_tops_ctrl_fetch_ip(buf, &ofs, &tnl_params.dip);
+ if (ret < 0)
+ return ret;
+
+ ret = mtk_tops_ctrl_fetch_ip(buf, &ofs, &tnl_params.sip);
+ if (ret < 0)
+ return ret;
+
+ ret = mtk_tops_ctrl_fetch_port(buf, &ofs, &tnl_params.dport);
+ if (ret < 0)
+ return ret;
+
+ ret = mtk_tops_ctrl_fetch_port(buf, &ofs, &tnl_params.sport);
+ if (ret < 0)
+ return ret;
+
+ ret = tnl_type->tnl_debug_param_setup(buf, &ofs, &tnl_params);
+ if (ret < 0)
+ return ret;
+
+ tnl_params.flag |= TNL_DECAP_ENABLE;
+ tnl_params.flag |= TNL_ENCAP_ENABLE;
+ tnl_params.tops_entry_proto = tnl_type->tops_entry;
+
+ tnl_info = mtk_tops_tnl_info_alloc();
+ if (IS_ERR(tnl_info))
+ return -ENOMEM;
+
+ tnl_info->flag |= TNL_INFO_DEBUG;
+ memcpy(&tnl_info->cache, &tnl_params, sizeof(struct tops_tnl_params));
+
+ mtk_tops_tnl_info_hash(tnl_info);
+
+ mtk_tops_tnl_info_submit(tnl_info);
+
+ return 0;
+}
+
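+/*
+ * Illustrative usage of the debug tunnel interface below (the attribute is
+ * created in the platform device's "mtk_tops" sysfs group; the trailing
+ * fields depend on the tunnel type's tnl_debug_param_setup() callback):
+ *   echo "NEW_TNL <type> <dmac> <smac> <dip> <sip> <dport> <sport> ..." \
+ *       > .../mtk_tops/mtk_tops_tnl
+ * Fields are consumed in the order parsed by mtk_tops_ctrl_add_tnl() above.
+ */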
+static ssize_t mtk_tops_tnl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char cmd[21] = {0};
+ int nchar = 0;
+ int ret = 0;
+
+ ret = sscanf(buf, "%20s %n", cmd, &nchar);
+
+ if (ret != 1)
+ return -EPERM;
+
+ if (!strcmp(cmd, "NEW_TNL")) {
+ ret = mtk_tops_ctrl_add_tnl(buf + nchar);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+
+static int mtk_tops_trm_fetch_setting(const char *buf,
+ int *ofs,
+ char *name,
+ u32 *offset,
+ u32 *size,
+ u8 *enable)
+{
+ int nchar = 0;
+ int ret = 0;
+
+ ret = sscanf(buf + *ofs, "%31s %x %x %hhx %n",
+ name, offset, size, enable, &nchar);
+ if (ret != 4)
+ return -EPERM;
+
+ *ofs += nchar;
+
+ return nchar;
+}
+
+static ssize_t mtk_tops_trm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char name[TRM_CONFIG_NAME_MAX_LEN] = { 0 };
+ char cmd[21] = { 0 };
+ int nchar = 0;
+ int ret = 0;
+ u32 offset;
+ u8 enable;
+ u32 size;
+
+ ret = sscanf(buf, "%20s %n", cmd, &nchar);
+ if (ret != 1)
+ return -EPERM;
+
+ if (!strcmp(cmd, "trm_dump")) {
+ ret = mtk_trm_dump(TRM_RSN_NULL);
+ if (ret)
+ return ret;
+ } else if (!strcmp(cmd, "trm_cfg_setup")) {
+ ret = mtk_tops_trm_fetch_setting(buf, &nchar,
+ name, &offset, &size, &enable);
+ if (ret < 0)
+ return ret;
+
+ ret = mtk_trm_cfg_setup(name, offset, size, enable);
+ if (ret)
+ return ret;
+ }
+
+ return count;
+}
+
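+/*
+ * Illustrative usage of the watchdog debug interface below: writing
+ * "WDT_TO <hex core bitmask>" to the mtk_tops_wdt attribute triggers a fake
+ * watchdog timeout on every TOPS core whose bit is set, e.g. "WDT_TO 3" for
+ * cores 0 and 1.
+ */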
+static ssize_t mtk_tops_wdt_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ char cmd[21] = {0};
+ u32 core = 0;
+ u32 i;
+ int ret;
+
+ ret = sscanf(buf, "%20s %x", cmd, &core);
+ if (ret != 2)
+ return -EPERM;
+
+ core &= CORE_TOPS_MASK;
+ if (!strcmp(cmd, "WDT_TO")) {
+ for (i = 0; i < CORE_TOPS_NUM; i++) {
+ if (core & 0x1)
+ mtk_tops_wdt_trigger_timeout(i);
+ core >>= 1;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR_RO(mtk_tops_fw_info);
+static DEVICE_ATTR_WO(mtk_tops_tnl);
+static DEVICE_ATTR_WO(mtk_tops_trm);
+static DEVICE_ATTR_WO(mtk_tops_wdt);
+
+static struct attribute *mtk_tops_attributes[] = {
+ &dev_attr_mtk_tops_fw_info.attr,
+ &dev_attr_mtk_tops_tnl.attr,
+ &dev_attr_mtk_tops_trm.attr,
+ &dev_attr_mtk_tops_wdt.attr,
+ NULL,
+};
+
+static const struct attribute_group mtk_tops_attr_group = {
+ .name = "mtk_tops",
+ .attrs = mtk_tops_attributes,
+};
+
+int mtk_tops_ctrl_init(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &mtk_tops_attr_group);
+ if (ret) {
+ TOPS_ERR("create sysfs failed\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+void mtk_tops_ctrl_deinit(struct platform_device *pdev)
+{
+ sysfs_remove_group(&pdev->dev.kobj, &mtk_tops_attr_group);
+}
diff --git a/package-21.02/kernel/tops/src/firmware.c b/package-21.02/kernel/tops/src/firmware.c
new file mode 100644
index 0000000..20a7db1
--- /dev/null
+++ b/package-21.02/kernel/tops/src/firmware.c
@@ -0,0 +1,796 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "firmware.h"
+#include "internal.h"
+#include "mcu.h"
+
+#define TOPS_MGMT_IMG "mediatek/tops-mgmt.img"
+#define TOPS_OFFLOAD_IMG "mediatek/tops-offload.img"
+
+#define MTK_SIP_TOPS_LOAD 0xC2000560
+
+#define PAYLOAD_ALIGNMENT (32)
+
+#define TOPS_ITCM_BOOT_ADDR (0x40020000)
+#define TOPS_DTCM_BOOT_ADDR (0x40000000)
+#define TOPS_L2SRAM_BOOT_ADDR (0x4E100000)
+#define TOPS_DEFAULT_BOOT_ADDR (TOPS_ITCM_BOOT_ADDR)
+
+#define TOPS_FW_MAGIC (0x53504f54)
+#define TOPS_FW_HDR_VER (1)
+#define FW_HLEN (sizeof(struct tops_fw_header))
+#define FW_DATA(fw) ((fw)->data)
+#define FW_ROLE(fw) ((fw)->hdr.role)
+#define FW_PART_HLEN(fw) ((fw)->hdr.part_hdr_len)
+#define FW_PART_HDR(fw, idx) (FW_DATA(fw) + FW_PART_HLEN(fw) * (idx))
+#define FW_NUM_PARTS(fw) ((fw)->hdr.num_parts)
+#define FW_GIT_ID(fw) ((fw)->hdr.git_commit_id)
+#define FW_BUILD_TS(fw) ((fw)->hdr.build_ts)
+
+#define FW_PART_LOAD_ADDR_OVERRIDE (BIT(0))
+#define FW_PART_BOOT_OVERRIDE (BIT(1))
+
+enum tops_part_type {
+ TOPS_PART_TYPE_IRAM0,
+ TOPS_PART_TYPE_DRAM0,
+ TOPS_PART_TYPE_L2SRAM,
+ TOPS_PART_TYPE_METADATA,
+
+ __TOPS_PART_TYPE_MAX,
+};
+
+enum tops_plat_id {
+ TOPS_PLAT_MT7988,
+
+ __TOPS_PLAT_MAX,
+};
+
+struct tops_boot_config {
+ enum tops_part_type boot_type;
+ u32 boot_addr;
+};
+
+struct tops_fw_plat {
+ enum tops_plat_id plat;
+ u16 id;
+};
+
+struct tops_fw_header {
+ u32 magic;
+ u8 hdr_ver;
+ u8 api_ver;
+ u16 hdr_len;
+ u32 hdr_crc;
+ u16 plat_id;
+ u16 flags;
+ u64 git_commit_id;
+ u32 build_ts;
+ u8 role;
+ u8 signing_type;
+ u8 num_parts;
+ u8 part_hdr_len;
+ u32 part_hdr_crc;
+ u32 payload_len;
+ u32 payload_crc;
+ u32 sign_body_len;
+} __aligned(4);
+
+struct tops_fw_part_hdr {
+ u8 part_type;
+ u8 resv;
+ u16 flags;
+ u32 size;
+ u32 value[2];
+} __aligned(4);
+
+struct tops_fw_part {
+ const struct tops_fw_part_hdr *hdr[__TOPS_PART_TYPE_MAX];
+ const void *payload[__TOPS_PART_TYPE_MAX];
+};
+
+struct tops_fw {
+ struct tops_fw_header hdr;
+ u8 data[0];
+};
+
+struct tops_fw_attr {
+ char *property;
+ char *value;
+};
+
+struct tops_fw_info {
+ struct tops_fw_attr *attrs;
+ u64 git_commit_id;
+ u32 build_ts;
+ u32 nattr;
+};
+
+struct npu {
+ void __iomem *base;
+ struct device *dev;
+ struct tops_fw_info fw_info[__TOPS_ROLE_TYPE_MAX];
+};
+
+#if !defined(CONFIG_MTK_TOPS_SECURE_FW)
+static struct tops_boot_config tops_boot_configs[] = {
+ { .boot_type = TOPS_PART_TYPE_IRAM0, .boot_addr = TOPS_ITCM_BOOT_ADDR },
+ { .boot_type = TOPS_PART_TYPE_DRAM0, .boot_addr = TOPS_DTCM_BOOT_ADDR },
+ { .boot_type = TOPS_PART_TYPE_L2SRAM, .boot_addr = TOPS_L2SRAM_BOOT_ADDR},
+};
+
+static struct tops_fw_plat tops_plats[] = {
+ { .plat = TOPS_PLAT_MT7988, .id = 0x7988 },
+};
+#endif /* !defined(CONFIG_MTK_TOPS_SECURE_FW) */
+
+static struct npu npu;
+
+static inline void npu_write(u32 reg, u32 val)
+{
+ writel(val, npu.base + reg);
+}
+
+static inline void npu_set(u32 reg, u32 mask)
+{
+ setbits(npu.base + reg, mask);
+}
+
+static inline void npu_clr(u32 reg, u32 mask)
+{
+ clrbits(npu.base + reg, mask);
+}
+
+static inline void npu_rmw(u32 reg, u32 mask, u32 val)
+{
+ clrsetbits(npu.base + reg, mask, val);
+}
+
+static inline u32 npu_read(u32 reg)
+{
+ return readl(npu.base + reg);
+}
+
+u64 mtk_tops_fw_get_git_commit_id(enum tops_role_type rtype)
+{
+ if (rtype >= __TOPS_ROLE_TYPE_MAX)
+ return 0;
+
+ return npu.fw_info[rtype].git_commit_id;
+}
+
+void mtk_tops_fw_get_built_date(enum tops_role_type rtype, struct tm *tm)
+{
+ if (rtype >= __TOPS_ROLE_TYPE_MAX)
+ return;
+
+ time64_to_tm(npu.fw_info[rtype].build_ts, 0, tm);
+}
+
+u32 mtk_tops_fw_attr_get_num(enum tops_role_type rtype)
+{
+ if (rtype >= __TOPS_ROLE_TYPE_MAX)
+ return 0;
+
+ return npu.fw_info[rtype].nattr;
+}
+
+const char *mtk_tops_fw_attr_get_property(enum tops_role_type rtype, u32 idx)
+{
+ if (rtype >= __TOPS_ROLE_TYPE_MAX || idx >= npu.fw_info[rtype].nattr)
+ return NULL;
+
+ return npu.fw_info[rtype].attrs[idx].property;
+}
+
+const char *mtk_tops_fw_attr_get_value(enum tops_role_type rtype,
+ const char *property)
+{
+ u32 plen = strlen(property);
+ u32 nattr;
+ u32 i;
+
+ if (rtype >= __TOPS_ROLE_TYPE_MAX)
+ return NULL;
+
+ nattr = npu.fw_info[rtype].nattr;
+ for (i = 0; i < nattr; i++) {
+ if (!strncmp(property, npu.fw_info[rtype].attrs[i].property, plen))
+ return npu.fw_info[rtype].attrs[i].value;
+ }
+
+ return NULL;
+}
+
+static bool mtk_tops_fw_support_plat(const struct tops_fw_header *fw_hdr)
+{
+ u32 i;
+
+ for (i = 0; i < __TOPS_PLAT_MAX; i++)
+ if (le16_to_cpu(fw_hdr->plat_id) == tops_plats[i].plat)
+ return true;
+
+ return false;
+}
+
+static int mtk_tops_fw_valid_hdr(const struct tops_fw *tfw, uint32_t fw_size)
+{
+ const struct tops_fw_header *fw_hdr = &tfw->hdr;
+ u32 total_size;
+ u32 ph_len;
+
+ if (fw_size < FW_HLEN) {
+ TOPS_ERR("requested fw hlen is less than minimal TOPS fw hlen\n");
+ return -EINVAL;
+ }
+
+ if (le32_to_cpu(fw_hdr->magic) != TOPS_FW_MAGIC) {
+ TOPS_ERR("not a tops fw!\n");
+ return -EBADF;
+ }
+
+ if (le16_to_cpu(fw_hdr->hdr_ver) != TOPS_FW_HDR_VER) {
+ TOPS_ERR("unsupport tops fw header: %u\n",
+ le16_to_cpu(fw_hdr->hdr_ver));
+ return -EBADF;
+ }
+
+ if (le16_to_cpu(fw_hdr->hdr_len) != sizeof(struct tops_fw_header)) {
+ TOPS_ERR("tops fw header length mismatch\n");
+ return -EBADF;
+ }
+
+ if (fw_hdr->part_hdr_len != sizeof(struct tops_fw_part_hdr)) {
+ TOPS_ERR("unsupport tops fw header len: %u\n",
+ fw_hdr->part_hdr_len);
+ return -EBADF;
+ }
+
+ if (!mtk_tops_fw_support_plat(fw_hdr)) {
+ TOPS_ERR("unsupport tops platform fw: %u\n",
+ le16_to_cpu(fw_hdr->plat_id));
+ return -EBADF;
+ }
+
+ if (fw_hdr->role >= __TOPS_ROLE_TYPE_MAX) {
+ TOPS_ERR("unsupport tops role: %u\n", fw_hdr->role);
+ return -EBADF;
+ }
+
+ if (fw_hdr->num_parts > __TOPS_PART_TYPE_MAX) {
+ TOPS_ERR("number of parts exceeds tops' support: %u\n",
+ fw_hdr->num_parts);
+ return -EBADF;
+ }
+
+ ph_len = fw_hdr->part_hdr_len * fw_hdr->num_parts;
+ total_size = fw_hdr->hdr_len + ph_len + fw_hdr->payload_len;
+
+ if (total_size > fw_size) {
+ TOPS_ERR("firmware incomplete\n");
+ return -EBADF;
+ }
+
+ return 0;
+}
+
+static int mtk_tops_fw_init_part_data(const struct tops_fw *fw,
+ struct tops_fw_part *part)
+{
+ const struct tops_fw_part_hdr *phdr;
+ uint32_t part_off = FW_PART_HLEN(fw) * FW_NUM_PARTS(fw);
+ int ret = 0;
+ u8 i;
+
+ for (i = 0; i < FW_NUM_PARTS(fw); i++) {
+ /* get part hdr */
+ phdr = (struct tops_fw_part_hdr *)FW_PART_HDR(fw, i);
+ if (phdr->part_type >= __TOPS_PART_TYPE_MAX) {
+ TOPS_ERR("unknown part type: %u\n", phdr->part_type);
+ return -EBADF;
+ }
+
+ part->hdr[phdr->part_type] = phdr;
+
+ /* get part payload */
+ part->payload[phdr->part_type] = FW_DATA(fw) + part_off;
+
+ part_off += ALIGN(le32_to_cpu(phdr->size), PAYLOAD_ALIGNMENT);
+ }
+
+ return ret;
+}
+
+#if defined(CONFIG_MTK_TOPS_SECURE_FW)
+static int mtk_tops_fw_smc(u32 smc_id,
+ u64 x1,
+ u64 x2,
+ u64 x3,
+ u64 x4,
+ struct arm_smccc_res *res)
+{
+ if (!res)
+ return -EINVAL;
+
+ arm_smccc_smc(smc_id, x1, x2, x3, x4, 0, 0, 0, res);
+
+ return res->a0;
+}
+
+static int __mtk_tops_fw_bring_up_core(const void *fw, u32 fw_size)
+{
+ struct arm_smccc_res res = {0};
+ dma_addr_t fw_paddr;
+ void *fw_vaddr;
+ u32 order = 0;
+ u32 psize;
+ int ret;
+
+ psize = (fw_size / PAGE_SIZE) + 1;
+ while ((1 << order) < psize)
+ order++;
+
+ fw_vaddr = (void *)__get_free_pages(GFP_KERNEL, order);
+ if (!fw_vaddr)
+ return -ENOMEM;
+
+ memcpy(fw_vaddr, fw, fw_size);
+
+ fw_paddr = dma_map_single(tops_dev, fw_vaddr, fw_size, DMA_TO_DEVICE);
+ if (dma_mapping_error(tops_dev, fw_paddr)) {
+ ret = -ENOMEM;
+ goto dma_map_err;
+ }
+ /* make sure firmware data is written and mapped to buffer */
+ wmb();
+
+ ret = mtk_tops_fw_smc(MTK_SIP_TOPS_LOAD, 0, fw_paddr, fw_size, 0, &res);
+ if (ret)
+ TOPS_ERR("tops secure firmware load failed: %d\n", ret);
+
+ dma_unmap_single(tops_dev, fw_paddr, fw_size, DMA_TO_DEVICE);
+
+dma_map_err:
+ free_pages((unsigned long)fw_vaddr, order);
+
+ return ret;
+}
+#else /* !defined(CONFIG_MTK_TOPS_SECURE_FW) */
+static u32 mtk_tops_fw_get_boot_addr(struct tops_fw_part *part)
+{
+ const struct tops_fw_part_hdr *hdr = NULL;
+ u32 boot_addr = TOPS_DEFAULT_BOOT_ADDR;
+ u32 i;
+
+ for (i = TOPS_PART_TYPE_IRAM0; i < __TOPS_PART_TYPE_MAX; i++) {
+ hdr = part->hdr[i];
+
+ if (le16_to_cpu(hdr->flags) & FW_PART_BOOT_OVERRIDE) {
+ boot_addr = tops_boot_configs[i].boot_addr;
+
+ if (le16_to_cpu(hdr->flags) & FW_PART_LOAD_ADDR_OVERRIDE)
+ boot_addr = le32_to_cpu(hdr->value[0]);
+ }
+ }
+
+ return boot_addr;
+}
+
+static void __mtk_tops_fw_load_data(const struct tops_fw_part_hdr *phdr,
+ const void *payload,
+ u32 addr)
+{
+ int ofs;
+
+ for (ofs = 0; ofs < le32_to_cpu(phdr->size); ofs += 0x4)
+ npu_write(addr + ofs, *(u32 *)(payload + ofs));
+}
+
+static int mtk_tops_fw_load_core_mgmt(struct tops_fw_part *part)
+{
+ if (!part)
+ return -ENODEV;
+
+ __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_IRAM0],
+ part->payload[TOPS_PART_TYPE_IRAM0],
+ TOP_CORE_M_ITCM);
+
+ __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_DRAM0],
+ part->payload[TOPS_PART_TYPE_DRAM0],
+ TOP_CORE_M_DTCM);
+
+ __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_L2SRAM],
+ part->payload[TOPS_PART_TYPE_L2SRAM],
+ TOP_L2SRAM);
+
+ return 0;
+}
+
+static int mtk_tops_fw_bring_up_core_mgmt(struct tops_fw_part *part)
+{
+ int ret = 0;
+
+ /* setup boot address */
+ npu_write(TOP_CORE_M_RESET_VECTOR, mtk_tops_fw_get_boot_addr(part));
+
+ /* de-assert core reset */
+ npu_write(TOP_CORE_NPU_SW_RST, 0);
+
+ /* enable run stall */
+ npu_write(TOP_CORE_NPU_CTRL, 0x1);
+
+ /* enable ext bootup sel */
+ npu_write(TOP_CORE_M_STAT_VECTOR_SEL, 0x1);
+
+ /* toggle reset */
+ npu_write(TOP_CORE_NPU_SW_RST, 0x1);
+ npu_write(TOP_CORE_NPU_SW_RST, 0x0);
+
+ /* load firmware */
+ ret = mtk_tops_fw_load_core_mgmt(part);
+ if (ret) {
+ TOPS_ERR("load core mgmt fw failed: %d\n", ret);
+ return ret;
+ }
+
+ /* release run stall */
+ npu_write(TOP_CORE_NPU_CTRL, 0);
+
+ return ret;
+}
+
+static int mtk_tops_fw_load_core_offload(struct tops_fw_part *part,
+ enum core_id core)
+{
+ if (!part)
+ return -ENODEV;
+
+ if (core >= CORE_OFFLOAD_NUM)
+ return -EPERM;
+
+ __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_IRAM0],
+ part->payload[TOPS_PART_TYPE_IRAM0],
+ CLUST_CORE_X_ITCM(core));
+
+ __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_DRAM0],
+ part->payload[TOPS_PART_TYPE_DRAM0],
+ CLUST_CORE_X_DTCM(core));
+
+ return 0;
+}
+
+static int __mtk_tops_fw_bring_up_core_offload(struct tops_fw_part *part,
+ enum core_id core)
+{
+ int ret = 0;
+
+ /* setup boot address */
+ npu_write(CLUST_CORE_X_RESET_VECTOR(core),
+ mtk_tops_fw_get_boot_addr(part));
+
+ /* de-assert core reset */
+ npu_write(CLUST_CORE_NPU_SW_RST(core), 0);
+
+ /* enable run stall */
+ npu_write(CLUST_CORE_NPU_CTRL(core), 0x1);
+
+ /* enable ext bootup sel */
+ npu_write(CLUST_CORE_X_STAT_VECTOR_SEL(core), 0x1);
+
+ /* toggle reset */
+ npu_write(CLUST_CORE_NPU_SW_RST(core), 0x1);
+ npu_write(CLUST_CORE_NPU_SW_RST(core), 0x0);
+
+ /* load firmware */
+ ret = mtk_tops_fw_load_core_offload(part, core);
+ if (ret) {
+ TOPS_ERR("load core offload fw failed: %d\n", ret);
+ return ret;
+ }
+
+ /* release run stall */
+ npu_write(CLUST_CORE_NPU_CTRL(core), 0);
+
+ return ret;
+}
+
+static int mtk_tops_fw_bring_up_core_offload(struct tops_fw_part *part)
+{
+ int ret = 0;
+ u32 i = 0;
+
+ __mtk_tops_fw_load_data(part->hdr[TOPS_PART_TYPE_L2SRAM],
+ part->payload[TOPS_PART_TYPE_L2SRAM],
+ CLUST_L2SRAM);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ ret = __mtk_tops_fw_bring_up_core_offload(part, i);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int __mtk_tops_fw_bring_up_core(const struct tops_fw *tfw,
+ struct tops_fw_part *part)
+{
+ int ret = 0;
+
+ if (!tfw || !part)
+ return -EINVAL;
+
+ /* bring up core by role */
+ switch (FW_ROLE(tfw)) {
+ case TOPS_ROLE_TYPE_MGMT:
+ ret = mtk_tops_fw_bring_up_core_mgmt(part);
+
+ break;
+ case TOPS_ROLE_TYPE_CLUSTER:
+ ret = mtk_tops_fw_bring_up_core_offload(part);
+
+ break;
+ default:
+ TOPS_ERR("unsupport tops fw role\n");
+
+ return -EBADF;
+ }
+
+ return ret;
+}
+#endif /* defined(CONFIG_MTK_TOPS_SECURE_FW) */
+
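+/*
+ * Metadata partition layout assumed by the parser below: a little-endian u32
+ * attribute count, followed by <count> pairs of little-endian u32 offsets
+ * (property offset, value offset); each offset points to a NUL-terminated
+ * string measured from the start of the metadata payload.
+ */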
+static int mtk_tops_fw_get_info(const struct tops_fw *tfw, struct tops_fw_part *part)
+{
+ const struct tops_fw_part_hdr *phdr;
+ const u8 *payload;
+ struct tops_fw_info *fw_info;
+ struct tops_fw_attr *attrs;
+ u32 kofs, klen, vofs, vlen;
+ u32 meta_len;
+ u32 ofs = 0;
+ u32 nattr;
+ u32 i;
+
+ if (!tfw || !part)
+ return -EINVAL;
+
+ if (FW_ROLE(tfw) >= __TOPS_ROLE_TYPE_MAX)
+ return -EINVAL;
+
+ phdr = part->hdr[TOPS_PART_TYPE_METADATA];
+ payload = part->payload[TOPS_PART_TYPE_METADATA];
+ if (!phdr || !payload)
+ return 0;
+
+ /* read the size only after the part header pointer is known to be valid */
+ meta_len = le32_to_cpu(phdr->size);
+ if (!meta_len)
+ return 0;
+
+ fw_info = &npu.fw_info[FW_ROLE(tfw)];
+ fw_info->nattr = nattr = le32_to_cpu(*((u32 *)payload));
+ ofs += 0x4;
+
+ fw_info->attrs = devm_kcalloc(tops_dev,
+ nattr,
+ sizeof(struct tops_fw_attr),
+ GFP_KERNEL);
+ if (!fw_info->attrs) {
+ fw_info->nattr = 0;
+ return -ENOMEM;
+ }
+ attrs = fw_info->attrs;
+
+ for (i = 0; i < nattr; i++) {
+ struct tops_fw_attr *attr = &attrs[i];
+
+ /* get property offset */
+ if (ofs + (i * 2) * 0x4 >= meta_len)
+ break;
+ kofs = le32_to_cpu(*((u32 *)(payload + ofs + (i * 2) * 0x4)));
+
+ /* get value offset */
+ if (ofs + (i * 2 + 1) * 0x4 >= meta_len)
+ break;
+ vofs = le32_to_cpu(*((u32 *)(payload + ofs + (i * 2 + 1) * 0x4)));
+
+ klen = strlen(payload + kofs);
+ vlen = strlen(payload + vofs);
+ if (!kofs || !vofs || !klen || !vlen) {
+ TOPS_ERR("invalid attribute property value pair, kofs: %u, klen: %u, vofs: %u, vlen: %u\n",
+ kofs, klen, vofs, vlen);
+ break;
+ }
+
+ attr->property = devm_kzalloc(tops_dev,
+ sizeof(char) * klen + 1,
+ GFP_KERNEL);
+ if (!attr->property)
+ goto err_out;
+
+ attr->value = devm_kzalloc(tops_dev,
+ sizeof(char) * vlen + 1,
+ GFP_KERNEL);
+ if (!attr->value) {
+ devm_kfree(tops_dev, attr->property);
+ goto err_out;
+ }
+
+ strncpy(attr->property, payload + kofs, klen);
+ strncpy(attr->value, payload + vofs, vlen);
+ }
+
+ fw_info->git_commit_id = le64_to_cpu(FW_GIT_ID(tfw));
+ fw_info->build_ts = le32_to_cpu(FW_BUILD_TS(tfw));
+
+ return 0;
+
+err_out:
+ fw_info->git_commit_id = 0;
+ fw_info->build_ts = 0;
+
+ /* i is unsigned, so count down without testing "i >= 0" (always true) */
+ while (i-- > 0) {
+ devm_kfree(tops_dev, attrs[i].property);
+ devm_kfree(tops_dev, attrs[i].value);
+ }
+
+ devm_kfree(tops_dev, attrs);
+
+ return -ENOMEM;
+}
+
+static void mtk_tops_fw_put_info(void)
+{
+ enum tops_role_type rtype;
+ struct tops_fw_attr *attrs;
+ u32 nattr;
+ u32 i;
+
+ for (rtype = TOPS_ROLE_TYPE_MGMT; rtype < __TOPS_ROLE_TYPE_MAX; rtype++) {
+ nattr = npu.fw_info[rtype].nattr;
+ attrs = npu.fw_info[rtype].attrs;
+
+ npu.fw_info[rtype].git_commit_id = 0;
+ npu.fw_info[rtype].build_ts = 0;
+
+ if (!nattr)
+ continue;
+
+ for (i = 0; i < nattr; i++) {
+ devm_kfree(tops_dev, attrs[i].property);
+ devm_kfree(tops_dev, attrs[i].value);
+ }
+
+ devm_kfree(tops_dev, attrs);
+
+ npu.fw_info[rtype].nattr = 0;
+ npu.fw_info[rtype].attrs = NULL;
+ }
+}
+
+int mtk_tops_fw_bring_up_core(const char *fw_path)
+{
+ const struct firmware *fw;
+ const struct tops_fw *tfw;
+ struct tops_fw_part part;
+ struct tm tm = {0};
+ int ret;
+
+ ret = request_firmware(&fw, fw_path, tops_dev);
+ if (ret) {
+ TOPS_ERR("request %s firmware failed\n", fw_path);
+ return ret;
+ }
+
+ tfw = (const void *)fw->data;
+
+ ret = mtk_tops_fw_valid_hdr(tfw, fw->size);
+ if (ret) {
+ TOPS_ERR("valid fw: %s image failed: %d\n", fw_path, ret);
+ goto err_out;
+ }
+
+ ret = mtk_tops_fw_init_part_data(tfw, &part);
+ if (ret) {
+ TOPS_ERR("init fw part data failed: %d\n", ret);
+ goto err_out;
+ }
+
+ ret = mtk_tops_fw_get_info(tfw, &part);
+ if (ret) {
+ TOPS_ERR("meta data initialize failed: %d\n", ret);
+ goto err_out;
+ }
+
+ ret = __mtk_tops_fw_bring_up_core(tfw, &part);
+ if (ret) {
+ TOPS_ERR("bring up core %s failed\n", fw_path);
+ mtk_tops_fw_put_info();
+ goto err_out;
+ }
+
+ mtk_tops_fw_get_built_date(FW_ROLE(tfw), &tm);
+
+ TOPS_NOTICE("TOPS Load Firmware: %s\n", fw_path);
+ TOPS_NOTICE("\tFirmware version:\t%s\n",
+ mtk_tops_fw_attr_get_value(FW_ROLE(tfw), "version"));
+ TOPS_NOTICE("\tGit revision:\t\t%llx\n",
+ mtk_tops_fw_get_git_commit_id(FW_ROLE(tfw)));
+ TOPS_NOTICE("\tBuilt date:\t\t%04ld/%02d/%02d %02d:%02d:%02d\n",
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+err_out:
+ release_firmware(fw);
+
+ return ret;
+}
+#if defined(CONFIG_MTK_TOPS_EVALUATION)
+EXPORT_SYMBOL(mtk_tops_fw_bring_up_core);
+#endif /* defined(CONFIG_MTK_TOPS_EVALUATION) */
+
+int mtk_tops_fw_bring_up_default_cores(void)
+{
+ int ret;
+
+ ret = mtk_tops_fw_bring_up_core(TOPS_MGMT_IMG);
+ if (ret)
+ return ret;
+
+ ret = mtk_tops_fw_bring_up_core(TOPS_OFFLOAD_IMG);
+
+ return ret;
+}
+
+#if defined(CONFIG_MTK_TOPS_CORE_DEBUG)
+static void mtk_tops_fw_enable_core_debug(void)
+{
+ u32 i;
+
+ npu_write(TOP_CORE_DBG_CTRL, 0x3F);
+ npu_write(CLUST_CORE_DBG_CTRL, 0x1F);
+
+ npu_write(TOP_CORE_OCD_CTRL, 0x1);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++)
+ npu_write(CLUST_CORE_OCD_CTRL(i), 0x1);
+}
+#endif /* defined(CONFIG_MTK_TOPS_CORE_DEBUG) */
+
+void mtk_tops_fw_clean_up(void)
+{
+ mtk_tops_fw_put_info();
+}
+
+int mtk_tops_fw_init(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
+ if (!res)
+ return -ENXIO;
+
+ npu.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!npu.base)
+ return -ENOMEM;
+
+/* TODO: move to somewhere else */
+#if defined(CONFIG_MTK_TOPS_CORE_DEBUG)
+ mtk_tops_fw_enable_core_debug();
+#endif /* defined(CONFIG_MTK_TOPS_CORE_DEBUG) */
+
+ return 0;
+}
diff --git a/package-21.02/kernel/tops/src/hpdma.c b/package-21.02/kernel/tops/src/hpdma.c
new file mode 100644
index 0000000..18f17e4
--- /dev/null
+++ b/package-21.02/kernel/tops/src/hpdma.c
@@ -0,0 +1,942 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/io.h>
+#include <linux/lockdep.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_dma.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <virt-dma.h>
+
+#include "hpdma.h"
+#include "hwspinlock.h"
+#include "internal.h"
+#include "mbox.h"
+#include "mcu.h"
+
+#define HPDMA_CHAN_NUM (4)
+
+#define MTK_HPDMA_ALIGN_SIZE (DMAENGINE_ALIGN_16_BYTES)
+#define MTK_HPDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+
+struct hpdma_dev;
+struct hpdma_vchan;
+struct hpdma_vdesc;
+struct hpdma_init_data;
+
+typedef struct hpdma_dev *(*hpdma_init_func_t)(struct platform_device *pdev,
+ const struct hpdma_init_data *data);
+typedef void (*tx_pending_desc_t)(struct hpdma_dev *hpdma,
+ struct hpdma_vchan *hchan,
+ struct hpdma_vdesc *hdesc);
+typedef struct dma_chan *(*of_dma_xlate_func_t)(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma);
+
+struct hpdma_vdesc {
+ struct virt_dma_desc vdesc;
+ dma_addr_t src;
+ dma_addr_t dst;
+ u32 total_num;
+ u32 axsize;
+ size_t len;
+};
+
+struct hpdma_vchan {
+ struct virt_dma_chan vchan;
+ struct work_struct tx_work;
+ struct hpdma_vdesc *issued_desc;
+ wait_queue_head_t stop_wait;
+ bool busy;
+ bool terminating;
+ u8 pchan_id;
+};
+
+struct hpdma_ops {
+ int (*vchan_init)(struct hpdma_dev *hpdma, struct dma_device *ddev);
+ void (*vchan_deinit)(struct hpdma_dev *hpdma);
+ int (*mbox_init)(struct platform_device *pdev, struct hpdma_dev *hpdma);
+ void (*mbox_deinit)(struct platform_device *pdev, struct hpdma_dev *hpdma);
+ tx_pending_desc_t tx_pending_desc;
+ of_dma_xlate_func_t of_dma_xlate;
+};
+
+struct hpdma_init_data {
+ struct hpdma_ops ops;
+ hpdma_init_func_t init;
+ mbox_handler_func_t mbox_handler;
+ enum hwspinlock_group hwspinlock_grp;
+ u32 trigger_start_slot; /* permission to start dma transfer */
+ u32 ch_base_slot; /* permission to occupy a physical channel */
+};
+
+struct hpdma_dev {
+ struct dma_device ddev;
+ struct hpdma_ops ops;
+ struct hpdma_vchan *hvchans;
+ struct hpdma_vchan *issued_chan;
+ spinlock_t lock; /* prevent inter-process racing hwspinlock */
+ void __iomem *base;
+ enum hwspinlock_group hwspinlock_grp;
+ u32 trigger_start_slot; /* permission to start dma transfer */
+ u32 ch_base_slot; /* permission to occupy a physical channel */
+};
+
+struct top_hpdma_dev {
+ struct mailbox_dev mdev;
+ struct hpdma_dev hpdma;
+};
+
+struct clust_hpdma_dev {
+ struct mailbox_dev mdev[CORE_MAX];
+ struct hpdma_dev hpdma;
+};
+
+static inline void hpdma_write(struct hpdma_dev *hpdma, u32 reg, u32 val)
+{
+ writel(val, hpdma->base + reg);
+}
+
+static inline void hpdma_set(struct hpdma_dev *hpdma, u32 reg, u32 mask)
+{
+ setbits(hpdma->base + reg, mask);
+}
+
+static inline void hpdma_clr(struct hpdma_dev *hpdma, u32 reg, u32 mask)
+{
+ clrbits(hpdma->base + reg, mask);
+}
+
+static inline void hpdma_rmw(struct hpdma_dev *hpdma, u32 reg, u32 mask, u32 val)
+{
+ clrsetbits(hpdma->base + reg, mask, val);
+}
+
+static inline u32 hpdma_read(struct hpdma_dev *hpdma, u32 reg)
+{
+ return readl(hpdma->base + reg);
+}
+
+struct hpdma_dev *chan_to_hpdma_dev(struct dma_chan *chan)
+{
+ return container_of(chan->device, struct hpdma_dev, ddev);
+}
+
+struct hpdma_vchan *chan_to_hpdma_vchan(struct dma_chan *chan)
+{
+ return container_of(chan, struct hpdma_vchan, vchan.chan);
+}
+
+struct hpdma_vdesc *vdesc_to_hpdma_vdesc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct hpdma_vdesc, vdesc);
+}
+
+static inline void __mtk_hpdma_vchan_deinit(struct virt_dma_chan *vchan)
+{
+ list_del(&vchan->chan.device_node);
+ tasklet_kill(&vchan->task);
+}
+
+static inline int mtk_hpdma_prepare_transfer(struct hpdma_dev *hpdma)
+{
+ /*
+ * released once the HPDMA transfer is done; this prevents other APMCU
+ * processes from contending for the HW spinlock. Since the lock is never
+ * contended in interrupt context, it is safe to hold it without
+ * disabling IRQs.
+ */
+ spin_lock(&hpdma->lock);
+
+ /* no issued channel is expected at this point */
+ if (!hpdma->issued_chan)
+ return 0;
+
+ dev_err(hpdma->ddev.dev,
+ "hpdma issued_chan is not empty when transfer started\n");
+
+ WARN_ON(1);
+
+ spin_unlock(&hpdma->lock);
+
+ return -1;
+}
+
+static inline void mtk_hpdma_unprepare_transfer(struct hpdma_dev *hpdma)
+{
+ spin_unlock(&hpdma->lock);
+}
+
+static inline int mtk_hpdma_start_transfer(struct hpdma_dev *hpdma,
+ struct hpdma_vchan *hvchan,
+ struct hpdma_vdesc *hvdesc)
+{
+ /* occupy hpdma start permission */
+ mtk_tops_hwspin_lock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);
+
+ /* acknowledge the terminate flow that HW is going to start */
+ hvchan->busy = true;
+
+ list_del(&hvdesc->vdesc.node);
+
+ /* set vdesc to current channel's pending transfer */
+ hvchan->issued_desc = hvdesc;
+ hpdma->issued_chan = hvchan;
+
+ /* last chance to abort the transfer if channel is terminating */
+ if (unlikely(hvchan->terminating))
+ goto terminate_transfer;
+
+ /* trigger dma start */
+ hpdma_set(hpdma, TOPS_HPDMA_X_START(hvchan->pchan_id), HPDMA_START);
+
+ return 0;
+
+terminate_transfer:
+ hvchan->busy = false;
+
+ hpdma->issued_chan = NULL;
+
+ mtk_tops_hwspin_unlock(hpdma->hwspinlock_grp, hpdma->trigger_start_slot);
+
+ return -1;
+}
+
+/* setup a channel's parameter before it acquires the permission to start transfer */
+static inline void mtk_hpdma_config_pchan(struct hpdma_dev *hpdma,
+ struct hpdma_vchan *hvchan,
+ struct hpdma_vdesc *hvdesc)
+{
+ /* update axsize */
+ hpdma_rmw(hpdma,
+ TOPS_HPDMA_X_CTRL(hvchan->pchan_id),
+ HPDMA_AXSIZE_MASK,
+ FIELD_PREP(HPDMA_AXSIZE_MASK, hvdesc->axsize));
+
+ /* update total num */
+ hpdma_rmw(hpdma,
+ TOPS_HPDMA_X_NUM(hvchan->pchan_id),
+ HPDMA_TOTALNUM_MASK,
+ FIELD_PREP(HPDMA_TOTALNUM_MASK, hvdesc->total_num));
+
+ /* set src addr */
+ hpdma_write(hpdma, TOPS_HPDMA_X_SRC(hvchan->pchan_id), hvdesc->src);
+
+ /* set dst addr */
+ hpdma_write(hpdma, TOPS_HPDMA_X_DST(hvchan->pchan_id), hvdesc->dst);
+}
+
+/*
+ * TODO: in general we should allocate buffers for DMA transmission here,
+ * but HPDMA currently has nothing to allocate per channel.
+ * TODO: we may not need this callback at all.
+ */
+static int mtk_hpdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ return 0;
+}
+
+/* TODO: we may not need this right now */
+static void mtk_hpdma_free_chan_resources(struct dma_chan *chan)
+{
+ /* stop all transmission, we have nothing to free for each channel */
+ dmaengine_terminate_sync(chan);
+}
+
+static void mtk_hpdma_issue_vchan_pending(struct hpdma_dev *hpdma,
+ struct hpdma_vchan *hvchan)
+{
+ struct virt_dma_desc *vdesc;
+
+ /* vchan's lock needs to be held since its list will be modified */
+ lockdep_assert_held(&hvchan->vchan.lock);
+
+ /* if there is a pending transfer in flight, wait until it is done */
+ if (unlikely(hvchan->issued_desc))
+ return;
+
+ /* fetch next desc to process */
+ vdesc = vchan_next_desc(&hvchan->vchan);
+ if (unlikely(!vdesc))
+ return;
+
+ /* start to transfer a pending descriptor */
+ hpdma->ops.tx_pending_desc(hpdma, hvchan, vdesc_to_hpdma_vdesc(vdesc));
+}
+
+static void mtk_hpdma_issue_pending(struct dma_chan *chan)
+{
+ struct hpdma_dev *hpdma = chan_to_hpdma_dev(chan);
+ struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);
+ unsigned long flag;
+
+ spin_lock_irqsave(&hvchan->vchan.lock, flag);
+
+ if (vchan_issue_pending(&hvchan->vchan))
+ mtk_hpdma_issue_vchan_pending(hpdma, hvchan);
+
+ spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
+}
+
+/*
+ * since hpdma cannot report how many chunks are left to transfer,
+ * we can only report whether the current descriptor is completed
+ */
+static enum dma_status mtk_hpdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *tx_state)
+{
+ return dma_cookie_status(chan, cookie, tx_state);
+}
+
+/* optimize the hpdma parameters to get maximum throughput */
+static int mtk_hpdma_config_desc(struct hpdma_vdesc *hvdesc)
+{
+ hvdesc->axsize = 4;
+
+ /*
+ * total transfer length = (1 << axsize) * total_num
+ * the bus beat size (1 << axsize) can be 1, 2, 4, 8 or 16 bytes;
+ * pick the largest beat size that divides the length evenly
+ */
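+ /*
+ * e.g. len = 48 keeps axsize = 4 (16-byte beats, total_num = 3),
+ * while len = 6 falls back to axsize = 1 (2-byte beats, total_num = 3)
+ */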
+ while (hvdesc->axsize >= 0 && hvdesc->len % (0x1 << hvdesc->axsize))
+ hvdesc->axsize--;
+
+ if (hvdesc->axsize < 0)
+ return -EINVAL;
+
+ hvdesc->total_num = hvdesc->len / (0x1 << hvdesc->axsize);
+
+ return 0;
+}
+
+static struct dma_async_tx_descriptor *mtk_hpdma_prep_dma_memcpy(struct dma_chan *chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ size_t len,
+ unsigned long flags)
+{
+ struct hpdma_vdesc *hvdesc;
+ int ret = 0;
+
+ if (!len)
+ return ERR_PTR(-EPERM);
+
+ if (dst > 0xFFFFFFFF || src > 0xFFFFFFFF)
+ return ERR_PTR(-EINVAL);
+
+ hvdesc = kzalloc(sizeof(struct hpdma_vdesc), GFP_NOWAIT);
+ if (!hvdesc)
+ return ERR_PTR(-ENOMEM);
+
+ hvdesc->src = src;
+ hvdesc->dst = dst;
+ hvdesc->len = len;
+
+ ret = mtk_hpdma_config_desc(hvdesc);
+ if (ret) {
+ kfree(hvdesc);
+ return ERR_PTR(ret);
+ }
+
+ return vchan_tx_prep(to_virt_chan(chan), &hvdesc->vdesc, flags);
+}
+
+static void mtk_hpdma_terminate_all_inactive_desc(struct dma_chan *chan)
+{
+ struct virt_dma_chan *vchan = to_virt_chan(chan);
+ unsigned long flag;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&vchan->lock, flag);
+
+ list_splice_tail_init(&vchan->desc_allocated, &head);
+ list_splice_tail_init(&vchan->desc_submitted, &head);
+ list_splice_tail_init(&vchan->desc_issued, &head);
+
+ spin_unlock_irqrestore(&vchan->lock, flag);
+
+ vchan_dma_desc_free_list(vchan, &head);
+}
+
+static int mtk_hpdma_terminate_all(struct dma_chan *chan)
+{
+ struct hpdma_vchan *hvchan = chan_to_hpdma_vchan(chan);
+
+ hvchan->terminating = true;
+
+ /* first terminate all inactive descriptors */
+ mtk_hpdma_terminate_all_inactive_desc(chan);
+
+ if (!hvchan->issued_desc)
+ goto out;
+
+ /* if there is a descriptor in flight, we must wait until it is done */
+ wait_event_interruptible(hvchan->stop_wait, !hvchan->busy);
+
+ vchan_terminate_vdesc(&hvchan->issued_desc->vdesc);
+
+ hvchan->issued_desc = NULL;
+
+ vchan_synchronize(&hvchan->vchan);
+
+out:
+ hvchan->terminating = false;
+
+ return 0;
+}
+
+static void mtk_hpdma_vdesc_free(struct virt_dma_desc *vdesc)
+{
+ kfree(container_of(vdesc, struct hpdma_vdesc, vdesc));
+}
+
+static void mtk_hpdma_tx_work(struct work_struct *work)
+{
+ struct hpdma_vchan *hvchan = container_of(work, struct hpdma_vchan, tx_work);
+ struct hpdma_dev *hpdma = chan_to_hpdma_dev(&hvchan->vchan.chan);
+ unsigned long flag;
+
+ if (unlikely(!vchan_next_desc(&hvchan->vchan)))
+ return;
+
+ spin_lock_irqsave(&hvchan->vchan.lock, flag);
+
+ mtk_hpdma_issue_vchan_pending(hpdma, hvchan);
+
+ spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
+}
+
+static int mtk_hpdma_provider_init(struct platform_device *pdev,
+ struct hpdma_dev *hpdma)
+{
+ struct dma_device *ddev = &hpdma->ddev;
+ int ret = 0;
+
+ dma_cap_set(DMA_MEMCPY, ddev->cap_mask);
+ dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
+
+ ddev->dev = &pdev->dev;
+ ddev->directions = BIT(DMA_MEM_TO_MEM);
+ ddev->copy_align = MTK_HPDMA_ALIGN_SIZE;
+ ddev->src_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
+ ddev->dst_addr_widths = MTK_HPDMA_DMA_BUSWIDTHS;
+ ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+
+ ddev->device_alloc_chan_resources = mtk_hpdma_alloc_chan_resources;
+ ddev->device_free_chan_resources = mtk_hpdma_free_chan_resources;
+ ddev->device_issue_pending = mtk_hpdma_issue_pending;
+ ddev->device_tx_status = mtk_hpdma_tx_status;
+ ddev->device_prep_dma_memcpy = mtk_hpdma_prep_dma_memcpy;
+ ddev->device_terminate_all = mtk_hpdma_terminate_all;
+
+ INIT_LIST_HEAD(&ddev->channels);
+
+ ret = hpdma->ops.vchan_init(hpdma, ddev);
+ if (ret)
+ return ret;
+
+ ret = dma_async_device_register(ddev);
+ if (ret) {
+ dev_err(&pdev->dev, "register async dma device failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ hpdma->ops.of_dma_xlate,
+ ddev);
+ if (ret) {
+ dev_err(&pdev->dev, "register dma controller failed: %d\n", ret);
+ goto unregister_async_dev;
+ }
+
+ return ret;
+
+unregister_async_dev:
+ dma_async_device_unregister(ddev);
+
+ return ret;
+}
+
+static int mtk_hpdma_probe(struct platform_device *pdev)
+{
+ const struct hpdma_init_data *init_data;
+ struct hpdma_dev *hpdma;
+ struct resource *res;
+ int ret = 0;
+
+ init_data = of_device_get_match_data(&pdev->dev);
+ if (!init_data) {
+ dev_err(&pdev->dev, "hpdma init data not exist\n");
+ return -ENODEV;
+ }
+
+ hpdma = init_data->init(pdev, init_data);
+ if (IS_ERR(hpdma)) {
+ dev_err(&pdev->dev, "hpdma init failed: %ld\n", PTR_ERR(hpdma));
+ return PTR_ERR(hpdma);
+ }
+
+ memcpy(&hpdma->ops, &init_data->ops, sizeof(struct hpdma_ops));
+ hpdma->hwspinlock_grp = init_data->hwspinlock_grp;
+ hpdma->trigger_start_slot = init_data->trigger_start_slot;
+ hpdma->ch_base_slot = init_data->ch_base_slot;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ if (!res)
+ return -ENXIO;
+
+ hpdma->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!hpdma->base)
+ return -ENOMEM;
+
+ /*
+ * since hpdma does not signal the APMCU directly,
+ * we rely on the TOPS mailbox to be notified when a transfer is done
+ */
+ ret = hpdma->ops.mbox_init(pdev, hpdma);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&hpdma->lock);
+
+ ret = mtk_hpdma_provider_init(pdev, hpdma);
+ if (ret)
+ goto unregister_mbox;
+
+ platform_set_drvdata(pdev, hpdma);
+
+ dev_info(hpdma->ddev.dev, "hpdma init done\n");
+
+ return ret;
+
+unregister_mbox:
+ hpdma->ops.mbox_deinit(pdev, hpdma);
+
+ return ret;
+}
+
+static int mtk_hpdma_remove(struct platform_device *pdev)
+{
+ struct hpdma_dev *hpdma = platform_get_drvdata(pdev);
+
+ if (!hpdma)
+ return 0;
+
+ hpdma->ops.vchan_deinit(hpdma);
+
+ hpdma->ops.mbox_deinit(pdev, hpdma);
+
+ dma_async_device_unregister(&hpdma->ddev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+
+ return 0;
+}
+
+static struct dma_chan *mtk_clust_hpdma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_device *ddev = ofdma->of_dma_data;
+ struct hpdma_dev *hpdma;
+ u32 id;
+
+ if (!ddev || dma_spec->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ hpdma = container_of(ddev, struct hpdma_dev, ddev);
+ id = dma_spec->args[0] * CORE_OFFLOAD_NUM + dma_spec->args[1];
+
+ return dma_get_slave_channel(&hpdma->hvchans[id].vchan.chan);
+}
+
+static struct hpdma_dev *mtk_top_hpdma_init(struct platform_device *pdev,
+ const struct hpdma_init_data *data)
+{
+ struct top_hpdma_dev *top_hpdma = NULL;
+
+ if (!data)
+ return ERR_PTR(-EINVAL);
+
+ top_hpdma = devm_kzalloc(&pdev->dev, sizeof(*top_hpdma), GFP_KERNEL);
+ if (!top_hpdma)
+ return ERR_PTR(-ENOMEM);
+
+ top_hpdma->mdev.core = CORE_MGMT;
+ top_hpdma->mdev.cmd_id = MBOX_CM2AP_CMD_HPDMA;
+ top_hpdma->mdev.mbox_handler = data->mbox_handler;
+ top_hpdma->mdev.priv = &top_hpdma->hpdma;
+
+ return &top_hpdma->hpdma;
+}
+
+static void mtk_top_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
+{
+ struct hpdma_vchan *hvchan;
+ u32 i;
+
+ for (i = 0; i < __TOP_HPDMA_REQ; i++) {
+ hvchan = &hpdma->hvchans[i];
+ __mtk_hpdma_vchan_deinit(&hvchan->vchan);
+ }
+}
+
+static int mtk_top_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
+{
+ struct hpdma_vchan *hvchan;
+ u32 i;
+
+ hpdma->hvchans = devm_kcalloc(ddev->dev, __TOP_HPDMA_REQ,
+ sizeof(struct hpdma_vchan),
+ GFP_KERNEL);
+ if (!hpdma->hvchans)
+ return -ENOMEM;
+
+ for (i = 0; i < __TOP_HPDMA_REQ; i++) {
+ hvchan = &hpdma->hvchans[i];
+
+ init_waitqueue_head(&hvchan->stop_wait);
+ INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);
+
+ hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
+ /*
+ * TODO: maybe initialize the vchan ourselves with a customized tasklet?
+ * if a customized tasklet transmits the remaining chunks of a channel,
+ * we must be careful with hpdma->lock since it would then be acquired
+ * in softirq context
+ */
+ vchan_init(&hvchan->vchan, ddev);
+ }
+
+ return 0;
+}
+
+static void mtk_top_hpdma_unregister_mbox(struct platform_device *pdev,
+ struct hpdma_dev *hpdma)
+{
+ struct top_hpdma_dev *top_hpdma;
+
+ top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);
+
+ unregister_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
+}
+
+static int mtk_top_hpdma_register_mbox(struct platform_device *pdev,
+ struct hpdma_dev *hpdma)
+{
+ struct top_hpdma_dev *top_hpdma;
+ int ret = 0;
+
+ top_hpdma = container_of(hpdma, struct top_hpdma_dev, hpdma);
+
+ ret = register_mbox_dev(MBOX_RECV, &top_hpdma->mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "register mailbox device failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void mtk_top_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
+ struct hpdma_vchan *hvchan,
+ struct hpdma_vdesc *hvdesc)
+{
+ u32 slot = hpdma->ch_base_slot;
+ enum hwspinlock_group grp = hpdma->hwspinlock_grp;
+
+ hvchan->pchan_id = 0;
+
+ if (mtk_hpdma_prepare_transfer(hpdma))
+ return;
+
+ /* occupy a hpdma physical channel: round-robin until a hwspinlock slot is taken */
+ while (!mtk_tops_hwspin_try_lock(grp, slot)) {
+ if (unlikely(hvchan->terminating)) {
+ spin_unlock(&hpdma->lock);
+ return;
+ }
+
+ hvchan->pchan_id = (hvchan->pchan_id + 1) % HPDMA_CHAN_NUM;
+ if (++slot - hpdma->ch_base_slot == HPDMA_CHAN_NUM)
+ slot = hpdma->ch_base_slot;
+ }
+
+ mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);
+
+ if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
+ return;
+
+ /* start transfer failed */
+ mtk_tops_hwspin_unlock(grp, slot);
+
+ mtk_hpdma_unprepare_transfer(hpdma);
+
+ wake_up_interruptible(&hvchan->stop_wait);
+}
+
+static struct hpdma_dev *mtk_clust_hpdma_init(struct platform_device *pdev,
+ const struct hpdma_init_data *data)
+{
+ struct clust_hpdma_dev *clust_hpdma = NULL;
+ u32 i;
+
+ if (!data)
+ return ERR_PTR(-EINVAL);
+
+ clust_hpdma = devm_kzalloc(&pdev->dev, sizeof(*clust_hpdma), GFP_KERNEL);
+ if (!clust_hpdma)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ clust_hpdma->mdev[i].core = CORE_OFFLOAD_0 + i;
+ clust_hpdma->mdev[i].cmd_id = MBOX_CX2AP_CMD_HPDMA;
+ clust_hpdma->mdev[i].mbox_handler = data->mbox_handler;
+ clust_hpdma->mdev[i].priv = &clust_hpdma->hpdma;
+ }
+
+ return &clust_hpdma->hpdma;
+}
+
+static void mtk_clust_hpdma_vchan_deinit(struct hpdma_dev *hpdma)
+{
+ struct hpdma_vchan *hvchan;
+ u32 i, j;
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
+ hvchan = &hpdma->hvchans[i * __CLUST_HPDMA_REQ + j];
+ __mtk_hpdma_vchan_deinit(&hvchan->vchan);
+ }
+ }
+}
+
+static int mtk_clust_hpdma_vchan_init(struct hpdma_dev *hpdma, struct dma_device *ddev)
+{
+ struct hpdma_vchan *hvchan;
+ u32 i, j;
+
+ hpdma->hvchans = devm_kcalloc(ddev->dev, __CLUST_HPDMA_REQ * CORE_OFFLOAD_NUM,
+ sizeof(struct hpdma_vchan),
+ GFP_KERNEL);
+ if (!hpdma->hvchans)
+ return -ENOMEM;
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ for (j = 0; j < __CLUST_HPDMA_REQ; j++) {
+ hvchan = &hpdma->hvchans[i * __CLUST_HPDMA_REQ + j];
+
+ hvchan->pchan_id = i;
+ init_waitqueue_head(&hvchan->stop_wait);
+ INIT_WORK(&hvchan->tx_work, mtk_hpdma_tx_work);
+
+ hvchan->vchan.desc_free = mtk_hpdma_vdesc_free;
+ /*
+ * TODO: maybe initialize the vchan ourselves with a customized tasklet?
+ * if a customized tasklet transmits the remaining chunks of a channel,
+ * we must be careful with hpdma->lock since it would then be acquired
+ * in softirq context
+ */
+ vchan_init(&hvchan->vchan, ddev);
+ }
+ }
+
+ return 0;
+}
+
+static void mtk_clust_hpdma_unregister_mbox(struct platform_device *pdev,
+ struct hpdma_dev *hpdma)
+{
+ struct clust_hpdma_dev *clust_hpdma;
+ u32 i;
+
+ clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++)
+ unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
+}
+
+static int mtk_clust_hpdma_register_mbox(struct platform_device *pdev,
+ struct hpdma_dev *hpdma)
+{
+ struct clust_hpdma_dev *clust_hpdma;
+ int ret = 0;
+ int i;
+
+ clust_hpdma = container_of(hpdma, struct clust_hpdma_dev, hpdma);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ ret = register_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
+ if (ret) {
+ dev_err(&pdev->dev, "register mbox%d failed: %d\n", i, ret);
+ goto unregister_mbox;
+ }
+ }
+
+ return ret;
+
+unregister_mbox:
+ for (--i; i >= 0; i--)
+ unregister_mbox_dev(MBOX_RECV, &clust_hpdma->mdev[i]);
+
+ return ret;
+}
+
+static void mtk_clust_hpdma_tx_pending_desc(struct hpdma_dev *hpdma,
+ struct hpdma_vchan *hvchan,
+ struct hpdma_vdesc *hvdesc)
+{
+ u32 slot = hpdma->ch_base_slot + hvchan->pchan_id;
+ enum hwspinlock_group grp = hpdma->hwspinlock_grp;
+
+ if (mtk_hpdma_prepare_transfer(hpdma))
+ return;
+
+ /* occupy hpdma physical channel */
+ mtk_tops_hwspin_lock(grp, slot);
+
+ mtk_hpdma_config_pchan(hpdma, hvchan, hvdesc);
+
+ if (!mtk_hpdma_start_transfer(hpdma, hvchan, hvdesc))
+ return;
+
+ /* start transfer failed */
+ mtk_tops_hwspin_unlock(grp, slot);
+
+ mtk_hpdma_unprepare_transfer(hpdma);
+
+ wake_up_interruptible(&hvchan->stop_wait);
+}
+
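+/*
+ * called from the TOPS mailbox when the firmware reports a finished HPDMA
+ * transfer: clear the issued channel, release the per-channel hwspinlock and
+ * the trigger-start slot, drop hpdma->lock taken in prepare_transfer, then
+ * complete the vdesc and kick the tx work if more descriptors are pending
+ */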
+static enum mbox_msg_cnt mtk_hpdma_ap_recv_mbox_msg(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg)
+{
+ struct hpdma_dev *hpdma = mdev->priv;
+ struct hpdma_vchan *hvchan;
+ struct hpdma_vdesc *hvdesc;
+ enum hwspinlock_group grp;
+ unsigned long flag;
+ u32 slot;
+
+ if (!hpdma)
+ return MBOX_NO_RET_MSG;
+
+ hvchan = hpdma->issued_chan;
+ if (!hvchan) {
+ dev_err(hpdma->ddev.dev, "unexpected hpdma mailbox recv\n");
+ return MBOX_NO_RET_MSG;
+ }
+
+ grp = hpdma->hwspinlock_grp;
+
+ hvdesc = hvchan->issued_desc;
+
+ /* clear issued channel before releasing hwspinlock */
+ hpdma->issued_chan = NULL;
+
+ hvchan->busy = false;
+ hvchan->issued_desc = NULL;
+
+ /* release hwspinlock */
+ slot = hvchan->pchan_id + hpdma->ch_base_slot;
+
+ mtk_tops_hwspin_unlock(grp, hpdma->trigger_start_slot);
+
+ mtk_tops_hwspin_unlock(grp, slot);
+
+ /* release the lock so other APMCU processes can contend for the HW spinlock */
+ spin_unlock(&hpdma->lock);
+
+ if (unlikely(hvchan->terminating)) {
+ wake_up_interruptible(&hvchan->stop_wait);
+ return MBOX_NO_RET_MSG;
+ }
+
+ /*
+ * complete vdesc and schedule tx work again
+ * if there is more vdesc left in the channel
+ */
+ spin_lock_irqsave(&hvchan->vchan.lock, flag);
+
+ vchan_cookie_complete(&hvdesc->vdesc);
+
+ if (vchan_next_desc(&hvchan->vchan))
+ schedule_work(&hvchan->tx_work);
+
+ spin_unlock_irqrestore(&hvchan->vchan.lock, flag);
+
+ return MBOX_NO_RET_MSG;
+}
+
+static struct hpdma_init_data top_hpdma_init_data = {
+ .ops = {
+ .vchan_init = mtk_top_hpdma_vchan_init,
+ .vchan_deinit = mtk_top_hpdma_vchan_deinit,
+ .mbox_init = mtk_top_hpdma_register_mbox,
+ .mbox_deinit = mtk_top_hpdma_unregister_mbox,
+ .tx_pending_desc = mtk_top_hpdma_tx_pending_desc,
+ .of_dma_xlate = of_dma_xlate_by_chan_id,
+ },
+ .init = mtk_top_hpdma_init,
+ .mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
+ .hwspinlock_grp = HWSPINLOCK_GROUP_TOP,
+ .trigger_start_slot = HWSPINLOCK_TOP_SLOT_HPDMA_LOCK,
+ .ch_base_slot = HWSPINLOCK_TOP_SLOT_HPDMA_PCH0,
+};
+
+static struct hpdma_init_data clust_hpdma_init_data = {
+ .ops = {
+ .vchan_init = mtk_clust_hpdma_vchan_init,
+ .vchan_deinit = mtk_clust_hpdma_vchan_deinit,
+ .mbox_init = mtk_clust_hpdma_register_mbox,
+ .mbox_deinit = mtk_clust_hpdma_unregister_mbox,
+ .tx_pending_desc = mtk_clust_hpdma_tx_pending_desc,
+ .of_dma_xlate = mtk_clust_hpdma_of_xlate,
+ },
+ .init = mtk_clust_hpdma_init,
+ .mbox_handler = mtk_hpdma_ap_recv_mbox_msg,
+ .hwspinlock_grp = HWSPINLOCK_GROUP_CLUST,
+ .trigger_start_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_LOCK,
+ .ch_base_slot = HWSPINLOCK_CLUST_SLOT_HPDMA_PCH0,
+};
+
+static const struct of_device_id mtk_hpdma_match[] = {
+ { .compatible = "mediatek,hpdma-top", .data = &top_hpdma_init_data, },
+ { .compatible = "mediatek,hpdma-sub", .data = &clust_hpdma_init_data, },
+ { },
+};
+
+static struct platform_driver mtk_hpdma_driver = {
+ .probe = mtk_hpdma_probe,
+ .remove = mtk_hpdma_remove,
+ .driver = {
+ .name = "mediatek,hpdma",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_hpdma_match,
+ },
+};
+
+int __init mtk_tops_hpdma_init(void)
+{
+ return platform_driver_register(&mtk_hpdma_driver);
+}
+
+void __exit mtk_tops_hpdma_exit(void)
+{
+ platform_driver_unregister(&mtk_hpdma_driver);
+}
diff --git a/package-21.02/kernel/tops/src/hwspinlock.c b/package-21.02/kernel/tops/src/hwspinlock.c
new file mode 100644
index 0000000..1723cf4
--- /dev/null
+++ b/package-21.02/kernel/tops/src/hwspinlock.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include "hwspinlock.h"
+
+#define SEMA_ID (BIT(CORE_AP))
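+
+/*
+ * each hwspinlock slot is a register shared by the AP and the TOPS cores:
+ * a requester writes its own ID (SEMA_ID for the AP) to the slot and owns
+ * the lock only if that ID is read back; writing the ID again while owning
+ * the lock releases it
+ */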
+
+static void __iomem *base;
+
+static inline u32 hwspinlock_read(u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void hwspinlock_write(u32 reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static inline u32 __mtk_tops_hwspinlock_get_reg(enum hwspinlock_group grp, u32 slot)
+{
+ if (unlikely(slot >= HWSPINLOCK_SLOT_MAX || grp >= __HWSPINLOCK_GROUP_MAX))
+ return 0;
+
+ if (grp == HWSPINLOCK_GROUP_TOP)
+ return HWSPINLOCK_TOP_BASE + slot * 4;
+ else
+ return HWSPINLOCK_CLUST_BASE + slot * 4;
+}
+
+/*
+ * try to take a TOPS HW spinlock
+ * returns 1 on success, 0 on failure
+ */
+int mtk_tops_hwspin_try_lock(enum hwspinlock_group grp, u32 slot)
+{
+ u32 reg = __mtk_tops_hwspinlock_get_reg(grp, slot);
+
+ WARN_ON(!reg);
+
+ hwspinlock_write(reg, SEMA_ID);
+
+ return hwspinlock_read(reg) == SEMA_ID ? 1 : 0;
+}
+
+void mtk_tops_hwspin_lock(enum hwspinlock_group grp, u32 slot)
+{
+ u32 reg = __mtk_tops_hwspinlock_get_reg(grp, slot);
+
+ WARN_ON(!reg);
+
+ do {
+ hwspinlock_write(reg, SEMA_ID);
+ } while (hwspinlock_read(reg) != SEMA_ID);
+}
+
+void mtk_tops_hwspin_unlock(enum hwspinlock_group grp, u32 slot)
+{
+ u32 reg = __mtk_tops_hwspinlock_get_reg(grp, slot);
+
+ WARN_ON(!reg);
+
+ if (hwspinlock_read(reg) == SEMA_ID)
+ hwspinlock_write(reg, SEMA_ID);
+}
+
+int mtk_tops_hwspinlock_init(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
+ if (!res)
+ return -ENXIO;
+
+ base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!base)
+ return -ENOMEM;
+
+ return 0;
+}
diff --git a/package-21.02/kernel/tops/src/inc/ctrl.h b/package-21.02/kernel/tops/src/inc/ctrl.h
new file mode 100644
index 0000000..fb74e40
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/ctrl.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_CTRL_H_
+#define _TOPS_CTRL_H_
+
+#include <linux/platform_device.h>
+
+int mtk_tops_ctrl_init(struct platform_device *pdev);
+void mtk_tops_ctrl_deinit(struct platform_device *pdev);
+#endif /* _TOPS_CTRL_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/firmware.h b/package-21.02/kernel/tops/src/inc/firmware.h
new file mode 100644
index 0000000..663ea95
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/firmware.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_FW_H_
+#define _TOPS_FW_H_
+
+#include <linux/platform_device.h>
+#include <linux/time.h>
+
+enum tops_role_type {
+ TOPS_ROLE_TYPE_MGMT,
+ TOPS_ROLE_TYPE_CLUSTER,
+
+ __TOPS_ROLE_TYPE_MAX,
+};
+
+u64 mtk_tops_fw_get_git_commit_id(enum tops_role_type rtype);
+void mtk_tops_fw_get_built_date(enum tops_role_type rtype, struct tm *tm);
+u32 mtk_tops_fw_attr_get_num(enum tops_role_type rtype);
+const char *mtk_tops_fw_attr_get_property(enum tops_role_type rtype, u32 idx);
+const char *mtk_tops_fw_attr_get_value(enum tops_role_type rtype,
+ const char *property);
+
+int mtk_tops_fw_bring_up_default_cores(void);
+int mtk_tops_fw_bring_up_core(const char *fw_path);
+void mtk_tops_fw_clean_up(void);
+int mtk_tops_fw_init(struct platform_device *pdev);
+#endif /* _TOPS_FW_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/hpdma.h b/package-21.02/kernel/tops/src/inc/hpdma.h
new file mode 100644
index 0000000..4f3d08c
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/hpdma.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_HPDMA_H_
+#define _TOPS_HPDMA_H_
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+/* AXI DMA */
+#define TOPS_HPDMA_X_SRC(x) (0x100 * (x) + 0x0000)
+#define TOPS_HPDMA_X_DST(x) (0x100 * (x) + 0x0004)
+#define TOPS_HPDMA_X_NUM(x) (0x100 * (x) + 0x0008)
+#define TOPS_HPDMA_X_CTRL(x) (0x100 * (x) + 0x000C)
+#define TOPS_HPDMA_X_CLRIRQ(x) (0x100 * (x) + 0x0010)
+#define TOPS_HPDMA_X_START(x) (0x100 * (x) + 0x0014)
+#define TOPS_HPDMA_X_RRESP(x) (0x100 * (x) + 0x0018)
+#define TOPS_HPDMA_X_BRESP(x) (0x100 * (x) + 0x001C)
+#define TOPS_HPDMA_X_HW(x) (0x100 * (x) + 0x0020)
+#define TOPS_HPDMA_X_ERR(x) (0x100 * (x) + 0x0024)
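+
+/* each physical channel x occupies its own 0x100-byte register window */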
+
+
+/* AXI DMA NUM */
+#define HPDMA_TOTALNUM_SHIFT (0)
+#define HPDMA_TOTALNUM_MASK GENMASK(15, 0)
+
+/* AXI DMA CTRL */
+#define HPDMA_AXLEN_SHIFT (0)
+#define HPDMA_AXLEN_MASK GENMASK(3, 0)
+#define HPDMA_AXSIZE_SHIFT (8)
+#define HPDMA_AXSIZE_MASK GENMASK(10, 8)
+#define HPDMA_IRQEN BIT(16)
+#define HPDMA_AWMODE_EN BIT(24)
+#define HPDMA_OUTSTD_SHIFT (25)
+#define HPDMA_OUTSTD_MASK GENMASK(29, 25)
+
+/* AXI DMA START */
+#define HPDMA_STATUS_SHIFT (0)
+#define HPDMA_STATUS_MASK GENMASK(0, 0)
+#define HPDMA_SKIP_RACE_SHIFT (7)
+#define HPDMA_SKIP_RACE_MASK GENMASK(7, 7)
+#define HPDMA_START BIT(15)
+
+/* AXI DMA RRESP */
+#define HPDMA_LOG_SHIFT (0)
+#define HPDMA_LOG_MASK GENMASK(15, 0)
+#define HPDMA_RESP_SHIFT (16)
+#define HPDMA_RESP_MASK GENMASK(17, 16)
+
+/* AXI DMA HW */
+#define HPDMA_FIFO_DEPTH_SHIFT (0)
+#define HPDMA_FIFO_DEPTH_MASK GENMASK(7, 0)
+#define HPDMA_MAX_AXSIZE_SHIFT (8)
+#define HPDMA_MAX_AXSIZE_MASK GENMASK(15, 8)
+
+enum hpdma_err {
+ AWMODE_ERR = 0x1 << 0,
+ AXSIZE_ERR = 0x1 << 1,
+ ARADDR_ERR = 0x1 << 2,
+ AWADDR_ERR = 0x1 << 3,
+ RACE_ERR = 0x1 << 4,
+};
+
+enum top_hpdma_req {
+ TOP_HPDMA_TNL_SYNC_REQ,
+
+ __TOP_HPDMA_REQ,
+};
+
+enum clust_hpdma_req {
+ CLUST_HPDMA_DUMMY_REQ,
+
+ __CLUST_HPDMA_REQ,
+};
+
+int mtk_tops_hpdma_init(void);
+void mtk_tops_hpdma_exit(void);
+#endif /* _TOPS_HPDMA_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/hwspinlock.h b/package-21.02/kernel/tops/src/inc/hwspinlock.h
new file mode 100644
index 0000000..ee9e343
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/hwspinlock.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_HWSPIN_LOCK_H_
+#define _TOPS_HWSPIN_LOCK_H_
+
+#include <linux/types.h>
+
+#include "mbox.h"
+
+#define HWSPINLOCK_SLOT_MAX 16
+
+#define HWSPINLOCK_TOP_BASE 0x10100
+#define HWSPINLOCK_CLUST_BASE 0x880000
+
+enum hwspinlock_group {
+ HWSPINLOCK_GROUP_TOP,
+ HWSPINLOCK_GROUP_CLUST,
+
+ __HWSPINLOCK_GROUP_MAX,
+};
+
+enum hwspinlock_top_slot {
+ HWSPINLOCK_TOP_SLOT_HPDMA_LOCK,
+ HWSPINLOCK_TOP_SLOT_HPDMA_PCH0,
+ HWSPINLOCK_TOP_SLOT_HPDMA_PCH1,
+ HWSPINLOCK_TOP_SLOT_HPDMA_PCH2,
+ HWSPINLOCK_TOP_SLOT_HPDMA_PCH3,
+ HWSPINLOCK_TOP_SLOT_5,
+ HWSPINLOCK_TOP_SLOT_6,
+ HWSPINLOCK_TOP_SLOT_7,
+ HWSPINLOCK_TOP_SLOT_8,
+ HWSPINLOCK_TOP_SLOT_9,
+ HWSPINLOCK_TOP_SLOT_10,
+ HWSPINLOCK_TOP_SLOT_11,
+ HWSPINLOCK_TOP_SLOT_12,
+ HWSPINLOCK_TOP_SLOT_13,
+ HWSPINLOCK_TOP_SLOT_14,
+ HWSPINLOCK_TOP_SLOT_15,
+
+ __HWSPINLOCK_TOP_MAX = HWSPINLOCK_SLOT_MAX,
+};
+
+enum hwspinlock_clust_slot {
+ HWSPINLOCK_CLUST_SLOT_PRINTF,
+ HWSPINLOCK_CLUST_SLOT_HPDMA_LOCK,
+ HWSPINLOCK_CLUST_SLOT_HPDMA_PCH0,
+ HWSPINLOCK_CLUST_SLOT_HPDMA_PCH1,
+ HWSPINLOCK_CLUST_SLOT_HPDMA_PCH2,
+ HWSPINLOCK_CLUST_SLOT_HPDMA_PCH3,
+ HWSPINLOCK_CLUST_SLOT_6,
+ HWSPINLOCK_CLUST_SLOT_7,
+ HWSPINLOCK_CLUST_SLOT_8,
+ HWSPINLOCK_CLUST_SLOT_9,
+ HWSPINLOCK_CLUST_SLOT_10,
+ HWSPINLOCK_CLUST_SLOT_11,
+ HWSPINLOCK_CLUST_SLOT_12,
+ HWSPINLOCK_CLUST_SLOT_13,
+ HWSPINLOCK_CLUST_SLOT_14,
+ HWSPINLOCK_CLUST_SLOT_15,
+
+ __HWSPINLOCK_CLUST_MAX = HWSPINLOCK_SLOT_MAX,
+};
+
+int mtk_tops_hwspin_try_lock(enum hwspinlock_group grp, u32 slot);
+void mtk_tops_hwspin_lock(enum hwspinlock_group grp, u32 slot);
+void mtk_tops_hwspin_unlock(enum hwspinlock_group grp, u32 slot);
+int mtk_tops_hwspinlock_init(struct platform_device *pdev);
+#endif /* _TOPS_HWSPIN_LOCK_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/internal.h b/package-21.02/kernel/tops/src/inc/internal.h
new file mode 100644
index 0000000..81e1ca1
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/internal.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_INTERNAL_H_
+#define _TOPS_INTERNAL_H_
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/io.h>
+
+extern struct device *tops_dev;
+
+#define TOPS_DBG(fmt, ...) dev_dbg(tops_dev, fmt, ##__VA_ARGS__)
+#define TOPS_INFO(fmt, ...) dev_info(tops_dev, fmt, ##__VA_ARGS__)
+#define TOPS_NOTICE(fmt, ...) dev_notice(tops_dev, fmt, ##__VA_ARGS__)
+#define TOPS_WARN(fmt, ...) dev_warn(tops_dev, fmt, ##__VA_ARGS__)
+#define TOPS_ERR(fmt, ...) dev_err(tops_dev, fmt, ##__VA_ARGS__)
+
+/* tops 32 bits read/write */
+#define setbits(addr, set) writel(readl(addr) | (set), (addr))
+#define clrbits(addr, clr) writel(readl(addr) & ~(clr), (addr))
+#define clrsetbits(addr, clr, set) writel((readl(addr) & ~(clr)) | (set), (addr))
+#endif /* _TOPS_INTERNAL_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/mbox.h b/package-21.02/kernel/tops/src/inc/mbox.h
new file mode 100644
index 0000000..002f2c6
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/mbox.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_MBOX_H_
+#define _TOPS_MBOX_H_
+
+#include <linux/list.h>
+
+#include "mbox_id.h"
+#include "tops.h"
+
+/* mbox device macros */
+#define MBOX_DEV(core_id, cmd) \
+ .core = core_id, \
+ .cmd_id = cmd,
+
+#define MBOX_SEND_DEV(core_id, cmd) \
+ { \
+ MBOX_DEV(core_id, cmd) \
+ }
+
+#define MBOX_SEND_MGMT_DEV(cmd) \
+ MBOX_SEND_DEV(CORE_MGMT, MBOX_AP2CM_CMD_ ## cmd)
+
+#define MBOX_SEND_OFFLOAD_DEV(core_id, cmd) \
+ MBOX_SEND_DEV(CORE_OFFLOAD_ ## core_id, MBOX_AP2CX_CMD_ ## cmd)
+
+#define MBOX_RECV_DEV(core_id, cmd, handler) \
+ { \
+ MBOX_DEV(core_id, cmd) \
+ .mbox_handler = handler, \
+ }
+
+#define MBOX_RECV_MGMT_DEV(cmd, handler) \
+ MBOX_RECV_DEV(CORE_MGMT, MBOX_CM2AP_CMD_ ## cmd, handler)
+
+#define MBOX_RECV_OFFLOAD_DEV(core_id, cmd, handler) \
+ MBOX_RECV_DEV(CORE_OFFLOAD_ ## core_id, MBOX_CX2AP_CMD_ ## cmd, handler)
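+
+/*
+ * e.g. MBOX_RECV_MGMT_DEV(HPDMA, handler) expands to
+ * { .core = CORE_MGMT, .cmd_id = MBOX_CM2AP_CMD_HPDMA, .mbox_handler = handler, }
+ */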
+
+/* Base Address */
+#define MBOX_TOP_BASE (0x010000)
+#define MBOX_CLUST0_BASE (0x510000)
+
+/* TOP Mailbox */
+#define TOPS_TOP_CM_SLOT (MBOX_TOP_BASE + 0x000)
+#define TOPS_TOP_AP_SLOT (MBOX_TOP_BASE + 0x004)
+
+#define TOPS_TOP_AP_TO_CM_CMD_SET (MBOX_TOP_BASE + 0x200)
+#define TOPS_TOP_AP_TO_CM_CMD_CLR (MBOX_TOP_BASE + 0x204)
+#define TOPS_TOP_CM_TO_AP_CMD_SET (MBOX_TOP_BASE + 0x21C)
+#define TOPS_TOP_CM_TO_AP_CMD_CLR (MBOX_TOP_BASE + 0x220)
+
+#define TOPS_TOP_AP_TO_CM_MSG_N(n) (MBOX_TOP_BASE + 0x208 + 0x4 * (n))
+#define TOPS_TOP_CM_TO_AP_MSG_N(n) (MBOX_TOP_BASE + 0x224 + 0x4 * (n))
+
+/* CLUST Mailbox */
+#define TOPS_CLUST0_CX_SLOT(x) (MBOX_CLUST0_BASE + (0x4 * (x)))
+#define TOPS_CLUST0_CM_SLOT (MBOX_CLUST0_BASE + 0x10)
+#define TOPS_CLUST0_AP_SLOT (MBOX_CLUST0_BASE + 0x14)
+
+#define TOPS_CLUST0_CX_TO_CY_CMD_SET(x, y) \
+ (MBOX_CLUST0_BASE + 0x100 + ((x) * 0x200) + ((y) * 0x40))
+#define TOPS_CLUST0_CX_TO_CY_CMD_CLR(x, y) \
+ (MBOX_CLUST0_BASE + 0x104 + ((x) * 0x200) + ((y) * 0x40))
+#define TOPS_CLUST0_CX_TO_CM_CMD_SET(x) \
+ (MBOX_CLUST0_BASE + 0x200 + ((x) * 0x200))
+#define TOPS_CLUST0_CX_TO_CM_CMD_CLR(x) \
+ (MBOX_CLUST0_BASE + 0x204 + ((x) * 0x200))
+#define TOPS_CLUST0_CX_TO_AP_CMD_SET(x) \
+ (MBOX_CLUST0_BASE + 0x240 + ((x) * 0x200))
+#define TOPS_CLUST0_CX_TO_AP_CMD_CLR(x) \
+ (MBOX_CLUST0_BASE + 0x244 + ((x) * 0x200))
+#define TOPS_CLUST0_CM_TO_CX_CMD_SET(x) \
+ (MBOX_CLUST0_BASE + 0x900 + ((x) * 0x40))
+#define TOPS_CLUST0_CM_TO_CX_CMD_CLR(x) \
+ (MBOX_CLUST0_BASE + 0x904 + ((x) * 0x40))
+#define TOPS_CLUST0_AP_TO_CX_CMD_SET(x) \
+ (MBOX_CLUST0_BASE + 0xB00 + ((x) * 0x40))
+#define TOPS_CLUST0_AP_TO_CX_CMD_CLR(x) \
+ (MBOX_CLUST0_BASE + 0xB04 + ((x) * 0x40))
+
+#define TOPS_CLUST0_CX_TO_CY_MSG_N(x, y, n) \
+ (MBOX_CLUST0_BASE + 0x108 + ((n) * 0x4) + ((x) * 0x200) + ((y) * 0x40))
+#define TOPS_CLUST0_CX_TO_CM_MSG_N(x, n) \
+ (MBOX_CLUST0_BASE + 0x208 + ((n) * 0x4) + ((x) * 0x200))
+#define TOPS_CLUST0_CX_TO_AP_MSG_N(x, n) \
+ (MBOX_CLUST0_BASE + 0x248 + ((n) * 0x4) + ((x) * 0x200))
+#define TOPS_CLUST0_CM_TO_CX_MSG_N(x, n) \
+ (MBOX_CLUST0_BASE + 0x908 + ((n) * 0x4) + ((x) * 0x40))
+#define TOPS_CLUST0_AP_TO_CX_MSG_N(x, n) \
+ (MBOX_CLUST0_BASE + 0xB08 + ((n) * 0x4) + ((x) * 0x40))
+
+#define MBOX_TOP_MBOX_FROM_C0 (0x1)
+#define MBOX_TOP_MBOX_FROM_C1 (0x2)
+#define MBOX_TOP_MBOX_FROM_C2 (0x4)
+#define MBOX_TOP_MBOX_FROM_C3 (0x8)
+#define MBOX_TOP_MBOX_FROM_AP (0x10)
+#define MBOX_TOP_MBOX_FROM_CM (0x20) /* TODO: need DE update */
+
+#define MBOX_CLUST0_MBOX_FROM_C0 (0x1)
+#define MBOX_CLUST0_MBOX_FROM_C1 (0x2)
+#define MBOX_CLUST0_MBOX_FROM_C2 (0x4)
+#define MBOX_CLUST0_MBOX_FROM_C3 (0x8)
+#define MBOX_CLUST0_MBOX_FROM_CM (0x10)
+#define MBOX_CLUST0_MBOX_FROM_AP (0x20)
+
+struct mailbox_msg;
+struct mailbox_dev;
+enum mbox_msg_cnt;
+
+typedef void (*mbox_ret_func_t)(void *priv, struct mailbox_msg *msg);
+typedef enum mbox_msg_cnt (*mbox_handler_func_t)(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg);
+
+enum mbox_act {
+ MBOX_SEND,
+ MBOX_RECV,
+ MBOX_ACT_MAX,
+};
+
+enum mbox_msg_cnt {
+ MBOX_NO_RET_MSG,
+ MBOX_RET_MSG1,
+ MBOX_RET_MSG2,
+ MBOX_RET_MSG3,
+ MBOX_RET_MSG4,
+};
+
+struct mailbox_msg {
+ u32 msg1;
+ u32 msg2;
+ u32 msg3;
+ u32 msg4;
+};
+
+struct mailbox_dev {
+ struct list_head list;
+ enum core_id core;
+ mbox_handler_func_t mbox_handler;
+ void *priv;
+ u8 cmd_id;
+};
+
+int mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev, struct mailbox_msg *msg);
+int mbox_send_msg_no_wait(struct mailbox_dev *mdev, struct mailbox_msg *msg);
+int mbox_send_msg(struct mailbox_dev *mdev, struct mailbox_msg *msg, void *priv,
+ mbox_ret_func_t ret_handler);
+int register_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev);
+int unregister_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev);
+void mtk_tops_mbox_clear_all_cmd(void);
+int mtk_tops_mbox_init(void);
+void mtk_tops_mbox_exit(void);
+#endif /* _TOPS_MBOX_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/mbox_id.h b/package-21.02/kernel/tops/src/inc/mbox_id.h
new file mode 100644
index 0000000..bb46250
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/mbox_id.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_MBOX_ID_H_
+#define _TOPS_MBOX_ID_H_
+
+enum mbox_cm2ap_cmd_id {
+ MBOX_CM2AP_CMD_CORE_CTRL = 0,
+ MBOX_CM2AP_CMD_HPDMA = 10,
+ MBOX_CM2AP_CMD_TNL_OFFLOAD = 11,
+ MBOX_CM2AP_CMD_TEST = 31,
+ __MBOX_CM2AP_CMD_MAX = 32,
+};
+
+enum mbox_ap2cm_cmd_id {
+ MBOX_AP2CM_CMD_CORE_CTRL = 0,
+ MBOX_AP2CM_CMD_NET = 1,
+ MBOX_AP2CM_CMD_WDT = 2,
+ MBOX_AP2CM_CMD_TNL_OFFLOAD = 11,
+ MBOX_AP2CM_CMD_TEST = 31,
+ __MBOX_AP2CM_CMD_MAX = 32,
+};
+
+enum mbox_cx2ap_cmd_id {
+ MBOX_CX2AP_CMD_CORE_CTRL = 0,
+ MBOX_CX2AP_CMD_HPDMA = 10,
+ __MBOX_CX2AP_CMD_MAX = 32,
+};
+
+enum mbox_ap2cx_cmd_id {
+ MBOX_AP2CX_CMD_CORE_CTRL = 0,
+ MBOX_AP2CX_CMD_NET = 1,
+ MBOX_AP2CX_CMD_WDT = 2,
+ __MBOX_AP2CX_CMD_MAX = 32,
+};
+
+enum mbox_cm2cx_cmd_id {
+ MBOX_CM2CX_CMD_CORE_CTRL = 0,
+ __MBOX_CM2CX_CMD_MAX = 32,
+};
+
+enum mbox_cx2cm_cmd_id {
+ MBOX_CX2CM_CMD_CORE_CTRL = 0,
+ __MBOX_CX2CM_CMD_MAX = 32,
+};
+
+#endif /* _TOPS_MBOX_ID_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/mcu.h b/package-21.02/kernel/tops/src/inc/mcu.h
new file mode 100644
index 0000000..7c463eb
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/mcu.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_MCU_H_
+#define _TOPS_MCU_H_
+
+#include <linux/clk.h>
+#include <linux/bits.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include "tops.h"
+
+struct mcu_state;
+
+#define TOP_CORE_BASE (0x001000)
+#define TOP_SEC_BASE (0x00A000)
+#define TOP_L2SRAM (0x100000)
+#define TOP_CORE_M_DTCM (0x300000)
+#define TOP_CORE_M_ITCM (0x310000)
+#define CLUST_CORE_BASE(x) (0x501000 + 0x1000 * (x))
+#define CLUST_SEC_BASE (0x50A000)
+#define CLUST_L2SRAM (0x700000)
+#define CLUST_CORE_X_DTCM(x) (0x800000 + 0x20000 * (x))
+#define CLUST_CORE_X_ITCM(x) (0x810000 + 0x20000 * (x))
+
+/* CORE */
+#define TOP_CORE_NPU_SW_RST (TOP_CORE_BASE + 0x00)
+#define TOP_CORE_NPU_CTRL (TOP_CORE_BASE + 0x04)
+#define TOP_CORE_OCD_CTRL (TOP_CORE_BASE + 0x18)
+
+#define TOP_CORE_DBG_CTRL (TOP_SEC_BASE + 0x64)
+#define TOP_CORE_M_STAT_VECTOR_SEL (TOP_SEC_BASE + 0x68)
+#define TOP_CORE_M_RESET_VECTOR (TOP_SEC_BASE + 0x6C)
+
+#define CLUST_CORE_NPU_SW_RST(x) (CLUST_CORE_BASE(x) + 0x00)
+#define CLUST_CORE_NPU_CTRL(x) (CLUST_CORE_BASE(x) + 0x04)
+#define CLUST_CORE_OCD_CTRL(x) (CLUST_CORE_BASE(x) + 0x18)
+
+#define CLUST_CORE_DBG_CTRL (CLUST_SEC_BASE + 0x64)
+#define CLUST_CORE_X_STAT_VECTOR_SEL(x) (CLUST_SEC_BASE + 0x68 + (0xC * (x)))
+#define CLUST_CORE_X_RESET_VECTOR(x) (CLUST_SEC_BASE + 0x6C + (0xC * (x)))
+
+#define MCU_ACT_ABNORMAL (BIT(MCU_ACT_ABNORMAL_BIT))
+#define MCU_ACT_RESET (BIT(MCU_ACT_RESET_BIT))
+#define MCU_ACT_NETSTOP (BIT(MCU_ACT_NETSTOP_BIT))
+#define MCU_ACT_SHUTDOWN (BIT(MCU_ACT_SHUTDOWN_BIT))
+#define MCU_ACT_INIT (BIT(MCU_ACT_INIT_BIT))
+#define MCU_ACT_STALL (BIT(MCU_ACT_STALL_BIT))
+#define MCU_ACT_FREERUN (BIT(MCU_ACT_FREERUN_BIT))
+
+#define MCU_CTRL_ARG_NUM 2
+
+enum mcu_act {
+ MCU_ACT_ABNORMAL_BIT,
+ MCU_ACT_RESET_BIT,
+ MCU_ACT_NETSTOP_BIT,
+ MCU_ACT_SHUTDOWN_BIT,
+ MCU_ACT_INIT_BIT,
+ MCU_ACT_STALL_BIT,
+ MCU_ACT_FREERUN_BIT,
+
+ __MCU_ACT_MAX,
+};
+
+enum mcu_state_type {
+ MCU_STATE_TYPE_SHUTDOWN,
+ MCU_STATE_TYPE_INIT,
+ MCU_STATE_TYPE_FREERUN,
+ MCU_STATE_TYPE_STALL,
+ MCU_STATE_TYPE_NETSTOP,
+ MCU_STATE_TYPE_RESET,
+ MCU_STATE_TYPE_ABNORMAL,
+
+ __MCU_STATE_TYPE_MAX,
+};
+
+enum mcu_cmd_type {
+ MCU_CMD_TYPE_NULL,
+ MCU_CMD_TYPE_INIT_DONE,
+ MCU_CMD_TYPE_STALL,
+ MCU_CMD_TYPE_STALL_DONE,
+ MCU_CMD_TYPE_FREERUN,
+ MCU_CMD_TYPE_FREERUN_DONE,
+ MCU_CMD_TYPE_ASSERT_RESET,
+ MCU_CMD_TYPE_ASSERT_RESET_DONE,
+ MCU_CMD_TYPE_RELEASE_RESET,
+ MCU_CMD_TYPE_RELEASE_RESET_DONE,
+
+ __MCU_CMD_TYPE_MAX,
+};
+
+enum mcu_event_type {
+ MCU_EVENT_TYPE_NULL,
+ MCU_EVENT_TYPE_SYNC_TNL,
+ MCU_EVENT_TYPE_WDT_TIMEOUT,
+ MCU_EVENT_TYPE_FE_RESET,
+
+ __MCU_EVENT_TYPE_MAX,
+};
+
+struct mcu_ctrl_cmd {
+ enum mcu_event_type e;
+ u32 arg[MCU_CTRL_ARG_NUM];
+ /*
+ * if bit n (BIT(enum core_id)) == 1, send control message to that core.
+ * default send to all cores if core_mask == 0
+ */
+ u32 core_mask;
+};
+
+struct mcu_state {
+ enum mcu_state_type state;
+ struct mcu_state *(*state_trans)(u32 mcu_act, struct mcu_state *state);
+ int (*enter)(struct mcu_state *state);
+ int (*leave)(struct mcu_state *state);
+};
+
+bool mtk_tops_mcu_alive(void);
+bool mtk_tops_mcu_bring_up_done(void);
+bool mtk_tops_mcu_netsys_fe_rst(void);
+int mtk_tops_mcu_stall(struct mcu_ctrl_cmd *mcmd,
+ void (*callback)(void *param), void *param);
+int mtk_tops_mcu_reset(struct mcu_ctrl_cmd *mcmd,
+ void (*callback)(void *param), void *param);
+
+int mtk_tops_mcu_bring_up(struct platform_device *pdev);
+void mtk_tops_mcu_tear_down(struct platform_device *pdev);
+int mtk_tops_mcu_init(struct platform_device *pdev);
+void mtk_tops_mcu_deinit(struct platform_device *pdev);
+#endif /* _TOPS_MCU_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/net-event.h b/package-21.02/kernel/tops/src/inc/net-event.h
new file mode 100644
index 0000000..785a124
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/net-event.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_NET_EVENT_H_
+#define _TOPS_NET_EVENT_H_
+
+#include <linux/platform_device.h>
+
+#include <mtk_eth_soc.h>
+#include <mtk_eth_reset.h>
+
+struct tops_net_ser_data {
+ struct net_device *ndev;
+};
+
+int mtk_tops_netevent_register(struct platform_device *pdev);
+void mtk_tops_netevent_unregister(struct platform_device *pdev);
+#endif /* _TOPS_NET_EVENT_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/netsys.h b/package-21.02/kernel/tops/src/inc/netsys.h
new file mode 100644
index 0000000..1f51695
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/netsys.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_NETSYS_H_
+#define _TOPS_NETSYS_H_
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/platform_device.h>
+
+#include "tunnel.h"
+
+/* FE BASE */
+#define FE_BASE (0x0000)
+
+/* PPE BASE */
+#define PPE0_BASE (0x2000)
+#define PPE1_BASE (0x2400)
+#define PPE2_BASE (0x2C00)
+
+/* FE_INT */
+#define FE_INT_GRP (0x0020)
+#define FE_INT_STA2 (0x0028)
+#define FE_INT_EN2 (0x002C)
+
+/* PSE IQ/OQ */
+#define PSE_IQ_STA6 (0x0194)
+#define PSE_OQ_STA6 (0x01B4)
+
+/* PPE */
+#define PPE_TBL_CFG (0x021C)
+
+/* FE_INT_GRP */
+#define FE_MISC_INT_ASG_SHIFT (0)
+#define FE_MISC_INT_ASG_MASK GENMASK(3, 0)
+
+/* FE_INT_STA2/FE_INT_EN2 */
+#define PSE_FC_ON_1_SHIFT (0)
+#define PSE_FC_ON_1_MASK GENMASK(6, 0)
+#define TDMA_TX_PAUSE (BIT(2))
+
+/* PSE IQ/OQ PORT */
+#define TDMA_PORT_SHIFT (0)
+#define TDMA_PORT_MASK GENMASK(15, 0)
+
+u32 mtk_tops_netsys_ppe_get_max_entry_num(u32 ppe_id);
+int mtk_tops_netsys_init(struct platform_device *pdev);
+void mtk_tops_netsys_deinit(struct platform_device *pdev);
+#endif /* _TOPS_NETSYS_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/ser.h b/package-21.02/kernel/tops/src/inc/ser.h
new file mode 100644
index 0000000..99f9d3d
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/ser.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ */
+
+#ifndef _TOPS_SER_H_
+#define _TOPS_SER_H_
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include "net-event.h"
+#include "mcu.h"
+#include "wdt.h"
+
+enum tops_ser_type {
+ TOPS_SER_NETSYS_FE_RST,
+ TOPS_SER_WDT_TO,
+
+ __TOPS_SER_TYPE_MAX,
+};
+
+struct tops_ser_params {
+ enum tops_ser_type type;
+
+ union {
+ struct tops_net_ser_data net;
+ struct tops_wdt_ser_data wdt;
+ } data;
+
+ void (*ser_callback)(struct tops_ser_params *ser_params);
+ void (*ser_mcmd_setup)(struct tops_ser_params *ser_params,
+ struct mcu_ctrl_cmd *mcmd);
+};
+
+int mtk_tops_ser(struct tops_ser_params *ser_params);
+int mtk_tops_ser_init(struct platform_device *pdev);
+int mtk_tops_ser_deinit(struct platform_device *pdev);
+#endif /* _TOPS_SER_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/tdma.h b/package-21.02/kernel/tops/src/inc/tdma.h
new file mode 100644
index 0000000..2cbd644
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/tdma.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_TDMA_H_
+#define _TOPS_TDMA_H_
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+/* TDMA */
+#define TDMA_BASE (0x6000)
+
+#define TDMA_TX_CTX_IDX_0 (0x008)
+#define TDMA_RX_MAX_CNT_X(idx) (0x104 + ((idx) * 0x10))
+#define TDMA_RX_CRX_IDX_X(idx) (0x108 + ((idx) * 0x10))
+#define TDMA_RX_DRX_IDX_X(idx) (0x10C + ((idx) * 0x10))
+#define TDMA_GLO_CFG0 (0x204)
+#define TDMA_RST_IDX (0x208)
+#define TDMA_TX_XDMA_FIFO_CFG0 (0x238)
+#define TDMA_RX_XDMA_FIFO_CFG0 (0x23C)
+#define TDMA_PREF_TX_CFG (0x2D0)
+#define TDMA_PREF_TX_FIFO_CFG0 (0x2D4)
+#define TDMA_PREF_RX_CFG (0x2DC)
+#define TDMA_PREF_RX_FIFO_CFG0 (0x2E0)
+#define TDMA_PREF_SIDX_CFG (0x2E4)
+#define TDMA_WRBK_TX_CFG (0x300)
+#define TDMA_WRBK_TX_FIFO_CFG0 (0x304)
+#define TDMA_WRBK_RX_CFG (0x344)
+#define TDMA_WRBK_RX_FIFO_CFGX(x) (0x348 + 0x4 * (x))
+#define TDMA_WRBK_SIDX_CFG (0x388)
+#define TDMA_PREF_RX_FIFO_CFG1 (0x3EC)
+
+/* TDMA_GLO_CFG0 */
+#define TX_DMA_EN (BIT(0))
+#define TX_DMA_BUSY (BIT(1))
+#define RX_DMA_EN (BIT(2))
+#define RX_DMA_BUSY (BIT(3))
+#define DMA_BT_SIZE_MASK (0x7)
+#define DMA_BT_SIZE_SHIFT (11)
+#define OTSD_THRES_MASK (0xF)
+#define OTSD_THRES_SHIFT (14)
+#define CDM_FCNT_THRES_MASK (0xF)
+#define CDM_FCNT_THRES_SHIFT (18)
+#define LB_MODE (BIT(24))
+#define PKT_WCOMP (BIT(27))
+#define DEC_WCOMP (BIT(28))
+
+/* TDMA_RST_IDX */
+#define RST_DTX_IDX_0 (BIT(0))
+#define RST_DRX_IDX_X(idx) (BIT(16 + (idx)))
+
+/* TDMA_TX_XDMA_FIFO_CFG0 TDMA_RX_XDMA_FIFO_CFG0 */
+#define PAR_FIFO_CLEAR (BIT(0))
+#define CMD_FIFO_CLEAR (BIT(4))
+#define DMAD_FIFO_CLEAR (BIT(8))
+#define ARR_FIFO_CLEAR (BIT(12))
+#define LEN_FIFO_CLEAR (BIT(15))
+#define WID_FIFO_CLEAR (BIT(18))
+#define BID_FIFO_CLEAR (BIT(21))
+
+/* TDMA_SDL_CFG */
+#define SDL_EN (BIT(16))
+#define SDL_MASK (0xFFFF)
+#define SDL_SHIFT (0)
+
+/* TDMA_PREF_TX_CFG TDMA_PREF_RX_CFG */
+#define PREF_BUSY BIT(1)
+#define PREF_EN BIT(0)
+
+/* TDMA_PREF_TX_FIFO_CFG0 TDMA_PREF_RX_FIFO_CFG0 TDMA_PREF_RX_FIFO_CFG1 */
+#define PREF_TX_RING0_CLEAR (BIT(0))
+#define PREF_RX_RINGX_CLEAR(x) (BIT((((x) % 2) * 16)))
+#define PREF_RX_RING1_CLEAR (BIT(0))
+#define PREF_RX_RING2_CLEAR (BIT(16))
+#define PREF_RX_RING3_CLEAR (BIT(0))
+#define PREF_RX_RING4_CLEAR (BIT(16))
+
+/* TDMA_PREF_SIDX_CFG TDMA_WRBK_SIDX_CFG */
+#define TX_RING0_SIDX_CLR (BIT(0))
+#define RX_RINGX_SIDX_CLR(x) (BIT(4 + (x)))
+
+/* TDMA_WRBK_TX_FIFO_CFG0 TDMA_WRBK_RX_FIFO_CFGX */
+#define WRBK_RING_CLEAR (BIT(0))
+
+/* TDMA_WRBK_TX_CFG TDMA_WRBK_RX_CFG */
+#define WRBK_BUSY (BIT(0))
+#define BURST_SIZE_SHIFT (6)
+#define BURST_SIZE_MASK (0x1F)
+#define WRBK_THRES_SHIFT (14)
+#define WRBK_THRES_MASK (0x3F)
+#define FLUSH_TIMER_EN (BIT(21))
+#define MAX_PENDING_TIME_SHIFT (22)
+#define MAX_PENDING_TIME_MASK (0xFF)
+#define WRBK_EN (BIT(30))
+
+#define TDMA_RING_NUM (4)
+#define TDMA_RING_NUM_MOD (TDMA_RING_NUM - 1)
+
+enum tops_net_cmd {
+ TOPS_NET_CMD_NULL,
+ TOPS_NET_CMD_STOP,
+ TOPS_NET_CMD_START,
+
+ __TOPS_NET_CMD_MAX,
+};
+
+void mtk_tops_tdma_record_last_state(void);
+void mtk_tops_tdma_reset(void);
+int mtk_tops_tdma_enable(void);
+void mtk_tops_tdma_disable(void);
+int mtk_tops_tdma_init(struct platform_device *pdev);
+void mtk_tops_tdma_deinit(struct platform_device *pdev);
+#endif /* _TOPS_TDMA_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/tops.h b/package-21.02/kernel/tops/src/inc/tops.h
new file mode 100644
index 0000000..224ed7f
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/tops.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_H_
+#define _TOPS_H_
+
+#define CORE_TOPS_MASK (GENMASK(CORE_TOPS_NUM - 1, 0))
+
+enum core_id {
+ CORE_OFFLOAD_0,
+ CORE_OFFLOAD_1,
+ CORE_OFFLOAD_2,
+ CORE_OFFLOAD_3,
+ CORE_OFFLOAD_NUM,
+ CORE_MGMT = CORE_OFFLOAD_NUM,
+ CORE_TOPS_NUM,
+ CORE_AP = CORE_TOPS_NUM,
+ CORE_MAX,
+};
+#endif /* _TOPS_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/trm-fs.h b/package-21.02/kernel/tops/src/inc/trm-fs.h
new file mode 100644
index 0000000..972924f
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/trm-fs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ * Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_TRM_FS_H_
+#define _TOPS_TRM_FS_H_
+
+#define RLY_DUMP_SUBBUF_SZ 2048
+#define RLY_DUMP_SUBBUF_NUM 256
+
+bool mtk_trm_fs_is_init(void);
+void *mtk_trm_fs_relay_reserve(u32 size);
+void mtk_trm_fs_relay_flush(void);
+int mtk_trm_fs_init(void);
+void mtk_trm_fs_deinit(void);
+#endif /* _TOPS_TRM_FS_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/trm-mcu.h b/package-21.02/kernel/tops/src/inc/trm-mcu.h
new file mode 100644
index 0000000..e3f9e3f
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/trm-mcu.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ * Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_TRM_MCU_H_
+#define _TOPS_TRM_MCU_H_
+
+#include "tops.h"
+
+#define XCHAL_NUM_AREG (32)
+#define CORE_DUMP_FRAM_MAGIC (0x00BE00BE)
+
+#define CORE_DUMP_FRAME_LEN (sizeof(struct core_dump_fram))
+
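+/*
+ * snapshot of the Xtensa core registers (PC, PS, register-window state,
+ * exception registers and the AR register file) captured at dump time
+ */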
+/* need to sync with core_dump.S */
+struct core_dump_fram {
+ uint32_t magic;
+ uint32_t num_areg;
+ uint32_t pc;
+ uint32_t ps;
+ uint32_t windowstart;
+ uint32_t windowbase;
+ uint32_t epc1;
+ uint32_t exccause;
+ uint32_t excvaddr;
+ uint32_t excsave1;
+ uint32_t areg[XCHAL_NUM_AREG];
+};
+
+extern struct core_dump_fram cd_frams[CORE_TOPS_NUM];
+
+int mtk_trm_mcu_core_dump(void);
+int mtk_tops_trm_mcu_init(void);
+void mtk_tops_trm_mcu_exit(void);
+#endif /* _TOPS_TRM_MCU_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/trm.h b/package-21.02/kernel/tops/src/inc/trm.h
new file mode 100644
index 0000000..4b5118f
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/trm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ * Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_TRM_H_
+#define _TOPS_TRM_H_
+
+#include <linux/platform_device.h>
+
+extern struct device *trm_dev;
+
+#define TRM_DBG(fmt, ...) dev_dbg(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
+#define TRM_INFO(fmt, ...) dev_info(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
+#define TRM_NOTICE(fmt, ...) dev_notice(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
+#define TRM_WARN(fmt, ...) dev_warn(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
+#define TRM_ERR(fmt, ...) dev_err(trm_dev, "[TRM] " fmt, ##__VA_ARGS__)
+
+#define TRM_CONFIG_NAME_MAX_LEN 32
+
+/* TRM Configuration */
+#define TRM_CFG(_name, _addr, _len, _ofs, _size, _flag) \
+ .name = _name, \
+ .addr = _addr, \
+ .len = _len, \
+ .offset = _ofs, \
+ .size = _size, \
+ .flag = _flag,
+
+#define TRM_CFG_EN(name, addr, len, ofs, size, flag) \
+ TRM_CFG(name, addr, len, ofs, size, TRM_CONFIG_F_ENABLE | (flag))
+
+#define TRM_CFG_CORE_DUMP_EN(name, addr, len, ofs, size, flag, core_id) \
+ TRM_CFG_EN(name, addr, len, ofs, size, TRM_CONFIG_F_CORE_DUMP | flag) \
+ .core = core_id
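+
+/*
+ * illustrative use (names and values are placeholders):
+ * { TRM_CFG_CORE_DUMP_EN("core0-dtcm", addr, len, 0, len, 0, CORE_OFFLOAD_0) }
+ * fills one struct trm_config entry with TRM_CONFIG_F_ENABLE and
+ * TRM_CONFIG_F_CORE_DUMP set
+ */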
+
+/* TRM configuration flags */
+#define TRM_CONFIG_F(trm_cfg_bit) \
+ (BIT(TRM_CONFIG_F_ ## trm_cfg_bit ## _BIT))
+#define TRM_CONFIG_F_CX_CORE_DUMP_MASK (GENMASK(CORE_TOPS_NUM, 0))
+#define TRM_CONFIG_F_CX_CORE_DUMP_SHIFT (0)
+
+/* TRM reason flag */
+#define TRM_RSN(trm_rsn_bit) (BIT(TRM_RSN_ ## trm_rsn_bit ## _BIT))
+
+/* TRM Reason */
+#define TRM_RSN_NULL (0x0000)
+#define TRM_RSN_WDT_TIMEOUT_CORE0 (TRM_RSN(C0_WDT))
+#define TRM_RSN_WDT_TIMEOUT_CORE1 (TRM_RSN(C1_WDT))
+#define TRM_RSN_WDT_TIMEOUT_CORE2 (TRM_RSN(C2_WDT))
+#define TRM_RSN_WDT_TIMEOUT_CORE3 (TRM_RSN(C3_WDT))
+#define TRM_RSN_WDT_TIMEOUT_COREM (TRM_RSN(CM_WDT))
+#define TRM_RSN_FE_RESET (TRM_RSN(FE_RESET))
+#define TRM_RSN_MCU_STATE_ACT_FAIL (TRM_RSN(MCU_STATE_ACT_FAIL))
+
+enum trm_config_flag {
+ TRM_CONFIG_F_ENABLE_BIT,
+ TRM_CONFIG_F_CORE_DUMP_BIT,
+};
+
+enum trm_rsn {
+ TRM_RSN_C0_WDT_BIT,
+ TRM_RSN_C1_WDT_BIT,
+ TRM_RSN_C2_WDT_BIT,
+ TRM_RSN_C3_WDT_BIT,
+ TRM_RSN_CM_WDT_BIT,
+ TRM_RSN_FE_RESET_BIT,
+ TRM_RSN_MCU_STATE_ACT_FAIL_BIT,
+};
+
+enum trm_hardware {
+ TRM_TOPS,
+ TRM_NETSYS,
+ TRM_TDMA,
+
+ __TRM_HARDWARE_MAX,
+};
+
+struct trm_config {
+ char name[TRM_CONFIG_NAME_MAX_LEN];
+ enum core_id core; /* valid if TRM_CONFIG_F_CORE_DUMP is set */
+ u32 addr; /* memory address of the dump info */
+ u32 len; /* total length of the dump info */
+ u32 offset; /* dump offset */
+ u32 size; /* dump size */
+ u8 flag;
+#define TRM_CONFIG_F_CORE_DUMP (TRM_CONFIG_F(CORE_DUMP))
+#define TRM_CONFIG_F_ENABLE (TRM_CONFIG_F(ENABLE))
+};
+
+struct trm_hw_config {
+ struct trm_config *trm_cfgs;
+ u32 cfg_len;
+ int (*trm_hw_dump)(void *dst, u32 ofs, u32 len);
+};
+
+int mtk_trm_dump(u32 dump_rsn);
+int mtk_trm_cfg_setup(char *name, u32 offset, u32 size, u8 enable);
+int mtk_tops_trm_init(void);
+void mtk_tops_trm_exit(void);
+int mtk_trm_hw_config_register(enum trm_hardware trm_hw,
+ struct trm_hw_config *trm_hw_cfg);
+void mtk_trm_hw_config_unregister(enum trm_hardware trm_hw,
+ struct trm_hw_config *trm_hw_cfg);
+#endif /* _TOPS_TRM_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/tunnel.h b/package-21.02/kernel/tops/src/inc/tunnel.h
new file mode 100644
index 0000000..961aa03
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/tunnel.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_TUNNEL_H_
+#define _TOPS_TUNNEL_H_
+
+#include <linux/bitmap.h>
+#include <linux/hashtable.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include <linux/refcount.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "protocol/l2tp/l2tp.h"
+
+/* tunnel info status */
+#define TNL_STA_UNINIT (BIT(TNL_STATUS_UNINIT))
+#define TNL_STA_INIT (BIT(TNL_STATUS_INIT))
+#define TNL_STA_QUEUED (BIT(TNL_STATUS_QUEUED))
+#define TNL_STA_UPDATING (BIT(TNL_STATUS_UPDATING))
+#define TNL_STA_UPDATED (BIT(TNL_STATUS_UPDATED))
+#define TNL_STA_DIP_UPDATE (BIT(TNL_STATUS_DIP_UPDATE))
+#define TNL_STA_DELETING (BIT(TNL_STATUS_DELETING))
+
+/* tunnel params flags */
+#define TNL_DECAP_ENABLE (BIT(TNL_PARAMS_DECAP_ENABLE_BIT))
+#define TNL_ENCAP_ENABLE (BIT(TNL_PARAMS_ENCAP_ENABLE_BIT))
+
+/* tunnel info flags */
+#define TNL_INFO_DEBUG (BIT(TNL_INFO_DEBUG_BIT))
+
+struct tops_tnl_info;
+struct tops_tnl_params;
+
+/*
+ * tops_crsn
+ * TOPS_CRSN_TNL_ID_START
+ * TOPS_CRSN_TNL_ID_END
+ * APMCU checks whether tops_crsn is in this range to know if this packet
+ * was processed by TOPS previously.
+ */
+enum tops_crsn {
+ TOPS_CRSN_IGNORE = 0x00,
+ TOPS_CRSN_TNL_ID_START = 0x10,
+ TOPS_CRSN_TNL_ID_END = 0x2F,
+};
+
+enum tops_entry_type {
+ TOPS_ENTRY_NONE = 0,
+ TOPS_ENTRY_GRETAP,
+ TOPS_ENTRY_PPTP,
+ TOPS_ENTRY_IP_L2TP,
+ TOPS_ENTRY_UDP_L2TP_CTRL,
+ TOPS_ENTRY_UDP_L2TP_DATA = 5,
+ TOPS_ENTRY_VXLAN,
+ TOPS_ENTRY_NATT,
+ TOPS_ENTRY_CAPWAP_CTRL,
+ TOPS_ENTRY_CAPWAP_DATA,
+ TOPS_ENTRY_CAPWAP_DTLS = 10,
+ TOPS_ENTRY_IPSEC_ESP,
+ TOPS_ENTRY_IPSEC_AH,
+
+ __TOPS_ENTRY_MAX = CONFIG_TOPS_TNL_NUM,
+};
+
+enum tops_tunnel_mbox_cmd {
+ TOPS_TNL_MBOX_CMD_RESV,
+ TOPS_TNL_START_ADDR_SYNC,
+
+ __TOPS_TNL_MBOX_CMD_MAX,
+};
+
+enum tunnel_ctrl_event {
+ TUNNEL_CTRL_EVENT_NULL,
+ TUNNEL_CTRL_EVENT_NEW,
+ TUNNEL_CTRL_EVENT_DEL,
+ TUNNEL_CTRL_EVENT_DIP_UPDATE,
+
+ __TUNNEL_CTRL_EVENT_MAX,
+};
+
+enum tnl_status {
+ TNL_STATUS_UNINIT,
+ TNL_STATUS_INIT,
+ TNL_STATUS_QUEUED,
+ TNL_STATUS_UPDATING,
+ TNL_STATUS_UPDATED,
+ TNL_STATUS_DIP_UPDATE,
+ TNL_STATUS_DELETING,
+
+ __TNL_STATUS_MAX,
+};
+
+enum tops_tnl_params_flag {
+ TNL_PARAMS_DECAP_ENABLE_BIT,
+ TNL_PARAMS_ENCAP_ENABLE_BIT,
+};
+
+enum tops_tnl_info_flag {
+ TNL_INFO_DEBUG_BIT,
+};
+
+/* record outer tunnel header data for HW offloading */
+struct tops_tnl_params {
+ u8 daddr[ETH_ALEN];
+ u8 saddr[ETH_ALEN];
+ __be32 dip;
+ __be32 sip;
+ __be16 dport;
+ __be16 sport;
+ u16 protocol;
+ u8 tops_entry_proto;
+ u8 flag; /* bit: enum tops_tnl_params_flag */
+ union {
+ struct l2tp_param l2tp; /* 4B */
+ } priv;
+} __packed __aligned(16);
+
+struct tops_tnl_info {
+ struct tops_tnl_params tnl_params;
+ struct tops_tnl_params cache;
+ struct list_head sync_node;
+ struct hlist_node hlist;
+ struct net_device *dev;
+ struct timer_list taging;
+ spinlock_t lock;
+ u32 tnl_idx;
+ u32 status;
+ u32 flag; /* bit: enum tops_tnl_info_flag */
+ refcount_t refcnt;
+} __aligned(16);
+
+struct tops_tnl_type {
+ const char *type_name;
+ int (*tnl_decap_param_setup)(struct sk_buff *skb,
+ struct tops_tnl_params *tnl_params);
+ int (*tnl_encap_param_setup)(struct sk_buff *skb,
+ struct tops_tnl_params *tnl_params);
+ int (*tnl_debug_param_setup)(const char *buf, int *ofs,
+ struct tops_tnl_params *tnl_params);
+ int (*tnl_dump_param)(char *buf, struct tops_tnl_params *tnl_params);
+ bool (*tnl_info_match)(struct tops_tnl_params *params1,
+ struct tops_tnl_params *params2);
+ bool (*tnl_decap_offloadable)(struct sk_buff *skb);
+ enum tops_entry_type tops_entry;
+ bool has_inner_eth;
+};
+
+void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info);
+void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info);
+struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_params *tnl_params);
+struct tops_tnl_info *mtk_tops_tnl_info_alloc(void);
+void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info);
+
+int mtk_tops_tnl_offload_init(struct platform_device *pdev);
+void mtk_tops_tnl_offload_deinit(struct platform_device *pdev);
+int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev);
+void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev);
+void mtk_tops_tnl_offload_flush(void);
+void mtk_tops_tnl_offload_recover(void);
+void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev);
+
+struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name);
+int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type);
+void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type);
+#endif /* _TOPS_TUNNEL_H_ */
diff --git a/package-21.02/kernel/tops/src/inc/wdt.h b/package-21.02/kernel/tops/src/inc/wdt.h
new file mode 100644
index 0000000..b70bf63
--- /dev/null
+++ b/package-21.02/kernel/tops/src/inc/wdt.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ */
+
+#ifndef _TOPS_WDT_H_
+#define _TOPS_WDT_H_
+
+#include <linux/platform_device.h>
+
+#include "tops.h"
+
+enum wdt_cmd {
+ WDT_CMD_TRIGGER_TIMEOUT,
+
+ __WDT_CMD_MAX,
+};
+
+struct tops_wdt_ser_data {
+ u32 timeout_cores;
+};
+
+int mtk_tops_wdt_trigger_timeout(enum core_id core);
+int mtk_tops_wdt_init(struct platform_device *pdev);
+int mtk_tops_wdt_deinit(struct platform_device *pdev);
+#endif /* _TOPS_WDT_H_ */
diff --git a/package-21.02/kernel/tops/src/init.c b/package-21.02/kernel/tops/src/init.c
new file mode 100644
index 0000000..8c47546
--- /dev/null
+++ b/package-21.02/kernel/tops/src/init.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include "ctrl.h"
+#include "firmware.h"
+#include "hpdma.h"
+#include "hwspinlock.h"
+#include "internal.h"
+#include "mbox.h"
+#include "mcu.h"
+#include "netsys.h"
+#include "net-event.h"
+#include "ser.h"
+#include "tdma.h"
+#include "trm-mcu.h"
+#include "trm.h"
+#include "tunnel.h"
+#include "wdt.h"
+
+struct device *tops_dev;
+
+static int mtk_tops_post_init(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ /* kick core */
+ ret = mtk_tops_mcu_bring_up(pdev);
+ if (ret) {
+ TOPS_ERR("mcu post init failed: %d\n", ret);
+ return ret;
+ }
+
+ /* offload tunnel protocol initialization */
+ ret = mtk_tops_tnl_offload_proto_setup(pdev);
+ if (ret) {
+ TOPS_ERR("tnl offload protocol init failed: %d\n", ret);
+ goto err_mcu_tear_down;
+ }
+
+ ret = mtk_tops_netevent_register(pdev);
+ if (ret) {
+ TOPS_ERR("netevent register fail: %d\n", ret);
+ goto err_offload_proto_tear_down;
+ }
+
+ /* create sysfs file */
+ ret = mtk_tops_ctrl_init(pdev);
+ if (ret) {
+ TOPS_ERR("ctrl init failed: %d\n", ret);
+ goto err_netevent_unregister;
+ }
+
+ ret = mtk_tops_ser_init(pdev);
+ if (ret) {
+ TOPS_ERR("ser init failed: %d\n", ret);
+ goto err_ctrl_deinit;
+ }
+
+ ret = mtk_tops_wdt_init(pdev);
+ if (ret) {
+ TOPS_ERR("wdt init failed: %d\n", ret);
+ goto err_ser_deinit;
+ }
+
+ return ret;
+
+err_ser_deinit:
+ mtk_tops_ser_deinit(pdev);
+
+err_ctrl_deinit:
+ mtk_tops_ctrl_deinit(pdev);
+
+err_netevent_unregister:
+ mtk_tops_netevent_unregister(pdev);
+
+err_offload_proto_tear_down:
+ mtk_tops_tnl_offload_proto_teardown(pdev);
+
+err_mcu_tear_down:
+ mtk_tops_mcu_tear_down(pdev);
+
+ return ret;
+}
+
+static int mtk_tops_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ tops_dev = &pdev->dev;
+
+ ret = mtk_tops_hwspinlock_init(pdev);
+ if (ret) {
+ TOPS_ERR("hwspinlock init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_tops_fw_init(pdev);
+ if (ret) {
+ TOPS_ERR("firmware init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_tops_mcu_init(pdev);
+ if (ret) {
+ TOPS_ERR("mcu init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_tops_netsys_init(pdev);
+ if (ret) {
+ TOPS_ERR("netsys init failed: %d\n", ret);
+ goto err_mcu_deinit;
+ }
+
+ ret = mtk_tops_tdma_init(pdev);
+ if (ret) {
+ TOPS_ERR("tdma init failed: %d\n", ret);
+ goto err_netsys_deinit;
+ }
+
+ ret = mtk_tops_tnl_offload_init(pdev);
+ if (ret) {
+ TOPS_ERR("tunnel table init failed: %d\n", ret);
+ goto err_tdma_deinit;
+ }
+
+ ret = mtk_tops_post_init(pdev);
+ if (ret)
+ goto err_tnl_offload_deinit;
+
+ TOPS_ERR("init done\n");
+ return ret;
+
+err_tnl_offload_deinit:
+ mtk_tops_tnl_offload_deinit(pdev);
+
+err_tdma_deinit:
+ mtk_tops_tdma_deinit(pdev);
+
+err_netsys_deinit:
+ mtk_tops_netsys_deinit(pdev);
+
+err_mcu_deinit:
+ mtk_tops_mcu_deinit(pdev);
+
+ return ret;
+}
+
+static int mtk_tops_remove(struct platform_device *pdev)
+{
+ mtk_tops_wdt_deinit(pdev);
+
+ mtk_tops_ser_deinit(pdev);
+
+ mtk_tops_ctrl_deinit(pdev);
+
+ mtk_tops_netevent_unregister(pdev);
+
+ mtk_tops_tnl_offload_proto_teardown(pdev);
+
+ mtk_tops_mcu_tear_down(pdev);
+
+ mtk_tops_tnl_offload_deinit(pdev);
+
+ mtk_tops_tdma_deinit(pdev);
+
+ mtk_tops_netsys_deinit(pdev);
+
+ mtk_tops_mcu_deinit(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id tops_match[] = {
+ { .compatible = "mediatek,tops", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tops_match);
+
+static struct platform_driver mtk_tops_driver = {
+ .probe = mtk_tops_probe,
+ .remove = mtk_tops_remove,
+ .driver = {
+ .name = "mediatek,tops",
+ .owner = THIS_MODULE,
+ .of_match_table = tops_match,
+ },
+};
+
+static int __init mtk_tops_init(void)
+{
+ mtk_tops_mbox_init();
+
+ mtk_tops_hpdma_init();
+
+ mtk_tops_trm_init();
+
+ return platform_driver_register(&mtk_tops_driver);
+}
+
+static void __exit mtk_tops_exit(void)
+{
+ platform_driver_unregister(&mtk_tops_driver);
+
+ mtk_tops_trm_exit();
+
+ mtk_tops_hpdma_exit();
+
+ mtk_tops_mbox_exit();
+}
+
+module_init(mtk_tops_init);
+module_exit(mtk_tops_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MediaTek TOPS Driver");
+MODULE_AUTHOR("Ren-Ting Wang <ren-ting.wang@mediatek.com>");
diff --git a/package-21.02/kernel/tops/src/mbox.c b/package-21.02/kernel/tops/src/mbox.c
new file mode 100644
index 0000000..c10cbca
--- /dev/null
+++ b/package-21.02/kernel/tops/src/mbox.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "mcu.h"
+#include "mbox.h"
+#include "internal.h"
+
+#define MBOX_SEND_TIMEOUT (2000)
+
+struct mailbox_reg {
+ u32 cmd_set_reg;
+ u32 cmd_clr_reg;
+ u32 msg_reg;
+};
+
+struct mailbox_core {
+ struct list_head mdev_list;
+ u32 registered_cmd;
+ spinlock_t lock;
+};
+
+struct mailbox_hw {
+ struct mailbox_core core[MBOX_ACT_MAX][CORE_MAX];
+ struct device *dev;
+ void __iomem *base;
+};
+
+static struct mailbox_hw mbox;
+
+static inline void mbox_write(u32 reg, u32 val)
+{
+ writel(val, mbox.base + reg);
+}
+
+static inline void mbox_set(u32 reg, u32 mask)
+{
+ setbits(mbox.base + reg, mask);
+}
+
+static inline void mbox_clr(u32 reg, u32 mask)
+{
+ clrbits(mbox.base + reg, mask);
+}
+
+static inline void mbox_rmw(u32 reg, u32 mask, u32 val)
+{
+ clrsetbits(mbox.base + reg, mask, val);
+}
+
+static inline u32 mbox_read(u32 reg)
+{
+ return readl(mbox.base + reg);
+}
+
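+/*
+ * Each mailbox slot is five consecutive 32-bit registers: the cmd id at
+ * offset 0x0 followed by msg1..msg4 at 0x4, 0x8, 0xC and 0x10.
+ * mbox_fill_msg() and mbox_clear_msg() below rely on label fall-through;
+ * the label name encodes how many message registers are written.
+ */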
+static inline void mbox_fill_msg(enum mbox_msg_cnt cnt, struct mailbox_msg *msg,
+ struct mailbox_reg *mbox_reg)
+{
+ if (cnt == MBOX_RET_MSG4)
+ goto send_msg4;
+ else if (cnt == MBOX_RET_MSG3)
+ goto send_msg3;
+ else if (cnt == MBOX_RET_MSG2)
+ goto send_msg2;
+ else if (cnt == MBOX_RET_MSG1)
+ goto send_msg1;
+ else
+ return;
+
+send_msg4:
+ mbox_write(mbox_reg->msg_reg + 0x10, msg->msg4);
+send_msg3:
+ mbox_write(mbox_reg->msg_reg + 0xC, msg->msg3);
+send_msg2:
+ mbox_write(mbox_reg->msg_reg + 0x8, msg->msg2);
+send_msg1:
+ mbox_write(mbox_reg->msg_reg + 0x4, msg->msg1);
+}
+
+static inline void mbox_clear_msg(enum mbox_msg_cnt cnt,
+ struct mailbox_reg *mbox_reg)
+{
+ if (cnt == MBOX_NO_RET_MSG)
+ goto clear_msg4;
+ else if (cnt == MBOX_RET_MSG1)
+ goto clear_msg3;
+ else if (cnt == MBOX_RET_MSG2)
+ goto clear_msg2;
+ else if (cnt == MBOX_RET_MSG3)
+ goto clear_msg1;
+ else
+ return;
+
+clear_msg4:
+ mbox_write(mbox_reg->msg_reg + 0x4, 0);
+clear_msg3:
+ mbox_write(mbox_reg->msg_reg + 0x8, 0);
+clear_msg2:
+ mbox_write(mbox_reg->msg_reg + 0xC, 0);
+clear_msg1:
+ mbox_write(mbox_reg->msg_reg + 0x10, 0);
+}
+
+static void exec_mbox_handler(enum core_id core, struct mailbox_reg *mbox_reg)
+{
+ struct mailbox_core *mcore = &mbox.core[MBOX_RECV][core];
+ struct mailbox_dev *mdev = NULL;
+ struct mailbox_msg msg = {0};
+ enum mbox_msg_cnt ret = 0;
+ u32 cmd_id = 0;
+
+ cmd_id = mbox_read(mbox_reg->msg_reg);
+
+ list_for_each_entry(mdev, &mcore->mdev_list, list) {
+ if (mdev->cmd_id == cmd_id) {
+ if (!mdev->mbox_handler)
+ goto out;
+
+ /* setup msg for handler */
+ msg.msg1 = mbox_read(mbox_reg->msg_reg + 0x4);
+ msg.msg2 = mbox_read(mbox_reg->msg_reg + 0x8);
+ msg.msg3 = mbox_read(mbox_reg->msg_reg + 0xC);
+ msg.msg4 = mbox_read(mbox_reg->msg_reg + 0x10);
+
+ ret = mdev->mbox_handler(mdev, &msg);
+
+ mbox_fill_msg(ret, &msg, mbox_reg);
+
+ break;
+ }
+ }
+out:
+ mbox_write(mbox_reg->msg_reg, 0);
+ mbox_clear_msg(ret, mbox_reg);
+
+ /* clear cmd */
+ mbox_write(mbox_reg->cmd_clr_reg, 0xFFFFFFFF);
+}
+
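+/*
+ * AP receive path: the IRQ handler below checks which core raised the
+ * mailbox interrupt and dispatches to exec_mbox_handler(), which looks up
+ * the registered mailbox_dev by cmd id. A handler may return MBOX_RET_MSGn
+ * to pass n message words back to the MCU; the remaining message registers
+ * are cleared before the cmd is acknowledged.
+ */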
+static irqreturn_t mtk_tops_mbox_handler(int irq, void *dev_id)
+{
+ struct mailbox_reg mreg = {0};
+ u32 cluster_reg = 0;
+ u32 top_reg = 0;
+
+ top_reg = mbox_read(TOPS_TOP_AP_SLOT);
+ cluster_reg = mbox_read(TOPS_CLUST0_AP_SLOT);
+
+ if (top_reg & MBOX_TOP_MBOX_FROM_CM) {
+ mreg.cmd_set_reg = TOPS_TOP_CM_TO_AP_CMD_SET;
+ mreg.cmd_clr_reg = TOPS_TOP_CM_TO_AP_CMD_CLR;
+ mreg.msg_reg = TOPS_TOP_CM_TO_AP_MSG_N(0);
+ exec_mbox_handler(CORE_MGMT, &mreg);
+ }
+ if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C0) {
+ mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(0);
+ mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(0);
+ mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(0, 0);
+ exec_mbox_handler(CORE_OFFLOAD_0, &mreg);
+ }
+ if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C1) {
+ mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(1);
+ mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(1);
+ mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(1, 0);
+ exec_mbox_handler(CORE_OFFLOAD_1, &mreg);
+ }
+ if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C2) {
+ mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(2);
+ mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(2);
+ mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(2, 0);
+ exec_mbox_handler(CORE_OFFLOAD_2, &mreg);
+ }
+ if (cluster_reg & MBOX_CLUST0_MBOX_FROM_C3) {
+ mreg.cmd_set_reg = TOPS_CLUST0_CX_TO_AP_CMD_SET(3);
+ mreg.cmd_clr_reg = TOPS_CLUST0_CX_TO_AP_CMD_CLR(3);
+ mreg.msg_reg = TOPS_CLUST0_CX_TO_AP_MSG_N(3, 0);
+ exec_mbox_handler(CORE_OFFLOAD_3, &mreg);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mbox_get_send_reg(struct mailbox_dev *mdev,
+ struct mailbox_reg *mbox_reg)
+{
+ if (!mdev) {
+ dev_notice(mbox.dev, "no mdev specified!\n");
+ return -EINVAL;
+ }
+
+ if (mdev->core == CORE_MGMT) {
+ mbox_reg->cmd_set_reg = TOPS_TOP_AP_TO_CM_CMD_SET;
+ mbox_reg->cmd_clr_reg = TOPS_TOP_AP_TO_CM_CMD_CLR;
+ mbox_reg->msg_reg = TOPS_TOP_AP_TO_CM_MSG_N(0);
+ } else if (mdev->core == CORE_OFFLOAD_0) {
+ mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(0);
+ mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(0);
+ mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(0, 0);
+ } else if (mdev->core == CORE_OFFLOAD_1) {
+ mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(1);
+ mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(1);
+ mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(1, 0);
+ } else if (mdev->core == CORE_OFFLOAD_2) {
+ mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(2);
+ mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(2);
+ mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(2, 0);
+ } else if (mdev->core == CORE_OFFLOAD_3) {
+ mbox_reg->cmd_set_reg = TOPS_CLUST0_AP_TO_CX_CMD_SET(3);
+ mbox_reg->cmd_clr_reg = TOPS_CLUST0_AP_TO_CX_CMD_CLR(3);
+ mbox_reg->msg_reg = TOPS_CLUST0_AP_TO_CX_MSG_N(3, 0);
+ } else {
+ dev_notice(mbox.dev, "invalid mdev->core: %u\n", mdev->core);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mbox_post_send(u32 msg_reg, struct mailbox_msg *msg,
+ void *priv,
+ mbox_ret_func_t ret_handler)
+{
+ if (!ret_handler)
+ goto out;
+
+ msg->msg1 = mbox_read(msg_reg + 0x4);
+ msg->msg2 = mbox_read(msg_reg + 0x8);
+ msg->msg3 = mbox_read(msg_reg + 0xC);
+ msg->msg4 = mbox_read(msg_reg + 0x10);
+
+ ret_handler(priv, msg);
+
+out:
+ mbox_write(msg_reg, 0);
+ mbox_write(msg_reg + 0x4, 0);
+ mbox_write(msg_reg + 0x8, 0);
+ mbox_write(msg_reg + 0xC, 0);
+ mbox_write(msg_reg + 0x10, 0);
+}
+
+static inline bool mbox_send_msg_chk_timeout(ktime_t start)
+{
+ return ktime_to_us(ktime_sub(ktime_get(), start)) > MBOX_SEND_TIMEOUT;
+}
+
+static inline int __mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg,
+ struct mailbox_reg *mbox_reg)
+{
+ ktime_t start;
+
+ if (!mdev || !msg || !mbox_reg) {
+ dev_notice(mbox.dev, "missing some necessary parameters!\n");
+ return -EPERM;
+ }
+
+ start = ktime_get();
+
+ /* wait for all cmd cleared */
+ while (mbox_read(mbox_reg->cmd_set_reg)) {
+ if (mbox_send_msg_chk_timeout(start)) {
+ dev_notice(mbox.dev, "mbox occupied too long\n");
+ dev_notice(mbox.dev, "cmd set reg (0x%x): 0x%x\n",
+ mbox_reg->cmd_set_reg,
+ mbox_read(mbox_reg->cmd_set_reg));
+ dev_notice(mbox.dev, "msg1 reg (0x%x): 0x%x\n",
+ mbox_reg->msg_reg,
+ mbox_read(mbox_reg->msg_reg));
+ dev_notice(mbox.dev, "msg2 reg (0x%x): 0x%x\n",
+ mbox_reg->msg_reg,
+ mbox_read(mbox_reg->msg_reg + 0x4));
+ dev_notice(mbox.dev, "msg3 reg (0x%x): 0x%x\n",
+ mbox_reg->msg_reg,
+ mbox_read(mbox_reg->msg_reg + 0x8));
+ dev_notice(mbox.dev, "msg4 reg (0x%x): 0x%x\n",
+ mbox_reg->msg_reg,
+ mbox_read(mbox_reg->msg_reg + 0xC));
+ dev_notice(mbox.dev, "msg5 reg (0x%x): 0x%x\n",
+ mbox_reg->msg_reg,
+ mbox_read(mbox_reg->msg_reg + 0x10));
+ WARN_ON(1);
+ }
+ }
+
+ /* write msg */
+ mbox_write(mbox_reg->msg_reg, mdev->cmd_id);
+ mbox_write(mbox_reg->msg_reg + 0x4, msg->msg1);
+ mbox_write(mbox_reg->msg_reg + 0x8, msg->msg2);
+ mbox_write(mbox_reg->msg_reg + 0xC, msg->msg3);
+ mbox_write(mbox_reg->msg_reg + 0x10, msg->msg4);
+
+ /* write cmd */
+ mbox_write(mbox_reg->cmd_set_reg, BIT(mdev->cmd_id));
+
+ return 0;
+}
+
+int mbox_send_msg_no_wait_irq(struct mailbox_dev *mdev, struct mailbox_msg *msg)
+{
+ struct mailbox_reg mbox_reg = {0};
+ int ret = 0;
+
+ ret = mbox_get_send_reg(mdev, &mbox_reg);
+ if (ret)
+ return ret;
+
+ spin_lock(&mbox.core[MBOX_SEND][mdev->core].lock);
+
+ /* send cmd + msg */
+ ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);
+
+ spin_unlock(&mbox.core[MBOX_SEND][mdev->core].lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(mbox_send_msg_no_wait_irq);
+
+int mbox_send_msg_no_wait(struct mailbox_dev *mdev, struct mailbox_msg *msg)
+{
+ struct mailbox_reg mbox_reg = {0};
+ unsigned long flag = 0;
+ int ret = 0;
+
+ ret = mbox_get_send_reg(mdev, &mbox_reg);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
+
+ /* send cmd + msg */
+ ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);
+
+ spin_unlock_irqrestore(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
+
+ return ret;
+}
+EXPORT_SYMBOL(mbox_send_msg_no_wait);
+
+int mbox_send_msg(struct mailbox_dev *mdev, struct mailbox_msg *msg, void *priv,
+ mbox_ret_func_t ret_handler)
+{
+ struct mailbox_reg mbox_reg = {0};
+ unsigned long flag = 0;
+ ktime_t start;
+ int ret = 0;
+
+ ret = mbox_get_send_reg(mdev, &mbox_reg);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
+
+ /* send cmd + msg */
+ ret = __mbox_send_msg_no_wait_irq(mdev, msg, &mbox_reg);
+
+ start = ktime_get();
+
+ /* wait for cmd clear */
+ while (mbox_read(mbox_reg.cmd_set_reg) & BIT(mdev->cmd_id)) {
+ if (mbox_send_msg_chk_timeout(start)) {
+ dev_notice(mbox.dev, "wait cmd clear timeout\n");
+ WARN_ON(1);
+ break;
+ }
+ }
+
+ /* execute return handler and clear message */
+ mbox_post_send(mbox_reg.msg_reg, msg, priv, ret_handler);
+
+ spin_unlock_irqrestore(&mbox.core[MBOX_SEND][mdev->core].lock, flag);
+
+ return ret;
+}
+EXPORT_SYMBOL(mbox_send_msg);
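+
+/*
+ * Usage sketch (illustrative only): a client declares a mailbox_dev bound to
+ * a TOPS core and a command id, registers it for the send direction and then
+ * pushes a message, e.g.
+ *
+ *   struct mailbox_msg msg = { .msg1 = 0x1234 };
+ *
+ *   err = register_mbox_dev(MBOX_SEND, &mdev);
+ *   if (!err)
+ *           err = mbox_send_msg_no_wait(&mdev, &msg);
+ */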
+
+static inline int mbox_ctrl_sanity_check(enum core_id core, enum mbox_act act)
+{
+ /* sanity check */
+ if (core >= CORE_MAX || act >= MBOX_ACT_MAX)
+ return -EINVAL;
+
+ /* an mbox handler must not be registered for the AP core itself */
+ if (core == CORE_AP)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void __register_mbox_dev(struct mailbox_core *mcore,
+ struct mailbox_dev *mdev)
+{
+ struct mailbox_dev *cur = NULL;
+
+ INIT_LIST_HEAD(&mdev->list);
+
+ /* insert the mailbox_dev in order */
+ list_for_each_entry(cur, &mcore->mdev_list, list)
+ if (cur->cmd_id > mdev->cmd_id)
+ break;
+
+ list_add(&mdev->list, &cur->list);
+
+ mcore->registered_cmd |= (0x1 << mdev->cmd_id);
+}
+
+static void __unregister_mbox_dev(struct mailbox_core *mcore,
+ struct mailbox_dev *mdev)
+{
+ struct mailbox_dev *cur = NULL;
+ struct mailbox_dev *next = NULL;
+
+ /* ensure the node being deleted actually exists in the list */
+ list_for_each_entry_safe(cur, next, &mcore->mdev_list, list) {
+ if (cur->cmd_id == mdev->cmd_id && cur == mdev) {
+ list_del(&mdev->list);
+ break;
+ }
+ }
+
+ mcore->registered_cmd &= (~(0x1 << mdev->cmd_id));
+}
+
+int register_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev)
+{
+ struct mailbox_core *mcore;
+ int ret = 0;
+
+ /* sanity check */
+ ret = mbox_ctrl_sanity_check(mdev->core, act);
+ if (ret)
+ return ret;
+
+ mcore = &mbox.core[act][mdev->core];
+
+ /* check whether this cmd id is already occupied */
+ if (mcore->registered_cmd & (0x1 << mdev->cmd_id))
+ return -EBUSY;
+
+ __register_mbox_dev(mcore, mdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(register_mbox_dev);
+
+int unregister_mbox_dev(enum mbox_act act, struct mailbox_dev *mdev)
+{
+ struct mailbox_core *mcore;
+ int ret = 0;
+
+ /* sanity check */
+ ret = mbox_ctrl_sanity_check(mdev->core, act);
+ if (ret)
+ return ret;
+
+ mcore = &mbox.core[act][mdev->core];
+
+ /* check whether this cmd id is actually registered */
+ if (!(mcore->registered_cmd & (0x1 << mdev->cmd_id)))
+ return 0;
+
+ __unregister_mbox_dev(mcore, mdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(unregister_mbox_dev);
+
+void mtk_tops_mbox_clear_all_cmd(void)
+{
+ u32 i, j;
+
+ mbox_write(TOPS_TOP_AP_TO_CM_CMD_CLR, 0xFFFFFFFF);
+ mbox_write(TOPS_TOP_CM_TO_AP_CMD_CLR, 0xFFFFFFFF);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ mbox_write(TOPS_CLUST0_CX_TO_CM_CMD_CLR(i), 0xFFFFFFFF);
+ mbox_write(TOPS_CLUST0_CM_TO_CX_CMD_CLR(i), 0xFFFFFFFF);
+ mbox_write(TOPS_CLUST0_CX_TO_AP_CMD_CLR(i), 0xFFFFFFFF);
+ mbox_write(TOPS_CLUST0_AP_TO_CX_CMD_CLR(i), 0xFFFFFFFF);
+
+ for (j = 0; j < CORE_OFFLOAD_NUM; j++) {
+ if (i == j)
+ continue;
+
+ mbox_write(TOPS_CLUST0_CX_TO_CY_CMD_CLR(i, j), 0xFFFFFFFF);
+ }
+ }
+}
+
+static int mtk_tops_mbox_probe(struct platform_device *pdev)
+{
+ struct device_node *tops = NULL;
+ struct resource res;
+ int irq = platform_get_irq_byname(pdev, "mbox");
+ int ret = 0;
+ u32 idx = 0;
+
+ mbox.dev = &pdev->dev;
+
+ tops = of_parse_phandle(pdev->dev.of_node, "tops", 0);
+ if (!tops) {
+ dev_err(mbox.dev, "can not find tops node\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(tops, 0, &res))
+ return -ENXIO;
+
+ mbox.base = devm_ioremap(mbox.dev, res.start, resource_size(&res));
+ if (!mbox.base)
+ return -ENOMEM;
+
+ if (irq < 0) {
+ dev_err(mbox.dev, "get mbox irq failed\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq,
+ mtk_tops_mbox_handler,
+ IRQF_ONESHOT,
+ pdev->name, NULL);
+ if (ret) {
+ dev_err(mbox.dev, "request mbox irq failed\n");
+ return ret;
+ }
+
+ for (idx = 0; idx < CORE_MAX; idx++) {
+ INIT_LIST_HEAD(&mbox.core[MBOX_SEND][idx].mdev_list);
+ INIT_LIST_HEAD(&mbox.core[MBOX_RECV][idx].mdev_list);
+ spin_lock_init(&mbox.core[MBOX_SEND][idx].lock);
+ spin_lock_init(&mbox.core[MBOX_RECV][idx].lock);
+ }
+
+ mtk_tops_mbox_clear_all_cmd();
+
+ return ret;
+}
+
+static int mtk_tops_mbox_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct of_device_id mtk_mbox_match[] = {
+ { .compatible = "mediatek,tops-mbox", },
+ { },
+};
+
+static struct platform_driver mtk_tops_mbox_driver = {
+ .probe = mtk_tops_mbox_probe,
+ .remove = mtk_tops_mbox_remove,
+ .driver = {
+ .name = "mediatek,tops-mbox",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_mbox_match,
+ },
+};
+
+int __init mtk_tops_mbox_init(void)
+{
+ return platform_driver_register(&mtk_tops_mbox_driver);
+}
+
+void __exit mtk_tops_mbox_exit(void)
+{
+ platform_driver_unregister(&mtk_tops_mbox_driver);
+}
diff --git a/package-21.02/kernel/tops/src/mcu.c b/package-21.02/kernel/tops/src/mcu.c
new file mode 100644
index 0000000..94f465c
--- /dev/null
+++ b/package-21.02/kernel/tops/src/mcu.c
@@ -0,0 +1,1492 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include <pce/pce.h>
+
+#include "ctrl.h"
+#include "firmware.h"
+#include "hpdma.h"
+#include "internal.h"
+#include "mbox.h"
+#include "mcu.h"
+#include "netsys.h"
+#include "tdma.h"
+#include "trm.h"
+
+#define TDMA_TIMEOUT_MAX_CNT (3)
+#define TDMA_TIMEOUT_DELAY (100) /* 100ms */
+
+#define MCU_STATE_TRANS_TIMEOUT (5000) /* 5000ms */
+#define MCU_CTRL_DONE_BIT (31)
+#define MCU_CTRL_DONE (CORE_TOPS_MASK | \
+ BIT(MCU_CTRL_DONE_BIT))
+
+/* TRM dump length */
+#define TOP_CORE_BASE_LEN (0x80)
+#define TOP_L2SRAM_LEN (0x40000)
+#define TOP_CORE_M_XTCM_LEN (0x8000)
+
+#define CLUST_CORE_BASE_LEN (0x80)
+#define CLUST_L2SRAM_LEN (0x40000)
+#define CLUST_CORE_X_XTCM_LEN (0x8000)
+
+/* MCU State */
+#define MCU_STATE_FUNC_DECLARE(name) \
+static int mtk_tops_mcu_state_ ## name ## _enter(struct mcu_state *state); \
+static int mtk_tops_mcu_state_ ## name ## _leave(struct mcu_state *state); \
+static struct mcu_state *mtk_tops_mcu_state_ ## name ## _trans( \
+ u32 mcu_act, \
+ struct mcu_state *state)
+
+#define MCU_STATE_DATA(name, id) \
+ [id] = { \
+ .state = id, \
+ .state_trans = mtk_tops_mcu_state_ ## name ## _trans, \
+ .enter = mtk_tops_mcu_state_ ## name ## _enter, \
+ .leave = mtk_tops_mcu_state_ ## name ## _leave, \
+ }
+
+static inline void mcu_ctrl_issue_pending_act(u32 mcu_act);
+static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg);
+static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg);
+static int mcu_trm_hw_dump(void *dst, u32 ofs, u32 len);
+
+MCU_STATE_FUNC_DECLARE(shutdown);
+MCU_STATE_FUNC_DECLARE(init);
+MCU_STATE_FUNC_DECLARE(freerun);
+MCU_STATE_FUNC_DECLARE(stall);
+MCU_STATE_FUNC_DECLARE(netstop);
+MCU_STATE_FUNC_DECLARE(reset);
+MCU_STATE_FUNC_DECLARE(abnormal);
+
+struct npu {
+ void __iomem *base;
+
+ struct clk *bus_clk;
+ struct clk *sram_clk;
+ struct clk *xdma_clk;
+ struct clk *offload_clk;
+ struct clk *mgmt_clk;
+
+ struct device **pd_devices;
+ struct device_link **pd_links;
+ int pd_num;
+
+ struct task_struct *mcu_ctrl_thread;
+ struct timer_list mcu_ctrl_timer;
+ struct mcu_state *next_state;
+ struct mcu_state *cur_state;
+ /* ensure that only 1 user can trigger state transition at a time */
+ struct mutex mcu_ctrl_lock;
+ spinlock_t pending_act_lock;
+ wait_queue_head_t mcu_ctrl_wait_act;
+ wait_queue_head_t mcu_state_wait_done;
+ bool mcu_bring_up_done;
+ bool state_trans_fail;
+ u32 pending_act;
+
+ spinlock_t ctrl_done_lock;
+ wait_queue_head_t mcu_ctrl_wait_done;
+ enum mcu_cmd_type ctrl_done_cmd;
+ /* MSB = 1 means MCU control is done; otherwise it is still ongoing */
+ u32 ctrl_done;
+
+ struct work_struct recover_work;
+ bool in_reset;
+ bool in_recover;
+ bool netsys_fe_ser;
+ bool shuting_down;
+
+ struct mailbox_msg ctrl_msg;
+ struct mailbox_dev recv_mgmt_mbox_dev;
+ struct mailbox_dev send_mgmt_mbox_dev;
+
+ struct mailbox_dev recv_offload_mbox_dev[CORE_OFFLOAD_NUM];
+ struct mailbox_dev send_offload_mbox_dev[CORE_OFFLOAD_NUM];
+};
+
+static struct mcu_state mcu_states[__MCU_STATE_TYPE_MAX] = {
+ MCU_STATE_DATA(shutdown, MCU_STATE_TYPE_SHUTDOWN),
+ MCU_STATE_DATA(init, MCU_STATE_TYPE_INIT),
+ MCU_STATE_DATA(freerun, MCU_STATE_TYPE_FREERUN),
+ MCU_STATE_DATA(stall, MCU_STATE_TYPE_STALL),
+ MCU_STATE_DATA(netstop, MCU_STATE_TYPE_NETSTOP),
+ MCU_STATE_DATA(reset, MCU_STATE_TYPE_RESET),
+ MCU_STATE_DATA(abnormal, MCU_STATE_TYPE_ABNORMAL),
+};
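+
+/*
+ * State machine summary (derived from the *_trans handlers below):
+ * SHUTDOWN -> INIT; INIT -> FREERUN or NETSTOP; FREERUN -> STALL, RESET or
+ * NETSTOP; STALL -> FREERUN, RESET or NETSTOP; NETSTOP -> ABNORMAL, RESET or
+ * SHUTDOWN; RESET -> FREERUN, SHUTDOWN or ABNORMAL; ABNORMAL -> SHUTDOWN.
+ * Transitions are requested as MCU_ACT_* bits through
+ * mcu_ctrl_issue_pending_act() and executed by mcu_ctrl_task().
+ */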
+
+static struct npu npu = {
+ .send_mgmt_mbox_dev = MBOX_SEND_MGMT_DEV(CORE_CTRL),
+ .send_offload_mbox_dev = {
+ [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, CORE_CTRL),
+ [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, CORE_CTRL),
+ [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, CORE_CTRL),
+ [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, CORE_CTRL),
+ },
+ .recv_mgmt_mbox_dev =
+ MBOX_RECV_MGMT_DEV(CORE_CTRL, mtk_tops_ap_recv_mgmt_mbox_msg),
+ .recv_offload_mbox_dev = {
+ [CORE_OFFLOAD_0] =
+ MBOX_RECV_OFFLOAD_DEV(0,
+ CORE_CTRL,
+ mtk_tops_ap_recv_offload_mbox_msg
+ ),
+ [CORE_OFFLOAD_1] =
+ MBOX_RECV_OFFLOAD_DEV(1,
+ CORE_CTRL,
+ mtk_tops_ap_recv_offload_mbox_msg
+ ),
+ [CORE_OFFLOAD_2] =
+ MBOX_RECV_OFFLOAD_DEV(2,
+ CORE_CTRL,
+ mtk_tops_ap_recv_offload_mbox_msg
+ ),
+ [CORE_OFFLOAD_3] =
+ MBOX_RECV_OFFLOAD_DEV(3,
+ CORE_CTRL,
+ mtk_tops_ap_recv_offload_mbox_msg
+ ),
+ },
+};
+
+static struct trm_config mcu_trm_cfgs[] = {
+ {
+ TRM_CFG_EN("top-core-base",
+ TOP_CORE_BASE, TOP_CORE_BASE_LEN,
+ 0x0, TOP_CORE_BASE_LEN,
+ 0)
+ },
+ {
+ TRM_CFG_EN("clust-core0-base",
+ CLUST_CORE_BASE(0), CLUST_CORE_BASE_LEN,
+ 0x0, CLUST_CORE_BASE_LEN,
+ 0)
+ },
+ {
+ TRM_CFG_EN("clust-core1-base",
+ CLUST_CORE_BASE(1), CLUST_CORE_BASE_LEN,
+ 0x0, CLUST_CORE_BASE_LEN,
+ 0)
+ },
+ {
+ TRM_CFG_EN("clust-core2-base",
+ CLUST_CORE_BASE(2), CLUST_CORE_BASE_LEN,
+ 0x0, CLUST_CORE_BASE_LEN,
+ 0)
+ },
+ {
+ TRM_CFG_EN("clust-core3-base",
+ CLUST_CORE_BASE(3), CLUST_CORE_BASE_LEN,
+ 0x0, CLUST_CORE_BASE_LEN,
+ 0)
+ },
+ {
+ TRM_CFG_CORE_DUMP_EN("top-core-m-dtcm",
+ TOP_CORE_M_DTCM, TOP_CORE_M_XTCM_LEN,
+ 0x0, TOP_CORE_M_XTCM_LEN,
+ 0, CORE_MGMT)
+ },
+ {
+ TRM_CFG_CORE_DUMP_EN("clust-core-0-dtcm",
+ CLUST_CORE_X_DTCM(0), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0, CORE_OFFLOAD_0)
+ },
+ {
+ TRM_CFG_CORE_DUMP_EN("clust-core-1-dtcm",
+ CLUST_CORE_X_DTCM(1), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0, CORE_OFFLOAD_1)
+ },
+ {
+ TRM_CFG_CORE_DUMP_EN("clust-core-2-dtcm",
+ CLUST_CORE_X_DTCM(2), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0, CORE_OFFLOAD_2)
+ },
+ {
+ TRM_CFG_CORE_DUMP_EN("clust-core-3-dtcm",
+ CLUST_CORE_X_DTCM(3), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0, CORE_OFFLOAD_3)
+ },
+ {
+ TRM_CFG("top-core-m-itcm",
+ TOP_CORE_M_ITCM, TOP_CORE_M_XTCM_LEN,
+ 0x0, TOP_CORE_M_XTCM_LEN,
+ 0)
+ },
+ {
+ TRM_CFG("clust-core-0-itcm",
+ CLUST_CORE_X_ITCM(0), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0)
+ },
+ {
+ TRM_CFG("clust-core-1-itcm",
+ CLUST_CORE_X_ITCM(1), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0)
+ },
+ {
+ TRM_CFG("clust-core-2-itcm",
+ CLUST_CORE_X_ITCM(2), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0)
+ },
+ {
+ TRM_CFG("clust-core-3-itcm",
+ CLUST_CORE_X_ITCM(3), CLUST_CORE_X_XTCM_LEN,
+ 0x0, CLUST_CORE_X_XTCM_LEN,
+ 0)
+ },
+ {
+ TRM_CFG("top-l2sram",
+ TOP_L2SRAM, TOP_L2SRAM_LEN,
+ 0x0, TOP_L2SRAM_LEN,
+ 0)
+ },
+ {
+ TRM_CFG_EN("clust-l2sram",
+ CLUST_L2SRAM, CLUST_L2SRAM_LEN,
+ 0x38000, 0x8000,
+ 0)
+ },
+};
+
+static struct trm_hw_config mcu_trm_hw_cfg = {
+ .trm_cfgs = mcu_trm_cfgs,
+ .cfg_len = ARRAY_SIZE(mcu_trm_cfgs),
+ .trm_hw_dump = mcu_trm_hw_dump,
+};
+
+static inline void npu_write(u32 reg, u32 val)
+{
+ writel(val, npu.base + reg);
+}
+
+static inline void npu_set(u32 reg, u32 mask)
+{
+ setbits(npu.base + reg, mask);
+}
+
+static inline void npu_clr(u32 reg, u32 mask)
+{
+ clrbits(npu.base + reg, mask);
+}
+
+static inline void npu_rmw(u32 reg, u32 mask, u32 val)
+{
+ clrsetbits(npu.base + reg, mask, val);
+}
+
+static inline u32 npu_read(u32 reg)
+{
+ return readl(npu.base + reg);
+}
+
+static int mcu_trm_hw_dump(void *dst, u32 start_addr, u32 len)
+{
+ u32 ofs;
+
+ if (unlikely(!dst))
+ return -ENODEV;
+
+ for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
+ writel(npu_read(start_addr + ofs), dst + ofs);
+
+ return 0;
+}
+
+static int mcu_power_on(void)
+{
+ int ret = 0;
+
+ ret = clk_prepare_enable(npu.bus_clk);
+ if (ret) {
+ TOPS_ERR("bus clk enable failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(npu.sram_clk);
+ if (ret) {
+ TOPS_ERR("sram clk enable failed: %d\n", ret);
+ goto err_disable_bus_clk;
+ }
+
+ ret = clk_prepare_enable(npu.xdma_clk);
+ if (ret) {
+ TOPS_ERR("xdma clk enable failed: %d\n", ret);
+ goto err_disable_sram_clk;
+ }
+
+ ret = clk_prepare_enable(npu.offload_clk);
+ if (ret) {
+ TOPS_ERR("offload clk enable failed: %d\n", ret);
+ goto err_disable_xdma_clk;
+ }
+
+ ret = clk_prepare_enable(npu.mgmt_clk);
+ if (ret) {
+ TOPS_ERR("mgmt clk enable failed: %d\n", ret);
+ goto err_disable_offload_clk;
+ }
+
+ ret = pm_runtime_get_sync(tops_dev);
+ if (ret < 0) {
+ TOPS_ERR("power on failed: %d\n", ret);
+ goto err_disable_mgmt_clk;
+ }
+
+ return ret;
+
+err_disable_mgmt_clk:
+ clk_disable_unprepare(npu.mgmt_clk);
+
+err_disable_offload_clk:
+ clk_disable_unprepare(npu.offload_clk);
+
+err_disable_xdma_clk:
+ clk_disable_unprepare(npu.xdma_clk);
+
+err_disable_sram_clk:
+ clk_disable_unprepare(npu.sram_clk);
+
+err_disable_bus_clk:
+ clk_disable_unprepare(npu.bus_clk);
+
+ return ret;
+}
+
+static void mcu_power_off(void)
+{
+ pm_runtime_put_sync(tops_dev);
+
+ clk_disable_unprepare(npu.mgmt_clk);
+
+ clk_disable_unprepare(npu.offload_clk);
+
+ clk_disable_unprepare(npu.xdma_clk);
+
+ clk_disable_unprepare(npu.sram_clk);
+
+ clk_disable_unprepare(npu.bus_clk);
+}
+
+static inline int mcu_state_send_cmd(struct mcu_state *state)
+{
+ unsigned long flag;
+ enum core_id core;
+ u32 ctrl_cpu;
+ int ret = 0;
+
+ spin_lock_irqsave(&npu.ctrl_done_lock, flag);
+ ctrl_cpu = (~npu.ctrl_done) & CORE_TOPS_MASK;
+ spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
+
+ if (ctrl_cpu & BIT(CORE_MGMT)) {
+ ret = mbox_send_msg_no_wait(&npu.send_mgmt_mbox_dev,
+ &npu.ctrl_msg);
+ if (ret)
+ goto out;
+ }
+
+ for (core = CORE_OFFLOAD_0; core < CORE_OFFLOAD_NUM; core++) {
+ if (ctrl_cpu & BIT(core)) {
+ ret = mbox_send_msg_no_wait(&npu.send_offload_mbox_dev[core],
+ &npu.ctrl_msg);
+ if (ret)
+ goto out;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static inline void mcu_state_trans_start(void)
+{
+ mod_timer(&npu.mcu_ctrl_timer,
+ jiffies + msecs_to_jiffies(MCU_STATE_TRANS_TIMEOUT));
+}
+
+static inline void mcu_state_trans_end(void)
+{
+ del_timer_sync(&npu.mcu_ctrl_timer);
+}
+
+static inline void mcu_state_trans_err(void)
+{
+ wake_up_interruptible(&npu.mcu_ctrl_wait_done);
+}
+
+static inline int mcu_state_wait_complete(void (*state_complete_cb)(void))
+{
+ unsigned long flag;
+ int ret = 0;
+
+ wait_event_interruptible(npu.mcu_state_wait_done,
+ (npu.ctrl_done == CORE_TOPS_MASK) ||
+ (npu.state_trans_fail));
+
+ if (npu.state_trans_fail)
+ return -EINVAL;
+
+ npu.ctrl_msg.msg1 = npu.ctrl_done_cmd;
+
+ spin_lock_irqsave(&npu.ctrl_done_lock, flag);
+ npu.ctrl_done |= BIT(MCU_CTRL_DONE_BIT);
+ spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
+
+ if (state_complete_cb)
+ state_complete_cb();
+
+ wake_up_interruptible(&npu.mcu_ctrl_wait_done);
+
+ return ret;
+}
+
+static inline void mcu_state_prepare_wait(enum mcu_cmd_type done_cmd)
+{
+ unsigned long flag;
+
+ /* if the user does not specify which CPUs to control, control all CPUs by default */
+ spin_lock_irqsave(&npu.ctrl_done_lock, flag);
+ if ((npu.ctrl_done & CORE_TOPS_MASK) == CORE_TOPS_MASK)
+ npu.ctrl_done = 0;
+ spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
+
+ npu.ctrl_done_cmd = done_cmd;
+}
+
+static struct mcu_state *mtk_tops_mcu_state_shutdown_trans(u32 mcu_act,
+ struct mcu_state *state)
+{
+ if (mcu_act == MCU_ACT_INIT)
+ return &mcu_states[MCU_STATE_TYPE_INIT];
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int mtk_tops_mcu_state_shutdown_enter(struct mcu_state *state)
+{
+ mcu_power_off();
+
+ mtk_tops_tdma_record_last_state();
+
+ mtk_tops_fw_clean_up();
+
+ npu.mcu_bring_up_done = false;
+
+ if (npu.shuting_down) {
+ npu.shuting_down = false;
+ wake_up_interruptible(&npu.mcu_ctrl_wait_done);
+
+ return 0;
+ }
+
+ if (npu.in_recover || npu.in_reset)
+ mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
+
+ return 0;
+}
+
+static int mtk_tops_mcu_state_shutdown_leave(struct mcu_state *state)
+{
+ return 0;
+}
+
+static struct mcu_state *mtk_tops_mcu_state_init_trans(u32 mcu_act,
+ struct mcu_state *state)
+{
+ if (mcu_act == MCU_ACT_FREERUN)
+ return &mcu_states[MCU_STATE_TYPE_FREERUN];
+ else if (mcu_act == MCU_ACT_NETSTOP)
+ return &mcu_states[MCU_STATE_TYPE_NETSTOP];
+
+ return ERR_PTR(-ENODEV);
+}
+
+static void mtk_tops_mcu_state_init_enter_complete_cb(void)
+{
+ npu.mcu_bring_up_done = true;
+ npu.in_reset = false;
+ npu.in_recover = false;
+ npu.netsys_fe_ser = false;
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
+}
+
+static int mtk_tops_mcu_state_init_enter(struct mcu_state *state)
+{
+ int ret = 0;
+
+ ret = mcu_power_on();
+ if (ret)
+ return ret;
+
+ mtk_tops_mbox_clear_all_cmd();
+
+ /* reset TDMA first */
+ mtk_tops_tdma_reset();
+
+ npu.ctrl_done = 0;
+ mcu_state_prepare_wait(MCU_CMD_TYPE_INIT_DONE);
+
+ ret = mtk_tops_fw_bring_up_default_cores();
+ if (ret) {
+ TOPS_ERR("bring up TOPS cores failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = mcu_state_wait_complete(mtk_tops_mcu_state_init_enter_complete_cb);
+ if (unlikely(ret))
+ TOPS_ERR("init leave failed\n");
+
+out:
+ return ret;
+}
+
+static int mtk_tops_mcu_state_init_leave(struct mcu_state *state)
+{
+ int ret;
+
+ mtk_tops_tdma_enable();
+
+ mtk_tops_tnl_offload_recover();
+
+ /* enable cls, dipfilter */
+ ret = mtk_pce_enable();
+ if (ret) {
+ TOPS_ERR("netsys enable failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static struct mcu_state *mtk_tops_mcu_state_freerun_trans(u32 mcu_act,
+ struct mcu_state *state)
+{
+ if (mcu_act == MCU_ACT_RESET)
+ return &mcu_states[MCU_STATE_TYPE_RESET];
+ else if (mcu_act == MCU_ACT_STALL)
+ return &mcu_states[MCU_STATE_TYPE_STALL];
+ else if (mcu_act == MCU_ACT_NETSTOP)
+ return &mcu_states[MCU_STATE_TYPE_NETSTOP];
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int mtk_tops_mcu_state_freerun_enter(struct mcu_state *state)
+{
+ /* TODO : switch to HW path */
+
+ return 0;
+}
+
+static int mtk_tops_mcu_state_freerun_leave(struct mcu_state *state)
+{
+ /* TODO : switch to SW path */
+
+ return 0;
+}
+
+static struct mcu_state *mtk_tops_mcu_state_stall_trans(u32 mcu_act,
+ struct mcu_state *state)
+{
+ if (mcu_act == MCU_ACT_RESET)
+ return &mcu_states[MCU_STATE_TYPE_RESET];
+ else if (mcu_act == MCU_ACT_FREERUN)
+ return &mcu_states[MCU_STATE_TYPE_FREERUN];
+ else if (mcu_act == MCU_ACT_NETSTOP)
+ return &mcu_states[MCU_STATE_TYPE_NETSTOP];
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int mtk_tops_mcu_state_stall_enter(struct mcu_state *state)
+{
+ int ret = 0;
+
+ mcu_state_prepare_wait(MCU_CMD_TYPE_STALL_DONE);
+
+ ret = mcu_state_send_cmd(state);
+ if (ret)
+ return ret;
+
+ ret = mcu_state_wait_complete(NULL);
+ if (ret)
+ TOPS_ERR("stall enter failed\n");
+
+ return ret;
+}
+
+static int mtk_tops_mcu_state_stall_leave(struct mcu_state *state)
+{
+ int ret = 0;
+
+ /*
+ * if the next state is going to stop the network,
+ * do not send the freerun cmd to the MCU since stopping the network
+ * will abort the stall anyway
+ */
+ if (npu.next_state->state == MCU_STATE_TYPE_NETSTOP)
+ return 0;
+
+ mcu_state_prepare_wait(MCU_CMD_TYPE_FREERUN_DONE);
+
+ ret = mcu_state_send_cmd(state);
+ if (ret)
+ return ret;
+
+ ret = mcu_state_wait_complete(NULL);
+ if (ret)
+ TOPS_ERR("stall leave failed\n");
+
+ return ret;
+}
+
+static struct mcu_state *mtk_tops_mcu_state_netstop_trans(u32 mcu_act,
+ struct mcu_state *state)
+{
+ if (mcu_act == MCU_ACT_ABNORMAL)
+ return &mcu_states[MCU_STATE_TYPE_ABNORMAL];
+ else if (mcu_act == MCU_ACT_RESET)
+ return &mcu_states[MCU_STATE_TYPE_RESET];
+ else if (mcu_act == MCU_ACT_SHUTDOWN)
+ return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int mtk_tops_mcu_state_netstop_enter(struct mcu_state *state)
+{
+ mtk_tops_tnl_offload_flush();
+
+ mtk_pce_disable();
+
+ mtk_tops_tdma_disable();
+
+ if (npu.in_recover)
+ mcu_ctrl_issue_pending_act(MCU_ACT_ABNORMAL);
+ else if (npu.in_reset)
+ mcu_ctrl_issue_pending_act(MCU_ACT_RESET);
+ else
+ mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);
+
+ return 0;
+}
+
+static int mtk_tops_mcu_state_netstop_leave(struct mcu_state *state)
+{
+ return 0;
+}
+
+static struct mcu_state *mtk_tops_mcu_state_reset_trans(u32 mcu_act,
+ struct mcu_state *state)
+{
+ if (mcu_act == MCU_ACT_FREERUN)
+ return &mcu_states[MCU_STATE_TYPE_FREERUN];
+ else if (mcu_act == MCU_ACT_SHUTDOWN)
+ return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
+ else if (mcu_act == MCU_ACT_NETSTOP)
+ /*
+ * since netstop is already done before reset,
+ * there is no need to do it again. We just go to abnormal directly
+ */
+ return &mcu_states[MCU_STATE_TYPE_ABNORMAL];
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int mtk_tops_mcu_state_reset_enter(struct mcu_state *state)
+{
+ int ret = 0;
+
+ mcu_state_prepare_wait(MCU_CMD_TYPE_ASSERT_RESET_DONE);
+
+ if (!npu.netsys_fe_ser) {
+ ret = mcu_state_send_cmd(state);
+ if (ret)
+ return ret;
+ } else {
+ /* skip asserting MCU reset if this is a NETSYS SER */
+ npu.ctrl_done = CORE_TOPS_MASK;
+ }
+
+ ret = mcu_state_wait_complete(NULL);
+ if (ret)
+ TOPS_ERR("assert reset failed\n");
+
+ return ret;
+}
+
+static int mtk_tops_mcu_state_reset_leave(struct mcu_state *state)
+{
+ int ret = 0;
+
+ /*
+ * if the next state is abnormal or shutdown,
+ * there is no need to send the release reset cmd to the MCU
+ */
+ if (npu.next_state->state == MCU_STATE_TYPE_ABNORMAL
+ || npu.next_state->state == MCU_STATE_TYPE_SHUTDOWN)
+ return 0;
+
+ mcu_state_prepare_wait(MCU_CMD_TYPE_RELEASE_RESET_DONE);
+
+ ret = mcu_state_send_cmd(state);
+ if (ret)
+ return ret;
+
+ ret = mcu_state_wait_complete(NULL);
+ if (ret)
+ TOPS_ERR("release reset failed\n");
+
+ return ret;
+}
+
+static struct mcu_state *mtk_tops_mcu_state_abnormal_trans(u32 mcu_act,
+ struct mcu_state *state)
+{
+ if (mcu_act == MCU_ACT_SHUTDOWN)
+ return &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int mtk_tops_mcu_state_abnormal_enter(struct mcu_state *state)
+{
+ mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);
+
+ return 0;
+}
+
+static int mtk_tops_mcu_state_abnormal_leave(struct mcu_state *state)
+{
+ if (npu.mcu_bring_up_done)
+ mtk_trm_dump(TRM_RSN_MCU_STATE_ACT_FAIL);
+
+ return 0;
+}
+
+static int mtk_tops_mcu_state_transition(u32 mcu_act)
+{
+ int ret = 0;
+
+ npu.next_state = npu.cur_state->state_trans(mcu_act, npu.cur_state);
+ if (IS_ERR(npu.next_state))
+ return PTR_ERR(npu.next_state);
+
+ /* skip mcu_state leave if current MCU_ACT has failure */
+ if (unlikely(mcu_act == MCU_ACT_ABNORMAL))
+ goto skip_state_leave;
+
+ mcu_state_trans_start();
+ if (npu.cur_state->leave) {
+ ret = npu.cur_state->leave(npu.cur_state);
+ if (ret) {
+ TOPS_ERR("state%d transition leave failed: %d\n",
+ npu.cur_state->state, ret);
+ goto state_trans_end;
+ }
+ }
+ mcu_state_trans_end();
+
+skip_state_leave:
+ npu.cur_state = npu.next_state;
+
+ mcu_state_trans_start();
+ if (npu.cur_state->enter) {
+ ret = npu.cur_state->enter(npu.cur_state);
+ if (ret) {
+ TOPS_ERR("state%d transition enter failed: %d\n",
+ npu.cur_state->state, ret);
+ goto state_trans_end;
+ }
+ }
+
+state_trans_end:
+ mcu_state_trans_end();
+
+ return ret;
+}
+
+static void mtk_tops_mcu_state_trans_timeout(struct timer_list *timer)
+{
+ TOPS_ERR("state%d transition timeout!\n", npu.cur_state->state);
+ TOPS_ERR("ctrl_done=0x%x ctrl_msg.msg1: 0x%x\n",
+ npu.ctrl_done, npu.ctrl_msg.msg1);
+
+ npu.state_trans_fail = true;
+
+ wake_up_interruptible(&npu.mcu_state_wait_done);
+}
+
+static inline int mcu_ctrl_cmd_prepare(enum mcu_cmd_type cmd,
+ struct mcu_ctrl_cmd *mcmd)
+{
+ if (!mcmd || cmd == MCU_CMD_TYPE_NULL || cmd >= __MCU_CMD_TYPE_MAX)
+ return -EINVAL;
+
+ lockdep_assert_held(&npu.mcu_ctrl_lock);
+
+ npu.ctrl_msg.msg1 = cmd;
+ npu.ctrl_msg.msg2 = mcmd->e;
+ npu.ctrl_msg.msg3 = mcmd->arg[0];
+ npu.ctrl_msg.msg4 = mcmd->arg[1];
+
+ if (mcmd->core_mask) {
+ unsigned long flag;
+
+ spin_lock_irqsave(&npu.ctrl_done_lock, flag);
+ npu.ctrl_done = ~(CORE_TOPS_MASK & mcmd->core_mask);
+ npu.ctrl_done &= CORE_TOPS_MASK;
+ spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
+ }
+
+ return 0;
+}
+
+static inline void mcu_ctrl_callback(void (*callback)(void *param), void *param)
+{
+ if (callback)
+ callback(param);
+}
+
+static inline void mcu_ctrl_issue_pending_act(u32 mcu_act)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&npu.pending_act_lock, flag);
+
+ npu.pending_act |= mcu_act;
+
+ spin_unlock_irqrestore(&npu.pending_act_lock, flag);
+
+ wake_up_interruptible(&npu.mcu_ctrl_wait_act);
+}
+
+static inline enum mcu_act mcu_ctrl_pop_pending_act(void)
+{
+ unsigned long flag;
+ enum mcu_act act;
+
+ spin_lock_irqsave(&npu.pending_act_lock, flag);
+
+ act = ffs(npu.pending_act) - 1;
+ npu.pending_act &= ~BIT(act);
+
+ spin_unlock_irqrestore(&npu.pending_act_lock, flag);
+
+ return act;
+}
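+
+/*
+ * pending_act collects MCU_ACT_* requests as a bitmask;
+ * mcu_ctrl_pop_pending_act() serves the lowest-set bit first, so
+ * lower-numbered actions take priority over higher-numbered ones.
+ */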
+
+static inline bool mcu_ctrl_is_complete(enum mcu_cmd_type done_cmd)
+{
+ unsigned long flag;
+ bool ctrl_done;
+
+ spin_lock_irqsave(&npu.ctrl_done_lock, flag);
+ ctrl_done = npu.ctrl_done == MCU_CTRL_DONE && npu.ctrl_msg.msg1 == done_cmd;
+ spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
+
+ return ctrl_done;
+}
+
+static inline void mcu_ctrl_done(enum core_id core)
+{
+ unsigned long flag;
+
+ if (core > CORE_MGMT)
+ return;
+
+ spin_lock_irqsave(&npu.ctrl_done_lock, flag);
+ npu.ctrl_done |= BIT(core);
+ spin_unlock_irqrestore(&npu.ctrl_done_lock, flag);
+}
+
+static int mcu_ctrl_task(void *data)
+{
+ enum mcu_act act;
+ int ret;
+
+ while (1) {
+ wait_event_interruptible(npu.mcu_ctrl_wait_act,
+ npu.pending_act || kthread_should_stop());
+
+ if (kthread_should_stop()) {
+ TOPS_INFO("tops mcu ctrl task stop\n");
+ break;
+ }
+
+ act = mcu_ctrl_pop_pending_act();
+ if (unlikely(act >= __MCU_ACT_MAX)) {
+ TOPS_ERR("invalid MCU act: %u\n", act);
+ continue;
+ }
+
+ /*
+ * ensure that the act was submitted by either mtk_tops_mcu_stall(),
+ * mtk_tops_mcu_reset() or mtk_tops_mcu_cold_boot();
+ * if mcu_act is ABNORMAL, it must have been caused by a state transition
+ * triggered by the APIs above.
+ * As a result, mcu_ctrl_lock must be held before mcu_ctrl_task starts.
+ */
+ lockdep_assert_held(&npu.mcu_ctrl_lock);
+
+ if (unlikely(!npu.cur_state->state_trans)) {
+ TOPS_ERR("cur state has no state_trans()\n");
+ WARN_ON(1);
+ }
+
+ ret = mtk_tops_mcu_state_transition(BIT(act));
+ if (ret) {
+ npu.state_trans_fail = true;
+
+ mcu_state_trans_err();
+ }
+ }
+ return 0;
+}
+
+bool mtk_tops_mcu_alive(void)
+{
+ return npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail;
+}
+
+bool mtk_tops_mcu_bring_up_done(void)
+{
+ return npu.mcu_bring_up_done;
+}
+
+bool mtk_tops_mcu_netsys_fe_rst(void)
+{
+ return npu.netsys_fe_ser;
+}
+
+static int mtk_tops_mcu_wait_done(enum mcu_cmd_type done_cmd)
+{
+ int ret = 0;
+
+ wait_event_interruptible(npu.mcu_ctrl_wait_done,
+ mcu_ctrl_is_complete(done_cmd)
+ || npu.state_trans_fail);
+
+ if (npu.state_trans_fail)
+ return -EINVAL;
+
+ return ret;
+}
+
+int mtk_tops_mcu_stall(struct mcu_ctrl_cmd *mcmd,
+ void (*callback)(void *param), void *param)
+{
+ int ret = 0;
+
+ if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
+ return -EBUSY;
+
+ if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
+ return -EINVAL;
+
+ mutex_lock(&npu.mcu_ctrl_lock);
+
+ /* go to stall state */
+ ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_STALL, mcmd);
+ if (ret)
+ goto unlock;
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_STALL);
+
+ ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_STALL_DONE);
+ if (ret) {
+ TOPS_ERR("tops stall failed: %d\n", ret);
+ goto recover_mcu;
+ }
+
+ mcu_ctrl_callback(callback, param);
+
+ /* go to freerun state */
+ ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_FREERUN, mcmd);
+ if (ret)
+ goto recover_mcu;
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
+
+ ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_FREERUN_DONE);
+ if (ret) {
+ TOPS_ERR("tops freerun failed: %d\n", ret);
+ goto recover_mcu;
+ }
+
+ /* stall freerun successfully done */
+ goto unlock;
+
+recover_mcu:
+ schedule_work(&npu.recover_work);
+
+unlock:
+ mutex_unlock(&npu.mcu_ctrl_lock);
+
+ return ret;
+}
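+
+/*
+ * Usage sketch (illustrative only, the my_* names are hypothetical): a caller
+ * stalls the offload cores around a critical update and lets the MCU return
+ * to freerun afterwards, e.g.
+ *
+ *   struct mcu_ctrl_cmd mcmd = {
+ *           .e = <an MCU_EVENT_TYPE_* reason>,
+ *           .core_mask = CORE_TOPS_MASK,
+ *   };
+ *
+ *   err = mtk_tops_mcu_stall(&mcmd, my_update_cb, my_update_data);
+ */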
+
+int mtk_tops_mcu_reset(struct mcu_ctrl_cmd *mcmd,
+ void (*callback)(void *param), void *param)
+{
+ int ret = 0;
+
+ if (unlikely(!npu.mcu_bring_up_done || npu.state_trans_fail))
+ return -EBUSY;
+
+ if (unlikely(!mcmd || mcmd->e >= __MCU_EVENT_TYPE_MAX))
+ return -EINVAL;
+
+ mutex_lock(&npu.mcu_ctrl_lock);
+
+ npu.in_reset = true;
+ if (mcmd->e == MCU_EVENT_TYPE_FE_RESET)
+ npu.netsys_fe_ser = true;
+
+ ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_ASSERT_RESET, mcmd);
+ if (ret)
+ goto unlock;
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
+
+ ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_ASSERT_RESET_DONE);
+ if (ret) {
+ TOPS_ERR("tops assert reset failed: %d\n", ret);
+ goto recover_mcu;
+ }
+
+ mcu_ctrl_callback(callback, param);
+
+ switch (mcmd->e) {
+ case MCU_EVENT_TYPE_WDT_TIMEOUT:
+ case MCU_EVENT_TYPE_FE_RESET:
+ mcu_ctrl_issue_pending_act(MCU_ACT_SHUTDOWN);
+
+ ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);
+ if (ret)
+ goto recover_mcu;
+
+ break;
+ default:
+ ret = mcu_ctrl_cmd_prepare(MCU_CMD_TYPE_RELEASE_RESET, mcmd);
+ if (ret)
+ goto recover_mcu;
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_FREERUN);
+
+ ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_RELEASE_RESET_DONE);
+ if (ret)
+ goto recover_mcu;
+
+ break;
+ }
+
+ goto unlock;
+
+recover_mcu:
+ schedule_work(&npu.recover_work);
+
+unlock:
+ mutex_unlock(&npu.mcu_ctrl_lock);
+
+ return ret;
+}
+
+static void mtk_tops_mcu_recover_work(struct work_struct *work)
+{
+ int ret;
+
+ mutex_lock(&npu.mcu_ctrl_lock);
+
+ if (!npu.mcu_bring_up_done && !npu.in_reset && !npu.state_trans_fail)
+ mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
+ else if (npu.in_reset || npu.state_trans_fail)
+ mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
+
+ npu.state_trans_fail = false;
+ npu.in_recover = true;
+
+ while ((ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE))) {
+ if (npu.shuting_down)
+ goto unlock;
+
+ npu.mcu_bring_up_done = false;
+ npu.state_trans_fail = false;
+ TOPS_ERR("bring up failed: %d\n", ret);
+
+ msleep(1000);
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
+ }
+
+unlock:
+ mutex_unlock(&npu.mcu_ctrl_lock);
+}
+
+static int mtk_tops_mcu_register_mbox(void)
+{
+ int ret;
+ int i;
+
+ ret = register_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
+ if (ret) {
+ TOPS_ERR("register mcu_ctrl mgmt mbox send failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = register_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
+ if (ret) {
+ TOPS_ERR("register mcu_ctrl mgmt mbox recv failed: %d\n", ret);
+ goto err_unregister_mgmt_mbox_send;
+ }
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ ret = register_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
+ if (ret) {
+ TOPS_ERR("register mcu_ctrl offload %d mbox send failed: %d\n",
+ i, ret);
+ goto err_unregister_offload_mbox;
+ }
+
+ ret = register_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
+ if (ret) {
+ TOPS_ERR("register mcu_ctrl offload %d mbox recv failed: %d\n",
+ i, ret);
+ unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
+ goto err_unregister_offload_mbox;
+ }
+ }
+
+ return ret;
+
+err_unregister_offload_mbox:
+ for (i -= 1; i >= 0; i--) {
+ unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
+ unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
+ }
+
+ unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
+
+err_unregister_mgmt_mbox_send:
+ unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
+
+ return ret;
+}
+
+static void mtk_tops_mcu_unregister_mbox(void)
+{
+ int i;
+
+ unregister_mbox_dev(MBOX_SEND, &npu.send_mgmt_mbox_dev);
+ unregister_mbox_dev(MBOX_RECV, &npu.recv_mgmt_mbox_dev);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ unregister_mbox_dev(MBOX_SEND, &npu.send_offload_mbox_dev[i]);
+ unregister_mbox_dev(MBOX_RECV, &npu.recv_offload_mbox_dev[i]);
+ }
+}
+
+static void mtk_tops_mcu_shutdown(void)
+{
+ npu.shuting_down = true;
+
+ mutex_lock(&npu.mcu_ctrl_lock);
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_NETSTOP);
+
+ wait_event_interruptible(npu.mcu_ctrl_wait_done,
+ !npu.mcu_bring_up_done && !npu.shuting_down);
+
+ mutex_unlock(&npu.mcu_ctrl_lock);
+}
+
+/* TODO: should be implemented so it does not block other modules' init tasks */
+static int mtk_tops_mcu_cold_boot(void)
+{
+ int ret = 0;
+
+ npu.cur_state = &mcu_states[MCU_STATE_TYPE_SHUTDOWN];
+
+ mutex_lock(&npu.mcu_ctrl_lock);
+
+ mcu_ctrl_issue_pending_act(MCU_ACT_INIT);
+ ret = mtk_tops_mcu_wait_done(MCU_CMD_TYPE_INIT_DONE);
+
+ mutex_unlock(&npu.mcu_ctrl_lock);
+ if (!ret)
+ return ret;
+
+ TOPS_ERR("cold boot failed: %d\n", ret);
+
+ schedule_work(&npu.recover_work);
+
+ return 0;
+}
+
+int mtk_tops_mcu_bring_up(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = mtk_tops_mcu_register_mbox();
+ if (ret) {
+ TOPS_ERR("register mcu ctrl mbox failed: %d\n", ret);
+ goto runtime_disable;
+ }
+
+ npu.mcu_ctrl_thread = kthread_run(mcu_ctrl_task, NULL, "tops mcu ctrl task");
+ if (IS_ERR(npu.mcu_ctrl_thread)) {
+ ret = PTR_ERR(npu.mcu_ctrl_thread);
+ TOPS_ERR("mcu ctrl thread create failed: %d\n", ret);
+ goto err_unregister_mbox;
+ }
+
+ ret = mtk_tops_mcu_cold_boot();
+ if (ret) {
+ TOPS_ERR("cold boot failed: %d\n", ret);
+ goto err_stop_mcu_ctrl_thread;
+ }
+
+ return ret;
+
+err_stop_mcu_ctrl_thread:
+ kthread_stop(npu.mcu_ctrl_thread);
+
+err_unregister_mbox:
+ mtk_tops_mcu_unregister_mbox();
+
+runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+void mtk_tops_mcu_tear_down(struct platform_device *pdev)
+{
+ mtk_tops_mcu_shutdown();
+
+ kthread_stop(npu.mcu_ctrl_thread);
+
+ /* TODO: stop mcu? */
+
+ mtk_tops_mcu_unregister_mbox();
+
+ pm_runtime_disable(&pdev->dev);
+}
+
+static int mtk_tops_mcu_dts_init(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct resource *res = NULL;
+ int ret = 0;
+
+ if (!node)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
+ if (!res) {
+ TOPS_ERR("can not find tops base\n");
+ return -ENXIO;
+ }
+
+ npu.base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!npu.base) {
+ TOPS_ERR("map tops base failed\n");
+ return -ENOMEM;
+ }
+
+ npu.bus_clk = devm_clk_get(tops_dev, "bus");
+ if (IS_ERR(npu.bus_clk)) {
+ TOPS_ERR("get bus clk failed: %ld\n", PTR_ERR(npu.bus_clk));
+ return PTR_ERR(npu.bus_clk);
+ }
+
+ npu.sram_clk = devm_clk_get(tops_dev, "sram");
+ if (IS_ERR(npu.sram_clk)) {
+ TOPS_ERR("get sram clk failed: %ld\n", PTR_ERR(npu.sram_clk));
+ return PTR_ERR(npu.sram_clk);
+ }
+
+ npu.xdma_clk = devm_clk_get(tops_dev, "xdma");
+ if (IS_ERR(npu.xdma_clk)) {
+ TOPS_ERR("get xdma clk failed: %ld\n", PTR_ERR(npu.xdma_clk));
+ return PTR_ERR(npu.xdma_clk);
+ }
+
+ npu.offload_clk = devm_clk_get(tops_dev, "offload");
+ if (IS_ERR(npu.offload_clk)) {
+ TOPS_ERR("get offload clk failed: %ld\n", PTR_ERR(npu.offload_clk));
+ return PTR_ERR(npu.offload_clk);
+ }
+
+ npu.mgmt_clk = devm_clk_get(tops_dev, "mgmt");
+ if (IS_ERR(npu.mgmt_clk)) {
+ TOPS_ERR("get mgmt clk failed: %ld\n", PTR_ERR(npu.mgmt_clk));
+ return PTR_ERR(npu.mgmt_clk);
+ }
+
+ return ret;
+}
+
+static void mtk_tops_mcu_pm_domain_detach(void)
+{
+ int i = npu.pd_num;
+
+ while (--i >= 0) {
+ device_link_del(npu.pd_links[i]);
+ dev_pm_domain_detach(npu.pd_devices[i], true);
+ }
+}
+
+static int mtk_tops_mcu_pm_domain_attach(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ int ret = 0;
+ int i;
+
+ npu.pd_num = of_count_phandle_with_args(dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+
+ /* only 1 power domain exists, no need to link devices */
+ if (npu.pd_num <= 1)
+ return 0;
+
+ npu.pd_devices = devm_kmalloc_array(dev, npu.pd_num,
+ sizeof(*npu.pd_devices),
+ GFP_KERNEL);
+ if (!npu.pd_devices)
+ return -ENOMEM;
+
+ npu.pd_links = devm_kmalloc_array(dev, npu.pd_num,
+ sizeof(*npu.pd_links),
+ GFP_KERNEL);
+ if (!npu.pd_links)
+ return -ENOMEM;
+
+ for (i = 0; i < npu.pd_num; i++) {
+ npu.pd_devices[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(npu.pd_devices[i])) {
+ ret = PTR_ERR(npu.pd_devices[i]);
+ goto pm_attach_fail;
+ }
+
+ npu.pd_links[i] = device_link_add(dev, npu.pd_devices[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME);
+ if (!npu.pd_links[i]) {
+ ret = -EINVAL;
+ dev_pm_domain_detach(npu.pd_devices[i], false);
+ goto pm_attach_fail;
+ }
+ }
+
+ return 0;
+
+pm_attach_fail:
+ TOPS_ERR("attach power domain failed: %d\n", ret);
+
+ while (--i >= 0) {
+ device_link_del(npu.pd_links[i]);
+ dev_pm_domain_detach(npu.pd_devices[i], false);
+ }
+
+ return ret;
+}
+
+int mtk_tops_mcu_init(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ dma_set_mask(tops_dev, DMA_BIT_MASK(32));
+
+ ret = mtk_tops_mcu_dts_init(pdev);
+ if (ret)
+ return ret;
+
+ ret = mtk_tops_mcu_pm_domain_attach(pdev);
+ if (ret)
+ return ret;
+
+ INIT_WORK(&npu.recover_work, mtk_tops_mcu_recover_work);
+ init_waitqueue_head(&npu.mcu_ctrl_wait_act);
+ init_waitqueue_head(&npu.mcu_ctrl_wait_done);
+ init_waitqueue_head(&npu.mcu_state_wait_done);
+ spin_lock_init(&npu.pending_act_lock);
+ spin_lock_init(&npu.ctrl_done_lock);
+ mutex_init(&npu.mcu_ctrl_lock);
+ timer_setup(&npu.mcu_ctrl_timer, mtk_tops_mcu_state_trans_timeout, 0);
+
+ ret = mtk_trm_hw_config_register(TRM_TOPS, &mcu_trm_hw_cfg);
+ if (ret) {
+ TOPS_ERR("TRM register failed: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+void mtk_tops_mcu_deinit(struct platform_device *pdev)
+{
+ mtk_trm_hw_config_unregister(TRM_TOPS, &mcu_trm_hw_cfg);
+
+ mtk_tops_mcu_pm_domain_detach();
+}
+
+static enum mbox_msg_cnt mtk_tops_ap_recv_mgmt_mbox_msg(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg)
+{
+ if (msg->msg1 == npu.ctrl_done_cmd)
+ /* mcu side state transition success */
+ mcu_ctrl_done(mdev->core);
+ else
+ /* mcu side state transition failed */
+ npu.state_trans_fail = true;
+
+ wake_up_interruptible(&npu.mcu_state_wait_done);
+
+ return MBOX_NO_RET_MSG;
+}
+
+static enum mbox_msg_cnt mtk_tops_ap_recv_offload_mbox_msg(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg)
+{
+ if (msg->msg1 == npu.ctrl_done_cmd)
+ /* mcu side state transition success */
+ mcu_ctrl_done(mdev->core);
+ else
+ /* mcu side state transition failed */
+ npu.state_trans_fail = true;
+
+ wake_up_interruptible(&npu.mcu_state_wait_done);
+
+ return MBOX_NO_RET_MSG;
+}
diff --git a/package-21.02/kernel/tops/src/net-event.c b/package-21.02/kernel/tops/src/net-event.c
new file mode 100644
index 0000000..7cc4553
--- /dev/null
+++ b/package-21.02/kernel/tops/src/net-event.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/device.h>
+#include <linux/hashtable.h>
+#include <linux/netdevice.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/notifier.h>
+#include <net/arp.h>
+#include <net/flow.h>
+#include <net/ip.h>
+#include <net/ip_tunnels.h>
+#include <net/netevent.h>
+#include <net/net_namespace.h>
+#include <net/neighbour.h>
+#include <net/route.h>
+
+#include "internal.h"
+#include "netsys.h"
+#include "net-event.h"
+#include "mcu.h"
+#include "ser.h"
+#include "trm.h"
+#include "tunnel.h"
+
+static struct completion wait_fe_reset_done;
+
+static void mtk_tops_netdev_ser_callback(struct tops_ser_params *ser_param)
+{
+ struct net_device *netdev = ser_param->data.net.ndev;
+
+ WARN_ON(ser_param->type != TOPS_SER_NETSYS_FE_RST);
+
+ mtk_trm_dump(TRM_RSN_FE_RESET);
+
+ /* send tops dump done notification to mtk eth */
+ rtnl_lock();
+ call_netdevice_notifiers(MTK_TOPS_DUMP_DONE, netdev);
+ rtnl_unlock();
+
+ /* wait for FE reset done notification */
+ /* TODO: handle the case where the FE reset done notification never arrives */
+ wait_for_completion(&wait_fe_reset_done);
+}
+
+static inline void mtk_tops_netdev_ser(struct net_device *dev)
+{
+ struct tops_ser_params ser_params = {
+ .type = TOPS_SER_NETSYS_FE_RST,
+ .data.net.ndev = dev,
+ .ser_callback = mtk_tops_netdev_ser_callback,
+ };
+
+ mtk_tops_ser(&ser_params);
+}
+
+/* TODO: update tunnel status when user delete or change tunnel parameters */
+/*
+ * eth sends out the MTK_FE_START_RESET event when it detects a WDMA
+ * abnormality, or the MTK_FE_STOP_TRAFFIC event when it detects a QDMA, ADMA
+ * or TDMA abnormality, and then performs an FE reset, so we use the same MCU
+ * event to represent both.
+ *
+ * After the FE reset is done, eth sends out the MTK_FE_START_TRAFFIC event if
+ * the reset was induced by a WDMA abnormality, or the MTK_FE_RESET_DONE event
+ * for a QDMA, ADMA or TDMA induced FE reset.
+ */
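+/*
+ * A concrete example of the flow described above, as handled by the notifier
+ * callback below for a WDMA-abnormal induced reset:
+ *   MTK_FE_START_RESET   -> mtk_tops_netdev_ser() queues an FE-reset SER
+ *                           request; its callback dumps TRM, notifies eth
+ *                           (MTK_TOPS_DUMP_DONE) and waits on
+ *                           wait_fe_reset_done
+ *   MTK_FE_START_TRAFFIC -> complete(&wait_fe_reset_done) lets the SER
+ *                           callback return and recovery continue
+ */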
+static int mtk_tops_netdev_callback(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(data);
+ int ret = 0;
+
+ switch (event) {
+ case NETDEV_UP:
+ break;
+ case NETDEV_DOWN:
+ mtk_tops_tnl_offload_netdev_down(dev);
+ break;
+ case MTK_FE_START_RESET:
+ case MTK_FE_STOP_TRAFFIC:
+ mtk_tops_netdev_ser(dev);
+ break;
+ case MTK_FE_RESET_DONE:
+ case MTK_FE_START_TRAFFIC:
+ complete(&wait_fe_reset_done);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct notifier_block mtk_tops_netdev_notifier = {
+ .notifier_call = mtk_tops_netdev_callback,
+};
+
+static int mtk_tops_netevent_callback(struct notifier_block *nb,
+ unsigned long event,
+ void *data)
+{
+ int ret = 0;
+
+ switch (event) {
+ case NETEVENT_NEIGH_UPDATE:
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static struct notifier_block mtk_tops_netevent_notifier = {
+ .notifier_call = mtk_tops_netevent_callback,
+};
+
+int mtk_tops_netevent_register(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = register_netdevice_notifier(&mtk_tops_netdev_notifier);
+ if (ret) {
+ TOPS_ERR("TOPS register netdev notifier failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = register_netevent_notifier(&mtk_tops_netevent_notifier);
+ if (ret) {
+ unregister_netdevice_notifier(&mtk_tops_netdev_notifier);
+ TOPS_ERR("TOPS register net event notifier failed: %d\n", ret);
+ return ret;
+ }
+
+ init_completion(&wait_fe_reset_done);
+
+ return ret;
+}
+
+void mtk_tops_netevent_unregister(struct platform_device *pdev)
+{
+ unregister_netevent_notifier(&mtk_tops_netevent_notifier);
+
+ unregister_netdevice_notifier(&mtk_tops_netdev_notifier);
+}
diff --git a/package-21.02/kernel/tops/src/netsys.c b/package-21.02/kernel/tops/src/netsys.c
new file mode 100644
index 0000000..7deb502
--- /dev/null
+++ b/package-21.02/kernel/tops/src/netsys.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include <mtk_hnat/hnat.h>
+
+#include <pce/netsys.h>
+
+#include "hpdma.h"
+#include "internal.h"
+#include "mcu.h"
+#include "netsys.h"
+#include "tdma.h"
+#include "trm.h"
+
+/* Netsys dump length */
+#define FE_BASE_LEN (0x2900)
+
+#define PPE_DEFAULT_ENTRY_SIZE (0x400)
+
+static int netsys_trm_hw_dump(void *dst, u32 ofs, u32 len);
+
+struct netsys_hw {
+ void __iomem *base;
+};
+
+static struct netsys_hw netsys;
+
+static struct trm_config netsys_trm_configs[] = {
+ {
+ TRM_CFG_EN("netsys-fe",
+ FE_BASE, FE_BASE_LEN,
+ 0x0, FE_BASE_LEN,
+ 0)
+ },
+};
+
+static struct trm_hw_config netsys_trm_hw_cfg = {
+ .trm_cfgs = netsys_trm_configs,
+ .cfg_len = ARRAY_SIZE(netsys_trm_configs),
+ .trm_hw_dump = netsys_trm_hw_dump,
+};
+
+static inline void netsys_write(u32 reg, u32 val)
+{
+ writel(val, netsys.base + reg);
+}
+
+static inline void netsys_set(u32 reg, u32 mask)
+{
+ setbits(netsys.base + reg, mask);
+}
+
+static inline void netsys_clr(u32 reg, u32 mask)
+{
+ clrbits(netsys.base + reg, mask);
+}
+
+static inline void netsys_rmw(u32 reg, u32 mask, u32 val)
+{
+ clrsetbits(netsys.base + reg, mask, val);
+}
+
+static inline u32 netsys_read(u32 reg)
+{
+ return readl(netsys.base + reg);
+}
+
+static int netsys_trm_hw_dump(void *dst, u32 start_addr, u32 len)
+{
+ u32 ofs;
+
+ if (unlikely(!dst))
+ return -ENODEV;
+
+ for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
+ writel(netsys_read(start_addr + ofs), dst + ofs);
+
+ return 0;
+}
+
+static inline void ppe_rmw(enum pse_port ppe, u32 reg, u32 mask, u32 val)
+{
+ if (ppe == PSE_PORT_PPE0)
+ netsys_rmw(PPE0_BASE + reg, mask, val);
+ else if (ppe == PSE_PORT_PPE1)
+ netsys_rmw(PPE1_BASE + reg, mask, val);
+ else if (ppe == PSE_PORT_PPE2)
+ netsys_rmw(PPE2_BASE + reg, mask, val);
+}
+
+static inline u32 ppe_read(enum pse_port ppe, u32 reg)
+{
+ if (ppe == PSE_PORT_PPE0)
+ return netsys_read(PPE0_BASE + reg);
+ else if (ppe == PSE_PORT_PPE1)
+ return netsys_read(PPE1_BASE + reg);
+ else if (ppe == PSE_PORT_PPE2)
+ return netsys_read(PPE2_BASE + reg);
+
+ return 0;
+}
+
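+/*
+ * Derive the PPE entry count from PPE_TBL_CFG: an encoded value n (0..5)
+ * corresponds to PPE_DEFAULT_ENTRY_SIZE << n entries (0x400 up to 0x8000).
+ * For an unknown PPE id or an out-of-range encoding, the 32K maximum is
+ * returned, presumably so callers never under-estimate the table size.
+ */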
+u32 mtk_tops_netsys_ppe_get_max_entry_num(u32 ppe_id)
+{
+ u32 tbl_entry_num;
+ enum pse_port ppe;
+
+ if (ppe_id == 0)
+ ppe = PSE_PORT_PPE0;
+ else if (ppe_id == 1)
+ ppe = PSE_PORT_PPE1;
+ else if (ppe_id == 2)
+ ppe = PSE_PORT_PPE2;
+ else
+ return PPE_DEFAULT_ENTRY_SIZE << 5; /* max entry count */
+
+ tbl_entry_num = ppe_read(ppe, PPE_TBL_CFG);
+ if (tbl_entry_num > 5)
+ return PPE_DEFAULT_ENTRY_SIZE << 5;
+
+ return PPE_DEFAULT_ENTRY_SIZE << tbl_entry_num;
+}
+
+int mtk_tops_netsys_init(struct platform_device *pdev)
+{
+ struct device_node *fe_mem = NULL;
+ struct resource res;
+ int ret = 0;
+
+ fe_mem = of_parse_phandle(pdev->dev.of_node, "fe_mem", 0);
+ if (!fe_mem) {
+ TOPS_ERR("can not find fe_mem node\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(fe_mem, 0, &res))
+ return -ENXIO;
+
+ netsys.base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
+ if (!netsys.base)
+ return -ENOMEM;
+
+ ret = mtk_trm_hw_config_register(TRM_NETSYS, &netsys_trm_hw_cfg);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+void mtk_tops_netsys_deinit(struct platform_device *pdev)
+{
+ mtk_trm_hw_config_unregister(TRM_NETSYS, &netsys_trm_hw_cfg);
+}
diff --git a/package-21.02/kernel/tops/src/protocol/gre/gretap.c b/package-21.02/kernel/tops/src/protocol/gre/gretap.c
new file mode 100644
index 0000000..91a239f
--- /dev/null
+++ b/package-21.02/kernel/tops/src/protocol/gre/gretap.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <net/gre.h>
+
+#include <pce/cls.h>
+#include <pce/pce.h>
+
+#include "tunnel.h"
+
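+/*
+ * Classifier rule for offloaded L2oGRE downlink traffic: match IPv4 packets
+ * whose protocol is GRE (l4_type 0x2F) and whose GRE protocol field is
+ * ETH_P_TEB (l4_hdr_usr_data 0x6558, transparent Ethernet bridging).
+ * dip_match presumably ties the rule to the dipfilter entries installed in
+ * tnl_offload.c; the remaining cdesc fields are hardware match/steering bits.
+ */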
+static struct cls_entry gretap_cls_entry = {
+ .entry = CLS_ENTRY_GRETAP,
+ .cdesc = {
+ .fport = 0x3,
+ .tport_idx = 0x4,
+ .tag_m = 0x3,
+ .tag = 0x1,
+ .dip_match_m = 0x1,
+ .dip_match = 0x1,
+ .l4_type_m = 0xFF,
+ .l4_type = 0x2F,
+ .l4_udp_hdr_nez_m = 0x1,
+ .l4_udp_hdr_nez = 0x1,
+ .l4_valid_m = 0x7,
+ .l4_valid = 0x3,
+ .l4_hdr_usr_data_m = 0xFFFF,
+ .l4_hdr_usr_data = 0x6558,
+ },
+};
+
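+/*
+ * Build tunnel parameters from a received (decap direction) L2oGRE packet.
+ * skb->data points at the inner frame here, so each outer header (GRE,
+ * outer IPv4, outer Ethernet) is temporarily pushed back, validated and
+ * parsed, then pulled again before returning.  Addresses are recorded
+ * swapped (sip = outer daddr, saddr = outer h_dest, ...), presumably so the
+ * stored parameters describe the encap (transmit) direction of the tunnel.
+ */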
+static int gretap_tnl_decap_param_setup(struct sk_buff *skb,
+ struct tops_tnl_params *tnl_params)
+{
+ struct gre_base_hdr *pgre;
+ struct gre_base_hdr greh;
+ struct ethhdr *eth;
+ struct ethhdr ethh;
+ struct iphdr *ip;
+ struct iphdr iph;
+ int ret = 0;
+
+ if (!skb->dev->rtnl_link_ops
+ || strcmp(skb->dev->rtnl_link_ops->kind, "gretap"))
+ return -EAGAIN;
+
+ skb_push(skb, sizeof(struct gre_base_hdr));
+ pgre = skb_header_pointer(skb, 0, sizeof(struct gre_base_hdr), &greh);
+ if (unlikely(!pgre)) {
+ ret = -EINVAL;
+ goto restore_gre;
+ }
+
+ if (unlikely(ntohs(pgre->protocol) != ETH_P_TEB)) {
+ pr_notice("gre: %p protocol unmatched, proto: 0x%x\n",
+ pgre, ntohs(pgre->protocol));
+ ret = -EINVAL;
+ goto restore_gre;
+ }
+
+ /* TODO: store gre parameters? */
+
+ skb_push(skb, sizeof(struct iphdr));
+ ip = skb_header_pointer(skb, 0, sizeof(struct iphdr), &iph);
+ if (unlikely(!ip)) {
+ ret = -EINVAL;
+ goto restore_ip;
+ }
+
+ if (unlikely(ip->version != IPVERSION || ip->protocol != IPPROTO_GRE)) {
+ pr_notice("ip: %p version or protocol unmatched, ver: 0x%x, proto: 0x%x\n",
+ ip, ip->version, ip->protocol);
+ ret = -EINVAL;
+ goto restore_ip;
+ }
+
+ /* TODO: check whether IP options are supported? */
+ /* TODO: store ip parameters? */
+ tnl_params->protocol = ip->protocol;
+ tnl_params->sip = ip->daddr;
+ tnl_params->dip = ip->saddr;
+
+ skb_push(skb, sizeof(struct ethhdr));
+ eth = skb_header_pointer(skb, 0, sizeof(struct ethhdr), ðh);
+ if (unlikely(!eth)) {
+ ret = -EINVAL;
+ goto restore_eth;
+ }
+
+ if (unlikely(ntohs(eth->h_proto) != ETH_P_IP)) {
+ pr_notice("eth proto not support, proto: 0x%x\n",
+ ntohs(eth->h_proto));
+ ret = -EINVAL;
+ goto restore_eth;
+ }
+
+ memcpy(&tnl_params->saddr, eth->h_dest, sizeof(u8) * ETH_ALEN);
+ memcpy(&tnl_params->daddr, eth->h_source, sizeof(u8) * ETH_ALEN);
+
+restore_eth:
+ skb_pull(skb, sizeof(struct ethhdr));
+
+restore_ip:
+ skb_pull(skb, sizeof(struct iphdr));
+
+restore_gre:
+ skb_pull(skb, sizeof(struct gre_base_hdr));
+
+ return ret;
+}
+
+static int gretap_tnl_encap_param_setup(struct sk_buff *skb,
+ struct tops_tnl_params *tnl_params)
+{
+ struct ethhdr *eth = eth_hdr(skb);
+ struct iphdr *ip = ip_hdr(skb);
+
+ /*
+ * no need to check the ether type since it is not constructed yet;
+ * GRE without an outer IPv4 header is currently not supported
+ */
+ if (unlikely(ip->version != IPVERSION || ip->protocol != IPPROTO_GRE)) {
+ pr_notice("eth proto: 0x%x, ip ver: 0x%x, proto: 0x%x is not support\n",
+ ntohs(eth->h_proto),
+ ip->version,
+ ip->protocol);
+ return -EINVAL;
+ }
+
+ memcpy(&tnl_params->saddr, eth->h_source, sizeof(u8) * ETH_ALEN);
+ memcpy(&tnl_params->daddr, eth->h_dest, sizeof(u8) * ETH_ALEN);
+ tnl_params->protocol = ip->protocol;
+ tnl_params->sip = ip->saddr;
+ tnl_params->dip = ip->daddr;
+
+ return 0;
+}
+
+static int gretap_tnl_debug_param_setup(const char *buf, int *ofs,
+ struct tops_tnl_params *tnl_params)
+{
+ tnl_params->protocol = IPPROTO_GRE;
+ return 0;
+}
+
+static bool gretap_tnl_info_match(struct tops_tnl_params *parms1,
+ struct tops_tnl_params *parms2)
+{
+ if (parms1->sip == parms2->sip
+ && parms1->dip == parms2->dip
+ && !memcmp(parms1->saddr, parms2->saddr, sizeof(u8) * ETH_ALEN)
+ && !memcmp(parms1->daddr, parms2->daddr, sizeof(u8) * ETH_ALEN)) {
+ return true;
+ }
+
+ return false;
+}
+
+static bool gretap_tnl_decap_offloadable(struct sk_buff *skb)
+{
+ struct iphdr *ip = ip_hdr(skb);
+
+ if (ip->protocol != IPPROTO_GRE)
+ return false;
+
+ return true;
+}
+
+static struct tops_tnl_type gretap_type = {
+ .type_name = "gretap",
+ .tnl_decap_param_setup = gretap_tnl_decap_param_setup,
+ .tnl_encap_param_setup = gretap_tnl_encap_param_setup,
+ .tnl_debug_param_setup = gretap_tnl_debug_param_setup,
+ .tnl_info_match = gretap_tnl_info_match,
+ .tnl_decap_offloadable = gretap_tnl_decap_offloadable,
+ .tops_entry = TOPS_ENTRY_GRETAP,
+ .has_inner_eth = true,
+};
+
+int mtk_tops_gretap_init(void)
+{
+ int ret;
+
+ ret = mtk_tops_tnl_type_register(&gretap_type);
+ if (ret)
+ return ret;
+
+ ret = mtk_pce_cls_entry_register(&gretap_cls_entry);
+ if (ret) {
+ mtk_tops_tnl_type_unregister(&gretap_type);
+ return ret;
+ }
+
+ return ret;
+}
+
+void mtk_tops_gretap_deinit(void)
+{
+ mtk_pce_cls_entry_unregister(&gretap_cls_entry);
+
+ mtk_tops_tnl_type_unregister(&gretap_type);
+}
diff --git a/package-21.02/kernel/tops/src/protocol/inc/protocol/gre/gretap.h b/package-21.02/kernel/tops/src/protocol/inc/protocol/gre/gretap.h
new file mode 100644
index 0000000..e308fde
--- /dev/null
+++ b/package-21.02/kernel/tops/src/protocol/inc/protocol/gre/gretap.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_GRETAP_H_
+#define _TOPS_GRETAP_H_
+
+#if defined(CONFIG_MTK_TOPS_GRETAP)
+int mtk_tops_gretap_init(void);
+void mtk_tops_gretap_deinit(void);
+#else /* !defined(CONFIG_MTK_TOPS_GRETAP) */
+static inline int mtk_tops_gretap_init(void)
+{
+ return 0;
+}
+
+static inline void mtk_tops_gretap_deinit(void)
+{
+}
+#endif /* defined(CONFIG_MTK_TOPS_GRETAP) */
+#endif /* _TOPS_GRETAP_H_ */
diff --git a/package-21.02/kernel/tops/src/protocol/inc/protocol/l2tp/l2tp.h b/package-21.02/kernel/tops/src/protocol/inc/protocol/l2tp/l2tp.h
new file mode 100644
index 0000000..34449c1
--- /dev/null
+++ b/package-21.02/kernel/tops/src/protocol/inc/protocol/l2tp/l2tp.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
+ */
+
+#ifndef _TOPS_L2TP_H_
+#define _TOPS_L2TP_H_
+
+/* L2TP header constants */
+#define L2TP_HDRFLAG_T 0x8000
+#define L2TP_HDRFLAG_L 0x4000
+
+#define L2TP_HDR_VER_MASK 0x000F
+#define L2TP_HDR_VER_2 0x0002
+#define L2TP_HDR_VER_3 0x0003
+
+#define UDP_L2TP_PORT 1701
+
+struct l2tp_param {
+ u16 tid; /* l2tp tunnel id */
+ u16 sid; /* l2tp session id */
+};
+
+/* Limited support: L2TPv2 only, no length field, no options */
+struct udp_l2tp_data_hdr {
+ u16 flag_ver;
+ u16 tid;
+ u16 sid;
+};
+
+#endif /* _TOPS_L2TP_H_ */
diff --git a/package-21.02/kernel/tops/src/protocol/inc/protocol/l2tp/udp_l2tp_data.h b/package-21.02/kernel/tops/src/protocol/inc/protocol/l2tp/udp_l2tp_data.h
new file mode 100644
index 0000000..25f9dc3
--- /dev/null
+++ b/package-21.02/kernel/tops/src/protocol/inc/protocol/l2tp/udp_l2tp_data.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_UDP_L2TP_DATA_H_
+#define _TOPS_UDP_L2TP_DATA_H_
+
+#if defined(CONFIG_MTK_TOPS_UDP_L2TP_DATA)
+int mtk_tops_udp_l2tp_data_init(void);
+void mtk_tops_udp_l2tp_data_deinit(void);
+#else /* !defined(CONFIG_MTK_TOPS_UDP_L2TP_DATA) */
+static inline int mtk_tops_udp_l2tp_data_init(void)
+{
+ return 0;
+}
+
+static inline void mtk_tops_udp_l2tp_data_deinit(void)
+{
+}
+#endif /* defined(CONFIG_MTK_TOPS_UDP_L2TP_DATA) */
+#endif /* _TOPS_UDP_L2TP_DATA_H_ */
diff --git a/package-21.02/kernel/tops/src/protocol/inc/protocol/ppp/ppp.h b/package-21.02/kernel/tops/src/protocol/inc/protocol/ppp/ppp.h
new file mode 100644
index 0000000..274dfde
--- /dev/null
+++ b/package-21.02/kernel/tops/src/protocol/inc/protocol/ppp/ppp.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#ifndef _TOPS_PPP_H_
+#define _TOPS_PPP_H_
+
+/* Limited support: ppp header, no options */
+struct ppp_hdr {
+ u8 addr;
+ u8 ctrl;
+ u16 proto;
+};
+
+#endif /* _TOPS_PPP_H_ */
diff --git a/package-21.02/kernel/tops/src/protocol/l2tp/udp_l2tp_data.c b/package-21.02/kernel/tops/src/protocol/l2tp/udp_l2tp_data.c
new file mode 100644
index 0000000..bde94e5
--- /dev/null
+++ b/package-21.02/kernel/tops/src/protocol/l2tp/udp_l2tp_data.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Frank-zj Lin <frank-zj.lin@mediatek.com>
+ * Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/ppp_defs.h>
+#include <linux/udp.h>
+
+#include <pce/cls.h>
+#include <pce/pce.h>
+
+#include "protocol/l2tp/l2tp.h"
+#include "protocol/ppp/ppp.h"
+#include "tunnel.h"
+
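+/*
+ * Classifier rule for offloaded L2TPv2 data downlink traffic: match IPv4/UDP
+ * packets with destination port 1701 whose flag/version halfword has the
+ * L2TP T bit cleared and version bits indicating v2 (l4_hdr_usr_data mask
+ * 0x80030000 against value 0x00020000), i.e. an L2TPv2 data message.
+ * dip_match presumably ties the rule to the dipfilter entries installed in
+ * tnl_offload.c; the remaining cdesc fields are hardware match/steering bits.
+ */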
+static struct cls_entry udp_l2tp_data_cls_entry = {
+ .entry = CLS_ENTRY_UDP_L2TP_DATA,
+ .cdesc = {
+ .fport = 0x3,
+ .tport_idx = 0x4,
+ .tag_m = 0x3,
+ .tag = 0x2,
+ .dip_match_m = 0x1,
+ .dip_match = 0x1,
+ .l4_type_m = 0xFF,
+ .l4_type = 0x11,
+ .l4_valid_m = 0x7,
+ .l4_valid = 0x7,
+ .l4_dport_m = 0xFFFF,
+ .l4_dport = 1701,
+ .l4_hdr_usr_data_m = 0x80030000,
+ .l4_hdr_usr_data = 0x00020000,
+ },
+};
+
+static inline bool l2tpv2_offload_match(struct udp_l2tp_data_hdr *l2tp)
+{
+ u16 hdrflags = ntohs(l2tp->flag_ver);
+
+ return ((hdrflags & L2TP_HDR_VER_MASK) == L2TP_HDR_VER_2 &&
+ !(hdrflags & L2TP_HDRFLAG_T));
+}
+
+static inline bool ppp_offload_match(struct ppp_hdr *ppp)
+{
+ return (ppp->addr == PPP_ALLSTATIONS &&
+ ppp->ctrl == PPP_UI && ntohs(ppp->proto) == PPP_IP);
+}
+
+static int udp_l2tp_data_tnl_decap_param_setup(struct sk_buff *skb,
+ struct tops_tnl_params *tnl_params)
+{
+ struct udp_l2tp_data_hdr *l2tp;
+ struct udp_l2tp_data_hdr l2tph;
+ struct ppp_hdr *ppp;
+ struct ppp_hdr ppph;
+ struct udphdr *udp;
+ struct udphdr udph;
+ struct ethhdr *eth;
+ struct ethhdr ethh;
+ struct iphdr *ip;
+ struct iphdr iph;
+ int ret = 0;
+
+ /* ppp */
+ skb_push(skb, sizeof(struct ppp_hdr));
+ ppp = skb_header_pointer(skb, 0, sizeof(struct ppp_hdr), &ppph);
+
+ if (unlikely(!ppp)) {
+ ret = -EINVAL;
+ goto restore_ppp;
+ }
+
+ if (unlikely(!ppp_offload_match(ppp))) {
+ pr_notice("ppp offload unmatched\n");
+ ret = -EINVAL;
+ goto restore_ppp;
+ }
+
+ /* l2tp */
+ skb_push(skb, sizeof(struct udp_l2tp_data_hdr));
+ l2tp = skb_header_pointer(skb, 0, sizeof(struct udp_l2tp_data_hdr), &l2tph);
+ if (unlikely(!l2tp)) {
+ ret = -EINVAL;
+ goto restore_l2tp;
+ }
+
+ if (unlikely(!l2tpv2_offload_match(l2tp))) {
+ ret = -EINVAL;
+ goto restore_l2tp;
+ }
+
+ tnl_params->priv.l2tp.tid = l2tp->tid;
+ tnl_params->priv.l2tp.sid = l2tp->sid;
+
+ /* udp */
+ skb_push(skb, sizeof(struct udphdr));
+ udp = skb_header_pointer(skb, 0, sizeof(struct udphdr), &udph);
+ if (unlikely(!udp)) {
+ ret = -EINVAL;
+ goto restore_udp;
+ }
+
+ if (unlikely(ntohs(udp->dest) != UDP_L2TP_PORT)) {
+ pr_notice("udp port 0x%x unmatched\n", ntohs(udp->dest));
+ ret = -EINVAL;
+ goto restore_udp;
+ }
+
+ tnl_params->sport = udp->dest;
+ tnl_params->dport = udp->source;
+
+ /* ip */
+ skb_push(skb, sizeof(struct iphdr));
+ ip = skb_header_pointer(skb, 0, sizeof(struct iphdr), &iph);
+ if (unlikely(!ip)) {
+ ret = -EINVAL;
+ goto restore_ip;
+ }
+
+ if (unlikely(ip->version != IPVERSION || ip->protocol != IPPROTO_UDP)) {
+ pr_notice("ip: %p version or protocol unmatched, ver: 0x%x, proto: 0x%x\n",
+ ip, ip->version, ip->protocol);
+ ret = -EINVAL;
+ goto restore_ip;
+ }
+
+ tnl_params->protocol = ip->protocol;
+ tnl_params->sip = ip->daddr;
+ tnl_params->dip = ip->saddr;
+
+ /* eth */
+ skb_push(skb, sizeof(struct ethhdr));
+ eth = skb_header_pointer(skb, 0, sizeof(struct ethhdr), ðh);
+ if (unlikely(!eth)) {
+ ret = -EINVAL;
+ goto restore_eth;
+ }
+
+ if (unlikely(ntohs(eth->h_proto) != ETH_P_IP)) {
+ pr_notice("eth proto not supported, proto: 0x%x\n",
+ ntohs(eth->h_proto));
+ ret = -EINVAL;
+ goto restore_eth;
+ }
+
+ memcpy(&tnl_params->saddr, eth->h_dest, sizeof(u8) * ETH_ALEN);
+ memcpy(&tnl_params->daddr, eth->h_source, sizeof(u8) * ETH_ALEN);
+
+restore_eth:
+ skb_pull(skb, sizeof(struct ethhdr));
+restore_ip:
+ skb_pull(skb, sizeof(struct iphdr));
+restore_udp:
+ skb_pull(skb, sizeof(struct udphdr));
+restore_l2tp:
+ skb_pull(skb, sizeof(struct udp_l2tp_data_hdr));
+restore_ppp:
+ skb_pull(skb, sizeof(struct ppp_hdr));
+
+ return ret;
+}
+
+static int udp_l2tp_data_tnl_encap_param_setup(struct sk_buff *skb,
+ struct tops_tnl_params *tnl_params)
+{
+ struct ethhdr *eth = eth_hdr(skb);
+ struct iphdr *ip = ip_hdr(skb);
+ struct udp_l2tp_data_hdr *l2tp;
+ struct udp_l2tp_data_hdr l2tph;
+ struct udphdr *udp;
+ struct udphdr udph;
+ int ret = 0;
+
+ if (unlikely(ip->version != IPVERSION || ip->protocol != IPPROTO_UDP)) {
+ pr_notice("eth proto: 0x%x, ip ver: 0x%x, proto: 0x%x is not support\n",
+ ntohs(eth->h_proto),
+ ip->version,
+ ip->protocol);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ skb_pull(skb, sizeof(struct iphdr));
+ udp = skb_header_pointer(skb, 0, sizeof(struct udphdr), &udph);
+ if (unlikely(!udp)) {
+ ret = -EINVAL;
+ goto restore_ip;
+ }
+
+ if (unlikely(ntohs(udp->dest) != UDP_L2TP_PORT)) {
+ pr_notice("udp port 0x%x unmatched\n", ntohs(udp->dest));
+ ret = -EINVAL;
+ goto restore_ip;
+ }
+
+ skb_pull(skb, sizeof(struct udphdr));
+ l2tp = skb_header_pointer(skb, 0, sizeof(struct udp_l2tp_data_hdr), &l2tph);
+ if (unlikely(!l2tp)) {
+ ret = -EINVAL;
+ goto restore_udp;
+ }
+
+ if (unlikely(!l2tpv2_offload_match(l2tp))) {
+ ret = -EINVAL;
+ goto restore_udp;
+ }
+
+ memcpy(&tnl_params->saddr, eth->h_source, sizeof(u8) * ETH_ALEN);
+ memcpy(&tnl_params->daddr, eth->h_dest, sizeof(u8) * ETH_ALEN);
+ tnl_params->protocol = ip->protocol;
+ tnl_params->sip = ip->saddr;
+ tnl_params->dip = ip->daddr;
+ tnl_params->sport = udp->source;
+ tnl_params->dport = udp->dest;
+ tnl_params->priv.l2tp.tid = l2tp->tid;
+ tnl_params->priv.l2tp.sid = l2tp->sid;
+
+restore_udp:
+ skb_push(skb, sizeof(struct udphdr));
+restore_ip:
+ skb_push(skb, sizeof(struct iphdr));
+out:
+ return ret;
+}
+
+static int udp_l2tp_data_tnl_debug_param_setup(const char *buf, int *ofs,
+ struct tops_tnl_params *tnl_params)
+{
+ return -EPERM; /* TODO: not implemented */
+}
+
+static bool udp_l2tp_data_tnl_info_match(struct tops_tnl_params *params1,
+ struct tops_tnl_params *params2)
+{
+ if (params1->sip == params2->sip
+ && params1->dip == params2->dip
+ && params1->sport == params2->sport
+ && params1->dport == params2->dport
+ && params1->priv.l2tp.tid == params2->priv.l2tp.tid
+ && params1->priv.l2tp.sid == params2->priv.l2tp.sid
+ && !memcmp(params1->saddr, params2->saddr, sizeof(u8) * ETH_ALEN)
+ && !memcmp(params1->daddr, params2->daddr, sizeof(u8) * ETH_ALEN))
+ return true;
+
+ return false;
+}
+
+static bool udp_l2tp_data_tnl_decap_offloadable(struct sk_buff *skb)
+{
+ struct udp_l2tp_data_hdr *l2tp;
+ struct udp_l2tp_data_hdr l2tph;
+ struct ppp_hdr *ppp;
+ struct ppp_hdr ppph;
+ struct udphdr *udp;
+ struct iphdr *ip;
+
+ ip = ip_hdr(skb);
+ if (ip->protocol != IPPROTO_UDP)
+ return false;
+
+ udp = udp_hdr(skb);
+ if (ntohs(udp->dest) != UDP_L2TP_PORT)
+ return false;
+
+ l2tp = skb_header_pointer(skb, ip_hdr(skb)->ihl * 4 + sizeof(struct udphdr),
+ sizeof(struct udp_l2tp_data_hdr), &l2tph);
+
+ if (unlikely(!l2tp))
+ return false;
+
+ if (unlikely(!l2tpv2_offload_match(l2tp)))
+ return false;
+
+ ppp = skb_header_pointer(skb, (ip_hdr(skb)->ihl * 4 +
+ sizeof(struct udphdr) +
+ sizeof(struct udp_l2tp_data_hdr)),
+ sizeof(struct ppp_hdr), &ppph);
+
+ if (unlikely(!ppp))
+ return false;
+
+ if (unlikely(!ppp_offload_match(ppp)))
+ return false;
+
+ return true;
+}
+
+static struct tops_tnl_type udp_l2tp_data_type = {
+ .type_name = "udp-l2tp-data",
+ .tnl_decap_param_setup = udp_l2tp_data_tnl_decap_param_setup,
+ .tnl_encap_param_setup = udp_l2tp_data_tnl_encap_param_setup,
+ .tnl_debug_param_setup = udp_l2tp_data_tnl_debug_param_setup,
+ .tnl_info_match = udp_l2tp_data_tnl_info_match,
+ .tnl_decap_offloadable = udp_l2tp_data_tnl_decap_offloadable,
+ .tops_entry = TOPS_ENTRY_UDP_L2TP_DATA,
+ .has_inner_eth = false,
+};
+
+int mtk_tops_udp_l2tp_data_init(void)
+{
+ int ret = 0;
+
+ ret = mtk_tops_tnl_type_register(&udp_l2tp_data_type);
+ if (ret)
+ return ret;
+
+ ret = mtk_pce_cls_entry_register(&udp_l2tp_data_cls_entry);
+ if (ret) {
+ mtk_tops_tnl_type_unregister(&udp_l2tp_data_type);
+ return ret;
+ }
+
+ return ret;
+}
+
+void mtk_tops_udp_l2tp_data_deinit(void)
+{
+ mtk_pce_cls_entry_unregister(&udp_l2tp_data_cls_entry);
+
+ mtk_tops_tnl_type_unregister(&udp_l2tp_data_type);
+}
diff --git a/package-21.02/kernel/tops/src/ser.c b/package-21.02/kernel/tops/src/ser.c
new file mode 100644
index 0000000..a126185
--- /dev/null
+++ b/package-21.02/kernel/tops/src/ser.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuo@mediatek.com>
+ */
+
+#include <linux/err.h>
+#include <linux/workqueue.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+
+#include "internal.h"
+#include "net-event.h"
+#include "ser.h"
+#include "trm.h"
+
+struct tops_ser {
+ struct work_struct work;
+ struct tops_ser_params ser_params;
+ spinlock_t params_lock;
+};
+
+struct tops_ser tops_ser;
+
+static inline void __mtk_tops_ser_cmd_clear(void)
+{
+ memset(&tops_ser.ser_params, 0, sizeof(struct tops_ser_params));
+ tops_ser.ser_params.type = __TOPS_SER_TYPE_MAX;
+}
+
+static inline void mtk_tops_ser_cmd_clear(void)
+{
+ unsigned long flag;
+
+ spin_lock_irqsave(&tops_ser.params_lock, flag);
+
+ __mtk_tops_ser_cmd_clear();
+
+ spin_unlock_irqrestore(&tops_ser.params_lock, flag);
+}
+
+static void mtk_tops_ser_setup_mcmd(struct tops_ser_params *ser_params,
+ struct mcu_ctrl_cmd *mcmd)
+{
+ memset(mcmd, 0, sizeof(struct mcu_ctrl_cmd));
+
+ switch (ser_params->type) {
+ case TOPS_SER_NETSYS_FE_RST:
+ mcmd->e = MCU_EVENT_TYPE_FE_RESET;
+ break;
+ case TOPS_SER_WDT_TO:
+ mcmd->e = MCU_EVENT_TYPE_WDT_TIMEOUT;
+ break;
+ default:
+ TOPS_ERR("unsupport TOPS SER type: %u\n", ser_params->type);
+ return;
+ }
+
+ if (ser_params->ser_mcmd_setup)
+ ser_params->ser_mcmd_setup(ser_params, mcmd);
+}
+
+static void mtk_tops_ser_reset_callback(void *params)
+{
+ struct tops_ser_params *ser_params = params;
+
+ if (ser_params->ser_callback)
+ ser_params->ser_callback(ser_params);
+}
+
+static void mtk_tops_ser_work(struct work_struct *work)
+{
+ struct tops_ser_params ser_params;
+ struct mcu_ctrl_cmd mcmd;
+ unsigned long flag = 0;
+
+ spin_lock_irqsave(&tops_ser.params_lock, flag);
+
+ while (tops_ser.ser_params.type != __TOPS_SER_TYPE_MAX) {
+ memcpy(&ser_params,
+ &tops_ser.ser_params,
+ sizeof(struct tops_ser_params));
+
+ spin_unlock_irqrestore(&tops_ser.params_lock, flag);
+
+ mtk_tops_ser_setup_mcmd(&ser_params, &mcmd);
+
+ if (mtk_tops_mcu_reset(&mcmd,
+ mtk_tops_ser_reset_callback,
+ &ser_params)) {
+ TOPS_INFO("SER type: %u failed to recover\n",
+ ser_params.type);
+ /*
+ * TODO: check whether it is OK to return directly here,
+ * since the MCU state machine should handle a failed
+ * state transition?
+ */
+ mtk_tops_ser_cmd_clear();
+ return;
+ }
+
+ TOPS_INFO("SER type: %u successfully recovered\n", ser_params.type);
+
+ spin_lock_irqsave(&tops_ser.params_lock, flag);
+ /*
+ * If no other SER command with higher priority than the one just
+ * handled has been queued, clear the SER command and exit.
+ * Otherwise, let this work perform the reset again for the
+ * higher-priority SER command.
+ */
+ if (tops_ser.ser_params.type > ser_params.type
+ || !memcmp(&tops_ser.ser_params, &ser_params,
+ sizeof(struct tops_ser_params)))
+ __mtk_tops_ser_cmd_clear();
+ }
+
+ spin_unlock_irqrestore(&tops_ser.params_lock, flag);
+}
+
+int mtk_tops_ser(struct tops_ser_params *ser_params)
+{
+ unsigned long flag;
+
+ if (!ser_params)
+ return -EINVAL;
+
+ spin_lock_irqsave(&tops_ser.params_lock, flag);
+
+ /* a numerically larger (lower-priority) SER type must not override a pending smaller (higher-priority) one */
+ if (tops_ser.ser_params.type != __TOPS_SER_TYPE_MAX
+ && tops_ser.ser_params.type < ser_params->type)
+ goto unlock;
+
+ memcpy(&tops_ser.ser_params, ser_params, sizeof(*ser_params));
+
+ schedule_work(&tops_ser.work);
+
+unlock:
+ spin_unlock_irqrestore(&tops_ser.params_lock, flag);
+
+ return 0;
+}
+
+int mtk_tops_ser_init(struct platform_device *pdev)
+{
+ INIT_WORK(&tops_ser.work, mtk_tops_ser_work);
+
+ spin_lock_init(&tops_ser.params_lock);
+
+ tops_ser.ser_params.type = __TOPS_SER_TYPE_MAX;
+
+ return 0;
+}
+
+int mtk_tops_ser_deinit(struct platform_device *pdev)
+{
+ return 0;
+}
diff --git a/package-21.02/kernel/tops/src/tdma.c b/package-21.02/kernel/tops/src/tdma.c
new file mode 100644
index 0000000..c7053e7
--- /dev/null
+++ b/package-21.02/kernel/tops/src/tdma.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "internal.h"
+#include "mbox.h"
+#include "mcu.h"
+#include "tdma.h"
+#include "tops.h"
+#include "trm.h"
+
+/* TDMA dump length */
+#define TDMA_BASE_LEN (0x400)
+
+static int tdma_trm_hw_dump(void *dst, u32 start_addr, u32 len);
+
+struct tdma_hw {
+ void __iomem *base;
+ u32 start_ring;
+
+ struct mailbox_dev mgmt_mdev;
+ struct mailbox_dev offload_mdev[CORE_OFFLOAD_NUM];
+};
+
+struct tdma_hw tdma = {
+ .mgmt_mdev = MBOX_SEND_MGMT_DEV(NET),
+ .offload_mdev = {
+ [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, NET),
+ [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, NET),
+ [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, NET),
+ [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, NET),
+ },
+};
+
+static inline void tdma_write(u32 reg, u32 val)
+{
+ writel(val, tdma.base + reg);
+}
+
+static inline void tdma_set(u32 reg, u32 mask)
+{
+ setbits(tdma.base + reg, mask);
+}
+
+static inline void tdma_clr(u32 reg, u32 mask)
+{
+ clrbits(tdma.base + reg, mask);
+}
+
+static inline void tdma_rmw(u32 reg, u32 mask, u32 val)
+{
+ clrsetbits(tdma.base + reg, mask, val);
+}
+
+static inline u32 tdma_read(u32 reg)
+{
+ return readl(tdma.base + reg);
+}
+
+static struct trm_config tdma_trm_configs[] = {
+ {
+ TRM_CFG_EN("netsys-tdma",
+ TDMA_BASE, TDMA_BASE_LEN,
+ 0x0, TDMA_BASE_LEN,
+ 0)
+ },
+};
+
+static struct trm_hw_config tdma_trm_hw_cfg = {
+ .trm_cfgs = tdma_trm_configs,
+ .cfg_len = ARRAY_SIZE(tdma_trm_configs),
+ .trm_hw_dump = tdma_trm_hw_dump,
+};
+
+static int tdma_trm_hw_dump(void *dst, u32 start_addr, u32 len)
+{
+ u32 ofs;
+
+ if (unlikely(!dst))
+ return -ENODEV;
+
+ for (ofs = 0; len > 0; len -= 0x4, ofs += 0x4)
+ writel(tdma_read(start_addr + ofs), dst + ofs);
+
+ return 0;
+}
+
+static inline void tdma_prefetch_enable(bool en)
+{
+ if (en) {
+ tdma_set(TDMA_PREF_TX_CFG, PREF_EN);
+ tdma_set(TDMA_PREF_RX_CFG, PREF_EN);
+ } else {
+ /* wait for prefetch idle */
+ while ((tdma_read(TDMA_PREF_TX_CFG) & PREF_BUSY)
+ || (tdma_read(TDMA_PREF_RX_CFG) & PREF_BUSY))
+ ;
+
+ tdma_write(TDMA_PREF_TX_CFG,
+ tdma_read(TDMA_PREF_TX_CFG) & (~PREF_EN));
+ tdma_write(TDMA_PREF_RX_CFG,
+ tdma_read(TDMA_PREF_RX_CFG) & (~PREF_EN));
+ }
+}
+
+static inline void tdma_writeback_enable(bool en)
+{
+ if (en) {
+ tdma_set(TDMA_WRBK_TX_CFG, WRBK_EN);
+ tdma_set(TDMA_WRBK_RX_CFG, WRBK_EN);
+ } else {
+ /* wait for write back idle */
+ while ((tdma_read(TDMA_WRBK_TX_CFG) & WRBK_BUSY)
+ || (tdma_read(TDMA_WRBK_RX_CFG) & WRBK_BUSY))
+ ;
+
+ tdma_write(TDMA_WRBK_TX_CFG,
+ tdma_read(TDMA_WRBK_TX_CFG) & (~WRBK_EN));
+ tdma_write(TDMA_WRBK_RX_CFG,
+ tdma_read(TDMA_WRBK_RX_CFG) & (~WRBK_EN));
+ }
+}
+
+static inline void tdma_assert_prefetch_reset(bool en)
+{
+ if (en) {
+ tdma_set(TDMA_PREF_TX_FIFO_CFG0, PREF_TX_RING0_CLEAR);
+ tdma_set(TDMA_PREF_RX_FIFO_CFG0,
+ PREF_RX_RINGX_CLEAR(0) | PREF_RX_RINGX_CLEAR(1));
+ tdma_set(TDMA_PREF_RX_FIFO_CFG1,
+ PREF_RX_RINGX_CLEAR(2) | PREF_RX_RINGX_CLEAR(3));
+ } else {
+ tdma_clr(TDMA_PREF_TX_FIFO_CFG0, PREF_TX_RING0_CLEAR);
+ tdma_clr(TDMA_PREF_RX_FIFO_CFG0,
+ PREF_RX_RINGX_CLEAR(0) | PREF_RX_RINGX_CLEAR(1));
+ tdma_clr(TDMA_PREF_RX_FIFO_CFG1,
+ PREF_RX_RINGX_CLEAR(2) | PREF_RX_RINGX_CLEAR(3));
+ }
+}
+
+static inline void tdma_assert_fifo_reset(bool en)
+{
+ if (en) {
+ tdma_set(TDMA_TX_XDMA_FIFO_CFG0,
+ (PAR_FIFO_CLEAR
+ | CMD_FIFO_CLEAR
+ | DMAD_FIFO_CLEAR
+ | ARR_FIFO_CLEAR));
+ tdma_set(TDMA_RX_XDMA_FIFO_CFG0,
+ (PAR_FIFO_CLEAR
+ | CMD_FIFO_CLEAR
+ | DMAD_FIFO_CLEAR
+ | ARR_FIFO_CLEAR
+ | LEN_FIFO_CLEAR
+ | WID_FIFO_CLEAR
+ | BID_FIFO_CLEAR));
+ } else {
+ tdma_clr(TDMA_TX_XDMA_FIFO_CFG0,
+ (PAR_FIFO_CLEAR
+ | CMD_FIFO_CLEAR
+ | DMAD_FIFO_CLEAR
+ | ARR_FIFO_CLEAR));
+ tdma_clr(TDMA_RX_XDMA_FIFO_CFG0,
+ (PAR_FIFO_CLEAR
+ | CMD_FIFO_CLEAR
+ | DMAD_FIFO_CLEAR
+ | ARR_FIFO_CLEAR
+ | LEN_FIFO_CLEAR
+ | WID_FIFO_CLEAR
+ | BID_FIFO_CLEAR));
+ }
+}
+
+static inline void tdma_assert_writeback_reset(bool en)
+{
+ if (en) {
+ tdma_set(TDMA_WRBK_TX_FIFO_CFG0, WRBK_RING_CLEAR);
+ tdma_set(TDMA_WRBK_RX_FIFO_CFGX(0), WRBK_RING_CLEAR);
+ tdma_set(TDMA_WRBK_RX_FIFO_CFGX(1), WRBK_RING_CLEAR);
+ tdma_set(TDMA_WRBK_RX_FIFO_CFGX(2), WRBK_RING_CLEAR);
+ tdma_set(TDMA_WRBK_RX_FIFO_CFGX(3), WRBK_RING_CLEAR);
+ } else {
+ tdma_clr(TDMA_WRBK_TX_FIFO_CFG0, WRBK_RING_CLEAR);
+ tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(0), WRBK_RING_CLEAR);
+ tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(1), WRBK_RING_CLEAR);
+ tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(2), WRBK_RING_CLEAR);
+ tdma_clr(TDMA_WRBK_RX_FIFO_CFGX(3), WRBK_RING_CLEAR);
+ }
+}
+
+static inline void tdma_assert_prefetch_ring_reset(bool en)
+{
+ if (en) {
+ tdma_set(TDMA_PREF_SIDX_CFG,
+ (TX_RING0_SIDX_CLR
+ | RX_RINGX_SIDX_CLR(0)
+ | RX_RINGX_SIDX_CLR(1)
+ | RX_RINGX_SIDX_CLR(2)
+ | RX_RINGX_SIDX_CLR(3)));
+ } else {
+ tdma_clr(TDMA_PREF_SIDX_CFG,
+ (TX_RING0_SIDX_CLR
+ | RX_RINGX_SIDX_CLR(0)
+ | RX_RINGX_SIDX_CLR(1)
+ | RX_RINGX_SIDX_CLR(2)
+ | RX_RINGX_SIDX_CLR(3)));
+ }
+}
+
+static inline void tdma_assert_writeback_ring_reset(bool en)
+{
+ if (en) {
+ tdma_set(TDMA_WRBK_SIDX_CFG,
+ (TX_RING0_SIDX_CLR
+ | RX_RINGX_SIDX_CLR(0)
+ | RX_RINGX_SIDX_CLR(1)
+ | RX_RINGX_SIDX_CLR(2)
+ | RX_RINGX_SIDX_CLR(3)));
+ } else {
+ tdma_clr(TDMA_WRBK_SIDX_CFG,
+ (TX_RING0_SIDX_CLR
+ | RX_RINGX_SIDX_CLR(0)
+ | RX_RINGX_SIDX_CLR(1)
+ | RX_RINGX_SIDX_CLR(2)
+ | RX_RINGX_SIDX_CLR(3)));
+ }
+}
+
+static void mtk_tops_tdma_retrieve_last_state(void)
+{
+ tdma.start_ring = tdma_read(TDMA_TX_CTX_IDX_0);
+}
+
+void mtk_tops_tdma_record_last_state(void)
+{
+ tdma_write(TDMA_TX_CTX_IDX_0, tdma.start_ring);
+}
+
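+/*
+ * Determine a best-effort restart point after a TDMA-only reset: snapshot
+ * the DRX index of every Rx ring (starting from the previous start ring),
+ * compensate for wrap-around against the per-ring packet count, and pick
+ * the ring that lags behind its predecessor as the next start ring.
+ */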
+static void tdma_get_next_rx_ring(void)
+{
+ u32 pkt_num_per_core = tdma_read(TDMA_RX_MAX_CNT_X(0));
+ u32 ring[TDMA_RING_NUM] = {0};
+ u32 start = 0;
+ u32 tmp_idx;
+ u32 i;
+
+ for (i = 0; i < TDMA_RING_NUM; i++) {
+ tmp_idx = (tdma.start_ring + i) % TDMA_RING_NUM;
+ ring[i] = tdma_read(TDMA_RX_DRX_IDX_X(tmp_idx));
+ }
+
+ for (i = 1; i < TDMA_RING_NUM; i++) {
+ if (ring[i] >= (pkt_num_per_core - 1) && !ring[i - 1])
+ ring[i - 1] += pkt_num_per_core;
+
+ if (!ring[i] && ring[i - 1] >= (pkt_num_per_core - 1))
+ ring[i] = pkt_num_per_core;
+
+ if (ring[i] < ring[i - 1])
+ start = i;
+ }
+
+ tdma.start_ring = (tdma.start_ring + start) & TDMA_RING_NUM_MOD;
+}
+
+void mtk_tops_tdma_reset(void)
+{
+ if (!mtk_tops_mcu_netsys_fe_rst())
+ /* get next start Rx ring if TDMA reset without NETSYS FE reset */
+ tdma_get_next_rx_ring();
+ else
+ /*
+ * a NETSYS FE reset restarts the CDM ring index,
+ * so there is no need to calculate the next ring index
+ */
+ tdma.start_ring = 0;
+
+ /* then start reset TDMA */
+ tdma_assert_prefetch_reset(true);
+ tdma_assert_prefetch_reset(false);
+
+ tdma_assert_fifo_reset(true);
+ tdma_assert_fifo_reset(false);
+
+ tdma_assert_writeback_reset(true);
+ tdma_assert_writeback_reset(false);
+
+ /* reset tdma ring */
+ tdma_set(TDMA_RST_IDX,
+ (RST_DTX_IDX_0
+ | RST_DRX_IDX_X(0)
+ | RST_DRX_IDX_X(1)
+ | RST_DRX_IDX_X(2)
+ | RST_DRX_IDX_X(3)));
+
+ tdma_assert_prefetch_ring_reset(true);
+ tdma_assert_prefetch_ring_reset(false);
+
+ tdma_assert_writeback_ring_reset(true);
+ tdma_assert_writeback_ring_reset(false);
+
+ /* TODO: should we reset Tx/Rx CPU ring index? */
+}
+
+int mtk_tops_tdma_enable(void)
+{
+ struct mailbox_msg msg = {
+ .msg1 = TOPS_NET_CMD_START,
+ .msg2 = tdma.start_ring,
+ };
+ int ret;
+ u32 i;
+
+ tdma_prefetch_enable(true);
+
+ tdma_set(TDMA_GLO_CFG0, RX_DMA_EN | TX_DMA_EN);
+
+ tdma_writeback_enable(true);
+
+ /* notify TOPS start network processing */
+ ret = mbox_send_msg_no_wait(&tdma.mgmt_mdev, &msg);
+ if (unlikely(ret))
+ return ret;
+
+ for (i = CORE_OFFLOAD_0; i < CORE_OFFLOAD_NUM; i++) {
+ ret = mbox_send_msg_no_wait(&tdma.offload_mdev[i], &msg);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ return ret;
+}
+
+void mtk_tops_tdma_disable(void)
+{
+ struct mailbox_msg msg = {
+ .msg1 = TOPS_NET_CMD_STOP,
+ };
+ u32 i;
+
+ if (mtk_tops_mcu_bring_up_done()) {
+ /* notify TOPS stop network processing */
+ if (unlikely(mbox_send_msg_no_wait(&tdma.mgmt_mdev, &msg)))
+ return;
+
+ for (i = CORE_OFFLOAD_0; i < CORE_OFFLOAD_NUM; i++) {
+ if (unlikely(mbox_send_msg_no_wait(&tdma.offload_mdev[i],
+ &msg)))
+ return;
+ }
+ }
+
+ tdma_prefetch_enable(false);
+
+ /* no need to wait for Tx/Rx idle before stopping Tx/Rx when a NETSYS FE reset is in progress */
+ if (!mtk_tops_mcu_netsys_fe_rst())
+ while (tdma_read(TDMA_GLO_CFG0) & RX_DMA_BUSY)
+ ;
+ tdma_write(TDMA_GLO_CFG0, tdma_read(TDMA_GLO_CFG0) & (~RX_DMA_EN));
+
+ if (!mtk_tops_mcu_netsys_fe_rst())
+ while (tdma_read(TDMA_GLO_CFG0) & TX_DMA_BUSY)
+ ;
+ tdma_write(TDMA_GLO_CFG0, tdma_read(TDMA_GLO_CFG0) & (~TX_DMA_EN));
+
+ tdma_writeback_enable(false);
+}
+
+static int mtk_tops_tdma_register_mbox(void)
+{
+ int ret;
+ int i;
+
+ ret = register_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);
+ if (ret) {
+ TOPS_ERR("register tdma mgmt mbox send failed: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ ret = register_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
+ if (ret) {
+ TOPS_ERR("register tdma offload %d mbox send failed: %d\n",
+ i, ret);
+ goto err_unregister_offload_mbox;
+ }
+ }
+
+ return ret;
+
+err_unregister_offload_mbox:
+ for (i -= 1; i >= 0; i--)
+ unregister_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
+
+ unregister_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);
+
+ return ret;
+}
+
+static void mtk_tops_tdma_unregister_mbox(void)
+{
+ int i;
+
+ unregister_mbox_dev(MBOX_SEND, &tdma.mgmt_mdev);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++)
+ unregister_mbox_dev(MBOX_SEND, &tdma.offload_mdev[i]);
+}
+
+static int mtk_tops_tdma_dts_init(struct platform_device *pdev)
+{
+ struct device_node *fe_mem = NULL;
+ struct resource res;
+ int ret = 0;
+
+ fe_mem = of_parse_phandle(pdev->dev.of_node, "fe_mem", 0);
+ if (!fe_mem) {
+ TOPS_ERR("can not find fe_mem node\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(fe_mem, 0, &res))
+ return -ENXIO;
+
+ /* map FE address */
+ tdma.base = devm_ioremap(&pdev->dev, res.start, resource_size(&res));
+ if (!tdma.base)
+ return -ENOMEM;
+
+ /* shift FE address to TDMA base */
+ tdma.base += TDMA_BASE;
+
+ of_node_put(fe_mem);
+
+ return ret;
+}
+
+int mtk_tops_tdma_init(struct platform_device *pdev)
+{
+ int ret = 0;
+
+ ret = mtk_tops_tdma_register_mbox();
+ if (ret)
+ return ret;
+
+ ret = mtk_tops_tdma_dts_init(pdev);
+ if (ret)
+ return ret;
+
+ ret = mtk_trm_hw_config_register(TRM_TDMA, &tdma_trm_hw_cfg);
+ if (ret)
+ return ret;
+
+ mtk_tops_tdma_retrieve_last_state();
+
+ return ret;
+}
+
+void mtk_tops_tdma_deinit(struct platform_device *pdev)
+{
+ mtk_trm_hw_config_unregister(TRM_TDMA, &tdma_trm_hw_cfg);
+
+ mtk_tops_tdma_unregister_mbox();
+}
diff --git a/package-21.02/kernel/tops/src/tnl_offload.c b/package-21.02/kernel/tops/src/tnl_offload.c
new file mode 100644
index 0000000..c989054
--- /dev/null
+++ b/package-21.02/kernel/tops/src/tnl_offload.c
@@ -0,0 +1,1356 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/string.h>
+
+#include <mtk_eth_soc.h>
+#include <mtk_hnat/hnat.h>
+#include <mtk_hnat/nf_hnat_mtk.h>
+
+#include <pce/dipfilter.h>
+#include <pce/pce.h>
+
+#include "internal.h"
+#include "mbox.h"
+#include "mcu.h"
+#include "netsys.h"
+#include "protocol/gre/gretap.h"
+#include "protocol/l2tp/udp_l2tp_data.h"
+#include "tunnel.h"
+
+#define TOPS_PPE_ENTRY_BUCKETS (64)
+#define TOPS_PPE_ENTRY_BUCKETS_BIT (6)
+
+struct tops_tnl {
+ /* tunnel types */
+ struct tops_tnl_type *offload_tnl_types[__TOPS_ENTRY_MAX];
+ u32 offload_tnl_type_num;
+ u32 tnl_base_addr;
+
+ /* tunnel table */
+ DECLARE_HASHTABLE(ht, CONFIG_TOPS_TNL_MAP_BIT);
+ DECLARE_BITMAP(tnl_used, CONFIG_TOPS_TNL_NUM);
+ wait_queue_head_t tnl_sync_wait;
+ spinlock_t tnl_sync_lock;
+ spinlock_t tbl_lock;
+ bool has_tnl_to_sync;
+ struct task_struct *tnl_sync_thread;
+ struct list_head *tnl_sync_pending;
+ struct list_head *tnl_sync_submit;
+ struct tops_tnl_info *tnl_infos;
+
+ /* dma request */
+ struct completion dma_done;
+ struct dma_chan *dmachan;
+
+ struct device *dev;
+};
+
+static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg);
+
+static struct tops_tnl tops_tnl;
+
+static LIST_HEAD(tnl_sync_q1);
+static LIST_HEAD(tnl_sync_q2);
+
+struct mailbox_dev tnl_offload_mbox_recv =
+ MBOX_RECV_MGMT_DEV(TNL_OFFLOAD, tnl_offload_mbox_cmd_recv);
+
+/* tunnel mailbox communication */
+static enum mbox_msg_cnt tnl_offload_mbox_cmd_recv(struct mailbox_dev *mdev,
+ struct mailbox_msg *msg)
+{
+ switch (msg->msg1) {
+ case TOPS_TNL_START_ADDR_SYNC:
+ tops_tnl.tnl_base_addr = msg->msg2;
+
+ return MBOX_NO_RET_MSG;
+ default:
+ break;
+ }
+
+ return MBOX_NO_RET_MSG;
+}
+
+static inline void tnl_flush_ppe_entry(struct foe_entry *entry, u32 tnl_idx)
+{
+ u32 bind_tnl_idx;
+
+ if (unlikely(!entry))
+ return;
+
+ switch (entry->bfib1.pkt_type) {
+ case IPV4_HNAPT:
+ if (entry->ipv4_hnapt.tport_id != NR_TDMA_TPORT
+ && entry->ipv4_hnapt.tport_id != NR_TDMA_QDMA_TPORT)
+ return;
+
+ bind_tnl_idx = entry->ipv4_hnapt.tops_entry - __TOPS_ENTRY_MAX;
+
+ break;
+ default:
+ return;
+ }
+
+ /* unexpected tunnel index */
+ if (bind_tnl_idx >= __TOPS_ENTRY_MAX)
+ return;
+
+ if (tnl_idx == __TOPS_ENTRY_MAX || tnl_idx == bind_tnl_idx)
+ memset(entry, 0, sizeof(*entry));
+}
+
+static inline void skb_set_tops_tnl_idx(struct sk_buff *skb, u32 tnl_idx)
+{
+ skb_hnat_tops(skb) = tnl_idx + __TOPS_ENTRY_MAX;
+}
+
+static inline bool skb_tops_valid(struct sk_buff *skb)
+{
+ return (skb
+ && skb_hnat_tops(skb) >= 0
+ && skb_hnat_tops(skb) <= __TOPS_ENTRY_MAX);
+}
+
+static inline struct tops_tnl_type *skb_to_tnl_type(struct sk_buff *skb)
+{
+ enum tops_entry_type tops_entry = skb_hnat_tops(skb);
+ struct tops_tnl_type *tnl_type;
+
+ if (unlikely(!tops_entry || tops_entry >= __TOPS_ENTRY_MAX))
+ return ERR_PTR(-EINVAL);
+
+ tnl_type = tops_tnl.offload_tnl_types[tops_entry];
+
+ return tnl_type ? tnl_type : ERR_PTR(-ENODEV);
+}
+
+static inline void skb_mark_unbind(struct sk_buff *skb)
+{
+ skb_hnat_tops(skb) = 0;
+ skb_hnat_is_decap(skb) = 0;
+ skb_hnat_alg(skb) = 1;
+}
+
+static inline u32 tnl_params_hash(struct tops_tnl_params *tnl_params)
+{
+ if (!tnl_params)
+ return 0;
+
+ /* TODO: check collision possibility? */
+ return (tnl_params->sip ^ tnl_params->dip);
+}
+
+static inline bool tnl_info_decap_is_enable(struct tops_tnl_info *tnl_info)
+{
+ return tnl_info->cache.flag & TNL_DECAP_ENABLE;
+}
+
+static inline void tnl_info_decap_enable(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->cache.flag |= TNL_DECAP_ENABLE;
+}
+
+static inline void tnl_info_decap_disable(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->cache.flag &= ~(TNL_DECAP_ENABLE);
+}
+
+static inline bool tnl_info_encap_is_enable(struct tops_tnl_info *tnl_info)
+{
+ return tnl_info->cache.flag & TNL_ENCAP_ENABLE;
+}
+
+static inline void tnl_info_encap_enable(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->cache.flag |= TNL_ENCAP_ENABLE;
+}
+
+static inline void tnl_info_encap_disable(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->cache.flag &= ~(TNL_ENCAP_ENABLE);
+}
+
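+/*
+ * Rough tnl_info->status lifecycle, as driven by the helpers below and the
+ * sync/delete paths later in this file:
+ *
+ *   UNINIT -> INIT       tunnel entry allocated (mtk_tops_tnl_info_alloc)
+ *   INIT   -> QUEUED     parameters submitted for sync to the TOPS MCU
+ *   QUEUED -> UPDATING   sync path picked the entry up and is writing it
+ *   UPDATING -> UPDATED  parameters are live on the MCU
+ *   DELETING             set when offload is torn down; the entry is synced
+ *                        once more, then freed back to UNINIT
+ */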
+static inline void tnl_info_sta_updated_no_tnl_lock(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->status &= (~TNL_STA_UPDATING);
+ tnl_info->status &= (~TNL_STA_INIT);
+ tnl_info->status |= TNL_STA_UPDATED;
+}
+
+static inline void tnl_info_sta_updated(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ if (unlikely(!tnl_info))
+ return;
+
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ tnl_info_sta_updated_no_tnl_lock(tnl_info);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+}
+
+static inline bool tnl_info_sta_is_updated(struct tops_tnl_info *tnl_info)
+{
+ return tnl_info->status & TNL_STA_UPDATED;
+}
+
+static inline void tnl_info_sta_updating_no_tnl_lock(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->status |= TNL_STA_UPDATING;
+ tnl_info->status &= (~TNL_STA_QUEUED);
+ tnl_info->status &= (~TNL_STA_UPDATED);
+}
+
+static inline void tnl_info_sta_updating(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ if (unlikely(!tnl_info))
+ return;
+
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ tnl_info_sta_updating_no_tnl_lock(tnl_info);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+}
+
+static inline bool tnl_info_sta_is_updating(struct tops_tnl_info *tnl_info)
+{
+ return tnl_info->status & TNL_STA_UPDATING;
+}
+
+static inline void tnl_info_sta_queued_no_tnl_lock(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->status |= TNL_STA_QUEUED;
+ tnl_info->status &= (~TNL_STA_UPDATED);
+}
+
+static inline void tnl_info_sta_queued(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ if (unlikely(!tnl_info))
+ return;
+
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ tnl_info_sta_queued_no_tnl_lock(tnl_info);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+}
+
+static inline bool tnl_info_sta_is_queued(struct tops_tnl_info *tnl_info)
+{
+ return tnl_info->status & TNL_STA_QUEUED;
+}
+
+static inline void tnl_info_sta_init_no_tnl_lock(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->status = TNL_STA_INIT;
+}
+
+static inline void tnl_info_sta_init(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ if (unlikely(!tnl_info))
+ return;
+
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ tnl_info_sta_init_no_tnl_lock(tnl_info);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+}
+
+static inline bool tnl_info_sta_is_init(struct tops_tnl_info *tnl_info)
+{
+ return tnl_info->status & TNL_STA_INIT;
+}
+
+static inline void tnl_info_sta_uninit_no_tnl_lock(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->status = TNL_STA_UNINIT;
+}
+
+static inline void tnl_info_sta_uninit(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ if (unlikely(!tnl_info))
+ return;
+
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ tnl_info_sta_uninit_no_tnl_lock(tnl_info);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+}
+
+static inline bool tnl_info_sta_is_uninit(struct tops_tnl_info *tnl_info)
+{
+ return tnl_info->status & TNL_STA_UNINIT;
+}
+
+static inline void tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
+
+ list_add_tail(&tnl_info->sync_node, tops_tnl.tnl_sync_submit);
+
+ tops_tnl.has_tnl_to_sync = true;
+
+ spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
+
+ if (mtk_tops_mcu_alive())
+ wake_up_interruptible(&tops_tnl.tnl_sync_wait);
+}
+
+static int mtk_tops_tnl_info_dipfilter_tear_down(struct tops_tnl_info *tnl_info)
+{
+ struct dip_desc dipd;
+
+ memset(&dipd, 0, sizeof(struct dip_desc));
+
+ dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
+ dipd.tag = DIPFILTER_IPV4;
+
+ return mtk_pce_dipfilter_entry_del(&dipd);
+}
+
+static int mtk_tops_tnl_info_dipfilter_setup(struct tops_tnl_info *tnl_info)
+{
+ struct dip_desc dipd;
+
+ /* setup dipfilter */
+ memset(&dipd, 0, sizeof(struct dip_desc));
+
+ dipd.ipv4 = be32_to_cpu(tnl_info->tnl_params.sip);
+ dipd.tag = DIPFILTER_IPV4;
+
+ return mtk_pce_dipfilter_entry_add(&dipd);
+}
+
+void mtk_tops_tnl_info_submit_no_tnl_lock(struct tops_tnl_info *tnl_info)
+{
+ lockdep_assert_held(&tnl_info->lock);
+
+ if (tnl_info_sta_is_queued(tnl_info))
+ return;
+
+ tnl_info_submit_no_tnl_lock(tnl_info);
+
+ tnl_info_sta_queued_no_tnl_lock(tnl_info);
+}
+
+void mtk_tops_tnl_info_submit(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ if (unlikely(!tnl_info))
+ return;
+
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+}
+
+static void mtk_tops_tnl_info_hash_no_lock(struct tops_tnl_info *tnl_info)
+{
+ lockdep_assert_held(&tops_tnl.tbl_lock);
+ lockdep_assert_held(&tnl_info->lock);
+
+ if (hash_hashed(&tnl_info->hlist))
+ hash_del(&tnl_info->hlist);
+
+ hash_add(tops_tnl.ht, &tnl_info->hlist, tnl_params_hash(&tnl_info->cache));
+}
+
+void mtk_tops_tnl_info_hash(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ if (unlikely(!tnl_info))
+ return;
+
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ spin_lock(&tnl_info->lock);
+
+ mtk_tops_tnl_info_hash_no_lock(tnl_info);
+
+ spin_unlock(&tnl_info->lock);
+
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+}
+
+static bool mtk_tops_tnl_info_match(struct tops_tnl_type *tnl_type,
+ struct tops_tnl_info *tnl_info,
+ struct tops_tnl_params *match_data)
+{
+ unsigned long flag = 0;
+ bool match;
+
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ match = tnl_type->tnl_info_match(&tnl_info->cache, match_data);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+
+ return match;
+}
+
+struct tops_tnl_info *mtk_tops_tnl_info_find(struct tops_tnl_params *tnl_params)
+{
+ struct tops_tnl_info *tnl_info;
+ struct tops_tnl_type *tnl_type;
+
+ lockdep_assert_held(&tops_tnl.tbl_lock);
+
+ if (unlikely(!tnl_params->tops_entry_proto
+ || tnl_params->tops_entry_proto >= __TOPS_ENTRY_MAX))
+ return ERR_PTR(-EINVAL);
+
+ tnl_type = tops_tnl.offload_tnl_types[tnl_params->tops_entry_proto];
+ if (unlikely(!tnl_type))
+ return ERR_PTR(-EINVAL);
+
+ if (unlikely(!tnl_type->tnl_info_match))
+ return ERR_PTR(-ENXIO);
+
+ hash_for_each_possible(tops_tnl.ht,
+ tnl_info,
+ hlist,
+ tnl_params_hash(tnl_params))
+ if (mtk_tops_tnl_info_match(tnl_type, tnl_info, tnl_params))
+ return tnl_info;
+
+ return ERR_PTR(-ENODEV);
+}
+
+/* tnl_info->lock should be held before calling this function */
+static int mtk_tops_tnl_info_setup(struct sk_buff *skb,
+ struct tops_tnl_info *tnl_info,
+ struct tops_tnl_params *tnl_params)
+{
+ if (unlikely(!skb || !tnl_info || !tnl_params))
+ return -EPERM;
+
+ lockdep_assert_held(&tnl_info->lock);
+
+ tnl_params->flag |= tnl_info->cache.flag;
+
+ if (memcmp(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params))) {
+ memcpy(&tnl_info->cache, tnl_params, sizeof(struct tops_tnl_params));
+
+ mtk_tops_tnl_info_hash_no_lock(tnl_info);
+ }
+
+ if (skb_hnat_is_decap(skb)) {
+ /* the net_device is used to forward the packet to the decapsulated interface on Rx */
+ tnl_info->dev = skb->dev;
+ if (!tnl_info_decap_is_enable(tnl_info)) {
+ tnl_info_decap_enable(tnl_info);
+
+ mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
+ }
+ } else if (skb_hnat_is_encap(skb)) {
+ /* set skb_hnat_tops(skb) to tunnel index for ppe binding */
+ skb_set_tops_tnl_idx(skb, tnl_info->tnl_idx);
+ if (!tnl_info_encap_is_enable(tnl_info)) {
+ tnl_info_encap_enable(tnl_info);
+
+ mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
+ }
+ }
+
+ return 0;
+}
+
+/* tops_tnl.tbl_lock should be acquired before calling this functions */
+static struct tops_tnl_info *mtk_tops_tnl_info_alloc_no_lock(void)
+{
+ struct tops_tnl_info *tnl_info;
+ unsigned long flag = 0;
+ u32 tnl_idx;
+
+ lockdep_assert_held(&tops_tnl.tbl_lock);
+
+ tnl_idx = find_first_zero_bit(tops_tnl.tnl_used, CONFIG_TOPS_TNL_NUM);
+ if (tnl_idx == CONFIG_TOPS_TNL_NUM) {
+ TOPS_NOTICE("offload tunnel table full!\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* occupy the free tunnel entry */
+ tnl_info = &tops_tnl.tnl_infos[tnl_idx];
+ memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
+ memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
+
+ /* TODO: maybe spin_lock_bh() is enough? */
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ if (tnl_info_sta_is_init(tnl_info)) {
+ TOPS_ERR("error: fetched an initialized tunnel info\n");
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+
+ return ERR_PTR(-EBADF);
+ }
+ tnl_info_sta_init_no_tnl_lock(tnl_info);
+
+ INIT_HLIST_NODE(&tnl_info->hlist);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+
+ set_bit(tnl_idx, tops_tnl.tnl_used);
+
+ return tnl_info;
+}
+
+struct tops_tnl_info *mtk_tops_tnl_info_alloc(void)
+{
+ struct tops_tnl_info *tnl_info;
+ unsigned long flag = 0;
+
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ tnl_info = mtk_tops_tnl_info_alloc_no_lock();
+
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+
+ return tnl_info;
+}
+
+static void mtk_tops_tnl_info_free_no_lock(struct tops_tnl_info *tnl_info)
+{
+ if (unlikely(!tnl_info))
+ return;
+
+ lockdep_assert_held(&tops_tnl.tbl_lock);
+ lockdep_assert_held(&tnl_info->lock);
+
+ hash_del(&tnl_info->hlist);
+
+ tnl_info_sta_uninit_no_tnl_lock(tnl_info);
+
+ clear_bit(tnl_info->tnl_idx, tops_tnl.tnl_used);
+}
+
+static void mtk_tops_tnl_info_free(struct tops_tnl_info *tnl_info)
+{
+ unsigned long flag = 0;
+
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ spin_lock(&tnl_info->lock);
+
+ mtk_tops_tnl_info_free_no_lock(tnl_info);
+
+ spin_unlock(&tnl_info->lock);
+
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+}
+
+static void __mtk_tops_tnl_offload_disable(struct tops_tnl_info *tnl_info)
+{
+ tnl_info->status |= TNL_STA_DELETING;
+ mtk_tops_tnl_info_submit_no_tnl_lock(tnl_info);
+}
+
+static int mtk_tops_tnl_offload(struct sk_buff *skb,
+ struct tops_tnl_params *tnl_params)
+{
+ struct tops_tnl_info *tnl_info;
+ unsigned long flag;
+ int ret = 0;
+
+ if (unlikely(!tnl_params))
+ return -EPERM;
+
+ /* prepare tnl_info */
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ tnl_info = mtk_tops_tnl_info_find(tnl_params);
+ if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) != -ENODEV) {
+ /* error */
+ ret = PTR_ERR(tnl_info);
+ goto err_out;
+ } else if (IS_ERR(tnl_info) && PTR_ERR(tnl_info) == -ENODEV) {
+ /* not allocated yet */
+ tnl_info = mtk_tops_tnl_info_alloc_no_lock();
+ }
+
+ if (IS_ERR(tnl_info)) {
+ ret = PTR_ERR(tnl_info);
+ TOPS_DBG("tnl offload alloc tnl_info failed: %d\n", ret);
+ goto err_out;
+ }
+
+ spin_lock(&tnl_info->lock);
+ ret = mtk_tops_tnl_info_setup(skb, tnl_info, tnl_params);
+
+ spin_unlock(&tnl_info->lock);
+
+err_out:
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+
+ return ret;
+}
+
+static bool mtk_tops_tnl_decap_offloadable(struct sk_buff *skb)
+{
+ struct tops_tnl_type *tnl_type;
+ struct ethhdr *eth;
+ u32 cnt;
+ u32 i;
+
+ if (unlikely(!mtk_tops_mcu_alive())) {
+ skb_mark_unbind(skb);
+ return false;
+ }
+
+ /* skb should not carry tops here */
+ if (skb_hnat_tops(skb))
+ return false;
+
+ eth = eth_hdr(skb);
+
+ /* TODO: decap currently only supports an Ethernet/IPv4 outer header */
+ if (ntohs(eth->h_proto) != ETH_P_IP)
+ return false;
+
+ /* TODO: this lookup could be optimized */
+ for (i = TOPS_ENTRY_GRETAP, cnt = 0;
+ i < __TOPS_ENTRY_MAX && cnt < tops_tnl.offload_tnl_type_num;
+ i++) {
+ tnl_type = tops_tnl.offload_tnl_types[i];
+ if (unlikely(!tnl_type))
+ continue;
+
+ cnt++;
+ if (tnl_type->tnl_decap_offloadable
+ && tnl_type->tnl_decap_offloadable(skb)) {
+ skb_hnat_tops(skb) = tnl_type->tops_entry;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int mtk_tops_tnl_decap_offload(struct sk_buff *skb)
+{
+ struct tops_tnl_params tnl_params;
+ struct tops_tnl_type *tnl_type;
+ int ret;
+
+ if (unlikely(!mtk_tops_mcu_alive())) {
+ skb_mark_unbind(skb);
+ return -EAGAIN;
+ }
+
+ if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_decap(skb))) {
+ skb_mark_unbind(skb);
+ return -EINVAL;
+ }
+
+ tnl_type = skb_to_tnl_type(skb);
+ if (IS_ERR(tnl_type)) {
+ skb_mark_unbind(skb);
+ return PTR_ERR(tnl_type);
+ }
+
+ if (unlikely(!tnl_type->tnl_decap_param_setup)) {
+ skb_mark_unbind(skb);
+ return -ENODEV;
+ }
+
+ memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
+
+ /* push removed ethernet header back first */
+ if (tnl_type->has_inner_eth)
+ skb_push(skb, sizeof(struct ethhdr));
+
+ ret = tnl_type->tnl_decap_param_setup(skb, &tnl_params);
+
+ /* pull ethernet header to restore skb->data to ip start */
+ if (tnl_type->has_inner_eth)
+ skb_pull(skb, sizeof(struct ethhdr));
+
+ if (unlikely(ret)) {
+ skb_mark_unbind(skb);
+ return ret;
+ }
+
+ tnl_params.tops_entry_proto = tnl_type->tops_entry;
+
+ ret = mtk_tops_tnl_offload(skb, &tnl_params);
+
+ /*
+ * Whether offloading the decapsulation tunnel succeeds or fails,
+ * skb_hnat_tops(skb) must be cleared to avoid mtk_tnl_decap_offload()
+ * being called again
+ */
+ skb_hnat_tops(skb) = 0;
+ skb_hnat_is_decap(skb) = 0;
+
+ return ret;
+}
+
+static int mtk_tops_tnl_encap_offload(struct sk_buff *skb)
+{
+ struct tops_tnl_params tnl_params;
+ struct tops_tnl_type *tnl_type;
+ int ret;
+
+ if (unlikely(!mtk_tops_mcu_alive())) {
+ skb_mark_unbind(skb);
+ return -EAGAIN;
+ }
+
+ if (unlikely(!skb_tops_valid(skb) || !skb_hnat_is_encap(skb)))
+ return -EPERM;
+
+ tnl_type = skb_to_tnl_type(skb);
+ if (IS_ERR(tnl_type))
+ return PTR_ERR(tnl_type);
+
+ if (unlikely(!tnl_type->tnl_encap_param_setup))
+ return -ENODEV;
+
+ memset(&tnl_params, 0, sizeof(struct tops_tnl_params));
+
+ ret = tnl_type->tnl_encap_param_setup(skb, &tnl_params);
+ if (unlikely(ret))
+ return ret;
+ tnl_params.tops_entry_proto = tnl_type->tops_entry;
+
+ return mtk_tops_tnl_offload(skb, &tnl_params);
+}
+
+static struct net_device *mtk_tops_get_tnl_dev(int tnl_idx)
+{
+ if (tnl_idx < TOPS_CRSN_TNL_ID_START || tnl_idx > TOPS_CRSN_TNL_ID_END)
+ return ERR_PTR(-EINVAL);
+
+ tnl_idx = tnl_idx - TOPS_CRSN_TNL_ID_START;
+
+ return tops_tnl.tnl_infos[tnl_idx].dev;
+}
+
+static void mtk_tops_tnl_sync_dma_done(void *param)
+{
+ /* TODO: check tx status with dmaengine_tx_status()? */
+ complete(&tops_tnl.dma_done);
+}
+
+static void mtk_tops_tnl_sync_dma_start(void *param)
+{
+ dma_async_issue_pending(tops_tnl.dmachan);
+
+ wait_for_completion(&tops_tnl.dma_done);
+}
+
+static void mtk_tops_tnl_sync_dma_unprepare(struct tops_tnl_info *tnl_info,
+ dma_addr_t *addr)
+{
+ dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
+ DMA_TO_DEVICE);
+
+ dma_release_channel(tops_tnl.dmachan);
+}
+
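+/*
+ * Map tnl_info->tnl_params for DMA and prepare a memcpy descriptor that
+ * copies the parameters into this tunnel's slot of the TOPS tunnel table
+ * (tnl_base_addr + tnl_idx * sizeof(struct tops_tnl_params)). The transfer
+ * itself is started later by mtk_tops_tnl_sync_dma_start().
+ */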
+static int mtk_tops_tnl_sync_dma_prepare(struct tops_tnl_info *tnl_info,
+ dma_addr_t *addr)
+{
+ u32 tnl_addr = tops_tnl.tnl_base_addr;
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (!tnl_info)
+ return -EPERM;
+
+ tnl_addr += tnl_info->tnl_idx * sizeof(struct tops_tnl_params);
+
+ tops_tnl.dmachan = dma_request_slave_channel(tops_dev, "tnl-sync");
+ if (!tops_tnl.dmachan) {
+ TOPS_ERR("request dma channel failed\n");
+ return -ENODEV;
+ }
+
+ *addr = dma_map_single(tops_dev,
+ &tnl_info->tnl_params,
+ sizeof(struct tops_tnl_params),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tops_dev, *addr)) {
+ ret = -ENOMEM;
+ goto dma_release;
+ }
+
+ desc = dmaengine_prep_dma_memcpy(tops_tnl.dmachan,
+ (dma_addr_t)tnl_addr, *addr,
+ sizeof(struct tops_tnl_params),
+ 0);
+ if (!desc) {
+ ret = -EBUSY;
+ goto dma_unmap;
+ }
+
+ desc->callback = mtk_tops_tnl_sync_dma_done;
+
+ cookie = dmaengine_submit(desc);
+ ret = dma_submit_error(cookie);
+ if (ret)
+ goto dma_terminate;
+
+ reinit_completion(&tops_tnl.dma_done);
+
+ return ret;
+
+dma_terminate:
+ dmaengine_terminate_all(tops_tnl.dmachan);
+
+dma_unmap:
+ dma_unmap_single(tops_dev, *addr, sizeof(struct tops_tnl_params),
+ DMA_TO_DEVICE);
+
+dma_release:
+ dma_release_channel(tops_tnl.dmachan);
+
+ return ret;
+}
+
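+/*
+ * Notify the MCU that this tunnel is being deleted, then DMA a zeroed
+ * tops_tnl_params into the tunnel table slot to clear it.
+ */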
+static int __mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
+{
+ struct mcu_ctrl_cmd mcmd;
+ dma_addr_t addr;
+ int ret;
+
+ mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
+ mcmd.arg[0] = TUNNEL_CTRL_EVENT_DEL;
+ mcmd.arg[1] = tnl_info->tnl_idx;
+ mcmd.core_mask = CORE_TOPS_MASK;
+
+ ret = mtk_tops_mcu_stall(&mcmd, NULL, NULL);
+ if (ret) {
+ TOPS_ERR("tnl sync deletion notify mcu failed: %d\n", ret);
+ return ret;
+ }
+
+ /* there shouldn't be any other reference to tnl_info right now */
+ memset(&tnl_info->cache, 0, sizeof(struct tops_tnl_params));
+ memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
+
+ ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
+ if (ret) {
+ TOPS_ERR("tnl sync deletion prepare dma request failed: %d\n", ret);
+ return ret;
+ }
+
+ mtk_tops_tnl_sync_dma_start(NULL);
+
+ mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
+
+ return ret;
+}
+
+static int mtk_tops_tnl_sync_param_delete(struct tops_tnl_info *tnl_info)
+{
+ int ret;
+
+ ret = mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
+ if (ret) {
+ TOPS_ERR("tnl sync dipfitler tear down failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = __mtk_tops_tnl_sync_param_delete(tnl_info);
+ if (ret) {
+ TOPS_ERR("tnl sync deletion failed: %d\n", ret);
+ return ret;
+ }
+
+ mtk_tops_tnl_info_free(tnl_info);
+
+ return ret;
+}
+
+static int __mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
+ bool is_new_tnl)
+{
+ struct mcu_ctrl_cmd mcmd;
+ dma_addr_t addr;
+ int ret;
+
+ mcmd.e = MCU_EVENT_TYPE_SYNC_TNL;
+ mcmd.arg[1] = tnl_info->tnl_idx;
+ mcmd.core_mask = CORE_TOPS_MASK;
+
+ if (is_new_tnl)
+ mcmd.arg[0] = TUNNEL_CTRL_EVENT_NEW;
+ else
+ mcmd.arg[0] = TUNNEL_CTRL_EVENT_DIP_UPDATE;
+
+ ret = mtk_tops_tnl_sync_dma_prepare(tnl_info, &addr);
+ if (ret) {
+ TOPS_ERR("tnl sync update prepare dma request failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = mtk_tops_mcu_stall(&mcmd, mtk_tops_tnl_sync_dma_start, NULL);
+ if (ret)
+ TOPS_ERR("tnl sync update notify mcu failed: %d\n", ret);
+
+ mtk_tops_tnl_sync_dma_unprepare(tnl_info, &addr);
+
+ return ret;
+}
+
+static int mtk_tops_tnl_sync_param_update(struct tops_tnl_info *tnl_info,
+ bool setup_pce, bool is_new_tnl)
+{
+ int ret;
+
+ ret = __mtk_tops_tnl_sync_param_update(tnl_info, is_new_tnl);
+ if (ret) {
+ TOPS_ERR("tnl sync failed: %d\n", ret);
+ return ret;
+ }
+
+ tnl_info_sta_updated(tnl_info);
+
+ if (setup_pce) {
+ ret = mtk_tops_tnl_info_dipfilter_setup(tnl_info);
+ if (ret) {
+ TOPS_ERR("tnl dipfilter setup failed: %d\n", ret);
+ /* TODO: should undo parameter sync */
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static inline int mtk_tops_tnl_sync_param_new(struct tops_tnl_info *tnl_info,
+ bool setup_pce)
+{
+ return mtk_tops_tnl_sync_param_update(tnl_info, setup_pce, true);
+}
+
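+/*
+ * Swap the submit and pending queues under the sync lock: the sync thread
+ * then walks the (now) pending list privately while new tunnel updates keep
+ * queueing onto the emptied submit list.
+ */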
+static void mtk_tops_tnl_sync_get_pending_queue(void)
+{
+ struct list_head *tmp = tops_tnl.tnl_sync_submit;
+ unsigned long flag = 0;
+
+ spin_lock_irqsave(&tops_tnl.tnl_sync_lock, flag);
+
+ tops_tnl.tnl_sync_submit = tops_tnl.tnl_sync_pending;
+ tops_tnl.tnl_sync_pending = tmp;
+
+ tops_tnl.has_tnl_to_sync = false;
+
+ spin_unlock_irqrestore(&tops_tnl.tnl_sync_lock, flag);
+}
+
+static void mtk_tops_tnl_sync_queue_proc(void)
+{
+ struct tops_tnl_info *tnl_info;
+ struct tops_tnl_info *tmp;
+ unsigned long flag = 0;
+ bool is_decap = false;
+ u32 tnl_status = 0;
+ int ret;
+
+ list_for_each_entry_safe(tnl_info,
+ tmp,
+ tops_tnl.tnl_sync_pending,
+ sync_node) {
+ spin_lock_irqsave(&tnl_info->lock, flag);
+
+ /* tnl update is on the fly, queue tnl to next round */
+ if (tnl_info_sta_is_updating(tnl_info)) {
+ list_del_init(&tnl_info->sync_node);
+
+ tnl_info_submit_no_tnl_lock(tnl_info);
+
+ goto next;
+ }
+
+ /*
+ * if tnl_info is not queued, something is wrong;
+ * just remove that tnl_info from the queue
+ * (maybe this should trigger a BUG_ON()?)
+ */
+ if (!tnl_info_sta_is_queued(tnl_info)) {
+ list_del_init(&tnl_info->sync_node);
+ goto next;
+ }
+
+ is_decap = (!(tnl_info->tnl_params.flag & TNL_DECAP_ENABLE)
+ && tnl_info_decap_is_enable(tnl_info));
+
+ tnl_status = tnl_info->status;
+ memcpy(&tnl_info->tnl_params, &tnl_info->cache,
+ sizeof(struct tops_tnl_params));
+
+ list_del_init(&tnl_info->sync_node);
+
+ /*
+ * mark tnl_info as updating and release its spin lock,
+ * since the sync below uses DMA to transfer data
+ * and might sleep
+ */
+ tnl_info_sta_updating_no_tnl_lock(tnl_info);
+
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+
+ if (tnl_status & TNL_STA_INIT)
+ ret = mtk_tops_tnl_sync_param_new(tnl_info, is_decap);
+ else if (tnl_status & TNL_STA_DELETING)
+ ret = mtk_tops_tnl_sync_param_delete(tnl_info);
+ else
+ ret = mtk_tops_tnl_sync_param_update(tnl_info,
+ is_decap,
+ false);
+
+ if (ret)
+ TOPS_ERR("sync tunnel parameter failed: %d\n", ret);
+
+ continue;
+
+next:
+ spin_unlock_irqrestore(&tnl_info->lock, flag);
+ }
+}
+
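+/* kthread that waits for queued tunnel updates and syncs them to the TOPS MCU */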
+static int tnl_sync_task(void *data)
+{
+ while (1) {
+ wait_event_interruptible(tops_tnl.tnl_sync_wait,
+ (tops_tnl.has_tnl_to_sync && mtk_tops_mcu_alive())
+ || kthread_should_stop());
+
+ if (kthread_should_stop())
+ break;
+
+ mtk_tops_tnl_sync_get_pending_queue();
+
+ mtk_tops_tnl_sync_queue_proc();
+ }
+
+ return 0;
+}
+
+static void mtk_tops_tnl_info_flush_ppe(struct tops_tnl_info *tnl_info)
+{
+ struct foe_entry *entry;
+ u32 max_entry;
+ u32 ppe_id;
+ u32 eidx;
+
+ /* tnl info's lock should be held */
+ lockdep_assert_held(&tnl_info->lock);
+
+ /* clear all TOPS related PPE entries */
+ for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
+ max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
+ for (eidx = 0; eidx < max_entry; eidx++) {
+ entry = hnat_get_foe_entry(ppe_id, eidx);
+ if (IS_ERR(entry))
+ break;
+
+ if (!entry_hnat_is_bound(entry))
+ continue;
+
+ tnl_flush_ppe_entry(entry, tnl_info->tnl_idx);
+ }
+ }
+ hnat_cache_ebl(1);
+ /* make sure all data is written to dram PPE table */
+ wmb();
+}
+
+void mtk_tops_tnl_offload_netdev_down(struct net_device *ndev)
+{
+ struct tops_tnl_info *tnl_info;
+ unsigned long flag;
+ u32 bkt;
+
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
+ spin_lock(&tnl_info->lock);
+
+ if (tnl_info->dev == ndev) {
+ mtk_tops_tnl_info_flush_ppe(tnl_info);
+
+ __mtk_tops_tnl_offload_disable(tnl_info);
+
+ spin_unlock(&tnl_info->lock);
+
+ break;
+ }
+
+ spin_unlock(&tnl_info->lock);
+ }
+
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+}
+
+void mtk_tops_tnl_offload_flush(void)
+{
+ struct tops_tnl_info *tnl_info;
+ struct foe_entry *entry;
+ unsigned long flag;
+ u32 max_entry;
+ u32 ppe_id;
+ u32 eidx;
+ u32 bkt;
+
+ /* clear all TOPS related PPE entries */
+ for (ppe_id = 0; ppe_id < MAX_PPE_NUM; ppe_id++) {
+ max_entry = mtk_tops_netsys_ppe_get_max_entry_num(ppe_id);
+ for (eidx = 0; eidx < max_entry; eidx++) {
+ entry = hnat_get_foe_entry(ppe_id, eidx);
+ if (IS_ERR(entry))
+ break;
+
+ if (!entry_hnat_is_bound(entry))
+ continue;
+
+ tnl_flush_ppe_entry(entry, __TOPS_ENTRY_MAX);
+ }
+ }
+ hnat_cache_ebl(1);
+ /* make sure all data is written to dram PPE table */
+ wmb();
+
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
+ /* clear each tunnel's synced parameters, but preserve its cache */
+ memset(&tnl_info->tnl_params, 0, sizeof(struct tops_tnl_params));
+ /*
+ * set the tnl_info status back to TNL_INIT state
+ * so that it can be added to TOPS again
+ */
+ spin_lock(&tnl_info->lock);
+
+ tnl_info_sta_init_no_tnl_lock(tnl_info);
+ list_del_init(&tnl_info->sync_node);
+
+ spin_unlock(&tnl_info->lock);
+ }
+
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+}
+
+void mtk_tops_tnl_offload_recover(void)
+{
+ struct tops_tnl_info *tnl_info;
+ unsigned long flag;
+ u32 bkt;
+
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist)
+ mtk_tops_tnl_info_submit(tnl_info);
+
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+}
+
+int mtk_tops_tnl_offload_init(struct platform_device *pdev)
+{
+ struct tops_tnl_info *tnl_info;
+ int ret = 0;
+ int i = 0;
+
+ hash_init(tops_tnl.ht);
+
+ tops_tnl.tnl_infos = devm_kzalloc(&pdev->dev,
+ sizeof(struct tops_tnl_info) * CONFIG_TOPS_TNL_NUM,
+ GFP_KERNEL);
+ if (!tops_tnl.tnl_infos)
+ return -ENOMEM;
+
+ for (i = 0; i < CONFIG_TOPS_TNL_NUM; i++) {
+ tnl_info = &tops_tnl.tnl_infos[i];
+ tnl_info->tnl_idx = i;
+ tnl_info->status = TNL_STA_UNINIT;
+ INIT_HLIST_NODE(&tnl_info->hlist);
+ INIT_LIST_HEAD(&tnl_info->sync_node);
+ spin_lock_init(&tnl_info->lock);
+ }
+
+ ret = register_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
+ if (ret) {
+ TOPS_ERR("tnl offload recv dev register failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ init_completion(&tops_tnl.dma_done);
+ init_waitqueue_head(&tops_tnl.tnl_sync_wait);
+
+ tops_tnl.tnl_sync_thread = kthread_run(tnl_sync_task, NULL,
+ "tnl sync param task");
+ if (IS_ERR(tops_tnl.tnl_sync_thread)) {
+ TOPS_ERR("tnl sync thread create failed\n");
+ ret = PTR_ERR(tops_tnl.tnl_sync_thread);
+ goto unregister_mbox;
+ }
+
+ mtk_tnl_encap_offload = mtk_tops_tnl_encap_offload;
+ mtk_tnl_decap_offload = mtk_tops_tnl_decap_offload;
+ mtk_tnl_decap_offloadable = mtk_tops_tnl_decap_offloadable;
+ mtk_get_tnl_dev = mtk_tops_get_tnl_dev;
+
+ tops_tnl.tnl_sync_submit = &tnl_sync_q1;
+ tops_tnl.tnl_sync_pending = &tnl_sync_q2;
+ spin_lock_init(&tops_tnl.tnl_sync_lock);
+ spin_lock_init(&tops_tnl.tbl_lock);
+
+ return 0;
+
+unregister_mbox:
+ unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
+
+ return ret;
+}
+
+void mtk_tops_tnl_offload_pce_clean_up(void)
+{
+ struct tops_tnl_info *tnl_info;
+ unsigned long flag;
+ u32 bkt;
+
+ spin_lock_irqsave(&tops_tnl.tbl_lock, flag);
+
+ hash_for_each(tops_tnl.ht, bkt, tnl_info, hlist) {
+ mtk_tops_tnl_info_flush_ppe(tnl_info);
+
+ mtk_tops_tnl_info_dipfilter_tear_down(tnl_info);
+ }
+
+ spin_unlock_irqrestore(&tops_tnl.tbl_lock, flag);
+}
+
+void mtk_tops_tnl_offload_deinit(struct platform_device *pdev)
+{
+ mtk_tnl_encap_offload = NULL;
+ mtk_tnl_decap_offload = NULL;
+ mtk_tnl_decap_offloadable = NULL;
+ mtk_get_tnl_dev = NULL;
+
+ kthread_stop(tops_tnl.tnl_sync_thread);
+
+ mtk_tops_tnl_offload_pce_clean_up();
+
+ unregister_mbox_dev(MBOX_RECV, &tnl_offload_mbox_recv);
+}
+
+int mtk_tops_tnl_offload_proto_setup(struct platform_device *pdev)
+{
+ mtk_tops_gretap_init();
+
+ mtk_tops_udp_l2tp_data_init();
+
+ return 0;
+}
+
+void mtk_tops_tnl_offload_proto_teardown(struct platform_device *pdev)
+{
+ mtk_tops_gretap_deinit();
+
+ mtk_tops_udp_l2tp_data_deinit();
+}
+
+struct tops_tnl_type *mtk_tops_tnl_type_get_by_name(const char *name)
+{
+ enum tops_entry_type tops_entry = TOPS_ENTRY_NONE + 1;
+ struct tops_tnl_type *tnl_type;
+
+ if (unlikely(!name))
+ return ERR_PTR(-EPERM);
+
+ for (; tops_entry < __TOPS_ENTRY_MAX; tops_entry++) {
+ tnl_type = tops_tnl.offload_tnl_types[tops_entry];
+ if (tnl_type && !strcmp(name, tnl_type->type_name))
+ return tnl_type;
+ }
+
+ /* no registered tunnel type matches the given name */
+ return NULL;
+}
+
+int mtk_tops_tnl_type_register(struct tops_tnl_type *tnl_type)
+{
+ enum tops_entry_type tops_entry;
+
+ /* check the pointer before dereferencing it */
+ if (unlikely(!tnl_type))
+ return -EINVAL;
+
+ tops_entry = tnl_type->tops_entry;
+
+ if (unlikely(tops_entry == TOPS_ENTRY_NONE
+ || tops_entry >= __TOPS_ENTRY_MAX)) {
+ TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
+ return -EINVAL;
+ }
+
+ if (tops_tnl.offload_tnl_types[tops_entry]) {
+ TOPS_ERR("offload tnl type is already registered: %u\n", tops_entry);
+ return -EBUSY;
+ }
+
+ tops_tnl.offload_tnl_types[tops_entry] = tnl_type;
+ tops_tnl.offload_tnl_type_num++;
+
+ return 0;
+}
+
+void mtk_tops_tnl_type_unregister(struct tops_tnl_type *tnl_type)
+{
+ enum tops_entry_type tops_entry;
+
+ /* check the pointer before dereferencing it */
+ if (unlikely(!tnl_type))
+ return;
+
+ tops_entry = tnl_type->tops_entry;
+
+ if (unlikely(tops_entry == TOPS_ENTRY_NONE
+ || tops_entry >= __TOPS_ENTRY_MAX)) {
+ TOPS_ERR("invalid tops_entry: %u\n", tops_entry);
+ return;
+ }
+
+ if (tops_tnl.offload_tnl_types[tops_entry] != tnl_type) {
+ TOPS_ERR("offload tnl type is registered by others\n");
+ return;
+ }
+
+ tops_tnl.offload_tnl_types[tops_entry] = NULL;
+ tops_tnl.offload_tnl_type_num--;
+}
diff --git a/package-21.02/kernel/tops/src/trm-fs.c b/package-21.02/kernel/tops/src/trm-fs.c
new file mode 100644
index 0000000..f2bd9bf
--- /dev/null
+++ b/package-21.02/kernel/tops/src/trm-fs.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ * Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/relay.h>
+
+#include "trm-fs.h"
+#include "trm-mcu.h"
+#include "trm.h"
+
+#define RLY_RETRY_NUM 3
+
+static struct dentry *debugfs_dir;
+static struct rchan *relay;
+static bool trm_fs_is_init;
+
+bool mtk_trm_fs_is_init(void)
+{
+ return trm_fs_is_init;
+}
+
+void *mtk_trm_fs_relay_reserve(u32 size)
+{
+ u32 rty = 0;
+ void *dst;
+
+ while (rty++ < RLY_RETRY_NUM) {
+ dst = relay_reserve(relay, size);
+ if (likely(dst))
+ return dst;
+
+ /* give the relay reader a chance to consume data before retrying */
+ msleep(100);
+ }
+
+ return ERR_PTR(-ENOMEM);
+}
+
+void mtk_trm_fs_relay_flush(void)
+{
+ relay_flush(relay);
+}
+
+static struct dentry *trm_fs_create_buf_file_cb(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *debugfs_file;
+
+ debugfs_file = debugfs_create_file("dump_data", mode,
+ parent, buf,
+ &relay_file_operations);
+
+ *is_global = 1;
+
+ return debugfs_file;
+}
+
+static int trm_fs_remove_buf_file_cb(struct dentry *debugfs_file)
+{
+ debugfs_remove(debugfs_file);
+
+ return 0;
+}
+
+int mtk_trm_fs_init(void)
+{
+ static struct rchan_callbacks relay_cb = {
+ .create_buf_file = trm_fs_create_buf_file_cb,
+ .remove_buf_file = trm_fs_remove_buf_file_cb,
+ };
+ int ret = 0;
+
+ if (!debugfs_dir) {
+ debugfs_dir = debugfs_create_dir("tops", NULL);
+ if (IS_ERR(debugfs_dir)) {
+ ret = PTR_ERR(debugfs_dir);
+ goto out;
+ }
+ }
+
+ if (!relay) {
+ relay = relay_open("dump_data", debugfs_dir,
+ RLY_DUMP_SUBBUF_SZ,
+ RLY_DUMP_SUBBUF_NUM,
+ &relay_cb, NULL);
+ if (!relay) {
+ ret = -EINVAL;
+ goto err_debugfs_remove;
+ }
+ }
+
+ relay_reset(relay);
+
+ trm_fs_is_init = true;
+
+out:
+ return ret;
+
+err_debugfs_remove:
+ trm_fs_is_init = false;
+
+ debugfs_remove(debugfs_dir);
+
+ debugfs_dir = NULL;
+
+ return ret;
+}
+
+void mtk_trm_fs_deinit(void)
+{
+ trm_fs_is_init = false;
+
+ relay_close(relay);
+
+ debugfs_remove(debugfs_dir);
+}
diff --git a/package-21.02/kernel/tops/src/trm-mcu.c b/package-21.02/kernel/tops/src/trm-mcu.c
new file mode 100644
index 0000000..4550122
--- /dev/null
+++ b/package-21.02/kernel/tops/src/trm-mcu.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ * Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include "internal.h"
+#include "mcu.h"
+#include "trm-fs.h"
+#include "trm-mcu.h"
+#include "trm.h"
+
+#define TOPS_OCD_RETRY_TIMES (3)
+
+#define TOPS_OCD_DCRSET (0x200C)
+#define ENABLE_OCD (1 << 0)
+#define DEBUG_INT (1 << 1)
+
+#define TOPS_OCD_DSR (0x2010)
+#define EXEC_DONE (1 << 0)
+#define EXEC_EXCE (1 << 1)
+#define EXEC_BUSY (1 << 2)
+#define STOPPED (1 << 4)
+#define DEBUG_PEND_HOST (1 << 17)
+
+#define TOPS_OCD_DDR (0x2014)
+
+#define TOPS_OCD_DIR0EXEC (0x201C)
+
+struct tops_ocd_dev {
+ void __iomem *base;
+ u32 base_offset;
+ struct clk *debugsys_clk;
+};
+
+static struct tops_ocd_dev ocd;
+
+struct core_dump_fram cd_frams[CORE_TOPS_NUM];
+
+static inline void ocd_write(struct tops_ocd_dev *ocd, u32 reg, u32 val)
+{
+ writel(val, ocd->base + ocd->base_offset + reg);
+}
+
+static inline u32 ocd_read(struct tops_ocd_dev *ocd, u32 reg)
+{
+ return readl(ocd->base + ocd->base_offset + reg);
+}
+
+static inline void ocd_set(struct tops_ocd_dev *ocd, u32 reg, u32 mask)
+{
+ setbits(ocd->base + ocd->base_offset + reg, mask);
+}
+
+static inline void ocd_clr(struct tops_ocd_dev *ocd, u32 reg, u32 mask)
+{
+ clrbits(ocd->base + ocd->base_offset + reg, mask);
+}
+
+static int core_exec_instr(u32 instr)
+{
+ u32 rty = 0;
+ int ret;
+
+ ocd_set(&ocd, TOPS_OCD_DSR, EXEC_DONE);
+ ocd_set(&ocd, TOPS_OCD_DSR, EXEC_EXCE);
+
+ ocd_write(&ocd, TOPS_OCD_DIR0EXEC, instr);
+
+ while ((ocd_read(&ocd, TOPS_OCD_DSR) & EXEC_BUSY)) {
+ if (rty++ < TOPS_OCD_RETRY_TIMES) {
+ usleep_range(1000, 1500);
+ } else {
+ TRM_ERR("run instruction(0x%x) timeout\n", instr);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ ret = ocd_read(&ocd, TOPS_OCD_DSR) & EXEC_EXCE ? -1 : 0;
+ if (ret)
+ TRM_ERR("run instruction(0x%x) fail\n", instr);
+
+out:
+ return ret;
+}
+
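+/*
+ * Dump the stopped core's registers through OCD: each debug instruction moves
+ * a register into the Debug Data Register (TOPS_OCD_DDR), which is then read
+ * back into the core dump frame.
+ */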
+static int core_dump(struct core_dump_fram *cd_fram)
+{
+ cd_fram->magic = CORE_DUMP_FRAM_MAGIC;
+ cd_fram->num_areg = XCHAL_NUM_AREG;
+
+ /*
+ * save
+ * PC, PS, WINDOWSTART, WINDOWBASE,
+ * EPC1, EXCCAUSE, EXCVADDR, EXCSAVE1
+ */
+ core_exec_instr(0x13f500);
+
+ core_exec_instr(0x03b500);
+ core_exec_instr(0x136800);
+ cd_fram->pc = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x03e600);
+ core_exec_instr(0x136800);
+ cd_fram->ps = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x034900);
+ core_exec_instr(0x136800);
+ cd_fram->windowstart = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x034800);
+ core_exec_instr(0x136800);
+ cd_fram->windowbase = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x03b100);
+ core_exec_instr(0x136800);
+ cd_fram->epc1 = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x03e800);
+ core_exec_instr(0x136800);
+ cd_fram->exccause = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x03ee00);
+ core_exec_instr(0x136800);
+ cd_fram->excvaddr = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x03d100);
+ core_exec_instr(0x136800);
+ cd_fram->excsave1 = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x03f500);
+
+ /*
+ * save
+ * a0, a1, a2, a3, a4, a5, a6, a7
+ */
+ core_exec_instr(0x136800);
+ cd_fram->areg[0] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136810);
+ cd_fram->areg[1] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136820);
+ cd_fram->areg[2] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136830);
+ cd_fram->areg[3] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136840);
+ cd_fram->areg[4] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136850);
+ cd_fram->areg[5] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136860);
+ cd_fram->areg[6] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136870);
+ cd_fram->areg[7] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ /*
+ * save
+ * a8, a9, a10, a11, a12, a13, a14, a15
+ */
+ core_exec_instr(0x136880);
+ cd_fram->areg[8] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136890);
+ cd_fram->areg[9] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368a0);
+ cd_fram->areg[10] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368b0);
+ cd_fram->areg[11] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368c0);
+ cd_fram->areg[12] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368d0);
+ cd_fram->areg[13] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368e0);
+ cd_fram->areg[14] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368f0);
+ cd_fram->areg[15] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x408020);
+
+ /*
+ * save
+ * a16, a17, a18, a19, a20, a21, a22, a23
+ */
+ core_exec_instr(0x136880);
+ cd_fram->areg[16] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136890);
+ cd_fram->areg[17] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368a0);
+ cd_fram->areg[18] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368b0);
+ cd_fram->areg[19] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368c0);
+ cd_fram->areg[20] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368d0);
+ cd_fram->areg[21] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368e0);
+ cd_fram->areg[22] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368f0);
+ cd_fram->areg[23] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x408020);
+
+ /*
+ * save
+ * a24, a25, a26, a27, a28, a29, a30, a31
+ */
+ core_exec_instr(0x136880);
+ cd_fram->areg[24] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x136890);
+ cd_fram->areg[25] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368a0);
+ cd_fram->areg[26] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368b0);
+ cd_fram->areg[27] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368c0);
+ cd_fram->areg[28] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368d0);
+ cd_fram->areg[29] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368e0);
+ cd_fram->areg[30] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x1368f0);
+ cd_fram->areg[31] = ocd_read(&ocd, TOPS_OCD_DDR);
+
+ core_exec_instr(0x408040);
+
+ core_exec_instr(0xf1e000);
+
+ return 0;
+}
+
+static int __mtk_trm_mcu_core_dump(enum core_id core)
+{
+ u32 rty = 0;
+ int ret;
+
+ ocd.base_offset = (core == CORE_MGMT) ? (0x0) : (0x5000 + (core * 0x4000));
+
+ /* enable OCD */
+ ocd_set(&ocd, TOPS_OCD_DCRSET, ENABLE_OCD);
+
+ /* assert debug interrupt to core */
+ ocd_set(&ocd, TOPS_OCD_DCRSET, DEBUG_INT);
+
+ /* wait core into stopped state */
+ while (!(ocd_read(&ocd, TOPS_OCD_DSR) & STOPPED)) {
+ if (rty++ < TOPS_OCD_RETRY_TIMES) {
+ usleep_range(10000, 15000);
+ } else {
+ TRM_ERR("wait core(%d) into stopped state timeout\n", core);
+ ret = -1;
+ goto out;
+ }
+ }
+
+ /* deassert debug interrupt to core */
+ ocd_set(&ocd, TOPS_OCD_DSR, DEBUG_PEND_HOST);
+
+ /* dump core's registers and let core into running state */
+ ret = core_dump(&cd_frams[core]);
+
+out:
+ return ret;
+}
+
+int mtk_trm_mcu_core_dump(void)
+{
+ enum core_id core;
+ int ret;
+
+ ret = clk_prepare_enable(ocd.debugsys_clk);
+ if (ret) {
+ TRM_ERR("debugsys clk enable failed: %d\n", ret);
+ goto out;
+ }
+
+ memset(cd_frams, 0, sizeof(cd_frams));
+
+ for (core = CORE_OFFLOAD_0; core <= CORE_MGMT; core++) {
+ ret = __mtk_trm_mcu_core_dump(core);
+ if (ret)
+ break;
+ }
+
+ clk_disable_unprepare(ocd.debugsys_clk);
+
+out:
+ return ret;
+}
+
+static int mtk_tops_ocd_probe(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ int ret;
+
+ trm_dev = &pdev->dev;
+
+ ocd.debugsys_clk = devm_clk_get(trm_dev, "debugsys");
+ if (IS_ERR(ocd.debugsys_clk)) {
+ TRM_ERR("get debugsys clk failed: %ld\n", PTR_ERR(ocd.debugsys_clk));
+ return PTR_ERR(ocd.debugsys_clk);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-ocd-base");
+ if (!res)
+ return -ENXIO;
+
+ ocd.base = devm_ioremap(trm_dev, res->start, resource_size(res));
+ if (!ocd.base)
+ return -ENOMEM;
+
+ ret = mtk_trm_fs_init();
+ if (ret)
+ return ret;
+
+ TRM_INFO("tops-ocd init done\n");
+
+ return 0;
+}
+
+static int mtk_tops_ocd_remove(struct platform_device *pdev)
+{
+ mtk_trm_fs_deinit();
+
+ return 0;
+}
+
+static struct of_device_id mtk_tops_ocd_match[] = {
+ { .compatible = "mediatek,tops-ocd", },
+ { },
+};
+
+static struct platform_driver mtk_tops_ocd_driver = {
+ .probe = mtk_tops_ocd_probe,
+ .remove = mtk_tops_ocd_remove,
+ .driver = {
+ .name = "mediatek,tops-ocd",
+ .owner = THIS_MODULE,
+ .of_match_table = mtk_tops_ocd_match,
+ },
+};
+
+int __init mtk_tops_trm_mcu_init(void)
+{
+ return platform_driver_register(&mtk_tops_ocd_driver);
+}
+
+void __exit mtk_tops_trm_mcu_exit(void)
+{
+ platform_driver_unregister(&mtk_tops_ocd_driver);
+}
diff --git a/package-21.02/kernel/tops/src/trm.c b/package-21.02/kernel/tops/src/trm.c
new file mode 100644
index 0000000..11991cf
--- /dev/null
+++ b/package-21.02/kernel/tops/src/trm.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>
+ * Ren-Ting Wang <ren-ting.wang@mediatek.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/relay.h>
+#include <linux/types.h>
+
+#include "mbox.h"
+#include "mcu.h"
+#include "netsys.h"
+#include "trm-fs.h"
+#include "trm-mcu.h"
+#include "trm.h"
+
+#define TRM_HDR_LEN (sizeof(struct trm_header))
+
+#define RLY_DUMP_SUBBUF_DATA_MAX (RLY_DUMP_SUBBUF_SZ - TRM_HDR_LEN)
+
+struct trm_info {
+ char name[TRM_CONFIG_NAME_MAX_LEN];
+ u64 dump_time;
+ u32 start_addr;
+ u32 size;
+ u32 rsn; /* TRM_RSN_* */
+};
+
+struct trm_header {
+ struct trm_info info;
+ u32 data_offset;
+ u32 data_len;
+ u8 last_frag;
+};
+
+struct device *trm_dev;
+
+static struct trm_hw_config *trm_hw_configs[__TRM_HARDWARE_MAX];
+struct mutex trm_lock;
+
+static inline void trm_hdr_init(struct trm_header *trm_hdr,
+ struct trm_config *trm_cfg,
+ u32 size,
+ u64 dump_time,
+ u32 dump_rsn)
+{
+ if (unlikely(!trm_hdr || !trm_cfg))
+ return;
+
+ memset(trm_hdr, 0, TRM_HDR_LEN);
+
+ strncpy(trm_hdr->info.name, trm_cfg->name, TRM_CONFIG_NAME_MAX_LEN);
+ trm_hdr->info.start_addr = trm_cfg->addr + trm_cfg->offset;
+ trm_hdr->info.size = size;
+ trm_hdr->info.dump_time = dump_time;
+ trm_hdr->info.rsn = dump_rsn;
+}
+
+static inline int trm_cfg_sanity_check(struct trm_config *trm_cfg)
+{
+ u32 start = trm_cfg->addr + trm_cfg->offset;
+ u32 end = start + trm_cfg->size;
+
+ if (start < trm_cfg->addr || end > trm_cfg->addr + trm_cfg->len)
+ return -1;
+
+ return 0;
+}
+
+static inline bool trm_cfg_is_core_dump_en(struct trm_config *trm_cfg)
+{
+ return trm_cfg->flag & TRM_CONFIG_F_CORE_DUMP;
+}
+
+static inline bool trm_cfg_is_en(struct trm_config *trm_cfg)
+{
+ return trm_cfg->flag & TRM_CONFIG_F_ENABLE;
+}
+
+static inline int __mtk_trm_cfg_setup(struct trm_config *trm_cfg,
+ u32 offset, u32 size, u8 enable)
+{
+ struct trm_config tmp = { 0 };
+
+ if (!enable) {
+ trm_cfg->flag &= ~TRM_CONFIG_F_ENABLE;
+ } else {
+ tmp.addr = trm_cfg->addr;
+ tmp.len = trm_cfg->len;
+ tmp.offset = offset;
+ tmp.size = size;
+
+ if (trm_cfg_sanity_check(&tmp))
+ return -EINVAL;
+
+ trm_cfg->offset = offset;
+ trm_cfg->size = size;
+ trm_cfg->flag |= TRM_CONFIG_F_ENABLE;
+ }
+
+ return 0;
+}
+
+int mtk_trm_cfg_setup(char *name, u32 offset, u32 size, u8 enable)
+{
+ struct trm_hw_config *trm_hw_cfg;
+ struct trm_config *trm_cfg;
+ int ret = 0;
+ u32 i, j;
+
+ for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
+ trm_hw_cfg = trm_hw_configs[i];
+ if (unlikely(!trm_hw_cfg))
+ continue;
+
+ for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
+ trm_cfg = &trm_hw_cfg->trm_cfgs[j];
+ if (unlikely(!trm_cfg))
+ continue;
+
+ if (!strncmp(trm_cfg->name, name, strlen(name))) {
+ mutex_lock(&trm_lock);
+
+ ret = __mtk_trm_cfg_setup(trm_cfg,
+ offset,
+ size,
+ enable);
+
+ mutex_unlock(&trm_lock);
+ }
+ }
+ }
+
+ return ret;
+}
+
+/* append the core dump (via OCD) at the bottom of the core-x-dtcm file */
+static inline void __mtk_trm_save_core_dump(struct trm_config *trm_cfg,
+ void *dst,
+ u32 *frag_len)
+{
+ *frag_len -= CORE_DUMP_FRAME_LEN;
+ memcpy(dst + *frag_len, &cd_frams[trm_cfg->core], CORE_DUMP_FRAME_LEN);
+}
+
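+/*
+ * Split one TRM region dump into relay sub-buffer sized fragments; each
+ * fragment is prefixed with a trm_header describing its offset and length,
+ * and the TRM hardware writes the memory contents right after the header.
+ */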
+static int __mtk_trm_dump(struct trm_hw_config *trm_hw_cfg,
+ struct trm_config *trm_cfg,
+ u64 dump_time,
+ u32 dump_rsn)
+{
+ struct trm_header trm_hdr;
+ u32 total = trm_cfg->size;
+ u32 i = 0;
+ u32 frag_len;
+ u32 ofs;
+ void *dst;
+
+ /* reserve core dump frame len if core dump enabled */
+ if (trm_cfg_is_core_dump_en(trm_cfg))
+ total += CORE_DUMP_FRAME_LEN;
+
+ /* fill in trm information */
+ trm_hdr_init(&trm_hdr, trm_cfg, total, dump_time, dump_rsn);
+
+ while (total > 0) {
+ if (total >= RLY_DUMP_SUBBUF_DATA_MAX) {
+ frag_len = RLY_DUMP_SUBBUF_DATA_MAX;
+ total -= RLY_DUMP_SUBBUF_DATA_MAX;
+ } else {
+ frag_len = total;
+ total -= total;
+ trm_hdr.last_frag = true;
+ }
+
+ trm_hdr.data_offset = i++ * RLY_DUMP_SUBBUF_DATA_MAX;
+ trm_hdr.data_len = frag_len;
+
+ dst = mtk_trm_fs_relay_reserve(frag_len + TRM_HDR_LEN);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ memcpy(dst, &trm_hdr, TRM_HDR_LEN);
+ dst += TRM_HDR_LEN;
+
+ /* TODO: what if the core dump is split across two fragments? */
+ if (trm_hdr.last_frag && trm_cfg_is_core_dump_en(trm_cfg))
+ __mtk_trm_save_core_dump(trm_cfg, dst, &frag_len);
+
+ ofs = trm_hdr.info.start_addr + trm_hdr.data_offset;
+
+ /* let TRM HW write memory to destination */
+ trm_hw_cfg->trm_hw_dump(dst, ofs, frag_len);
+
+ mtk_trm_fs_relay_flush();
+ }
+
+ return 0;
+}
+
+int mtk_trm_dump(u32 rsn)
+{
+ u64 time = ktime_to_ns(ktime_get_real()) / 1000000000;
+ struct trm_hw_config *trm_hw_cfg;
+ struct trm_config *trm_cfg;
+ int ret = 0;
+ u32 i, j;
+
+ if (!mtk_trm_fs_is_init())
+ return -EINVAL;
+
+ mutex_lock(&trm_lock);
+
+ mtk_trm_mcu_core_dump();
+
+ for (i = 0; i < __TRM_HARDWARE_MAX; i++) {
+ trm_hw_cfg = trm_hw_configs[i];
+ if (unlikely(!trm_hw_cfg || !trm_hw_cfg->trm_hw_dump))
+ continue;
+
+ for (j = 0; j < trm_hw_cfg->cfg_len; j++) {
+ trm_cfg = &trm_hw_cfg->trm_cfgs[j];
+ if (unlikely(!trm_cfg || !trm_cfg_is_en(trm_cfg)))
+ continue;
+
+ if (unlikely(trm_cfg_sanity_check(trm_cfg))) {
+ TRM_ERR("trm %s: sanity check fail\n", trm_cfg->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = __mtk_trm_dump(trm_hw_cfg, trm_cfg, time, rsn);
+ if (ret) {
+ TRM_ERR("trm %s: trm dump fail: %d\n",
+ trm_cfg->name, ret);
+ goto out;
+ }
+ }
+ }
+
+ TRM_NOTICE("TOPS runtime monitor dump\n");
+
+out:
+ mutex_unlock(&trm_lock);
+
+ return ret;
+}
+
+int __init mtk_tops_trm_init(void)
+{
+ mutex_init(&trm_lock);
+
+ return mtk_tops_trm_mcu_init();
+}
+
+void __exit mtk_tops_trm_exit(void)
+{
+ mtk_tops_trm_mcu_exit();
+}
+
+int mtk_trm_hw_config_register(enum trm_hardware trm_hw,
+ struct trm_hw_config *trm_hw_cfg)
+{
+ if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
+ return -ENODEV;
+
+ if (unlikely(!trm_hw_cfg->cfg_len || !trm_hw_cfg->trm_hw_dump))
+ return -EINVAL;
+
+ if (trm_hw_configs[trm_hw])
+ return -EBUSY;
+
+ trm_hw_configs[trm_hw] = trm_hw_cfg;
+
+ return 0;
+}
+
+void mtk_trm_hw_config_unregister(enum trm_hardware trm_hw,
+ struct trm_hw_config *trm_hw_cfg)
+{
+ if (unlikely(trm_hw >= __TRM_HARDWARE_MAX || !trm_hw_cfg))
+ return;
+
+ if (trm_hw_configs[trm_hw] != trm_hw_cfg)
+ return;
+
+ trm_hw_configs[trm_hw] = NULL;
+}
diff --git a/package-21.02/kernel/tops/src/wdt.c b/package-21.02/kernel/tops/src/wdt.c
new file mode 100644
index 0000000..5d450c5
--- /dev/null
+++ b/package-21.02/kernel/tops/src/wdt.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 MediaTek Inc. All Rights Reserved.
+ *
+ * Author: Alvin Kuo <alvin.kuog@mediatek.com>,
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+
+#include "internal.h"
+#include "mbox.h"
+#include "ser.h"
+#include "trm.h"
+#include "wdt.h"
+
+#define WDT_IRQ_STATUS 0x0140B0
+#define TOP_WDT_MODE 0x012000
+#define CLUST_WDT_MODE(x) (0x512000 + 0x100 * (x))
+
+#define WDT_MODE_KEY 0x22000000
+
+struct watchdog_hw {
+ void __iomem *base;
+ struct mailbox_dev mgmt_mdev;
+ struct mailbox_dev offload_mdev[CORE_OFFLOAD_NUM];
+};
+
+static struct watchdog_hw wdt = {
+ .mgmt_mdev = MBOX_SEND_MGMT_DEV(WDT),
+ .offload_mdev = {
+ [CORE_OFFLOAD_0] = MBOX_SEND_OFFLOAD_DEV(0, WDT),
+ [CORE_OFFLOAD_1] = MBOX_SEND_OFFLOAD_DEV(1, WDT),
+ [CORE_OFFLOAD_2] = MBOX_SEND_OFFLOAD_DEV(2, WDT),
+ [CORE_OFFLOAD_3] = MBOX_SEND_OFFLOAD_DEV(3, WDT),
+ },
+};
+
+static inline void wdt_write(u32 reg, u32 val)
+{
+ writel(val, wdt.base + reg);
+}
+
+static inline void wdt_set(u32 reg, u32 mask)
+{
+ setbits(wdt.base + reg, mask);
+}
+
+static inline void wdt_clr(u32 reg, u32 mask)
+{
+ clrbits(wdt.base + reg, mask);
+}
+
+static inline void wdt_rmw(u32 reg, u32 mask, u32 val)
+{
+ clrsetbits(wdt.base + reg, mask, val);
+}
+
+static inline u32 wdt_read(u32 reg)
+{
+ return readl(wdt.base + reg);
+}
+
+static inline void wdt_irq_clr(u32 wdt_mode_reg)
+{
+ wdt_set(wdt_mode_reg, WDT_MODE_KEY);
+}
+
+static void wdt_ser_callback(struct tops_ser_params *ser_params)
+{
+ WARN_ON(ser_params->type != TOPS_SER_WDT_TO);
+
+ mtk_trm_dump(ser_params->data.wdt.timeout_cores);
+}
+
+static void wdt_ser_mcmd_setup(struct tops_ser_params *ser_params,
+ struct mcu_ctrl_cmd *mcmd)
+{
+ mcmd->core_mask = (~ser_params->data.wdt.timeout_cores) & CORE_TOPS_MASK;
+}
+
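+/*
+ * WDT timeout ISR: read which cores timed out, kick off SER handling,
+ * then clear the per-core WDT interrupts.
+ */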
+static irqreturn_t wdt_irq_handler(int irq, void *dev_id)
+{
+ struct tops_ser_params ser_params = {
+ .type = TOPS_SER_WDT_TO,
+ .ser_callback = wdt_ser_callback,
+ .ser_mcmd_setup = wdt_ser_mcmd_setup,
+ };
+ u32 status;
+ u32 i;
+
+ status = wdt_read(WDT_IRQ_STATUS);
+ if (status) {
+ ser_params.data.wdt.timeout_cores = status;
+ mtk_tops_ser(&ser_params);
+
+ /* clear wdt irq */
+ if (status & BIT(CORE_MGMT))
+ wdt_irq_clr(TOP_WDT_MODE);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++)
+ if (status & BIT(i))
+ wdt_irq_clr(CLUST_WDT_MODE(i));
+
+ /* only log when at least one core actually timed out */
+ TOPS_ERR("WDT Timeout: 0x%x\n", status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int mtk_tops_wdt_trigger_timeout(enum core_id core)
+{
+ struct mailbox_msg msg = {
+ .msg1 = WDT_CMD_TRIGGER_TIMEOUT,
+ };
+
+ if (core == CORE_MGMT)
+ mbox_send_msg_no_wait(&wdt.mgmt_mdev, &msg);
+ else
+ mbox_send_msg_no_wait(&wdt.offload_mdev[core], &msg);
+
+ return 0;
+}
+
+static int mtk_tops_wdt_register_mbox(void)
+{
+ int ret;
+ int i;
+
+ ret = register_mbox_dev(MBOX_SEND, &wdt.mgmt_mdev);
+ if (ret) {
+ TOPS_ERR("register wdt mgmt mbox send failed: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++) {
+ ret = register_mbox_dev(MBOX_SEND, &wdt.offload_mdev[i]);
+ if (ret) {
+ TOPS_ERR("register wdt offload %d mbox send failed: %d\n",
+ i, ret);
+ goto err_unregister_offload_mbox;
+ }
+ }
+
+ return ret;
+
+err_unregister_offload_mbox:
+ for (i -= 1; i >= 0; i--)
+ unregister_mbox_dev(MBOX_SEND, &wdt.offload_mdev[i]);
+
+ unregister_mbox_dev(MBOX_SEND, &wdt.mgmt_mdev);
+
+ return ret;
+}
+
+static void mtk_tops_wdt_unregister_mbox(void)
+{
+ int i;
+
+ unregister_mbox_dev(MBOX_SEND, &wdt.mgmt_mdev);
+
+ for (i = 0; i < CORE_OFFLOAD_NUM; i++)
+ unregister_mbox_dev(MBOX_SEND, &wdt.offload_mdev[i]);
+}
+
+int mtk_tops_wdt_init(struct platform_device *pdev)
+{
+ struct resource *res = NULL;
+ int ret;
+ int irq;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tops-base");
+ if (!res)
+ return -ENXIO;
+
+ wdt.base = devm_ioremap(tops_dev, res->start, resource_size(res));
+ if (!wdt.base)
+ return -ENOMEM;
+
+ irq = platform_get_irq_byname(pdev, "wdt");
+ if (irq < 0) {
+ TOPS_ERR("get wdt irq failed\n");
+ return irq;
+ }
+
+ ret = devm_request_irq(tops_dev, irq,
+ wdt_irq_handler,
+ IRQF_ONESHOT,
+ pdev->name, NULL);
+ if (ret) {
+ TOPS_ERR("request wdt irq failed\n");
+ return ret;
+ }
+
+ ret = mtk_tops_wdt_register_mbox();
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+int mtk_tops_wdt_deinit(struct platform_device *pdev)
+{
+ mtk_tops_wdt_unregister_mbox();
+
+ return 0;
+}
diff --git a/target/linux/mediatek/patches-5.4/999-4100-mtk-tops-tunnel-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4100-mtk-tops-tunnel-offload-support.patch
new file mode 100644
index 0000000..a4a6d95
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-4100-mtk-tops-tunnel-offload-support.patch
@@ -0,0 +1,450 @@
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -245,6 +245,9 @@ static const char * const mtk_clks_sourc
+ "top_netsys_warp_sel",
+ };
+
++struct net_device *(*mtk_get_tnl_dev)(int tnl_idx) = NULL;
++EXPORT_SYMBOL(mtk_get_tnl_dev);
++
+ void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+ {
+ __raw_writel(val, eth->base + reg);
+@@ -2089,6 +2092,7 @@ static int mtk_poll_rx(struct napi_struc
+ u64 addr64 = 0;
+ u8 *data, *new_data;
+ struct mtk_rx_dma_v2 *rxd, trxd;
++ int tnl_idx = 0;
+ int done = 0;
+
+ if (unlikely(!ring))
+@@ -2132,11 +2136,20 @@ static int mtk_poll_rx(struct napi_struc
+ 0 : RX_DMA_GET_SPORT(trxd.rxd4) - 1;
+ }
+
+- if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
+- !eth->netdev[mac]))
+- goto release_desc;
++ tnl_idx = RX_DMA_GET_TOPS_CRSN(trxd.rxd6);
++ if (mtk_get_tnl_dev && tnl_idx) {
++ netdev = mtk_get_tnl_dev(tnl_idx);
++ if (unlikely(IS_ERR(netdev)))
++ netdev = NULL;
++ }
+
+- netdev = eth->netdev[mac];
++ if (!netdev) {
++ if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
++ !eth->netdev[mac]))
++ goto release_desc;
++
++ netdev = eth->netdev[mac];
++ }
+
+ if (unlikely(test_bit(MTK_RESETTING, ð->state)))
+ goto release_desc;
+@@ -2221,6 +2234,8 @@ static int mtk_poll_rx(struct napi_struc
+ skb_hnat_alg(skb) = 0;
+ skb_hnat_filled(skb) = 0;
+ skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
++ skb_hnat_set_tops(skb, 0);
++ skb_hnat_set_is_decap(skb, 0);
+
+ if (skb_hnat_reason(skb) == HIT_BIND_FORCE_TO_CPU) {
+ trace_printk("[%s] reason=0x%x(force to CPU) from WAN to Ext\n",
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+@@ -43,6 +43,12 @@ void (*ppe_dev_register_hook)(struct net
+ EXPORT_SYMBOL(ppe_dev_register_hook);
+ void (*ppe_dev_unregister_hook)(struct net_device *dev) = NULL;
+ EXPORT_SYMBOL(ppe_dev_unregister_hook);
++int (*mtk_tnl_encap_offload)(struct sk_buff *skb) = NULL;
++EXPORT_SYMBOL(mtk_tnl_encap_offload);
++int (*mtk_tnl_decap_offload)(struct sk_buff *skb) = NULL;
++EXPORT_SYMBOL(mtk_tnl_decap_offload);
++bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb) = NULL;
++EXPORT_SYMBOL(mtk_tnl_decap_offloadable);
+
+ static void hnat_sma_build_entry(struct timer_list *t)
+ {
+@@ -53,6 +59,16 @@ static void hnat_sma_build_entry(struct
+ SMA, SMA_FWD_CPU_BUILD_ENTRY);
+ }
+
++struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index)
++{
++ if (index == 0x7fff || index >= hnat_priv->foe_etry_num
++ || ppe_id >= CFG_PPE_NUM)
++ return ERR_PTR(-EINVAL);
++
++ return &hnat_priv->foe_table_cpu[ppe_id][index];
++}
++EXPORT_SYMBOL(hnat_get_foe_entry);
++
+ void hnat_cache_ebl(int enable)
+ {
+ int i;
+@@ -63,6 +79,7 @@ void hnat_cache_ebl(int enable)
+ cr_set_field(hnat_priv->ppe_base[i] + PPE_CAH_CTRL, CAH_EN, enable);
+ }
+ }
++EXPORT_SYMBOL(hnat_cache_ebl);
+
+ static void hnat_reset_timestamp(struct timer_list *t)
+ {
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.h
+@@ -1085,6 +1085,8 @@ enum FoeIpAct {
+ #define NR_WDMA0_PORT 8
+ #define NR_WDMA1_PORT 9
+ #define NR_GMAC3_PORT 15
++#define NR_TDMA_TPORT 4
++#define NR_TDMA_QDMA_TPORT 5
+ #define LAN_DEV_NAME hnat_priv->lan
+ #define LAN2_DEV_NAME hnat_priv->lan2
+ #define IS_WAN(dev) \
+@@ -1208,6 +1210,8 @@ static inline bool hnat_dsa_is_enable(st
+ }
+ #endif
+
++struct foe_entry *hnat_get_foe_entry(u32 ppe_id, u32 index);
++
+ void hnat_deinit_debugfs(struct mtk_hnat *h);
+ int hnat_init_debugfs(struct mtk_hnat *h);
+ int hnat_register_nf_hooks(void);
+@@ -1224,6 +1228,9 @@ extern int qos_ul_toggle;
+ extern int hook_toggle;
+ extern int mape_toggle;
+ extern int qos_toggle;
++extern int (*mtk_tnl_encap_offload)(struct sk_buff *skb);
++extern int (*mtk_tnl_decap_offload)(struct sk_buff *skb);
++extern bool (*mtk_tnl_decap_offloadable)(struct sk_buff *skb);
+
+ int ext_if_add(struct extdev_entry *ext_entry);
+ int ext_if_del(struct extdev_entry *ext_entry);
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+@@ -726,10 +726,14 @@ static unsigned int is_ppe_support_type(
+ case ETH_P_IP:
+ iph = ip_hdr(skb);
+
+- /* do not accelerate non tcp/udp traffic */
+- if ((iph->protocol == IPPROTO_TCP) ||
++ if (mtk_tnl_decap_offloadable && mtk_tnl_decap_offloadable(skb)) {
++ /* tunnel protocol is offloadable */
++ skb_hnat_set_is_decap(skb, 1);
++ return 1;
++ } else if ((iph->protocol == IPPROTO_TCP) ||
+ (iph->protocol == IPPROTO_UDP) ||
+ (iph->protocol == IPPROTO_IPV6)) {
++ /* do not accelerate non tcp/udp traffic */
+ return 1;
+ }
+
+@@ -846,6 +850,13 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv,
+
+ hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+
++ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
++ && is_magic_tag_valid(skb)
++ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
++ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
++ return NF_ACCEPT;
++ }
++
+ /*
+ * Avoid mistakenly binding of outer IP, ports in SW L2TP decap flow.
+ * In pre-routing, if dev is virtual iface, TOPS module is not loaded,
+@@ -922,6 +933,13 @@ mtk_hnat_br_nf_local_in(void *priv, stru
+
+ hnat_set_head_frags(state, skb, -1, hnat_set_iif);
+
++ if (skb_hnat_tops(skb) && skb_hnat_is_decap(skb)
++ && is_magic_tag_valid(skb)
++ && skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
++ && mtk_tnl_decap_offload && mtk_tnl_decap_offload(skb)) {
++ return NF_ACCEPT;
++ }
++
+ pre_routing_print(skb, state->in, state->out, __func__);
+
+ if (unlikely(debug_level >= 7)) {
+@@ -1074,9 +1092,22 @@ static unsigned int hnat_ipv4_get_nextho
+ return -1;
+ }
+
++ /*
++ * if this packet is a tunnel packet and is about to construct
++ * its outer header, we must update the outer mac header pointer
++ * before filling the outer mac, or the inner mac may be corrupted
++ */
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
++ skb_push(skb, sizeof(struct ethhdr));
++ skb_reset_mac_header(skb);
++ }
++
+ memcpy(eth_hdr(skb)->h_dest, neigh->ha, ETH_ALEN);
+ memcpy(eth_hdr(skb)->h_source, out->dev_addr, ETH_ALEN);
+
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
++ skb_pull(skb, sizeof(struct ethhdr));
++
+ rcu_read_unlock_bh();
+
+ return 0;
+@@ -1202,6 +1233,37 @@ static struct ethhdr *get_ipv6_ipip_ethh
+ return eth;
+ }
+
++static inline void hnat_get_filled_unbind_entry(struct sk_buff *skb,
++ struct foe_entry *entry)
++{
++ if (unlikely(!skb || !entry))
++ return;
++
++ memcpy(entry,
++ &hnat_priv->foe_table_cpu[skb_hnat_ppe(skb)][skb_hnat_entry(skb)],
++ sizeof(*entry));
++}
++
++static inline void hnat_fill_offload_engine_entry(struct sk_buff *skb,
++ struct foe_entry *entry)
++{
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb)) {
++ /*
++ * if skb_hnat_tops(skb) is setup for encapsulation,
++ * we fill in hnat tport and tops_entry for tunnel encapsulation
++ * offloading
++ */
++ entry->ipv4_hnapt.tport_id = NR_TDMA_QDMA_TPORT;
++ entry->ipv4_hnapt.tops_entry = skb_hnat_tops(skb);
++ } else {
++ return;
++ }
++
++ entry->ipv4_hnapt.iblk2.qid = 12; /* offload engine use QID 12 */
++#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
++}
++
+ static unsigned int skb_to_hnat_info(struct sk_buff *skb,
+ const struct net_device *dev,
+ struct foe_entry *foe,
+@@ -1237,6 +1299,11 @@ static unsigned int skb_to_hnat_info(str
+ if (whnat && is_hnat_pre_filled(foe))
+ return 0;
+
++ if (skb_hnat_tops(skb) && !(hw_path->flags & FLOW_OFFLOAD_PATH_TNL)) {
++ hnat_get_filled_unbind_entry(skb, &entry);
++ goto hnat_entry_bind;
++ }
++
+ entry.bfib1.pkt_type = foe->udib1.pkt_type; /* Get packte type state*/
+ entry.bfib1.state = foe->udib1.state;
+
+@@ -1247,6 +1314,7 @@ static unsigned int skb_to_hnat_info(str
+ switch (ntohs(eth->h_proto)) {
+ case ETH_P_IP:
+ iph = ip_hdr(skb);
++
+ switch (iph->protocol) {
+ case IPPROTO_UDP:
+ udp = 1;
+@@ -1628,6 +1696,10 @@ static unsigned int skb_to_hnat_info(str
+ /* Fill Layer2 Info.*/
+ entry = ppe_fill_L2_info(eth, entry, hw_path);
+
++ if (skb_hnat_tops(skb) && hw_path->flags & FLOW_OFFLOAD_PATH_TNL)
++ goto hnat_entry_skip_bind;
++
++hnat_entry_bind:
+ /* Fill Info Blk*/
+ entry = ppe_fill_info_blk(eth, entry, hw_path);
+
+@@ -1806,7 +1878,20 @@ static unsigned int skb_to_hnat_info(str
+ entry.ipv6_5t_route.act_dp |= UDF_HNAT_PRE_FILLED;
+ }
+
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++ hnat_fill_offload_engine_entry(skb, &entry);
++#endif
++
++hnat_entry_skip_bind:
+ wmb();
++
++ /*
++ * final check before we write BIND info.
++ * If this entry is already bound, we should not modify it right now
++ */
++ if (entry_hnat_is_bound(foe))
++ return 0;
++
+ memcpy(foe, &entry, sizeof(entry));
+ /*reset statistic for this entry*/
+ if (hnat_priv->data->per_flow_accounting &&
+@@ -1859,6 +1944,7 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
+ return NF_ACCEPT;
+
+ eth = eth_hdr(skb);
++
+ memcpy(&bfib1_tx, &entry->bfib1, sizeof(entry->bfib1));
+
+ /*not bind multicast if PPE mcast not enable*/
+@@ -1878,6 +1964,12 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
+ switch ((int)bfib1_tx.pkt_type) {
+ case IPV4_HNAPT:
+ case IPV4_HNAT:
++ /*
++ * skip if the packet is an encap tunnel packet, or the
++ * inner mac header may be corrupted
++ */
++ if (skb_hnat_tops(skb) && skb_hnat_is_encap(skb))
++ break;
+ entry->ipv4_hnapt.smac_hi = swab32(*((u32 *)eth->h_source));
+ entry->ipv4_hnapt.smac_lo = swab16(*((u16 *)ð->h_source[4]));
+ break;
+@@ -2037,6 +2129,10 @@ int mtk_sw_nat_hook_tx(struct sk_buff *s
+ entry->ipv6_5t_route.iblk2.dp = gmac_no;
+ }
+
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++ hnat_fill_offload_engine_entry(skb, entry);
++#endif
++
+ bfib1_tx.ttl = 1;
+ bfib1_tx.state = BIND;
+ wmb();
+@@ -2058,6 +2154,7 @@ int mtk_sw_nat_hook_rx(struct sk_buff *s
+ }
+
+ skb_hnat_alg(skb) = 0;
++ skb_hnat_set_tops(skb, 0);
+ skb_hnat_magic_tag(skb) = HNAT_MAGIC_TAG;
+
+ if (skb_hnat_iface(skb) == FOE_MAGIC_WED0)
+@@ -2504,6 +2601,7 @@ static unsigned int mtk_hnat_nf_post_rou
+ struct flow_offload_hw_path hw_path = { .dev = (struct net_device*)out,
+ .virt_dev = (struct net_device*)out };
+ const struct net_device *arp_dev = out;
++ bool is_virt_dev = false;
+
+ if (xlat_toggle && !mtk_464xlat_post_process(skb, out))
+ return 0;
+@@ -2524,10 +2622,18 @@ static unsigned int mtk_hnat_nf_post_rou
+
+ if (out->netdev_ops->ndo_flow_offload_check) {
+ out->netdev_ops->ndo_flow_offload_check(&hw_path);
++
+ out = (IS_GMAC1_MODE) ? hw_path.virt_dev : hw_path.dev;
++ if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL && mtk_tnl_encap_offload)
++ skb_hnat_set_tops(skb, hw_path.tnl_type + 1);
+ }
+
+ if (!IS_LAN_GRP(out) && !IS_WAN(out) && !IS_EXT(out))
++ is_virt_dev = true;
++
++ if (is_virt_dev
++ && !(skb_hnat_tops(skb) && skb_hnat_is_encap(skb)
++ && (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)))
+ return 0;
+
+ trace_printk("[%s] case hit, %x-->%s, reason=%x\n", __func__,
+@@ -2547,9 +2653,18 @@ static unsigned int mtk_hnat_nf_post_rou
+ if (fn && !mtk_hnat_accel_type(skb))
+ break;
+
+- if (fn && fn(skb, arp_dev, &hw_path))
++ if (!is_virt_dev && fn && fn(skb, arp_dev, &hw_path))
+ break;
+
++ /* skb_hnat_tops(skb) is updated in mtk_tnl_offload() */
++ if (skb_hnat_tops(skb)) {
++ if (skb_hnat_is_encap(skb) && !is_virt_dev
++ && mtk_tnl_encap_offload && mtk_tnl_encap_offload(skb))
++ break;
++ if (skb_hnat_is_decap(skb))
++ break;
++ }
++
+ skb_to_hnat_info(skb, out, entry, &hw_path);
+ break;
+ case HIT_BIND_KEEPALIVE_DUP_OLD_HDR:
+@@ -2820,7 +2935,7 @@ mtk_hnat_ipv4_nf_local_out(void *priv, s
+ if (iph->protocol == IPPROTO_IPV6) {
+ entry->udib1.pkt_type = IPV6_6RD;
+ hnat_set_head_frags(state, skb, 0, hnat_set_alg);
+- } else {
++ } else if (!skb_hnat_tops(skb)) {
+ hnat_set_head_frags(state, skb, 1, hnat_set_alg);
+ }
+
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/nf_hnat_mtk.h
+@@ -44,7 +44,9 @@ struct hnat_desc {
+ u32 is_sp : 1;
+ u32 hf : 1;
+ u32 amsdu : 1;
+- u32 resv3 : 19;
++ u32 tops : 6;
++ u32 is_decap : 1;
++ u32 resv3 : 12;
+ u32 magic_tag_protect : 16;
+ } __packed;
+ #elif defined(CONFIG_MEDIATEK_NETSYS_RX_V2)
+@@ -91,6 +93,19 @@ struct hnat_desc {
+ ((((skb_headroom(skb) >= FOE_INFO_LEN) ? 1 : 0)))
+
+ #define skb_hnat_info(skb) ((struct hnat_desc *)(skb->head))
++#if defined(CONFIG_MEDIATEK_NETSYS_V3)
++#define skb_hnat_tops(skb) (((struct hnat_desc *)((skb)->head))->tops)
++#define skb_hnat_is_decap(skb) (((struct hnat_desc *)((skb)->head))->is_decap)
++#define skb_hnat_is_encap(skb) (!skb_hnat_is_decap(skb))
++#define skb_hnat_set_tops(skb, tops) ((skb_hnat_tops(skb)) = (tops))
++#define skb_hnat_set_is_decap(skb, is_decap) ((skb_hnat_is_decap(skb)) = (is_decap))
++#else /* !defined(CONFIG_MEDIATEK_NETSYS_V3) */
++#define skb_hnat_tops(skb) (0)
++#define skb_hnat_is_decap(skb) (0)
++#define skb_hnat_is_encap(skb) (0)
++#define skb_hnat_set_tops(skb, tops)
++#define skb_hnat_set_is_decap(skb, is_decap)
++#endif /* defined(CONFIG_MEDIATEK_NETSYS_V3) */
+ #define skb_hnat_magic(skb) (((struct hnat_desc *)(skb->head))->magic)
+ #define skb_hnat_reason(skb) (((struct hnat_desc *)(skb->head))->crsn)
+ #define skb_hnat_entry(skb) (((struct hnat_desc *)(skb->head))->entry)
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -98,10 +98,22 @@ struct flow_offload {
+ #define FLOW_OFFLOAD_PATH_6RD BIT(5)
+ #define FLOW_OFFLOAD_PATH_TNL BIT(6)
+
++enum flow_offload_tnl {
++ FLOW_OFFLOAD_TNL_GRETAP,
++ FLOW_OFFLOAD_TNL_PPTP,
++ FLOW_OFFLOAD_TNL_IP_L2TP,
++ FLOW_OFFLOAD_TNL_UDP_L2TP_CTRL,
++ FLOW_OFFLOAD_TNL_UDP_L2TP_DATA,
++ FLOW_OFFLOAD_VXLAN,
++ FLOW_OFFLOAD_NATT,
++ __FLOW_OFFLOAD_MAX,
++};
++
+ struct flow_offload_hw_path {
+ struct net_device *dev;
+ struct net_device *virt_dev;
+ u32 flags;
++ u32 tnl_type;
+
+ u8 eth_src[ETH_ALEN];
+ u8 eth_dest[ETH_ALEN];
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1868,6 +1868,9 @@ extern const struct of_device_id of_mtk_
+ extern u32 mtk_hwlro_stats_ebl;
+ extern u32 dbg_show_level;
+
++/* tunnel offload related */
++extern struct net_device *(*mtk_get_tnl_dev)(int tnl_idx);
++
+ /* read the hardware status register */
+ void mtk_stats_update_mac(struct mtk_mac *mac);
+
diff --git a/target/linux/mediatek/patches-5.4/999-4101-mtk-tops-network-service-error-recover-support.patch b/target/linux/mediatek/patches-5.4/999-4101-mtk-tops-network-service-error-recover-support.patch
new file mode 100644
index 0000000..fbb46ac
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-4101-mtk-tops-network-service-error-recover-support.patch
@@ -0,0 +1,49 @@
+--- a/drivers/net/ethernet/mediatek/mtk_eth_reset.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_reset.c
+@@ -635,6 +635,9 @@ static int mtk_eth_netdevice_event(struc
+ unsigned long event, void *ptr)
+ {
+ switch (event) {
++ case MTK_TOPS_DUMP_DONE:
++ complete(&wait_tops_done);
++ break;
+ case MTK_WIFI_RESET_DONE:
+ case MTK_FE_STOP_TRAFFIC_DONE:
+ pr_info("%s rcv done event:%lx\n", __func__, event);
+--- a/drivers/net/ethernet/mediatek/mtk_eth_reset.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_reset.h
+@@ -13,6 +13,7 @@
+ #define MTK_WIFI_RESET_DONE 0x2002
+ #define MTK_WIFI_CHIP_ONLINE 0x2003
+ #define MTK_WIFI_CHIP_OFFLINE 0x2004
++#define MTK_TOPS_DUMP_DONE 0x3001
+ #define MTK_FE_RESET_NAT_DONE 0x4001
+
+ #define MTK_FE_STOP_TRAFFIC (0x2005)
+@@ -67,6 +68,7 @@ enum mtk_reset_event_id {
+
+ extern struct notifier_block mtk_eth_netdevice_nb __read_mostly;
+ extern struct completion wait_ser_done;
++extern struct completion wait_tops_done;
+ extern char* mtk_reset_event_name[32];
+ extern atomic_t reset_lock;
+ extern struct completion wait_nat_done;
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -38,6 +38,7 @@ atomic_t force = ATOMIC_INIT(0);
+ module_param_named(msg_level, mtk_msg_level, int, 0);
+ MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+ DECLARE_COMPLETION(wait_ser_done);
++DECLARE_COMPLETION(wait_tops_done);
+
+ #define MTK_ETHTOOL_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+@@ -4057,6 +4058,8 @@ static void mtk_pending_work(struct work
+ }
+ pr_warn("wait for MTK_FE_START_RESET\n");
+ }
++ if (!try_wait_for_completion(&wait_tops_done))
++ pr_warn("wait for MTK_TOPS_DUMP_DONE\n");
+ rtnl_lock();
+ break;
+ }
diff --git a/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-gre-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-gre-offload-support.patch
new file mode 100644
index 0000000..3833fa0
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-gre-offload-support.patch
@@ -0,0 +1,45 @@
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -39,6 +39,7 @@
+ #include <net/inet_ecn.h>
+ #include <net/xfrm.h>
+ #include <net/net_namespace.h>
++#include <net/netfilter/nf_flow_table.h>
+ #include <net/netns/generic.h>
+ #include <net/rtnetlink.h>
+ #include <net/gre.h>
+@@ -901,6 +902,24 @@ static int ipgre_close(struct net_device
+ }
+ #endif
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int gre_dev_flow_offload_check(struct flow_offload_hw_path *path)
++{
++ struct net_device *dev = path->dev;
++ struct ip_tunnel *tunnel = netdev_priv(dev);
++
++ if (path->flags & FLOW_OFFLOAD_PATH_TNL)
++ return -EEXIST;
++
++ path->flags |= FLOW_OFFLOAD_PATH_TNL;
++ path->tnl_type = FLOW_OFFLOAD_TNL_GRETAP;
++ path->virt_dev = dev;
++ path->dev = tunnel->dev;
++
++ return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ static const struct net_device_ops ipgre_netdev_ops = {
+ .ndo_init = ipgre_tunnel_init,
+ .ndo_uninit = ip_tunnel_uninit,
+@@ -1264,6 +1283,9 @@ static const struct net_device_ops gre_t
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip_tunnel_get_iflink,
+ .ndo_fill_metadata_dst = gre_fill_metadata_dst,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ .ndo_flow_offload_check = gre_dev_flow_offload_check,
++#endif
+ };
+
+ static int erspan_tunnel_init(struct net_device *dev)
diff --git a/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-l2tp-offload-support.patch b/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-l2tp-offload-support.patch
new file mode 100644
index 0000000..e6583b6
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/999-4500-mtk-tops-l2tp-offload-support.patch
@@ -0,0 +1,35 @@
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1068,6 +1068,10 @@ int l2tp_xmit_skb(struct l2tp_session *s
+ int udp_len;
+ int ret = NET_XMIT_SUCCESS;
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ skb_reset_inner_headers(skb);
++#endif
++
+ /* Check that there's enough headroom in the skb to insert IP,
+ * UDP and L2TP headers. If not enough, expand it to
+ * make room. Adjust truesize.
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat_nf_hook.c
+@@ -855,7 +855,8 @@ mtk_hnat_ipv4_nf_pre_routing(void *priv,
+ * and it's L2TP flow, then do not bind.
+ */
+ if (skb_hnat_iface(skb) == FOE_MAGIC_GE_VIRTUAL
+- && skb->dev->netdev_ops->ndo_flow_offload_check) {
++ && skb->dev->netdev_ops->ndo_flow_offload_check
++ && !mtk_tnl_decap_offload) {
+ skb->dev->netdev_ops->ndo_flow_offload_check(&hw_path);
+
+ if (hw_path.flags & FLOW_OFFLOAD_PATH_TNL)
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -356,6 +356,7 @@ static int l2tp_ppp_flow_offload_check(s
+ return -EINVAL;
+
+ path->flags |= FLOW_OFFLOAD_PATH_TNL;
++ path->tnl_type = FLOW_OFFLOAD_TNL_UDP_L2TP_DATA;
+
+ return 0;
+ }