[Add meta-filogic BSP for RDK-B development]
[Description]
Add the meta-filogic BSP layer for RDK-B development.
1. RDK-B is based on the dunfell rdkb-next branch (> 2022q1).
2. Both the arm64 and arm 32-bit BSPs can run RDK-B (a layer-setup sketch follows below).
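A minimal sketch of enabling the new layer in an existing dunfell build; the checkout path ../meta-filogic and the exact build directory layout are assumptions for illustration, not part of this change:
  $ bitbake-layers add-layer ../meta-filogic
  # or, equivalently, append the layer path in conf/bblayers.conf:
  # BBLAYERS += "${TOPDIR}/../meta-filogic"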
[Release-log]
N/A
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0001-clk-mtk-add-mt7986-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0001-clk-mtk-add-mt7986-support.patch
new file mode 100644
index 0000000..930e88b
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0001-clk-mtk-add-mt7986-support.patch
@@ -0,0 +1,41 @@
+diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
+index 7efc361..5f11280 100644
+--- a/drivers/clk/mediatek/Kconfig
++++ b/drivers/clk/mediatek/Kconfig
+@@ -258,6 +258,15 @@ config COMMON_CLK_MT7629_HIFSYS
+ This driver supports MediaTek MT7629 HIFSYS clocks providing
+ to PCI-E and USB.
+
++config COMMON_CLK_MT7986
++ bool "Clock driver for MediaTek MT7986"
++ depends on ARCH_MEDIATEK || COMPILE_TEST
++ select COMMON_CLK_MEDIATEK
++ default ARCH_MEDIATEK && ARM
++ ---help---
++ This driver supports MediaTek MT7986 basic clocks and clocks
++ required for various peripherals found on MediaTek.
++
+ config COMMON_CLK_MT8135
+ bool "Clock driver for MediaTek MT8135"
+ depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
+diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
+index 8cdb76a..8c392f4 100644
+--- a/drivers/clk/mediatek/Makefile
++++ b/drivers/clk/mediatek/Makefile
+@@ -39,6 +39,7 @@ obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
+ obj-$(CONFIG_COMMON_CLK_MT7629) += clk-mt7629.o
+ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
+ obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
++obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986.o
+ obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
+ obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+ obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
+@@ -55,3 +56,4 @@ obj-$(CONFIG_COMMON_CLK_MT8183_VDECSYS) += clk-mt8183-vdec.o
+ obj-$(CONFIG_COMMON_CLK_MT8183_VENCSYS) += clk-mt8183-venc.o
+ obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
+ obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
++obj-y += clk-bringup.o
+\ No newline at end of file
+--
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0001-v5.7-spi-make-spi-max-frequency-optional.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0001-v5.7-spi-make-spi-max-frequency-optional.patch
new file mode 100644
index 0000000..79ce15c
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0001-v5.7-spi-make-spi-max-frequency-optional.patch
@@ -0,0 +1,38 @@
+From 671c3bf50ae498dc12aef6c70abe5cfa066b1348 Mon Sep 17 00:00:00 2001
+From: Chuanhong Guo <gch981213@gmail.com>
+Date: Fri, 6 Mar 2020 16:50:49 +0800
+Subject: [PATCH 1/2] spi: make spi-max-frequency optional
+
+We only need a spi-max-frequency when we specifically request a
+spi frequency lower than the max speed of spi host.
+This property is already documented as optional property and current
+host drivers are implemented to operate at highest speed possible
+when spi->max_speed_hz is 0.
+This patch makes spi-max-frequency an optional property so that
+we could just omit it to use max controller speed.
+
+Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
+Link: https://lore.kernel.org/r/20200306085052.28258-2-gch981213@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ drivers/spi/spi.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1809,13 +1809,8 @@ static int of_spi_parse_dt(struct spi_co
+ spi->mode |= SPI_CS_HIGH;
+
+ /* Device speed */
+- rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+- if (rc) {
+- dev_err(&ctlr->dev,
+- "%pOF has no valid 'spi-max-frequency' property (%d)\n", nc, rc);
+- return rc;
+- }
+- spi->max_speed_hz = value;
++ if (!of_property_read_u32(nc, "spi-max-frequency", &value))
++ spi->max_speed_hz = value;
+
+ return 0;
+ }
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0002-clk-mtk-add-mt7981-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0002-clk-mtk-add-mt7981-support.patch
new file mode 100644
index 0000000..72f9e8a
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0002-clk-mtk-add-mt7981-support.patch
@@ -0,0 +1,34 @@
+diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
+index 1c48fe9..23393d5 100644
+--- a/drivers/clk/mediatek/Kconfig
++++ b/drivers/clk/mediatek/Kconfig
+@@ -267,6 +267,14 @@ config COMMON_CLK_MT7986
+ This driver supports MediaTek MT7986 basic clocks and clocks
+ required for various peripherals found on MediaTek.
+
++config COMMON_CLK_MT7981
++ bool "Clock driver for MediaTek MT7981"
++ depends on ARCH_MEDIATEK || COMPILE_TEST
++ select COMMON_CLK_MEDIATEK
++ ---help---
++ This driver supports MediaTek MT7981 basic clocks and clocks
++ required for various peripherals found on MediaTek.
++
+ config COMMON_CLK_MT8135
+ bool "Clock driver for MediaTek MT8135"
+ depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
+diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
+index 8c392f4..ffe0850 100644
+--- a/drivers/clk/mediatek/Makefile
++++ b/drivers/clk/mediatek/Makefile
+@@ -40,6 +40,7 @@ obj-$(CONFIG_COMMON_CLK_MT7629) += clk-mt7629.o
+ obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
+ obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
+ obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986.o
++obj-$(CONFIG_COMMON_CLK_MT7981) += clk-mt7981.o
+ obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
+ obj-$(CONFIG_COMMON_CLK_MT8173) += clk-mt8173.o
+ obj-$(CONFIG_COMMON_CLK_MT8183) += clk-mt8183.o
+--
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0002-v5.7-spi-add-support-for-mediatek-spi-nor-controller.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0002-v5.7-spi-add-support-for-mediatek-spi-nor-controller.patch
new file mode 100644
index 0000000..0a63bdd
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0002-v5.7-spi-add-support-for-mediatek-spi-nor-controller.patch
@@ -0,0 +1,761 @@
+From 881d1ee9fe81ff2be1b90809a07621be97404a57 Mon Sep 17 00:00:00 2001
+From: Chuanhong Guo <gch981213@gmail.com>
+Date: Fri, 6 Mar 2020 16:50:50 +0800
+Subject: [PATCH 2/2] spi: add support for mediatek spi-nor controller
+
+This is a driver for mtk spi-nor controller using spi-mem interface.
+The same controller already has limited support provided by mtk-quadspi
+driver under spi-nor framework and this new driver is a replacement
+for the old one.
+
+Comparing to the old driver, this driver has following advantages:
+1. It can handle any full-duplex spi transfer up to 6 bytes, and
+ this is implemented using generic spi interface.
+2. It take account into command opcode properly. The reading routine
+ in this controller can only use 0x03 or 0x0b as opcode on 1-1-1
+ transfers, but old driver doesn't implement this properly. This
+ driver checks supported opcode explicitly and use (1) to perform
+ unmatched operations.
+3. It properly handles SFDP reading. Old driver can't read SFDP
+ due to the bug mentioned in (2).
+4. It can do 1-2-2 and 1-4-4 fast reading on spi-nor. These two ops
+ requires parsing SFDP, which isn't possible in old driver. And
+ the old driver is only flagged to support 1-1-2 mode.
+5. It takes advantage of the DMA feature in this controller for
+ long reads and supports IRQ on DMA requests to free cpu cycles
+ from polling status registers on long DMA reading. It achieves
+ up to 17.5MB/s reading speed (1-4-4 mode) which is way faster
+ than the old one. IRQ is implemented as optional to maintain
+ backward compatibility.
+
+Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
+Link: https://lore.kernel.org/r/20200306085052.28258-3-gch981213@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+---
+ drivers/spi/Kconfig | 10 +
+ drivers/spi/Makefile | 1 +
+ drivers/spi/spi-mtk-nor.c | 689 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 700 insertions(+)
+ create mode 100644 drivers/spi/spi-mtk-nor.c
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -433,6 +433,16 @@ config SPI_MT7621
+ help
+ This selects a driver for the MediaTek MT7621 SPI Controller.
+
++config SPI_MTK_NOR
++ tristate "MediaTek SPI NOR controller"
++ depends on ARCH_MEDIATEK || COMPILE_TEST
++ help
++ This enables support for SPI NOR controller found on MediaTek
++ ARM SoCs. This is a controller specifically for SPI-NOR flash.
++ It can perform generic SPI transfers up to 6 bytes via generic
++ SPI interface as well as several SPI-NOR specific instructions
++ via SPI MEM interface.
++
+ config SPI_NPCM_FIU
+ tristate "Nuvoton NPCM FLASH Interface Unit"
+ depends on ARCH_NPCM || COMPILE_TEST
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -61,6 +61,7 @@ obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mp
+ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+ obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
+ obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
++obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
+ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
+ obj-$(CONFIG_SPI_MXS) += spi-mxs.o
+ obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
+--- /dev/null
++++ b/drivers/spi/spi-mtk-nor.c
+@@ -0,0 +1,689 @@
++// SPDX-License-Identifier: GPL-2.0
++//
++// Mediatek SPI NOR controller driver
++//
++// Copyright (C) 2020 Chuanhong Guo <gch981213@gmail.com>
++
++#include <linux/bits.h>
++#include <linux/clk.h>
++#include <linux/completion.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/iopoll.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of_device.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/spi-mem.h>
++#include <linux/string.h>
++
++#define DRIVER_NAME "mtk-spi-nor"
++
++#define MTK_NOR_REG_CMD 0x00
++#define MTK_NOR_CMD_WRITE BIT(4)
++#define MTK_NOR_CMD_PROGRAM BIT(2)
++#define MTK_NOR_CMD_READ BIT(0)
++#define MTK_NOR_CMD_MASK GENMASK(5, 0)
++
++#define MTK_NOR_REG_PRG_CNT 0x04
++#define MTK_NOR_REG_RDATA 0x0c
++
++#define MTK_NOR_REG_RADR0 0x10
++#define MTK_NOR_REG_RADR(n) (MTK_NOR_REG_RADR0 + 4 * (n))
++#define MTK_NOR_REG_RADR3 0xc8
++
++#define MTK_NOR_REG_WDATA 0x1c
++
++#define MTK_NOR_REG_PRGDATA0 0x20
++#define MTK_NOR_REG_PRGDATA(n) (MTK_NOR_REG_PRGDATA0 + 4 * (n))
++#define MTK_NOR_REG_PRGDATA_MAX 5
++
++#define MTK_NOR_REG_SHIFT0 0x38
++#define MTK_NOR_REG_SHIFT(n) (MTK_NOR_REG_SHIFT0 + 4 * (n))
++#define MTK_NOR_REG_SHIFT_MAX 9
++
++#define MTK_NOR_REG_CFG1 0x60
++#define MTK_NOR_FAST_READ BIT(0)
++
++#define MTK_NOR_REG_CFG2 0x64
++#define MTK_NOR_WR_CUSTOM_OP_EN BIT(4)
++#define MTK_NOR_WR_BUF_EN BIT(0)
++
++#define MTK_NOR_REG_PP_DATA 0x98
++
++#define MTK_NOR_REG_IRQ_STAT 0xa8
++#define MTK_NOR_REG_IRQ_EN 0xac
++#define MTK_NOR_IRQ_DMA BIT(7)
++#define MTK_NOR_IRQ_MASK GENMASK(7, 0)
++
++#define MTK_NOR_REG_CFG3 0xb4
++#define MTK_NOR_DISABLE_WREN BIT(7)
++#define MTK_NOR_DISABLE_SR_POLL BIT(5)
++
++#define MTK_NOR_REG_WP 0xc4
++#define MTK_NOR_ENABLE_SF_CMD 0x30
++
++#define MTK_NOR_REG_BUSCFG 0xcc
++#define MTK_NOR_4B_ADDR BIT(4)
++#define MTK_NOR_QUAD_ADDR BIT(3)
++#define MTK_NOR_QUAD_READ BIT(2)
++#define MTK_NOR_DUAL_ADDR BIT(1)
++#define MTK_NOR_DUAL_READ BIT(0)
++#define MTK_NOR_BUS_MODE_MASK GENMASK(4, 0)
++
++#define MTK_NOR_REG_DMA_CTL 0x718
++#define MTK_NOR_DMA_START BIT(0)
++
++#define MTK_NOR_REG_DMA_FADR 0x71c
++#define MTK_NOR_REG_DMA_DADR 0x720
++#define MTK_NOR_REG_DMA_END_DADR 0x724
++
++#define MTK_NOR_PRG_MAX_SIZE 6
++// Reading DMA src/dst addresses have to be 16-byte aligned
++#define MTK_NOR_DMA_ALIGN 16
++#define MTK_NOR_DMA_ALIGN_MASK (MTK_NOR_DMA_ALIGN - 1)
++// and we allocate a bounce buffer if destination address isn't aligned.
++#define MTK_NOR_BOUNCE_BUF_SIZE PAGE_SIZE
++
++// Buffered page program can do one 128-byte transfer
++#define MTK_NOR_PP_SIZE 128
++
++#define CLK_TO_US(sp, clkcnt) ((clkcnt) * 1000000 / sp->spi_freq)
++
++struct mtk_nor {
++ struct spi_controller *ctlr;
++ struct device *dev;
++ void __iomem *base;
++ u8 *buffer;
++ struct clk *spi_clk;
++ struct clk *ctlr_clk;
++ unsigned int spi_freq;
++ bool wbuf_en;
++ bool has_irq;
++ struct completion op_done;
++};
++
++static inline void mtk_nor_rmw(struct mtk_nor *sp, u32 reg, u32 set, u32 clr)
++{
++ u32 val = readl(sp->base + reg);
++
++ val &= ~clr;
++ val |= set;
++ writel(val, sp->base + reg);
++}
++
++static inline int mtk_nor_cmd_exec(struct mtk_nor *sp, u32 cmd, ulong clk)
++{
++ ulong delay = CLK_TO_US(sp, clk);
++ u32 reg;
++ int ret;
++
++ writel(cmd, sp->base + MTK_NOR_REG_CMD);
++ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CMD, reg, !(reg & cmd),
++ delay / 3, (delay + 1) * 200);
++ if (ret < 0)
++ dev_err(sp->dev, "command %u timeout.\n", cmd);
++ return ret;
++}
++
++static void mtk_nor_set_addr(struct mtk_nor *sp, const struct spi_mem_op *op)
++{
++ u32 addr = op->addr.val;
++ int i;
++
++ for (i = 0; i < 3; i++) {
++ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR(i));
++ addr >>= 8;
++ }
++ if (op->addr.nbytes == 4) {
++ writeb(addr & 0xff, sp->base + MTK_NOR_REG_RADR3);
++ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, MTK_NOR_4B_ADDR, 0);
++ } else {
++ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, 0, MTK_NOR_4B_ADDR);
++ }
++}
++
++static bool mtk_nor_match_read(const struct spi_mem_op *op)
++{
++ int dummy = 0;
++
++ if (op->dummy.buswidth)
++ dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
++
++ if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
++ if (op->addr.buswidth == 1)
++ return dummy == 8;
++ else if (op->addr.buswidth == 2)
++ return dummy == 4;
++ else if (op->addr.buswidth == 4)
++ return dummy == 6;
++ } else if ((op->addr.buswidth == 1) && (op->data.buswidth == 1)) {
++ if (op->cmd.opcode == 0x03)
++ return dummy == 0;
++ else if (op->cmd.opcode == 0x0b)
++ return dummy == 8;
++ }
++ return false;
++}
++
++static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
++{
++ size_t len;
++
++ if (!op->data.nbytes)
++ return 0;
++
++ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
++ if ((op->data.dir == SPI_MEM_DATA_IN) &&
++ mtk_nor_match_read(op)) {
++ if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
++ (op->data.nbytes < MTK_NOR_DMA_ALIGN))
++ op->data.nbytes = 1;
++ else if (!((ulong)(op->data.buf.in) &
++ MTK_NOR_DMA_ALIGN_MASK))
++ op->data.nbytes &= ~MTK_NOR_DMA_ALIGN_MASK;
++ else if (op->data.nbytes > MTK_NOR_BOUNCE_BUF_SIZE)
++ op->data.nbytes = MTK_NOR_BOUNCE_BUF_SIZE;
++ return 0;
++ } else if (op->data.dir == SPI_MEM_DATA_OUT) {
++ if (op->data.nbytes >= MTK_NOR_PP_SIZE)
++ op->data.nbytes = MTK_NOR_PP_SIZE;
++ else
++ op->data.nbytes = 1;
++ return 0;
++ }
++ }
++
++ len = MTK_NOR_PRG_MAX_SIZE - sizeof(op->cmd.opcode) - op->addr.nbytes -
++ op->dummy.nbytes;
++ if (op->data.nbytes > len)
++ op->data.nbytes = len;
++
++ return 0;
++}
++
++static bool mtk_nor_supports_op(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ size_t len;
++
++ if (op->cmd.buswidth != 1)
++ return false;
++
++ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
++ if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op))
++ return true;
++ else if (op->data.dir == SPI_MEM_DATA_OUT)
++ return (op->addr.buswidth == 1) &&
++ (op->dummy.buswidth == 0) &&
++ (op->data.buswidth == 1);
++ }
++ len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
++ if ((len > MTK_NOR_PRG_MAX_SIZE) ||
++ ((op->data.nbytes) && (len == MTK_NOR_PRG_MAX_SIZE)))
++ return false;
++ return true;
++}
++
++static void mtk_nor_setup_bus(struct mtk_nor *sp, const struct spi_mem_op *op)
++{
++ u32 reg = 0;
++
++ if (op->addr.nbytes == 4)
++ reg |= MTK_NOR_4B_ADDR;
++
++ if (op->data.buswidth == 4) {
++ reg |= MTK_NOR_QUAD_READ;
++ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(4));
++ if (op->addr.buswidth == 4)
++ reg |= MTK_NOR_QUAD_ADDR;
++ } else if (op->data.buswidth == 2) {
++ reg |= MTK_NOR_DUAL_READ;
++ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA(3));
++ if (op->addr.buswidth == 2)
++ reg |= MTK_NOR_DUAL_ADDR;
++ } else {
++ if (op->cmd.opcode == 0x0b)
++ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, MTK_NOR_FAST_READ, 0);
++ else
++ mtk_nor_rmw(sp, MTK_NOR_REG_CFG1, 0, MTK_NOR_FAST_READ);
++ }
++ mtk_nor_rmw(sp, MTK_NOR_REG_BUSCFG, reg, MTK_NOR_BUS_MODE_MASK);
++}
++
++static int mtk_nor_read_dma(struct mtk_nor *sp, u32 from, unsigned int length,
++ u8 *buffer)
++{
++ int ret = 0;
++ ulong delay;
++ u32 reg;
++ dma_addr_t dma_addr;
++
++ dma_addr = dma_map_single(sp->dev, buffer, length, DMA_FROM_DEVICE);
++ if (dma_mapping_error(sp->dev, dma_addr)) {
++ dev_err(sp->dev, "failed to map dma buffer.\n");
++ return -EINVAL;
++ }
++
++ writel(from, sp->base + MTK_NOR_REG_DMA_FADR);
++ writel(dma_addr, sp->base + MTK_NOR_REG_DMA_DADR);
++ writel(dma_addr + length, sp->base + MTK_NOR_REG_DMA_END_DADR);
++
++ if (sp->has_irq) {
++ reinit_completion(&sp->op_done);
++ mtk_nor_rmw(sp, MTK_NOR_REG_IRQ_EN, MTK_NOR_IRQ_DMA, 0);
++ }
++
++ mtk_nor_rmw(sp, MTK_NOR_REG_DMA_CTL, MTK_NOR_DMA_START, 0);
++
++ delay = CLK_TO_US(sp, (length + 5) * BITS_PER_BYTE);
++
++ if (sp->has_irq) {
++ if (!wait_for_completion_timeout(&sp->op_done,
++ (delay + 1) * 100))
++ ret = -ETIMEDOUT;
++ } else {
++ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_DMA_CTL, reg,
++ !(reg & MTK_NOR_DMA_START), delay / 3,
++ (delay + 1) * 100);
++ }
++
++ dma_unmap_single(sp->dev, dma_addr, length, DMA_FROM_DEVICE);
++ if (ret < 0)
++ dev_err(sp->dev, "dma read timeout.\n");
++
++ return ret;
++}
++
++static int mtk_nor_read_bounce(struct mtk_nor *sp, u32 from,
++ unsigned int length, u8 *buffer)
++{
++ unsigned int rdlen;
++ int ret;
++
++ if (length & MTK_NOR_DMA_ALIGN_MASK)
++ rdlen = (length + MTK_NOR_DMA_ALIGN) & ~MTK_NOR_DMA_ALIGN_MASK;
++ else
++ rdlen = length;
++
++ ret = mtk_nor_read_dma(sp, from, rdlen, sp->buffer);
++ if (ret)
++ return ret;
++
++ memcpy(buffer, sp->buffer, length);
++ return 0;
++}
++
++static int mtk_nor_read_pio(struct mtk_nor *sp, const struct spi_mem_op *op)
++{
++ u8 *buf = op->data.buf.in;
++ int ret;
++
++ ret = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_READ, 6 * BITS_PER_BYTE);
++ if (!ret)
++ buf[0] = readb(sp->base + MTK_NOR_REG_RDATA);
++ return ret;
++}
++
++static int mtk_nor_write_buffer_enable(struct mtk_nor *sp)
++{
++ int ret;
++ u32 val;
++
++ if (sp->wbuf_en)
++ return 0;
++
++ val = readl(sp->base + MTK_NOR_REG_CFG2);
++ writel(val | MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
++ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
++ val & MTK_NOR_WR_BUF_EN, 0, 10000);
++ if (!ret)
++ sp->wbuf_en = true;
++ return ret;
++}
++
++static int mtk_nor_write_buffer_disable(struct mtk_nor *sp)
++{
++ int ret;
++ u32 val;
++
++ if (!sp->wbuf_en)
++ return 0;
++ val = readl(sp->base + MTK_NOR_REG_CFG2);
++ writel(val & ~MTK_NOR_WR_BUF_EN, sp->base + MTK_NOR_REG_CFG2);
++ ret = readl_poll_timeout(sp->base + MTK_NOR_REG_CFG2, val,
++ !(val & MTK_NOR_WR_BUF_EN), 0, 10000);
++ if (!ret)
++ sp->wbuf_en = false;
++ return ret;
++}
++
++static int mtk_nor_pp_buffered(struct mtk_nor *sp, const struct spi_mem_op *op)
++{
++ const u8 *buf = op->data.buf.out;
++ u32 val;
++ int ret, i;
++
++ ret = mtk_nor_write_buffer_enable(sp);
++ if (ret < 0)
++ return ret;
++
++ for (i = 0; i < op->data.nbytes; i += 4) {
++ val = buf[i + 3] << 24 | buf[i + 2] << 16 | buf[i + 1] << 8 |
++ buf[i];
++ writel(val, sp->base + MTK_NOR_REG_PP_DATA);
++ }
++ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE,
++ (op->data.nbytes + 5) * BITS_PER_BYTE);
++}
++
++static int mtk_nor_pp_unbuffered(struct mtk_nor *sp,
++ const struct spi_mem_op *op)
++{
++ const u8 *buf = op->data.buf.out;
++ int ret;
++
++ ret = mtk_nor_write_buffer_disable(sp);
++ if (ret < 0)
++ return ret;
++ writeb(buf[0], sp->base + MTK_NOR_REG_WDATA);
++ return mtk_nor_cmd_exec(sp, MTK_NOR_CMD_WRITE, 6 * BITS_PER_BYTE);
++}
++
++int mtk_nor_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
++{
++ struct mtk_nor *sp = spi_controller_get_devdata(mem->spi->master);
++ int ret;
++
++ if ((op->data.nbytes == 0) ||
++ ((op->addr.nbytes != 3) && (op->addr.nbytes != 4)))
++ return -ENOTSUPP;
++
++ if (op->data.dir == SPI_MEM_DATA_OUT) {
++ mtk_nor_set_addr(sp, op);
++ writeb(op->cmd.opcode, sp->base + MTK_NOR_REG_PRGDATA0);
++ if (op->data.nbytes == MTK_NOR_PP_SIZE)
++ return mtk_nor_pp_buffered(sp, op);
++ return mtk_nor_pp_unbuffered(sp, op);
++ }
++
++ if ((op->data.dir == SPI_MEM_DATA_IN) && mtk_nor_match_read(op)) {
++ ret = mtk_nor_write_buffer_disable(sp);
++ if (ret < 0)
++ return ret;
++ mtk_nor_setup_bus(sp, op);
++ if (op->data.nbytes == 1) {
++ mtk_nor_set_addr(sp, op);
++ return mtk_nor_read_pio(sp, op);
++ } else if (((ulong)(op->data.buf.in) &
++ MTK_NOR_DMA_ALIGN_MASK)) {
++ return mtk_nor_read_bounce(sp, op->addr.val,
++ op->data.nbytes,
++ op->data.buf.in);
++ } else {
++ return mtk_nor_read_dma(sp, op->addr.val,
++ op->data.nbytes,
++ op->data.buf.in);
++ }
++ }
++
++ return -ENOTSUPP;
++}
++
++static int mtk_nor_setup(struct spi_device *spi)
++{
++ struct mtk_nor *sp = spi_controller_get_devdata(spi->master);
++
++ if (spi->max_speed_hz && (spi->max_speed_hz < sp->spi_freq)) {
++ dev_err(&spi->dev, "spi clock should be %u Hz.\n",
++ sp->spi_freq);
++ return -EINVAL;
++ }
++ spi->max_speed_hz = sp->spi_freq;
++
++ return 0;
++}
++
++static int mtk_nor_transfer_one_message(struct spi_controller *master,
++ struct spi_message *m)
++{
++ struct mtk_nor *sp = spi_controller_get_devdata(master);
++ struct spi_transfer *t = NULL;
++ unsigned long trx_len = 0;
++ int stat = 0;
++ int reg_offset = MTK_NOR_REG_PRGDATA_MAX;
++ void __iomem *reg;
++ const u8 *txbuf;
++ u8 *rxbuf;
++ int i;
++
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ txbuf = t->tx_buf;
++ for (i = 0; i < t->len; i++, reg_offset--) {
++ reg = sp->base + MTK_NOR_REG_PRGDATA(reg_offset);
++ if (txbuf)
++ writeb(txbuf[i], reg);
++ else
++ writeb(0, reg);
++ }
++ trx_len += t->len;
++ }
++
++ writel(trx_len * BITS_PER_BYTE, sp->base + MTK_NOR_REG_PRG_CNT);
++
++ stat = mtk_nor_cmd_exec(sp, MTK_NOR_CMD_PROGRAM,
++ trx_len * BITS_PER_BYTE);
++ if (stat < 0)
++ goto msg_done;
++
++ reg_offset = trx_len - 1;
++ list_for_each_entry(t, &m->transfers, transfer_list) {
++ rxbuf = t->rx_buf;
++ for (i = 0; i < t->len; i++, reg_offset--) {
++ reg = sp->base + MTK_NOR_REG_SHIFT(reg_offset);
++ if (rxbuf)
++ rxbuf[i] = readb(reg);
++ }
++ }
++
++ m->actual_length = trx_len;
++msg_done:
++ m->status = stat;
++ spi_finalize_current_message(master);
++
++ return 0;
++}
++
++static void mtk_nor_disable_clk(struct mtk_nor *sp)
++{
++ clk_disable_unprepare(sp->spi_clk);
++ clk_disable_unprepare(sp->ctlr_clk);
++}
++
++static int mtk_nor_enable_clk(struct mtk_nor *sp)
++{
++ int ret;
++
++ ret = clk_prepare_enable(sp->spi_clk);
++ if (ret)
++ return ret;
++
++ ret = clk_prepare_enable(sp->ctlr_clk);
++ if (ret) {
++ clk_disable_unprepare(sp->spi_clk);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int mtk_nor_init(struct mtk_nor *sp)
++{
++ int ret;
++
++ ret = mtk_nor_enable_clk(sp);
++ if (ret)
++ return ret;
++
++ sp->spi_freq = clk_get_rate(sp->spi_clk);
++
++ writel(MTK_NOR_ENABLE_SF_CMD, sp->base + MTK_NOR_REG_WP);
++ mtk_nor_rmw(sp, MTK_NOR_REG_CFG2, MTK_NOR_WR_CUSTOM_OP_EN, 0);
++ mtk_nor_rmw(sp, MTK_NOR_REG_CFG3,
++ MTK_NOR_DISABLE_WREN | MTK_NOR_DISABLE_SR_POLL, 0);
++
++ return ret;
++}
++
++static irqreturn_t mtk_nor_irq_handler(int irq, void *data)
++{
++ struct mtk_nor *sp = data;
++ u32 irq_status, irq_enabled;
++
++ irq_status = readl(sp->base + MTK_NOR_REG_IRQ_STAT);
++ irq_enabled = readl(sp->base + MTK_NOR_REG_IRQ_EN);
++ // write status back to clear interrupt
++ writel(irq_status, sp->base + MTK_NOR_REG_IRQ_STAT);
++
++ if (!(irq_status & irq_enabled))
++ return IRQ_NONE;
++
++ if (irq_status & MTK_NOR_IRQ_DMA) {
++ complete(&sp->op_done);
++ writel(0, sp->base + MTK_NOR_REG_IRQ_EN);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static size_t mtk_max_msg_size(struct spi_device *spi)
++{
++ return MTK_NOR_PRG_MAX_SIZE;
++}
++
++static const struct spi_controller_mem_ops mtk_nor_mem_ops = {
++ .adjust_op_size = mtk_nor_adjust_op_size,
++ .supports_op = mtk_nor_supports_op,
++ .exec_op = mtk_nor_exec_op
++};
++
++static const struct of_device_id mtk_nor_match[] = {
++ { .compatible = "mediatek,mt8173-nor" },
++ { /* sentinel */ }
++};
++MODULE_DEVICE_TABLE(of, mtk_nor_match);
++
++static int mtk_nor_probe(struct platform_device *pdev)
++{
++ struct spi_controller *ctlr;
++ struct mtk_nor *sp;
++ void __iomem *base;
++ u8 *buffer;
++ struct clk *spi_clk, *ctlr_clk;
++ int ret, irq;
++
++ base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++
++ spi_clk = devm_clk_get(&pdev->dev, "spi");
++ if (IS_ERR(spi_clk))
++ return PTR_ERR(spi_clk);
++
++ ctlr_clk = devm_clk_get(&pdev->dev, "sf");
++ if (IS_ERR(ctlr_clk))
++ return PTR_ERR(ctlr_clk);
++
++ buffer = devm_kmalloc(&pdev->dev,
++ MTK_NOR_BOUNCE_BUF_SIZE + MTK_NOR_DMA_ALIGN,
++ GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
++
++ if ((ulong)buffer & MTK_NOR_DMA_ALIGN_MASK)
++ buffer = (u8 *)(((ulong)buffer + MTK_NOR_DMA_ALIGN) &
++ ~MTK_NOR_DMA_ALIGN_MASK);
++
++ ctlr = spi_alloc_master(&pdev->dev, sizeof(*sp));
++ if (!ctlr) {
++ dev_err(&pdev->dev, "failed to allocate spi controller\n");
++ return -ENOMEM;
++ }
++
++ ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
++ ctlr->dev.of_node = pdev->dev.of_node;
++ ctlr->max_message_size = mtk_max_msg_size;
++ ctlr->mem_ops = &mtk_nor_mem_ops;
++ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD;
++ ctlr->num_chipselect = 1;
++ ctlr->setup = mtk_nor_setup;
++ ctlr->transfer_one_message = mtk_nor_transfer_one_message;
++
++ dev_set_drvdata(&pdev->dev, ctlr);
++
++ sp = spi_controller_get_devdata(ctlr);
++ sp->base = base;
++ sp->buffer = buffer;
++ sp->has_irq = false;
++ sp->wbuf_en = false;
++ sp->ctlr = ctlr;
++ sp->dev = &pdev->dev;
++ sp->spi_clk = spi_clk;
++ sp->ctlr_clk = ctlr_clk;
++
++ irq = platform_get_irq_optional(pdev, 0);
++ if (irq < 0) {
++ dev_warn(sp->dev, "IRQ not available.");
++ } else {
++ writel(MTK_NOR_IRQ_MASK, base + MTK_NOR_REG_IRQ_STAT);
++ writel(0, base + MTK_NOR_REG_IRQ_EN);
++ ret = devm_request_irq(sp->dev, irq, mtk_nor_irq_handler, 0,
++ pdev->name, sp);
++ if (ret < 0) {
++ dev_warn(sp->dev, "failed to request IRQ.");
++ } else {
++ init_completion(&sp->op_done);
++ sp->has_irq = true;
++ }
++ }
++
++ ret = mtk_nor_init(sp);
++ if (ret < 0) {
++ kfree(ctlr);
++ return ret;
++ }
++
++ dev_info(&pdev->dev, "spi frequency: %d Hz\n", sp->spi_freq);
++
++ return devm_spi_register_controller(&pdev->dev, ctlr);
++}
++
++static int mtk_nor_remove(struct platform_device *pdev)
++{
++ struct spi_controller *ctlr;
++ struct mtk_nor *sp;
++
++ ctlr = dev_get_drvdata(&pdev->dev);
++ sp = spi_controller_get_devdata(ctlr);
++
++ mtk_nor_disable_clk(sp);
++
++ return 0;
++}
++
++static struct platform_driver mtk_nor_driver = {
++ .driver = {
++ .name = DRIVER_NAME,
++ .of_match_table = mtk_nor_match,
++ },
++ .probe = mtk_nor_probe,
++ .remove = mtk_nor_remove,
++};
++
++module_platform_driver(mtk_nor_driver);
++
++MODULE_DESCRIPTION("Mediatek SPI NOR controller driver");
++MODULE_AUTHOR("Chuanhong Guo <gch981213@gmail.com>");
++MODULE_LICENSE("GPL v2");
++MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0003-switch-add-mt7531.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0003-switch-add-mt7531.patch
new file mode 100644
index 0000000..6fae99c
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0003-switch-add-mt7531.patch
@@ -0,0 +1,19 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -329,6 +329,8 @@ config RTL8367B_PHY
+
+ endif # RTL8366_SMI
+
++source "drivers/net/phy/mtk/mt753x/Kconfig"
++
+ comment "MII PHY device drivers"
+
+ config SFP
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -109,3 +109,5 @@ obj-$(CONFIG_STE10XP) += ste10Xp.o
+ obj-$(CONFIG_TERANETICS_PHY) += teranetics.o
+ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+ obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
++obj-$(CONFIG_MT753X_GSW) += mtk/mt753x/
++
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0005-dts-mt7622-add-gsw.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0005-dts-mt7622-add-gsw.patch
new file mode 100644
index 0000000..d40cbfb
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0005-dts-mt7622-add-gsw.patch
@@ -0,0 +1,258 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -53,6 +53,13 @@
+ };
+ };
+
++ gsw: gsw@0 {
++ compatible = "mediatek,mt753x";
++ mediatek,ethsys = <ðsys>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
+ leds {
+ compatible = "gpio-leds";
+
+@@ -146,6 +153,36 @@
+ };
+ };
+
++&gsw {
++ mediatek,mdio = <&mdio>;
++ mediatek,portmap = "wllll";
++ mediatek,mdio_master_pinmux = <0>;
++ reset-gpios = <&pio 54 0>;
++ interrupt-parent = <&pio>;
++ interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++
++ port5: port@5 {
++ compatible = "mediatek,mt753x-port";
++ reg = <5>;
++ phy-mode = "rgmii";
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ };
++ };
++
++ port6: port@6 {
++ compatible = "mediatek,mt753x-port";
++ reg = <6>;
++ phy-mode = "sgmii";
++ fixed-link {
++ speed = <2500>;
++ full-duplex;
++ };
++ };
++};
++
+ &i2c1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins>;
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -1,7 +1,6 @@
+ /*
+- * Copyright (c) 2017 MediaTek Inc.
+- * Author: Ming Huang <ming.huang@mediatek.com>
+- * Sean Wang <sean.wang@mediatek.com>
++ * Copyright (c) 2018 MediaTek Inc.
++ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * SPDX-License-Identifier: (GPL-2.0 OR MIT)
+ */
+@@ -14,7 +13,7 @@
+ #include "mt6380.dtsi"
+
+ / {
+- model = "MediaTek MT7622 RFB1 board";
++ model = "MT7622_MT7531 RFB";
+ compatible = "mediatek,mt7622-rfb1", "mediatek,mt7622";
+
+ aliases {
+@@ -23,7 +22,7 @@
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+- bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
++ bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+ };
+
+ cpus {
+@@ -40,23 +39,36 @@
+
+ gpio-keys {
+ compatible = "gpio-keys";
+- poll-interval = <100>;
+
+ factory {
+ label = "factory";
+ linux,code = <BTN_0>;
+- gpios = <&pio 0 0>;
++ gpios = <&pio 0 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "wps";
+ linux,code = <KEY_WPS_BUTTON>;
+- gpios = <&pio 102 0>;
++ gpios = <&pio 102 GPIO_ACTIVE_LOW>;
++ };
++ };
++
++ leds {
++ compatible = "gpio-leds";
++
++ green {
++ label = "bpi-r64:pio:green";
++ gpios = <&pio 89 GPIO_ACTIVE_HIGH>;
++ };
++
++ red {
++ label = "bpi-r64:pio:red";
++ gpios = <&pio 88 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ memory {
+- reg = <0 0x40000000 0 0x20000000>;
++ reg = <0 0x40000000 0 0x40000000>;
+ };
+
+ reg_1p8v: regulator-1p8v {
+@@ -101,23 +113,82 @@
+ };
+
+ ð {
+- pinctrl-names = "default";
+- pinctrl-0 = <ð_pins>;
+ status = "okay";
++ gmac0: mac@0 {
++ compatible = "mediatek,eth-mac";
++ reg = <0>;
++ phy-mode = "2500base-x";
++
++ fixed-link {
++ speed = <2500>;
++ full-duplex;
++ pause;
++ };
++ };
+
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+- phy-handle = <&phy5>;
++ phy-mode = "rgmii";
++
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ pause;
++ };
+ };
+
+- mdio-bus {
++ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- phy5: ethernet-phy@5 {
+- reg = <5>;
+- phy-mode = "sgmii";
++ switch@0 {
++ compatible = "mediatek,mt7531";
++ reg = <0>;
++ reset-gpios = <&pio 54 0>;
++
++ ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ port@0 {
++ reg = <0>;
++ label = "lan1";
++ };
++
++ port@1 {
++ reg = <1>;
++ label = "lan2";
++ };
++
++ port@2 {
++ reg = <2>;
++ label = "lan3";
++ };
++
++ port@3 {
++ reg = <3>;
++ label = "lan4";
++ };
++
++ port@4 {
++ reg = <4>;
++ label = "wan";
++ };
++
++ port@6 {
++ reg = <6>;
++ label = "cpu";
++ ethernet = <&gmac0>;
++ phy-mode = "2500base-x";
++
++ fixed-link {
++ speed = <2500>;
++ full-duplex;
++ pause;
++ };
++ };
++ };
+ };
+ };
+ };
+@@ -185,15 +256,28 @@
+
+ &pcie {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pcie0_pins>;
++ pinctrl-0 = <&pcie0_pins>, <&pcie1_pins>;
+ status = "okay";
+
+ pcie@0,0 {
+ status = "okay";
+ };
++
++ pcie@1,0 {
++ status = "okay";
++ };
+ };
+
+ &pio {
++ /* Attention: GPIO 90 is used to switch between PCIe@1,0 and
++ * SATA functions. i.e. output-high: PCIe, output-low: SATA
++ */
++ asm_sel {
++ gpio-hog;
++ gpios = <90 GPIO_ACTIVE_HIGH>;
++ output-high;
++ };
++
+ /* eMMC is shared pin with parallel NAND */
+ emmc_pins_default: emmc-pins-default {
+ mux {
+@@ -460,11 +544,11 @@
+ };
+
+ &sata {
+- status = "okay";
++ status = "disable";
+ };
+
+ &sata_phy {
+- status = "okay";
++ status = "disable";
+ };
+
+ &spi0 {
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0005-dts-mt7629-add-gsw.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0005-dts-mt7629-add-gsw.patch
new file mode 100644
index 0000000..773a69f
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0005-dts-mt7629-add-gsw.patch
@@ -0,0 +1,67 @@
+--- a/arch/arm/boot/dts/mt7629-rfb.dts
++++ b/arch/arm/boot/dts/mt7629-rfb.dts
+@@ -18,6 +18,7 @@
+
+ chosen {
+ stdout-path = "serial0:115200n8";
++ bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n8";
+ };
+
+ gpio-keys {
+@@ -36,6 +37,13 @@
+ };
+ };
+
++ gsw: gsw@0 {
++ compatible = "mediatek,mt753x";
++ mediatek,ethsys = <ðsys>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0x40000000 0x10000000>;
+@@ -69,6 +77,7 @@
+ gmac0: mac@0 {
+ compatible = "mediatek,eth-mac";
+ reg = <0>;
++ mtd-mac-address = <&factory 0x2a>;
+ phy-mode = "2500base-x";
+ fixed-link {
+ speed = <2500>;
+@@ -80,6 +89,7 @@
+ gmac1: mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
++ mtd-mac-address = <&factory 0x24>;
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ };
+@@ -93,6 +103,26 @@
+ };
+ };
+ };
++
++&gsw {
++ mediatek,mdio = <&mdio>;
++ mediatek,portmap = "llllw";
++ mediatek,mdio_master_pinmux = <0>;
++ reset-gpios = <&pio 28 0>;
++ interrupt-parent = <&pio>;
++ interrupts = <6 IRQ_TYPE_LEVEL_HIGH>;
++ status = "okay";
++
++ port6: port@6 {
++ compatible = "mediatek,mt753x-port";
++ reg = <6>;
++ phy-mode = "sgmii";
++ fixed-link {
++ speed = <2500>;
++ full-duplex;
++ };
++ };
++};
+
+ &i2c {
+ pinctrl-names = "default";
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0006-dts-fix-bpi2-console.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0006-dts-fix-bpi2-console.patch
new file mode 100644
index 0000000..b725117
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0006-dts-fix-bpi2-console.patch
@@ -0,0 +1,10 @@
+--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
++++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+@@ -19,6 +19,7 @@
+
+ chosen {
+ stdout-path = "serial2:115200n8";
++ bootargs = "console=ttyS2,115200n8";
+ };
+
+ cpus {
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0006-dts-fix-bpi64-console.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0006-dts-fix-bpi64-console.patch
new file mode 100644
index 0000000..07a2eae
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0006-dts-fix-bpi64-console.patch
@@ -0,0 +1,11 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -22,7 +22,7 @@
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+- bootargs = "earlycon=uart8250,mmio32,0x11002000 swiotlb=512";
++ bootargs = "earlycon=uart8250,mmio32,0x11002000 console=ttyS0,115200n1 swiotlb=512";
+ };
+
+ cpus {
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0010-dts-mt7629-rfb-fix-firmware-partition.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0010-dts-mt7629-rfb-fix-firmware-partition.patch
new file mode 100644
index 0000000..5d0a19e
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0010-dts-mt7629-rfb-fix-firmware-partition.patch
@@ -0,0 +1,13 @@
+--- a/arch/arm/boot/dts/mt7629-rfb.dts
++++ b/arch/arm/boot/dts/mt7629-rfb.dts
+@@ -163,8 +163,9 @@
+ };
+
+ partition@b0000 {
+- label = "kernel";
++ label = "firmware";
+ reg = <0xb0000 0xb50000>;
++ compatible = "denx,fit";
+ };
+ };
+ };
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0020-dts-mt7622-enable-new-mtk-snand-for-ubi.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0020-dts-mt7622-enable-new-mtk-snand-for-ubi.patch
new file mode 100644
index 0000000..3a9e061
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0020-dts-mt7622-enable-new-mtk-snand-for-ubi.patch
@@ -0,0 +1,23 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -567,6 +567,20 @@
+ status = "disabled";
+ };
+
++ snand: snfi@1100d000 {
++ compatible = "mediatek,mt7622-snand";
++ reg = <0 0x1100d000 0 0x1000>, <0 0x1100e000 0 0x1000>;
++ reg-names = "nfi", "ecc";
++ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
++ clocks = <&pericfg CLK_PERI_NFI_PD>,
++ <&pericfg CLK_PERI_SNFI_PD>,
++ <&pericfg CLK_PERI_NFIECC_PD>;
++ clock-names = "nfi_clk", "pad_clk", "ecc_clk";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++ };
++
+ nor_flash: spi@11014000 {
+ compatible = "mediatek,mt7622-nor",
+ "mediatek,mt8173-nor";
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0021-dts-mt7622-remove-cooling-device.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0021-dts-mt7622-remove-cooling-device.patch
new file mode 100644
index 0000000..efcc14f
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0021-dts-mt7622-remove-cooling-device.patch
@@ -0,0 +1,31 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -167,25 +167,6 @@
+ };
+ };
+
+- cooling-maps {
+- map0 {
+- trip = <&cpu_passive>;
+- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+- };
+-
+- map1 {
+- trip = <&cpu_active>;
+- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+- };
+-
+- map2 {
+- trip = <&cpu_hot>;
+- cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
+- <&cpu1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
+- };
+- };
+ };
+ };
+
+--
+2.29.2
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0100-hwnat_Kconfig_Makefile.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0100-hwnat_Kconfig_Makefile.patch
new file mode 100755
index 0000000..e0ac7ab
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0100-hwnat_Kconfig_Makefile.patch
@@ -0,0 +1,33 @@
+--- a/net/Kconfig 2020-04-29 17:25:49.750444000 +0800
++++ b/net/Kconfig 2020-04-29 17:42:40.950424000 +0800
+@@ -451,6 +451,18 @@
+ migration of VMs with direct attached VFs by failing over to the
+ paravirtual datapath when the VF is unplugged.
+
++config HW_NAT
++ bool "HW NAT support"
++ default n
++ ---help---
++ This feature provides a fast path to support network lan/wan nat.
++ If you need hw_nat engine to reduce cpu loading, please say Y.
++
++ Note that the answer to this question doesn't directly affect the
++ kernel: saying N will just cause the configurator to skip all
++ the questions about Mediatek Ethernet devices. If you say Y,
++ you will be asked for your specific card in the following questions.
++
+ endif # if NET
+
+ # Used by archs to tell that they support BPF JIT compiler plus which flavour.
+--- a/net/Makefile 2020-04-23 16:36:46.000000000 +0800
++++ b/net/Makefile 2020-04-29 17:42:58.106487000 +0800
+@@ -62,6 +62,9 @@
+ obj-$(CONFIG_6LOWPAN) += 6lowpan/
+ obj-$(CONFIG_IEEE802154) += ieee802154/
+ obj-$(CONFIG_MAC802154) += mac802154/
++ifeq ($(CONFIG_HW_NAT),y)
++obj-y += nat/foe_hook/
++endif
+
+ ifeq ($(CONFIG_NET),y)
+ obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0101-add-mtk-wifi-utility-rbus.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0101-add-mtk-wifi-utility-rbus.patch
new file mode 100644
index 0000000..211324b
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0101-add-mtk-wifi-utility-rbus.patch
@@ -0,0 +1,11 @@
+diff -urN a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
+--- a/drivers/net/wireless/Makefile 2020-05-08 12:16:50.030922777 +0800
++++ b/drivers/net/wireless/Makefile 2020-05-08 12:16:55.718755223 +0800
+@@ -12,6 +12,7 @@
+ obj-$(CONFIG_WLAN_VENDOR_INTERSIL) += intersil/
+ obj-$(CONFIG_WLAN_VENDOR_MARVELL) += marvell/
+ obj-$(CONFIG_WLAN_VENDOR_MEDIATEK) += mediatek/
++obj-y += wifi_utility/
+ obj-$(CONFIG_WLAN_VENDOR_RALINK) += ralink/
+ obj-$(CONFIG_WLAN_VENDOR_REALTEK) += realtek/
+ obj-$(CONFIG_WLAN_VENDOR_RSI) += rsi/
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0111-mt7986-trng-add-rng-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0111-mt7986-trng-add-rng-support.patch
new file mode 100644
index 0000000..1b132a3
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0111-mt7986-trng-add-rng-support.patch
@@ -0,0 +1,46 @@
+From 6d4a858d6f7db2a86f6513a543feb8f7b8a8b4c1 Mon Sep 17 00:00:00 2001
+From: "Mingming.Su" <Mingming.Su@mediatek.com>
+Date: Wed, 30 Jun 2021 16:59:32 +0800
+Subject: [PATCH] mt7986: trng: add rng support
+
+1. Add trng compatible name for MT7986
+2. Fix mtk_rng_wait_ready() function
+
+Signed-off-by: Mingming.Su <Mingming.Su@mediatek.com>
+---
+ drivers/char/hw_random/mtk-rng.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
+index e649be5a5..496adb0a0 100644
+--- a/drivers/char/hw_random/mtk-rng.c
++++ b/drivers/char/hw_random/mtk-rng.c
+@@ -22,7 +22,7 @@
+ #define RNG_AUTOSUSPEND_TIMEOUT 100
+
+ #define USEC_POLL 2
+-#define TIMEOUT_POLL 20
++#define TIMEOUT_POLL 60
+
+ #define RNG_CTRL 0x00
+ #define RNG_EN BIT(0)
+@@ -77,7 +77,7 @@ static bool mtk_rng_wait_ready(struct hwrng *rng, bool wait)
+ readl_poll_timeout_atomic(priv->base + RNG_CTRL, ready,
+ ready & RNG_READY, USEC_POLL,
+ TIMEOUT_POLL);
+- return !!ready;
++ return !!(ready & RNG_READY);
+ }
+
+ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+@@ -181,6 +181,7 @@ static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend,
+ #endif /* CONFIG_PM */
+
+ static const struct of_device_id mtk_rng_match[] = {
++ { .compatible = "mediatek,mt7986-rng" },
+ { .compatible = "mediatek,mt7623-rng" },
+ {},
+ };
+--
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0200-show_model_name_in_cpuinfo_on_arm64.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0200-show_model_name_in_cpuinfo_on_arm64.patch
new file mode 100644
index 0000000..98e5ab6
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0200-show_model_name_in_cpuinfo_on_arm64.patch
@@ -0,0 +1,16 @@
+Index: linux-5.4.70/arch/arm64/kernel/cpuinfo.c
+===================================================================
+--- linux-5.4.70.orig/arch/arm64/kernel/cpuinfo.c
++++ linux-5.4.70/arch/arm64/kernel/cpuinfo.c
+@@ -139,9 +139,8 @@ static int c_show(struct seq_file *m, vo
+ * "processor". Give glibc what it expects.
+ */
+ seq_printf(m, "processor\t: %d\n", i);
+- if (compat)
+- seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
+- MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
++ seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n",
++ MIDR_REVISION(midr), COMPAT_ELF_PLATFORM);
+
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000UL/HZ),
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0226-phy-phy-mtk-tphy-Add-hifsys-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0226-phy-phy-mtk-tphy-Add-hifsys-support.patch
new file mode 100644
index 0000000..f2647e8
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0226-phy-phy-mtk-tphy-Add-hifsys-support.patch
@@ -0,0 +1,66 @@
+From 28f9a5e2a3f5441ab5594669ed82da11e32277a9 Mon Sep 17 00:00:00 2001
+From: Kristian Evensen <kristian.evensen@gmail.com>
+Date: Mon, 30 Apr 2018 14:38:01 +0200
+Subject: [PATCH] phy: phy-mtk-tphy: Add hifsys-support
+
+---
+ drivers/phy/mediatek/phy-mtk-tphy.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/drivers/phy/mediatek/phy-mtk-tphy.c
++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
+@@ -15,6 +15,8 @@
+ #include <linux/of_device.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
+
+ /* version V1 sub-banks offset base address */
+ /* banks shared by multiple phys */
+@@ -263,6 +265,9 @@
+ #define RG_CDR_BIRLTD0_GEN3_MSK GENMASK(4, 0)
+ #define RG_CDR_BIRLTD0_GEN3_VAL(x) (0x1f & (x))
+
++#define HIF_SYSCFG1 0x14
++#define HIF_SYSCFG1_PHY2_MASK (0x3 << 20)
++
+ enum mtk_phy_version {
+ MTK_PHY_V1 = 1,
+ MTK_PHY_V2,
+@@ -310,6 +315,7 @@ struct mtk_tphy {
+ struct clk *u3phya_ref; /* reference clock of usb3 anolog phy */
+ const struct mtk_phy_pdata *pdata;
+ struct mtk_phy_instance **phys;
++ struct regmap *hif;
+ int nphys;
+ int src_ref_clk; /* MHZ, reference clock for slew rate calibrate */
+ int src_coef; /* coefficient for slew rate calibrate */
+@@ -629,6 +635,10 @@ static void pcie_phy_instance_init(struc
+ if (tphy->pdata->version != MTK_PHY_V1)
+ return;
+
++ if (tphy->hif)
++ regmap_update_bits(tphy->hif, HIF_SYSCFG1,
++ HIF_SYSCFG1_PHY2_MASK, 0);
++
+ tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0);
+ tmp &= ~(P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H);
+ tmp |= P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2);
+@@ -1114,6 +1124,16 @@ static int mtk_tphy_probe(struct platfor
+ &tphy->src_ref_clk);
+ device_property_read_u32(dev, "mediatek,src-coef", &tphy->src_coef);
+
++ if (of_find_property(np, "mediatek,phy-switch", NULL)) {
++ tphy->hif = syscon_regmap_lookup_by_phandle(np,
++ "mediatek,phy-switch");
++ if (IS_ERR(tphy->hif)) {
++ dev_err(&pdev->dev,
++ "missing \"mediatek,phy-switch\" phandle\n");
++ return PTR_ERR(tphy->hif);
++ }
++ }
++
+ port = 0;
+ for_each_child_of_node(np, child_np) {
+ struct mtk_phy_instance *instance;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0227-arm-dts-Add-Unielec-U7623-DTS.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0227-arm-dts-Add-Unielec-U7623-DTS.patch
new file mode 100644
index 0000000..3cb1dab
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0227-arm-dts-Add-Unielec-U7623-DTS.patch
@@ -0,0 +1,387 @@
+From 004eb24e939b5b31f828333f37fb5cb2a877d6f2 Mon Sep 17 00:00:00 2001
+From: Kristian Evensen <kristian.evensen@gmail.com>
+Date: Sun, 17 Jun 2018 14:41:47 +0200
+Subject: [PATCH] arm: dts: Add Unielec U7623 DTS
+
+---
+ arch/arm/boot/dts/Makefile | 1 +
+ .../dts/mt7623a-unielec-u7623-02-emmc-512m.dts | 18 +
+ .../boot/dts/mt7623a-unielec-u7623-02-emmc.dtsi | 366 +++++++++++++++++++++
+ 3 files changed, 385 insertions(+)
+ create mode 100644 arch/arm/boot/dts/mt7623a-unielec-u7623-02-emmc-512m.dts
+ create mode 100644 arch/arm/boot/dts/mt7623a-unielec-u7623-02-emmc.dtsi
+
+--- a/arch/arm/boot/dts/Makefile
++++ b/arch/arm/boot/dts/Makefile
+@@ -1272,6 +1272,7 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += \
+ mt7623a-rfb-nand.dtb \
+ mt7623n-rfb-emmc.dtb \
+ mt7623n-bananapi-bpi-r2.dtb \
++ mt7623a-unielec-u7623-02-emmc-512m.dtb \
+ mt7629-rfb.dtb \
+ mt8127-moose.dtb \
+ mt8135-evbp1.dtb
+--- /dev/null
++++ b/arch/arm/boot/dts/mt7623a-unielec-u7623-02-emmc-512m.dts
+@@ -0,0 +1,18 @@
++/*
++ * Copyright 2018 Kristian Evensen <kristian.evensen@gmail.com>
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++ */
++
++/dts-v1/;
++#include "mt7623a-unielec-u7623-02-emmc.dtsi"
++
++/ {
++ model = "UniElec U7623-02 eMMC (512M RAM)";
++ compatible = "unielec,u7623-02-emmc-512m", "unielec,u7623-02-emmc", "mediatek,mt7623";
++
++ memory@80000000 {
++ device_type = "memory";
++ reg = <0 0x80000000 0 0x20000000>;
++ };
++};
+--- /dev/null
++++ b/arch/arm/boot/dts/mt7623a-unielec-u7623-02-emmc.dtsi
+@@ -0,0 +1,340 @@
++/*
++ * Copyright 2018 Kristian Evensen <kristian.evensen@gmail.com>
++ *
++ * SPDX-License-Identifier: (GPL-2.0+ OR MIT)
++ */
++
++#include <dt-bindings/input/input.h>
++#include "mt7623.dtsi"
++#include "mt6323.dtsi"
++
++/ {
++ compatible = "unielec,u7623-02-emmc", "mediatek,mt7623";
++
++ aliases {
++ serial2 = &uart2;
++ };
++
++ chosen {
++ bootargs = "root=/dev/mmcblk0p2 rootfstype=squashfs,f2fs console=ttyS0,115200 blkdevparts=mmcblk0:3M@6M(recovery),256M@9M(root)";
++ stdout-path = "serial2:115200n8";
++ };
++
++ cpus {
++ cpu@0 {
++ proc-supply = <&mt6323_vproc_reg>;
++ };
++
++ cpu@1 {
++ proc-supply = <&mt6323_vproc_reg>;
++ };
++
++ cpu@2 {
++ proc-supply = <&mt6323_vproc_reg>;
++ };
++
++ cpu@3 {
++ proc-supply = <&mt6323_vproc_reg>;
++ };
++ };
++
++ reg_1p8v: regulator-1p8v {
++ compatible = "regulator-fixed";
++ regulator-name = "fixed-1.8V";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <1800000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ reg_3p3v: regulator-3p3v {
++ compatible = "regulator-fixed";
++ regulator-name = "fixed-3.3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ reg_5v: regulator-5v {
++ compatible = "regulator-fixed";
++ regulator-name = "fixed-5V";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ gpio-keys {
++ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&key_pins_a>;
++
++ factory {
++ label = "factory";
++ linux,code = <KEY_RESTART>;
++ gpios = <&pio 256 GPIO_ACTIVE_LOW>;
++ };
++ };
++
++ leds {
++ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&led_pins_unielec>;
++
++ led3 {
++ label = "u7623-01:green:led3";
++ gpios = <&pio 14 GPIO_ACTIVE_LOW>;
++ };
++
++ led4 {
++ label = "u7623-01:green:led4";
++ gpios = <&pio 15 GPIO_ACTIVE_LOW>;
++ };
++ };
++};
++
++&crypto {
++ status = "okay";
++};
++
++ð {
++ status = "okay";
++
++ gmac0: mac@0 {
++ compatible = "mediatek,eth-mac";
++ reg = <0>;
++ phy-mode = "trgmii";
++
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ pause;
++ };
++ };
++
++ mdio: mdio-bus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ mt7530: switch@0 {
++ compatible = "mediatek,mt7530";
++ };
++ };
++};
++
++&mt7530 {
++ compatible = "mediatek,mt7530";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0>;
++ pinctrl-names = "default";
++ mediatek,mcm;
++ resets = <ðsys 2>;
++ reset-names = "mcm";
++ core-supply = <&mt6323_vpa_reg>;
++ io-supply = <&mt6323_vemc3v3_reg>;
++
++ dsa,mii-bus = <&mdio>;
++
++ ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0>;
++
++ port@0 {
++ reg = <0>;
++ label = "lan0";
++ cpu = <&cpu_port0>;
++ };
++
++ port@1 {
++ reg = <1>;
++ label = "lan1";
++ cpu = <&cpu_port0>;
++ };
++
++ port@2 {
++ reg = <2>;
++ label = "lan2";
++ cpu = <&cpu_port0>;
++ };
++
++ port@3 {
++ reg = <3>;
++ label = "lan3";
++ cpu = <&cpu_port0>;
++ };
++
++ port@4 {
++ reg = <4>;
++ label = "wan";
++ cpu = <&cpu_port0>;
++ };
++
++ cpu_port0: port@6 {
++ reg = <6>;
++ label = "cpu";
++ ethernet = <&gmac0>;
++ phy-mode = "trgmii";
++
++ fixed-link {
++ speed = <1000>;
++ full-duplex;
++ };
++ };
++ };
++};
++
++&mmc0 {
++ pinctrl-names = "default", "state_uhs";
++ pinctrl-0 = <&mmc0_pins_default>;
++ pinctrl-1 = <&mmc0_pins_uhs>;
++ status = "okay";
++ bus-width = <8>;
++ max-frequency = <50000000>;
++ cap-mmc-highspeed;
++ vmmc-supply = <®_3p3v>;
++ vqmmc-supply = <®_1p8v>;
++ non-removable;
++};
++
++&pio {
++ key_pins_a: keys-alt {
++ pins-keys {
++ pinmux = <MT7623_PIN_256_GPIO256_FUNC_GPIO256>,
++ <MT7623_PIN_257_GPIO257_FUNC_GPIO257>;
++ input-enable;
++ };
++ };
++
++ led_pins_unielec: leds-unielec {
++ pins-leds {
++ pinmux = <MT7623_PIN_14_GPIO14_FUNC_GPIO14>,
++ <MT7623_PIN_15_GPIO15_FUNC_GPIO15>;
++ };
++ };
++
++ mmc0_pins_default: mmc0default {
++ pins_cmd_dat {
++ pinmux = <MT7623_PIN_111_MSDC0_DAT7_FUNC_MSDC0_DAT7>,
++ <MT7623_PIN_112_MSDC0_DAT6_FUNC_MSDC0_DAT6>,
++ <MT7623_PIN_113_MSDC0_DAT5_FUNC_MSDC0_DAT5>,
++ <MT7623_PIN_114_MSDC0_DAT4_FUNC_MSDC0_DAT4>,
++ <MT7623_PIN_118_MSDC0_DAT3_FUNC_MSDC0_DAT3>,
++ <MT7623_PIN_119_MSDC0_DAT2_FUNC_MSDC0_DAT2>,
++ <MT7623_PIN_120_MSDC0_DAT1_FUNC_MSDC0_DAT1>,
++ <MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0>,
++ <MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD>;
++ input-enable;
++ bias-pull-up;
++ };
++
++ pins_clk {
++ pinmux = <MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK>;
++ bias-pull-down;
++ };
++
++ pins_rst {
++ pinmux = <MT7623_PIN_115_MSDC0_RSTB_FUNC_MSDC0_RSTB>;
++ bias-pull-up;
++ };
++ };
++
++ mmc0_pins_uhs: mmc0 {
++ pins_cmd_dat {
++ pinmux = <MT7623_PIN_111_MSDC0_DAT7_FUNC_MSDC0_DAT7>,
++ <MT7623_PIN_112_MSDC0_DAT6_FUNC_MSDC0_DAT6>,
++ <MT7623_PIN_113_MSDC0_DAT5_FUNC_MSDC0_DAT5>,
++ <MT7623_PIN_114_MSDC0_DAT4_FUNC_MSDC0_DAT4>,
++ <MT7623_PIN_118_MSDC0_DAT3_FUNC_MSDC0_DAT3>,
++ <MT7623_PIN_119_MSDC0_DAT2_FUNC_MSDC0_DAT2>,
++ <MT7623_PIN_120_MSDC0_DAT1_FUNC_MSDC0_DAT1>,
++ <MT7623_PIN_121_MSDC0_DAT0_FUNC_MSDC0_DAT0>,
++ <MT7623_PIN_116_MSDC0_CMD_FUNC_MSDC0_CMD>;
++ input-enable;
++ drive-strength = <MTK_DRIVE_2mA>;
++ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
++ };
++
++ pins_clk {
++ pinmux = <MT7623_PIN_117_MSDC0_CLK_FUNC_MSDC0_CLK>;
++ drive-strength = <MTK_DRIVE_2mA>;
++ bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
++ };
++
++ pins_rst {
++ pinmux = <MT7623_PIN_115_MSDC0_RSTB_FUNC_MSDC0_RSTB>;
++ bias-pull-up;
++ };
++ };
++
++ pcie_default: pcie_pin_default {
++ pins_cmd_dat {
++ pinmux = <MT7623_PIN_208_AUD_EXT_CK1_FUNC_PCIE0_PERST_N>,
++ <MT7623_PIN_209_AUD_EXT_CK2_FUNC_PCIE1_PERST_N>;
++ bias-disable;
++ };
++ };
++};
++
++&pwm {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pwm_pins_a>;
++ status = "okay";
++};
++
++&pwrap {
++ mt6323 {
++ mt6323led: led {
++ compatible = "mediatek,mt6323-led";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ led@0 {
++ reg = <0>;
++ label = "led0";
++ };
++ };
++ };
++};
++
++&uart2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&uart2_pins_b>;
++ status = "okay";
++};
++
++&usb1 {
++ vusb33-supply = <®_3p3v>;
++ vbus-supply = <®_3p3v>;
++ status = "okay";
++};
++
++&u3phy1 {
++ status = "okay";
++};
++
++&u3phy2 {
++ status = "okay";
++ mediatek,phy-switch = <&hifsys>;
++};
++
++&pcie {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pcie_default>;
++ status = "okay";
++
++ pcie@1,0 {
++ status = "okay";
++ };
++
++ pcie@2,0 {
++ status = "okay";
++ };
++};
++
++&pcie1_phy {
++ status = "okay";
++};
++
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0301-mtd-mtk-ecc-move-mtk-ecc-header-file-to-include-mtd.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0301-mtd-mtk-ecc-move-mtk-ecc-header-file-to-include-mtd.patch
new file mode 100644
index 0000000..d9ab339
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0301-mtd-mtk-ecc-move-mtk-ecc-header-file-to-include-mtd.patch
@@ -0,0 +1,139 @@
+From a2479dc254ebe31c84fbcfda73f35e2321576494 Mon Sep 17 00:00:00 2001
+From: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+Date: Tue, 19 Mar 2019 13:57:38 +0800
+Subject: [PATCH 1/6] mtd: mtk ecc: move mtk ecc header file to include/mtd
+
+Change-Id: I8dc1d30e21b40d68ef5efd9587012f82970156a5
+Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+---
+ drivers/mtd/nand/raw/mtk_ecc.c | 3 +--
+ drivers/mtd/nand/raw/mtk_nand.c | 2 +-
+ {drivers/mtd/nand/raw => include/linux/mtd}/mtk_ecc.h | 0
+ 3 files changed, 2 insertions(+), 3 deletions(-)
+ rename {drivers/mtd/nand/raw => include/linux/mtd}/mtk_ecc.h (100%)
+
+--- a/drivers/mtd/nand/raw/mtk_ecc.c
++++ b/drivers/mtd/nand/raw/mtk_ecc.c
+@@ -15,8 +15,7 @@
+ #include <linux/of.h>
+ #include <linux/of_platform.h>
+ #include <linux/mutex.h>
+-
+-#include "mtk_ecc.h"
++#include <linux/mtd/mtk_ecc.h>
+
+ #define ECC_IDLE_MASK BIT(0)
+ #define ECC_IRQ_EN BIT(0)
+--- a/drivers/mtd/nand/raw/mtk_nand.c
++++ b/drivers/mtd/nand/raw/mtk_nand.c
+@@ -17,7 +17,7 @@
+ #include <linux/iopoll.h>
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+-#include "mtk_ecc.h"
++#include <linux/mtd/mtk_ecc.h>
+
+ /* NAND controller register definition */
+ #define NFI_CNFG (0x00)
+--- /dev/null
++++ b/include/linux/mtd/mtk_ecc.h
+@@ -0,0 +1,49 @@
++/*
++ * MTK SDG1 ECC controller
++ *
++ * Copyright (c) 2016 Mediatek
++ * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
++ * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation.
++ */
++
++#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
++#define __DRIVERS_MTD_NAND_MTK_ECC_H__
++
++#include <linux/types.h>
++
++enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
++enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
++
++struct device_node;
++struct mtk_ecc;
++
++struct mtk_ecc_stats {
++ u32 corrected;
++ u32 bitflips;
++ u32 failed;
++};
++
++struct mtk_ecc_config {
++ enum mtk_ecc_operation op;
++ enum mtk_ecc_mode mode;
++ dma_addr_t addr;
++ u32 strength;
++ u32 sectors;
++ u32 len;
++};
++
++int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
++void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
++int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
++int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
++void mtk_ecc_disable(struct mtk_ecc *);
++void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
++unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
++
++struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
++void mtk_ecc_release(struct mtk_ecc *);
++
++#endif
+--- a/drivers/mtd/nand/raw/mtk_ecc.h
++++ /dev/null
+@@ -1,47 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+-/*
+- * MTK SDG1 ECC controller
+- *
+- * Copyright (c) 2016 Mediatek
+- * Authors: Xiaolei Li <xiaolei.li@mediatek.com>
+- * Jorge Ramirez-Ortiz <jorge.ramirez-ortiz@linaro.org>
+- */
+-
+-#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+-#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+-
+-#include <linux/types.h>
+-
+-enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+-enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+-
+-struct device_node;
+-struct mtk_ecc;
+-
+-struct mtk_ecc_stats {
+- u32 corrected;
+- u32 bitflips;
+- u32 failed;
+-};
+-
+-struct mtk_ecc_config {
+- enum mtk_ecc_operation op;
+- enum mtk_ecc_mode mode;
+- dma_addr_t addr;
+- u32 strength;
+- u32 sectors;
+- u32 len;
+-};
+-
+-int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+-void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+-int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+-int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+-void mtk_ecc_disable(struct mtk_ecc *);
+-void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p);
+-unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc);
+-
+-struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+-void mtk_ecc_release(struct mtk_ecc *);
+-
+-#endif
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0306-spi-spi-mem-MediaTek-Add-SPI-NAND-Flash-interface-dr.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0306-spi-spi-mem-MediaTek-Add-SPI-NAND-Flash-interface-dr.patch
new file mode 100644
index 0000000..b3672e5
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0306-spi-spi-mem-MediaTek-Add-SPI-NAND-Flash-interface-dr.patch
@@ -0,0 +1,1246 @@
+From 1ecb38eabd90efe93957d0a822a167560c39308a Mon Sep 17 00:00:00 2001
+From: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+Date: Wed, 20 Mar 2019 16:19:51 +0800
+Subject: [PATCH 6/6] spi: spi-mem: MediaTek: Add SPI NAND Flash interface
+ driver for MediaTek MT7622
+
+Change-Id: I3e78406bb9b46b0049d3988a5c71c7069e4f809c
+Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+---
+ drivers/spi/Kconfig | 9 +
+ drivers/spi/Makefile | 1 +
+ drivers/spi/spi-mtk-snfi.c | 1183 ++++++++++++++++++++++++++++++++++++
+ 3 files changed, 1193 insertions(+)
+ create mode 100644 drivers/spi/spi-mtk-snfi.c
+
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -60,6 +60,7 @@ obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mp
+ obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
+ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+ obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
++obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
+ obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
+ obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
+ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -427,6 +427,15 @@ config SPI_MT65XX
+ say Y or M here.If you are not sure, say N.
+ SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
+
++config SPI_MTK_SNFI
++ tristate "MediaTek SPI NAND interface"
++ select MTD_SPI_NAND
++ help
++ This selects the SPI NAND Flash interface (SNFI),
++ which can be found on MediaTek SoCs.
++ Say Y or M here. If you are not sure, say N.
++ Note that parallel NAND and SPI NAND are mutually exclusive on MediaTek SoCs.
++
+ config SPI_MT7621
+ tristate "MediaTek MT7621 SPI Controller"
+ depends on RALINK || COMPILE_TEST
+--- /dev/null
++++ b/drivers/spi/spi-mtk-snfi.c
+@@ -0,0 +1,1200 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Driver for MediaTek SPI Nand interface
++ *
++ * Copyright (C) 2018 MediaTek Inc.
++ * Authors: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
++ *
++ */
++
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/iopoll.h>
++#include <linux/mtd/mtd.h>
++#include <linux/mtd/mtk_ecc.h>
++#include <linux/mtd/spinand.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/spi-mem.h>
++
++/* NAND controller register definition */
++/* NFI control */
++#define NFI_CNFG 0x00
++#define CNFG_DMA BIT(0)
++#define CNFG_READ_EN BIT(1)
++#define CNFG_DMA_BURST_EN BIT(2)
++#define CNFG_BYTE_RW BIT(6)
++#define CNFG_HW_ECC_EN BIT(8)
++#define CNFG_AUTO_FMT_EN BIT(9)
++#define CNFG_OP_PROGRAM (3UL << 12)
++#define CNFG_OP_CUST (6UL << 12)
++#define NFI_PAGEFMT 0x04
++#define PAGEFMT_512 0
++#define PAGEFMT_2K 1
++#define PAGEFMT_4K 2
++#define PAGEFMT_FDM_SHIFT 8
++#define PAGEFMT_FDM_ECC_SHIFT 12
++#define NFI_CON 0x08
++#define CON_FIFO_FLUSH BIT(0)
++#define CON_NFI_RST BIT(1)
++#define CON_BRD BIT(8)
++#define CON_BWR BIT(9)
++#define CON_SEC_SHIFT 12
++#define NFI_INTR_EN 0x10
++#define INTR_AHB_DONE_EN BIT(6)
++#define NFI_INTR_STA 0x14
++#define NFI_CMD 0x20
++#define NFI_STA 0x60
++#define STA_EMP_PAGE BIT(12)
++#define NAND_FSM_MASK (0x1f << 24)
++#define NFI_FSM_MASK (0xf << 16)
++#define NFI_ADDRCNTR 0x70
++#define CNTR_MASK GENMASK(16, 12)
++#define ADDRCNTR_SEC_SHIFT 12
++#define ADDRCNTR_SEC(val) \
++ (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
++#define NFI_STRADDR 0x80
++#define NFI_BYTELEN 0x84
++#define NFI_CSEL 0x90
++#define NFI_FDML(x) (0xa0 + (x) * sizeof(u32) * 2)
++#define NFI_FDMM(x) (0xa4 + (x) * sizeof(u32) * 2)
++#define NFI_MASTER_STA 0x224
++#define MASTER_STA_MASK 0x0fff
++/* NFI_SPI control */
++#define SNFI_MAC_OUTL 0x504
++#define SNFI_MAC_INL 0x508
++#define SNFI_RD_CTL2 0x510
++#define RD_CMD_MASK 0x00ff
++#define RD_DUMMY_SHIFT 8
++#define SNFI_RD_CTL3 0x514
++#define RD_ADDR_MASK 0xffff
++#define SNFI_MISC_CTL 0x538
++#define RD_MODE_X2 BIT(16)
++#define RD_MODE_X4 (2UL << 16)
++#define RD_QDUAL_IO (4UL << 16)
++#define RD_MODE_MASK (7UL << 16)
++#define RD_CUSTOM_EN BIT(6)
++#define WR_CUSTOM_EN BIT(7)
++#define WR_X4_EN BIT(20)
++#define SW_RST BIT(28)
++#define SNFI_MISC_CTL2 0x53c
++#define WR_LEN_SHIFT 16
++#define SNFI_PG_CTL1 0x524
++#define WR_LOAD_CMD_SHIFT 8
++#define SNFI_PG_CTL2 0x528
++#define WR_LOAD_ADDR_MASK 0xffff
++#define SNFI_MAC_CTL 0x500
++#define MAC_WIP BIT(0)
++#define MAC_WIP_READY BIT(1)
++#define MAC_TRIG BIT(2)
++#define MAC_EN BIT(3)
++#define MAC_SIO_SEL BIT(4)
++#define SNFI_STA_CTL1 0x550
++#define SPI_STATE_IDLE 0xf
++#define SNFI_CNFG 0x55c
++#define SNFI_MODE_EN BIT(0)
++#define SNFI_GPRAM_DATA 0x800
++#define SNFI_GPRAM_MAX_LEN 16
++
++/* Dummy commands used to switch the NFI into SPI mode */
++#define NAND_CMD_DUMMYREAD 0x00
++#define NAND_CMD_DUMMYPROG 0x80
++
++#define MTK_TIMEOUT 500000
++#define MTK_RESET_TIMEOUT 1000000
++#define MTK_SNFC_MIN_SPARE 16
++#define KB(x) ((x) * 1024UL)
++
++/*
++ * Supported spare sizes of each IP.
++ * The order should match the spare size bitfield definition of
++ * the NFI_PAGEFMT register.
++ */
++static const u8 spare_size_mt7622[] = {
++ 16, 26, 27, 28
++};
++
++struct mtk_snfi_caps {
++ const u8 *spare_size;
++ u8 num_spare_size;
++ u32 nand_sec_size;
++ u8 nand_fdm_size;
++ u8 nand_fdm_ecc_size;
++ u8 ecc_parity_bits;
++ u8 pageformat_spare_shift;
++ u8 bad_mark_swap;
++};
++
++struct mtk_snfi_bad_mark_ctl {
++ void (*bm_swap)(struct spi_mem *mem, u8 *buf, int raw);
++ u32 sec;
++ u32 pos;
++};
++
++struct mtk_snfi_nand_chip {
++ struct mtk_snfi_bad_mark_ctl bad_mark;
++ u32 spare_per_sector;
++};
++
++struct mtk_snfi_clk {
++ struct clk *nfi_clk;
++ struct clk *spi_clk;
++};
++
++struct mtk_snfi {
++ const struct mtk_snfi_caps *caps;
++ struct mtk_snfi_nand_chip snfi_nand;
++ struct mtk_snfi_clk clk;
++ struct mtk_ecc_config ecc_cfg;
++ struct mtk_ecc *ecc;
++ struct completion done;
++ struct device *dev;
++
++ void __iomem *regs;
++
++ u8 *buffer;
++};
++
++static inline u8 *oob_ptr(struct spi_mem *mem, int i)
++{
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++ u8 *poi;
++
++ /* Map the sector's FDM data to the free OOB area:
++ * the beginning of the OOB area stores the FDM data of the bad-mark sector.
++ */
++
++ if (i < snfi_nand->bad_mark.sec)
++ poi = spinand->oobbuf + (i + 1) * snfi->caps->nand_fdm_size;
++ else if (i == snfi_nand->bad_mark.sec)
++ poi = spinand->oobbuf;
++ else
++ poi = spinand->oobbuf + i * snfi->caps->nand_fdm_size;
++
++ return poi;
++}
++
++static inline int mtk_data_len(struct spi_mem *mem)
++{
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++
++ return snfi->caps->nand_sec_size + snfi_nand->spare_per_sector;
++}
++
++static inline u8 *mtk_oob_ptr(struct spi_mem *mem,
++ const u8 *p, int i)
++{
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++
++ return (u8 *)p + i * mtk_data_len(mem) + snfi->caps->nand_sec_size;
++}
++
++static void mtk_snfi_bad_mark_swap(struct spi_mem *mem,
++ u8 *buf, int raw)
++{
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++ u32 bad_pos = snfi_nand->bad_mark.pos;
++
++ if (raw)
++ bad_pos += snfi_nand->bad_mark.sec * mtk_data_len(mem);
++ else
++ bad_pos += snfi_nand->bad_mark.sec * snfi->caps->nand_sec_size;
++
++ swap(spinand->oobbuf[0], buf[bad_pos]);
++}
++
++static void mtk_snfi_set_bad_mark_ctl(struct mtk_snfi_bad_mark_ctl *bm_ctl,
++ struct spi_mem *mem)
++{
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++
++ bm_ctl->bm_swap = mtk_snfi_bad_mark_swap;
++ bm_ctl->sec = mtd->writesize / mtk_data_len(mem);
++ bm_ctl->pos = mtd->writesize % mtk_data_len(mem);
++}
++
++static void mtk_snfi_mac_enable(struct mtk_snfi *snfi)
++{
++ u32 mac;
++
++ mac = readl(snfi->regs + SNFI_MAC_CTL);
++ mac &= ~MAC_SIO_SEL;
++ mac |= MAC_EN;
++
++ writel(mac, snfi->regs + SNFI_MAC_CTL);
++}
++
++static int mtk_snfi_mac_trigger(struct mtk_snfi *snfi)
++{
++ u32 mac, reg;
++ int ret = 0;
++
++ mac = readl(snfi->regs + SNFI_MAC_CTL);
++ mac |= MAC_TRIG;
++ writel(mac, snfi->regs + SNFI_MAC_CTL);
++
++ ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
++ reg & MAC_WIP_READY, 10,
++ MTK_TIMEOUT);
++ if (ret < 0) {
++ dev_err(snfi->dev, "polling wip ready for read timeout\n");
++ return -EIO;
++ }
++
++ ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL, reg,
++ !(reg & MAC_WIP), 10,
++ MTK_TIMEOUT);
++ if (ret < 0) {
++ dev_err(snfi->dev, "polling flash update timeout\n");
++ return -EIO;
++ }
++
++ return ret;
++}
++
++static void mtk_snfi_mac_leave(struct mtk_snfi *snfi)
++{
++ u32 mac;
++
++ mac = readl(snfi->regs + SNFI_MAC_CTL);
++ mac &= ~(MAC_TRIG | MAC_EN | MAC_SIO_SEL);
++ writel(mac, snfi->regs + SNFI_MAC_CTL);
++}
++
++static int mtk_snfi_mac_op(struct mtk_snfi *snfi)
++{
++ int ret = 0;
++
++ mtk_snfi_mac_enable(snfi);
++
++ ret = mtk_snfi_mac_trigger(snfi);
++ if (ret)
++ return ret;
++
++ mtk_snfi_mac_leave(snfi);
++
++ return ret;
++}
++
++static irqreturn_t mtk_snfi_irq(int irq, void *id)
++{
++ struct mtk_snfi *snfi = id;
++ u16 sta, ien;
++
++ sta = readw(snfi->regs + NFI_INTR_STA);
++ ien = readw(snfi->regs + NFI_INTR_EN);
++
++ if (!(sta & ien))
++ return IRQ_NONE;
++
++ writew(~sta & ien, snfi->regs + NFI_INTR_EN);
++ complete(&snfi->done);
++
++ return IRQ_HANDLED;
++}
++
++static int mtk_snfi_enable_clk(struct device *dev, struct mtk_snfi_clk *clk)
++{
++ int ret;
++
++ ret = clk_prepare_enable(clk->nfi_clk);
++ if (ret) {
++ dev_err(dev, "failed to enable nfi clk\n");
++ return ret;
++ }
++
++ ret = clk_prepare_enable(clk->spi_clk);
++ if (ret) {
++ dev_err(dev, "failed to enable spi clk\n");
++ clk_disable_unprepare(clk->nfi_clk);
++ return ret;
++ }
++
++ return 0;
++}
++
++static void mtk_snfi_disable_clk(struct mtk_snfi_clk *clk)
++{
++ clk_disable_unprepare(clk->nfi_clk);
++ clk_disable_unprepare(clk->spi_clk);
++}
++
++static int mtk_snfi_reset(struct mtk_snfi *snfi)
++{
++ u32 val;
++ int ret;
++
++ /* SW reset controller */
++ val = readl(snfi->regs + SNFI_MISC_CTL) | SW_RST;
++ writel(val, snfi->regs + SNFI_MISC_CTL);
++
++ ret = readw_poll_timeout(snfi->regs + SNFI_STA_CTL1, val,
++ !(val & SPI_STATE_IDLE), 50,
++ MTK_RESET_TIMEOUT);
++ if (ret) {
++ dev_warn(snfi->dev, "spi state active in reset [0x%x] = 0x%x\n",
++ SNFI_STA_CTL1, val);
++ return ret;
++ }
++
++ val = readl(snfi->regs + SNFI_MISC_CTL);
++ val &= ~SW_RST;
++ writel(val, snfi->regs + SNFI_MISC_CTL);
++
++ /* reset all registers and force the NFI master to terminate */
++ writew(CON_FIFO_FLUSH | CON_NFI_RST, snfi->regs + NFI_CON);
++ ret = readw_poll_timeout(snfi->regs + NFI_STA, val,
++ !(val & (NFI_FSM_MASK | NAND_FSM_MASK)), 50,
++ MTK_RESET_TIMEOUT);
++ if (ret) {
++ dev_warn(snfi->dev, "nfi active in reset [0x%x] = 0x%x\n",
++ NFI_STA, val);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int mtk_snfi_set_spare_per_sector(struct spinand_device *spinand,
++ const struct mtk_snfi_caps *caps,
++ u32 *sps)
++{
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ const u8 *spare = caps->spare_size;
++ u32 sectors, i, closest_spare = 0;
++
++ sectors = mtd->writesize / caps->nand_sec_size;
++ *sps = mtd->oobsize / sectors;
++
++ if (*sps < MTK_SNFC_MIN_SPARE)
++ return -EINVAL;
++
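++ /* pick the largest supported spare size that does not exceed the available spare per sector */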
++ for (i = 0; i < caps->num_spare_size; i++) {
++ if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
++ closest_spare = i;
++ if (*sps == spare[i])
++ break;
++ }
++ }
++
++ *sps = spare[closest_spare];
++
++ return 0;
++}
++
++static void mtk_snfi_read_fdm_data(struct spi_mem *mem,
++ u32 sectors)
++{
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ const struct mtk_snfi_caps *caps = snfi->caps;
++ u32 vall, valm;
++ int i, j;
++ u8 *oobptr;
++
++ for (i = 0; i < sectors; i++) {
++ oobptr = oob_ptr(mem, i);
++ vall = readl(snfi->regs + NFI_FDML(i));
++ valm = readl(snfi->regs + NFI_FDMM(i));
++
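++ /* each sector has up to 8 FDM bytes: FDML holds bytes 0-3, FDMM holds bytes 4-7 */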
++ for (j = 0; j < caps->nand_fdm_size; j++)
++ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
++ }
++}
++
++static void mtk_snfi_write_fdm_data(struct spi_mem *mem,
++ u32 sectors)
++{
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ const struct mtk_snfi_caps *caps = snfi->caps;
++ u32 vall, valm;
++ int i, j;
++ u8 *oobptr;
++
++ for (i = 0; i < sectors; i++) {
++ oobptr = oob_ptr(mem, i);
++ vall = 0;
++ valm = 0;
++ for (j = 0; j < 8; j++) {
++ if (j < 4)
++ vall |= (j < caps->nand_fdm_size ? oobptr[j] :
++ 0xff) << (j * 8);
++ else
++ valm |= (j < caps->nand_fdm_size ? oobptr[j] :
++ 0xff) << ((j - 4) * 8);
++ }
++ writel(vall, snfi->regs + NFI_FDML(i));
++ writel(valm, snfi->regs + NFI_FDMM(i));
++ }
++}
++
++static int mtk_snfi_update_ecc_stats(struct spi_mem *mem,
++ u8 *buf, u32 sectors)
++{
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct mtk_ecc_stats stats;
++ int rc, i;
++
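++ /* the controller flags an empty (all-0xff) page; report clean data instead of an ECC failure */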
++ rc = readl(snfi->regs + NFI_STA) & STA_EMP_PAGE;
++ if (rc) {
++ memset(buf, 0xff, sectors * snfi->caps->nand_sec_size);
++ for (i = 0; i < sectors; i++)
++ memset(spinand->oobbuf, 0xff,
++ snfi->caps->nand_fdm_size);
++ return 0;
++ }
++
++ mtk_ecc_get_stats(snfi->ecc, &stats, sectors);
++ mtd->ecc_stats.corrected += stats.corrected;
++ mtd->ecc_stats.failed += stats.failed;
++
++ return 0;
++}
++
++static int mtk_snfi_hw_runtime_config(struct spi_mem *mem)
++{
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ struct nand_device *nand = mtd_to_nanddev(mtd);
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ const struct mtk_snfi_caps *caps = snfi->caps;
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++ u32 fmt, spare, i = 0;
++ int ret;
++
++ ret = mtk_snfi_set_spare_per_sector(spinand, caps, &spare);
++ if (ret)
++ return ret;
++
++ /* calculate usable oob bytes for ecc parity data */
++ snfi_nand->spare_per_sector = spare;
++ spare -= caps->nand_fdm_size;
++
++ nand->memorg.oobsize = snfi_nand->spare_per_sector
++ * (mtd->writesize / caps->nand_sec_size);
++ mtd->oobsize = nanddev_per_page_oobsize(nand);
++
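++ /* ECC strength = available spare bits divided by the parity bits needed per corrected bit */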
++ snfi->ecc_cfg.strength = (spare << 3) / caps->ecc_parity_bits;
++ mtk_ecc_adjust_strength(snfi->ecc, &snfi->ecc_cfg.strength);
++
++ switch (mtd->writesize) {
++ case 512:
++ fmt = PAGEFMT_512;
++ break;
++ case KB(2):
++ fmt = PAGEFMT_2K;
++ break;
++ case KB(4):
++ fmt = PAGEFMT_4K;
++ break;
++ default:
++ dev_err(snfi->dev, "invalid page len: %d\n", mtd->writesize);
++ return -EINVAL;
++ }
++
++ /* Setup PageFormat */
++ while (caps->spare_size[i] != snfi_nand->spare_per_sector) {
++ i++;
++ if (i == (caps->num_spare_size - 1)) {
++ dev_err(snfi->dev, "invalid spare size %d\n",
++ snfi_nand->spare_per_sector);
++ return -EINVAL;
++ }
++ }
++
++ fmt |= i << caps->pageformat_spare_shift;
++ fmt |= caps->nand_fdm_size << PAGEFMT_FDM_SHIFT;
++ fmt |= caps->nand_fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
++ writel(fmt, snfi->regs + NFI_PAGEFMT);
++
++ snfi->ecc_cfg.len = caps->nand_sec_size + caps->nand_fdm_ecc_size;
++
++ mtk_snfi_set_bad_mark_ctl(&snfi_nand->bad_mark, mem);
++
++ return 0;
++}
++
++static int mtk_snfi_read_from_cache(struct spi_mem *mem,
++ const struct spi_mem_op *op, int oob_on)
++{
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++ u32 reg, len, col_addr = 0;
++ int dummy_cycle, ret;
++ dma_addr_t dma_addr;
++
++ len = sectors * (snfi->caps->nand_sec_size
++ + snfi_nand->spare_per_sector);
++
++ dma_addr = dma_map_single(snfi->dev, snfi->buffer,
++ len, DMA_FROM_DEVICE);
++ ret = dma_mapping_error(snfi->dev, dma_addr);
++ if (ret) {
++ dev_err(snfi->dev, "dma mapping error\n");
++ return -EINVAL;
++ }
++
++ /* set Read cache command and dummy cycle */
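++ /* dummy cycles = dummy bytes * 8 / bus width */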
++ dummy_cycle = (op->dummy.nbytes << 3) >> (ffs(op->dummy.buswidth) - 1);
++ reg = ((op->cmd.opcode & RD_CMD_MASK) |
++ (dummy_cycle << RD_DUMMY_SHIFT));
++ writel(reg, snfi->regs + SNFI_RD_CTL2);
++
++ writel((col_addr & RD_ADDR_MASK), snfi->regs + SNFI_RD_CTL3);
++
++ reg = readl(snfi->regs + SNFI_MISC_CTL);
++ reg |= RD_CUSTOM_EN;
++ reg &= ~(RD_MODE_MASK | WR_X4_EN);
++
++ /* set data and addr buswidth */
++ if (op->data.buswidth == 4)
++ reg |= RD_MODE_X4;
++ else if (op->data.buswidth == 2)
++ reg |= RD_MODE_X2;
++
++ if (op->addr.buswidth == 4 || op->addr.buswidth == 2)
++ reg |= RD_QDUAL_IO;
++ writel(reg, snfi->regs + SNFI_MISC_CTL);
++
++ writel(len, snfi->regs + SNFI_MISC_CTL2);
++ writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
++ reg = readw(snfi->regs + NFI_CNFG);
++ reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA | CNFG_OP_CUST;
++
++ if (!oob_on) {
++ reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
++ writew(reg, snfi->regs + NFI_CNFG);
++
++ snfi->ecc_cfg.mode = ECC_NFI_MODE;
++ snfi->ecc_cfg.sectors = sectors;
++ snfi->ecc_cfg.op = ECC_DECODE;
++ ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
++ if (ret) {
++ dev_err(snfi->dev, "ecc enable failed\n");
++ /* clear NFI_CNFG */
++ reg &= ~(CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_DMA |
++ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
++ writew(reg, snfi->regs + NFI_CNFG);
++ goto out;
++ }
++ } else {
++ writew(reg, snfi->regs + NFI_CNFG);
++ }
++
++ writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
++ readw(snfi->regs + NFI_INTR_STA);
++ writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
++
++ init_completion(&snfi->done);
++
++ /* issue a dummy command to switch the NFI into SPI mode */
++ writew(NAND_CMD_DUMMYREAD, snfi->regs + NFI_CMD);
++ reg = readl(snfi->regs + NFI_CON) | CON_BRD;
++ writew(reg, snfi->regs + NFI_CON);
++
++ ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
++ if (!ret) {
++ dev_err(snfi->dev, "read ahb done timeout\n");
++ writew(0, snfi->regs + NFI_INTR_EN);
++ ret = -ETIMEDOUT;
++ goto out;
++ }
++
++ ret = readl_poll_timeout_atomic(snfi->regs + NFI_BYTELEN, reg,
++ ADDRCNTR_SEC(reg) >= sectors, 10,
++ MTK_TIMEOUT);
++ if (ret < 0) {
++ dev_err(snfi->dev, "polling read byte len timeout\n");
++ ret = -EIO;
++ } else {
++ if (!oob_on) {
++ ret = mtk_ecc_wait_done(snfi->ecc, ECC_DECODE);
++ if (ret) {
++ dev_warn(snfi->dev, "wait ecc done timeout\n");
++ } else {
++ mtk_snfi_update_ecc_stats(mem, snfi->buffer,
++ sectors);
++ mtk_snfi_read_fdm_data(mem, sectors);
++ }
++ }
++ }
++
++ if (oob_on)
++ goto out;
++
++ mtk_ecc_disable(snfi->ecc);
++out:
++ dma_unmap_single(snfi->dev, dma_addr, len, DMA_FROM_DEVICE);
++ writel(0, snfi->regs + NFI_CON);
++ writel(0, snfi->regs + NFI_CNFG);
++ reg = readl(snfi->regs + SNFI_MISC_CTL);
++ reg &= ~RD_CUSTOM_EN;
++ writel(reg, snfi->regs + SNFI_MISC_CTL);
++
++ return ret;
++}
++
++static int mtk_snfi_write_to_cache(struct spi_mem *mem,
++ const struct spi_mem_op *op,
++ int oob_on)
++{
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ u32 sectors = mtd->writesize / snfi->caps->nand_sec_size;
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++ u32 reg, len, col_addr = 0;
++ dma_addr_t dma_addr;
++ int ret;
++
++ len = sectors * (snfi->caps->nand_sec_size
++ + snfi_nand->spare_per_sector);
++
++ dma_addr = dma_map_single(snfi->dev, snfi->buffer, len,
++ DMA_TO_DEVICE);
++ ret = dma_mapping_error(snfi->dev, dma_addr);
++ if (ret) {
++ dev_err(snfi->dev, "dma mapping error\n");
++ return -EINVAL;
++ }
++
++ /* set program load cmd and address */
++ reg = (op->cmd.opcode << WR_LOAD_CMD_SHIFT);
++ writel(reg, snfi->regs + SNFI_PG_CTL1);
++ writel(col_addr & WR_LOAD_ADDR_MASK, snfi->regs + SNFI_PG_CTL2);
++
++ reg = readl(snfi->regs + SNFI_MISC_CTL);
++ reg |= WR_CUSTOM_EN;
++ reg &= ~(RD_MODE_MASK | WR_X4_EN);
++
++ if (op->data.buswidth == 4)
++ reg |= WR_X4_EN;
++ writel(reg, snfi->regs + SNFI_MISC_CTL);
++
++ writel(len << WR_LEN_SHIFT, snfi->regs + SNFI_MISC_CTL2);
++ writew(sectors << CON_SEC_SHIFT, snfi->regs + NFI_CON);
++
++ reg = readw(snfi->regs + NFI_CNFG);
++ reg &= ~(CNFG_READ_EN | CNFG_BYTE_RW);
++ reg |= CNFG_DMA | CNFG_DMA_BURST_EN | CNFG_OP_PROGRAM;
++
++ if (!oob_on) {
++ reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
++ writew(reg, snfi->regs + NFI_CNFG);
++
++ snfi->ecc_cfg.mode = ECC_NFI_MODE;
++ snfi->ecc_cfg.op = ECC_ENCODE;
++ ret = mtk_ecc_enable(snfi->ecc, &snfi->ecc_cfg);
++ if (ret) {
++ dev_err(snfi->dev, "ecc enable failed\n");
++ /* clear NFI_CNFG */
++ reg &= ~(CNFG_DMA_BURST_EN | CNFG_DMA |
++ CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
++ writew(reg, snfi->regs + NFI_CNFG);
++ dma_unmap_single(snfi->dev, dma_addr, len,
++ DMA_FROM_DEVICE);
++ goto out;
++ }
++ /* write OOB into the FDM registers (OOB area in MTK NAND) */
++ mtk_snfi_write_fdm_data(mem, sectors);
++ } else {
++ writew(reg, snfi->regs + NFI_CNFG);
++ }
++ writel(lower_32_bits(dma_addr), snfi->regs + NFI_STRADDR);
++ readw(snfi->regs + NFI_INTR_STA);
++ writew(INTR_AHB_DONE_EN, snfi->regs + NFI_INTR_EN);
++
++ init_completion(&snfi->done);
++
++ /* issue a dummy command to switch the NFI into SPI mode */
++ writew(NAND_CMD_DUMMYPROG, snfi->regs + NFI_CMD);
++ reg = readl(snfi->regs + NFI_CON) | CON_BWR;
++ writew(reg, snfi->regs + NFI_CON);
++
++ ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
++ if (!ret) {
++ dev_err(snfi->dev, "custom program done timeout\n");
++ writew(0, snfi->regs + NFI_INTR_EN);
++ ret = -ETIMEDOUT;
++ goto ecc_disable;
++ }
++
++ ret = readl_poll_timeout_atomic(snfi->regs + NFI_ADDRCNTR, reg,
++ ADDRCNTR_SEC(reg) >= sectors,
++ 10, MTK_TIMEOUT);
++ if (ret)
++ dev_err(snfi->dev, "hwecc write timeout\n");
++
++ecc_disable:
++ mtk_ecc_disable(snfi->ecc);
++
++out:
++ dma_unmap_single(snfi->dev, dma_addr, len, DMA_TO_DEVICE);
++ writel(0, snfi->regs + NFI_CON);
++ writel(0, snfi->regs + NFI_CNFG);
++ reg = readl(snfi->regs + SNFI_MISC_CTL);
++ reg &= ~WR_CUSTOM_EN;
++ writel(reg, snfi->regs + SNFI_MISC_CTL);
++
++ return ret;
++}
++
++static int mtk_snfi_read(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++ u32 col_addr = op->addr.val;
++ int i, ret, sectors, oob_on = false;
++
++ if (col_addr == mtd->writesize)
++ oob_on = true;
++
++ ret = mtk_snfi_read_from_cache(mem, op, oob_on);
++ if (ret) {
++ dev_warn(snfi->dev, "read from cache fail\n");
++ return ret;
++ }
++
++ sectors = mtd->writesize / snfi->caps->nand_sec_size;
++ for (i = 0; i < sectors; i++) {
++ if (oob_on)
++ memcpy(oob_ptr(mem, i),
++ mtk_oob_ptr(mem, snfi->buffer, i),
++ snfi->caps->nand_fdm_size);
++
++ if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
++ snfi_nand->bad_mark.bm_swap(mem, snfi->buffer,
++ oob_on);
++ }
++
++ if (!oob_on)
++ memcpy(spinand->databuf, snfi->buffer, mtd->writesize);
++
++ return ret;
++}
++
++static int mtk_snfi_write(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ struct mtk_snfi_nand_chip *snfi_nand = &snfi->snfi_nand;
++ u32 ret, i, sectors, col_addr = op->addr.val;
++ int oob_on = false;
++
++ if (col_addr == mtd->writesize)
++ oob_on = true;
++
++ sectors = mtd->writesize / snfi->caps->nand_sec_size;
++ memset(snfi->buffer, 0xff, mtd->writesize + mtd->oobsize);
++
++ if (!oob_on)
++ memcpy(snfi->buffer, spinand->databuf, mtd->writesize);
++
++ for (i = 0; i < sectors; i++) {
++ if (i == snfi_nand->bad_mark.sec && snfi->caps->bad_mark_swap)
++ snfi_nand->bad_mark.bm_swap(mem, snfi->buffer, oob_on);
++
++ if (oob_on)
++ memcpy(mtk_oob_ptr(mem, snfi->buffer, i),
++ oob_ptr(mem, i),
++ snfi->caps->nand_fdm_size);
++ }
++
++ ret = mtk_snfi_write_to_cache(mem, op, oob_on);
++ if (ret)
++ dev_warn(snfi->dev, "write to cache fail\n");
++
++ return ret;
++}
++
++static int mtk_snfi_command_exec(struct mtk_snfi *snfi,
++ const u8 *txbuf, u8 *rxbuf,
++ const u32 txlen, const u32 rxlen)
++{
++ u32 tmp, i, j, reg, m;
++ u8 *p_tmp = (u8 *)(&tmp);
++ int ret = 0;
++
++ /* Moving tx data to NFI_SPI GPRAM */
++ for (i = 0, m = 0; i < txlen; ) {
++ for (j = 0, tmp = 0; i < txlen && j < 4; i++, j++)
++ p_tmp[j] = txbuf[i];
++
++ writel(tmp, snfi->regs + SNFI_GPRAM_DATA + m);
++ m += 4;
++ }
++
++ writel(txlen, snfi->regs + SNFI_MAC_OUTL);
++ writel(rxlen, snfi->regs + SNFI_MAC_INL);
++ ret = mtk_snfi_mac_op(snfi);
++ if (ret)
++ return ret;
++
++ /* For NULL input data, this loop will be skipped */
++ if (rxlen)
++ for (i = 0, m = 0; i < rxlen; ) {
++ reg = readl(snfi->regs +
++ SNFI_GPRAM_DATA + m);
++ for (j = 0; i < rxlen && j < 4; i++, j++, rxbuf++) {
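++ /* in the first GPRAM word, skip the bytes still occupied by the tx data */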
++ if (m == 0 && i == 0)
++ j = i + txlen;
++ *rxbuf = (reg >> (j * 8)) & 0xFF;
++ }
++ m += 4;
++ }
++
++ return ret;
++}
++
++/*
++ * mtk_snfi_exec_op - process the command/data to be sent to the
++ * SPI NAND through the MTK controller
++ */
++static int mtk_snfi_exec_op(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++
++{
++ struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
++ struct spinand_device *spinand = spi_mem_get_drvdata(mem);
++ struct mtd_info *mtd = spinand_to_mtd(spinand);
++ struct nand_device *nand = mtd_to_nanddev(mtd);
++ const struct spi_mem_op *read_cache;
++ const struct spi_mem_op *write_cache;
++ const struct spi_mem_op *update_cache;
++ u32 tmpbufsize, txlen = 0, rxlen = 0;
++ u8 *txbuf, *rxbuf = NULL, *buf;
++ int i, ret = 0;
++
++ ret = mtk_snfi_reset(snfi);
++ if (ret) {
++ dev_warn(snfi->dev, "reset spi memory controller fail\n");
++ return ret;
++ }
++
++ /* if the BBT is initialized, the framework has already detected the NAND information */
++ if (nand->bbt.cache) {
++ read_cache = spinand->op_templates.read_cache;
++ write_cache = spinand->op_templates.write_cache;
++ update_cache = spinand->op_templates.update_cache;
++
++ ret = mtk_snfi_hw_runtime_config(mem);
++ if (ret)
++ return ret;
++
++ /* reads/writes with cache are handled here; erase uses the framework flow */
++ if (op->cmd.opcode == read_cache->cmd.opcode) {
++ ret = mtk_snfi_read(mem, op);
++ if (ret)
++ dev_warn(snfi->dev, "snfi read fail\n");
++
++ return ret;
++ } else if ((op->cmd.opcode == write_cache->cmd.opcode)
++ || (op->cmd.opcode == update_cache->cmd.opcode)) {
++ ret = mtk_snfi_write(mem, op);
++ if (ret)
++ dev_warn(snfi->dev, "snfi write fail\n");
++
++ return ret;
++ }
++ }
++
++ tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
++ op->dummy.nbytes + op->data.nbytes;
++
++ txbuf = kzalloc(tmpbufsize, GFP_KERNEL);
++ if (!txbuf)
++ return -ENOMEM;
++
++ txbuf[txlen++] = op->cmd.opcode;
++
++ if (op->addr.nbytes)
++ for (i = 0; i < op->addr.nbytes; i++)
++ txbuf[txlen++] = op->addr.val >>
++ (8 * (op->addr.nbytes - i - 1));
++
++ txlen += op->dummy.nbytes;
++
++ if (op->data.dir == SPI_MEM_DATA_OUT)
++ for (i = 0; i < op->data.nbytes; i++) {
++ buf = (u8 *)op->data.buf.out;
++ txbuf[txlen++] = buf[i];
++ }
++
++ if (op->data.dir == SPI_MEM_DATA_IN) {
++ rxbuf = (u8 *)op->data.buf.in;
++ rxlen += op->data.nbytes;
++ }
++
++ ret = mtk_snfi_command_exec(snfi, txbuf, rxbuf, txlen, rxlen);
++ kfree(txbuf);
++
++ return ret;
++}
++
++static int mtk_snfi_init(struct mtk_snfi *snfi)
++{
++ int ret;
++
++ /* Reset the state machine and data FIFO */
++ ret = mtk_snfi_reset(snfi);
++ if (ret) {
++ dev_warn(snfi->dev, "MTK reset controller fail\n");
++ return ret;
++ }
++
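++ /* bounce buffer for one page of data plus spare: at most a 4KB page and 256 bytes of OOB */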
++ snfi->buffer = devm_kzalloc(snfi->dev, 4096 + 256, GFP_KERNEL);
++ if (!snfi->buffer)
++ return -ENOMEM;
++
++ /* Clear interrupts; the interrupt status register is read-to-clear. */
++ readw(snfi->regs + NFI_INTR_STA);
++ writew(0, snfi->regs + NFI_INTR_EN);
++
++ writel(0, snfi->regs + NFI_CON);
++ writel(0, snfi->regs + NFI_CNFG);
++
++ /* Change to NFI_SPI mode. */
++ writel(SNFI_MODE_EN, snfi->regs + SNFI_CNFG);
++
++ return 0;
++}
++
++static int mtk_snfi_check_buswidth(u8 width)
++{
++ switch (width) {
++ case 1:
++ case 2:
++ case 4:
++ return 0;
++
++ default:
++ break;
++ }
++
++ return -ENOTSUPP;
++}
++
++static bool mtk_snfi_supports_op(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ int ret = 0;
++
++ /* The MTK SPI NAND controller only supports a 1-bit command bus width */
++ if (op->cmd.buswidth != 1)
++ ret = -ENOTSUPP;
++
++ if (op->addr.nbytes)
++ ret |= mtk_snfi_check_buswidth(op->addr.buswidth);
++
++ if (op->dummy.nbytes)
++ ret |= mtk_snfi_check_buswidth(op->dummy.buswidth);
++
++ if (op->data.nbytes)
++ ret |= mtk_snfi_check_buswidth(op->data.buswidth);
++
++ if (ret)
++ return false;
++
++ return true;
++}
++
++static const struct spi_controller_mem_ops mtk_snfi_ops = {
++ .supports_op = mtk_snfi_supports_op,
++ .exec_op = mtk_snfi_exec_op,
++};
++
++static const struct mtk_snfi_caps snfi_mt7622 = {
++ .spare_size = spare_size_mt7622,
++ .num_spare_size = 4,
++ .nand_sec_size = 512,
++ .nand_fdm_size = 8,
++ .nand_fdm_ecc_size = 1,
++ .ecc_parity_bits = 13,
++ .pageformat_spare_shift = 4,
++ .bad_mark_swap = 0,
++};
++
++static const struct mtk_snfi_caps snfi_mt7629 = {
++ .spare_size = spare_size_mt7622,
++ .num_spare_size = 4,
++ .nand_sec_size = 512,
++ .nand_fdm_size = 8,
++ .nand_fdm_ecc_size = 1,
++ .ecc_parity_bits = 13,
++ .pageformat_spare_shift = 4,
++ .bad_mark_swap = 1,
++};
++
++static const struct of_device_id mtk_snfi_id_table[] = {
++ { .compatible = "mediatek,mt7622-snfi", .data = &snfi_mt7622, },
++ { .compatible = "mediatek,mt7629-snfi", .data = &snfi_mt7629, },
++ { /* sentinel */ }
++};
++
++static int mtk_snfi_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct device_node *np = dev->of_node;
++ struct spi_controller *ctlr;
++ struct mtk_snfi *snfi;
++ struct resource *res;
++ int ret = 0, irq;
++
++ ctlr = spi_alloc_master(&pdev->dev, sizeof(*snfi));
++ if (!ctlr)
++ return -ENOMEM;
++
++ snfi = spi_controller_get_devdata(ctlr);
++ snfi->caps = of_device_get_match_data(dev);
++ snfi->dev = dev;
++
++ snfi->ecc = of_mtk_ecc_get(np);
++ if (IS_ERR_OR_NULL(snfi->ecc))
++ goto err_put_master;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ snfi->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(snfi->regs)) {
++ ret = PTR_ERR(snfi->regs);
++ goto release_ecc;
++ }
++
++ /* find the clocks */
++ snfi->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
++ if (IS_ERR(snfi->clk.nfi_clk)) {
++ dev_err(dev, "no nfi clk\n");
++ ret = PTR_ERR(snfi->clk.nfi_clk);
++ goto release_ecc;
++ }
++
++ snfi->clk.spi_clk = devm_clk_get(dev, "spi_clk");
++ if (IS_ERR(snfi->clk.spi_clk)) {
++ dev_err(dev, "no spi clk\n");
++ ret = PTR_ERR(snfi->clk.spi_clk);
++ goto release_ecc;
++ }
++
++ ret = mtk_snfi_enable_clk(dev, &snfi->clk);
++ if (ret)
++ goto release_ecc;
++
++ /* find the irq */
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ dev_err(dev, "no snfi irq resource\n");
++ ret = -EINVAL;
++ goto clk_disable;
++ }
++
++ ret = devm_request_irq(dev, irq, mtk_snfi_irq, 0, "mtk-snfi", snfi);
++ if (ret) {
++ dev_err(dev, "failed to request snfi irq\n");
++ goto clk_disable;
++ }
++
++ ret = dma_set_mask(dev, DMA_BIT_MASK(32));
++ if (ret) {
++ dev_err(dev, "failed to set dma mask\n");
++ goto clk_disable;
++ }
++
++ ctlr->dev.of_node = np;
++ ctlr->mem_ops = &mtk_snfi_ops;
++
++ platform_set_drvdata(pdev, snfi);
++ ret = mtk_snfi_init(snfi);
++ if (ret) {
++ dev_err(dev, "failed to init snfi\n");
++ goto clk_disable;
++ }
++
++ ret = devm_spi_register_master(dev, ctlr);
++ if (ret)
++ goto clk_disable;
++
++ return 0;
++
++clk_disable:
++ mtk_snfi_disable_clk(&snfi->clk);
++
++release_ecc:
++ mtk_ecc_release(snfi->ecc);
++
++err_put_master:
++ spi_master_put(ctlr);
++
++ dev_err(dev, "MediaTek SPI NAND interface probe failed %d\n", ret);
++ return ret;
++}
++
++static int mtk_snfi_remove(struct platform_device *pdev)
++{
++ struct mtk_snfi *snfi = platform_get_drvdata(pdev);
++
++ mtk_snfi_disable_clk(&snfi->clk);
++
++ return 0;
++}
++
++static int mtk_snfi_suspend(struct platform_device *pdev, pm_message_t state)
++{
++ struct mtk_snfi *snfi = platform_get_drvdata(pdev);
++
++ mtk_snfi_disable_clk(&snfi->clk);
++
++ return 0;
++}
++
++static int mtk_snfi_resume(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct mtk_snfi *snfi = dev_get_drvdata(dev);
++ int ret;
++
++ ret = mtk_snfi_enable_clk(dev, &snfi->clk);
++ if (ret)
++ return ret;
++
++ ret = mtk_snfi_init(snfi);
++ if (ret)
++ dev_err(dev, "failed to init snfi controller\n");
++
++ return ret;
++}
++
++static struct platform_driver mtk_snfi_driver = {
++ .driver = {
++ .name = "mtk-snfi",
++ .of_match_table = mtk_snfi_id_table,
++ },
++ .probe = mtk_snfi_probe,
++ .remove = mtk_snfi_remove,
++ .suspend = mtk_snfi_suspend,
++ .resume = mtk_snfi_resume,
++};
++
++module_platform_driver(mtk_snfi_driver);
++
++MODULE_LICENSE("GPL v2");
++MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou@mediatek.com>");
++MODULE_DESCRIPTION("Mediatek SPI Memory Interface Driver");
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0307-dts-mt7629-add-snand-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0307-dts-mt7629-add-snand-support.patch
new file mode 100644
index 0000000..753c111
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0307-dts-mt7629-add-snand-support.patch
@@ -0,0 +1,97 @@
+From c813fbe806257c574240770ef716fbee19f7dbfa Mon Sep 17 00:00:00 2001
+From: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+Date: Thu, 6 Jun 2019 16:29:04 +0800
+Subject: [PATCH] spi: spi-mem: Mediatek: Add SPI Nand support for MT7629
+
+Signed-off-by: Xiangsheng Hou <xiangsheng.hou@mediatek.com>
+---
+ arch/arm/boot/dts/mt7629-rfb.dts | 45 ++++++++++++++++++++++++++++++++
+ arch/arm/boot/dts/mt7629.dtsi | 22 ++++++++++++++++
+ 3 files changed, 79 insertions(+)
+
+--- a/arch/arm/boot/dts/mt7629.dtsi
++++ b/arch/arm/boot/dts/mt7629.dtsi
+@@ -258,6 +258,28 @@
+ status = "disabled";
+ };
+
++ bch: ecc@1100e000 {
++ compatible = "mediatek,mt7622-ecc";
++ reg = <0x1100e000 0x1000>;
++ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_LOW>;
++ clocks = <&pericfg CLK_PERI_NFIECC_PD>;
++ clock-names = "nfiecc_clk";
++ status = "disabled";
++ };
++
++ snfi: spi@1100d000 {
++ compatible = "mediatek,mt7629-snfi";
++ reg = <0x1100d000 0x1000>;
++ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
++ clocks = <&pericfg CLK_PERI_NFI_PD>,
++ <&pericfg CLK_PERI_SNFI_PD>;
++ clock-names = "nfi_clk", "spi_clk";
++ ecc-engine = <&bch>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++ };
++
+ spi: spi@1100a000 {
+ compatible = "mediatek,mt7629-spi",
+ "mediatek,mt7622-spi";
+--- a/arch/arm/boot/dts/mt7629-rfb.dts
++++ b/arch/arm/boot/dts/mt7629-rfb.dts
+@@ -276,6 +276,52 @@
+ };
+ };
+
++&bch {
++ status = "okay";
++};
++
++&snfi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&serial_nand_pins>;
++ status = "okay";
++
++ spi_nand@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "spi-nand";
++ spi-max-frequency = <104000000>;
++ reg = <0>;
++
++ partitions {
++ compatible = "fixed-partitions";
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ partition@0 {
++ label = "Bootloader";
++ reg = <0x00000 0x0100000>;
++ read-only;
++ };
++
++ partition@100000 {
++ label = "Config";
++ reg = <0x100000 0x0040000>;
++ };
++
++ partition@140000 {
++ label = "factory";
++ reg = <0x140000 0x0080000>;
++ };
++
++ partition@1c0000 {
++ label = "firmware";
++ reg = <0x1c0000 0x1000000>;
++ };
++
++ };
++ };
++};
++
+ &spi {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi_pins>;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0308-dts-mt7622-add-snand-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0308-dts-mt7622-add-snand-support.patch
new file mode 100644
index 0000000..b287780
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0308-dts-mt7622-add-snand-support.patch
@@ -0,0 +1,96 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -554,6 +554,19 @@
+ status = "disabled";
+ };
+
++ snfi: spi@1100d000 {
++ compatible = "mediatek,mt7622-snfi";
++ reg = <0 0x1100d000 0 0x1000>;
++ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
++ clocks = <&pericfg CLK_PERI_NFI_PD>,
++ <&pericfg CLK_PERI_SNFI_PD>;
++ clock-names = "nfi_clk", "spi_clk";
++ ecc-engine = <&bch>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ status = "disabled";
++ };
++
+ nor_flash: spi@11014000 {
+ compatible = "mediatek,mt7622-nor",
+ "mediatek,mt8173-nor";
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -99,7 +99,7 @@
+ };
+
+ &bch {
+- status = "disabled";
++ status = "okay";
+ };
+
+ &btif {
+@@ -551,6 +551,62 @@
+ status = "disable";
+ };
+
++&snfi {
++ pinctrl-names = "default";
++ pinctrl-0 = <&serial_nand_pins>;
++ status = "okay";
++
++ spi_nand@0 {
++ #address-cells = <1>;
++ #size-cells = <1>;
++ compatible = "spi-nand";
++ spi-max-frequency = <104000000>;
++ reg = <0>;
++
++ partitions {
++ compatible = "fixed-partitions";
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ partition@0 {
++ label = "Preloader";
++ reg = <0x00000 0x0080000>;
++ read-only;
++ };
++
++ partition@80000 {
++ label = "ATF";
++ reg = <0x80000 0x0040000>;
++ };
++
++ partition@c0000 {
++ label = "Bootloader";
++ reg = <0xc0000 0x0080000>;
++ };
++
++ partition@140000 {
++ label = "Config";
++ reg = <0x140000 0x0080000>;
++ };
++
++ partition@1c0000 {
++ label = "Factory";
++ reg = <0x1c0000 0x0040000>;
++ };
++
++ partition@200000 {
++ label = "firmware";
++ reg = <0x200000 0x2000000>;
++ };
++
++ partition@2200000 {
++ label = "User_data";
++ reg = <0x2200000 0x4000000>;
++ };
++ };
++ };
++};
++
+ &spi0 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&spic0_pins>;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0310-dts-add-wmac-support-for-mt7622-rfb1.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0310-dts-add-wmac-support-for-mt7622-rfb1.patch
new file mode 100644
index 0000000..84aed89
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0310-dts-add-wmac-support-for-mt7622-rfb1.patch
@@ -0,0 +1,40 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -716,6 +716,17 @@
+ status = "disabled";
+ };
+
++ wmac: wmac@18000000 {
++ compatible = "mediatek,mt7622-wmac";
++ reg = <0 0x18000000 0 0x100000>;
++ interrupts = <GIC_SPI 211 IRQ_TYPE_LEVEL_LOW>;
++
++ mediatek,infracfg = <&infracfg>;
++ status = "disabled";
++
++ power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
++ };
++
+ ssusbsys: ssusbsys@1a000000 {
+ compatible = "mediatek,mt7622-ssusbsys",
+ "syscon";
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -589,7 +589,7 @@
+ reg = <0x140000 0x0080000>;
+ };
+
+- partition@1c0000 {
++ factory: partition@1c0000 {
+ label = "Factory";
+ reg = <0x1c0000 0x0040000>;
+ };
+@@ -646,3 +646,8 @@
+ pinctrl-0 = <&watchdog_pins>;
+ status = "okay";
+ };
++
++&wmac {
++ mediatek,mtd-eeprom = <&factory 0x0000>;
++ status = "okay";
++};
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0400-sound-add-some-helpers-to-control-mtk_memif.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0400-sound-add-some-helpers-to-control-mtk_memif.patch
new file mode 100644
index 0000000..ddeb5a4
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0400-sound-add-some-helpers-to-control-mtk_memif.patch
@@ -0,0 +1,313 @@
+--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
++++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
+@@ -361,6 +361,222 @@
+ }
+ EXPORT_SYMBOL_GPL(mtk_afe_dai_resume);
+
++int mtk_memif_set_enable(struct mtk_base_afe *afe, int id)
++{
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
++
++ if (memif->data->enable_shift < 0) {
++ dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
++ __func__, id);
++ return 0;
++ }
++ return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
++ 1, 1, memif->data->enable_shift);
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_enable);
++
++int mtk_memif_set_disable(struct mtk_base_afe *afe, int id)
++{
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
++
++ if (memif->data->enable_shift < 0) {
++ dev_warn(afe->dev, "%s(), error, id %d, enable_shift < 0\n",
++ __func__, id);
++ return 0;
++ }
++ return mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
++ 1, 0, memif->data->enable_shift);
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_disable);
++
++int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
++ unsigned char *dma_area,
++ dma_addr_t dma_addr,
++ size_t dma_bytes)
++{
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
++ int msb_at_bit33 = upper_32_bits(dma_addr) ? 1 : 0;
++ unsigned int phys_buf_addr = lower_32_bits(dma_addr);
++ unsigned int phys_buf_addr_upper_32 = upper_32_bits(dma_addr);
++
++ memif->dma_area = dma_area;
++ memif->dma_addr = dma_addr;
++ memif->dma_bytes = dma_bytes;
++
++ /* start */
++ mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
++ phys_buf_addr);
++ /* end */
++ if (memif->data->reg_ofs_end)
++ mtk_regmap_write(afe->regmap,
++ memif->data->reg_ofs_end,
++ phys_buf_addr + dma_bytes - 1);
++ else
++ mtk_regmap_write(afe->regmap,
++ memif->data->reg_ofs_base +
++ AFE_BASE_END_OFFSET,
++ phys_buf_addr + dma_bytes - 1);
++
++ /* set start, end, upper 32 bits */
++ if (memif->data->reg_ofs_base_msb) {
++ mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base_msb,
++ phys_buf_addr_upper_32);
++ mtk_regmap_write(afe->regmap,
++ memif->data->reg_ofs_end_msb,
++ phys_buf_addr_upper_32);
++ }
++
++ /* set MSB to 33-bit */
++ if (memif->data->msb_reg >= 0)
++ mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
++ 1, msb_at_bit33, memif->data->msb_shift);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_addr);
++
++int mtk_memif_set_channel(struct mtk_base_afe *afe,
++ int id, unsigned int channel)
++{
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
++ unsigned int mono;
++
++ if (memif->data->mono_shift < 0)
++ return 0;
++
++ if (memif->data->quad_ch_mask) {
++ unsigned int quad_ch = (channel == 4) ? 1 : 0;
++
++ mtk_regmap_update_bits(afe->regmap, memif->data->quad_ch_reg,
++ memif->data->quad_ch_mask,
++ quad_ch, memif->data->quad_ch_shift);
++ }
++
++ if (memif->data->mono_invert)
++ mono = (channel == 1) ? 0 : 1;
++ else
++ mono = (channel == 1) ? 1 : 0;
++
++ return mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
++ 1, mono, memif->data->mono_shift);
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_channel);
++
++static int mtk_memif_set_rate_fs(struct mtk_base_afe *afe,
++ int id, int fs)
++{
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
++
++ if (memif->data->fs_shift >= 0)
++ mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
++ memif->data->fs_maskbit,
++ fs, memif->data->fs_shift);
++
++ return 0;
++}
++
++int mtk_memif_set_rate(struct mtk_base_afe *afe,
++ int id, unsigned int rate)
++{
++ int fs = 0;
++
++ if (!afe->get_dai_fs) {
++ dev_err(afe->dev, "%s(), error, afe->get_dai_fs == NULL\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ fs = afe->get_dai_fs(afe, id, rate);
++
++ if (fs < 0)
++ return -EINVAL;
++
++ return mtk_memif_set_rate_fs(afe, id, fs);
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_rate);
++
++int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
++ int id, unsigned int rate)
++{
++ struct snd_soc_pcm_runtime *rtd = substream->private_data;
++ struct snd_soc_component *component =
++ snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
++ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
++
++ int fs = 0;
++
++ if (!afe->memif_fs) {
++ dev_err(afe->dev, "%s(), error, afe->memif_fs == NULL\n",
++ __func__);
++ return -EINVAL;
++ }
++
++ fs = afe->memif_fs(substream, rate);
++
++ if (fs < 0)
++ return -EINVAL;
++
++ return mtk_memif_set_rate_fs(afe, id, fs);
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_rate_substream);
++
++int mtk_memif_set_format(struct mtk_base_afe *afe,
++ int id, snd_pcm_format_t format)
++{
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
++ int hd_audio = 0;
++ int hd_align = 0;
++
++ /* set hd mode */
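++ /* 16-bit formats use normal mode, 32-bit needs HD mode with alignment, 24-bit needs HD mode only */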
++ switch (format) {
++ case SNDRV_PCM_FORMAT_S16_LE:
++ case SNDRV_PCM_FORMAT_U16_LE:
++ hd_audio = 0;
++ break;
++ case SNDRV_PCM_FORMAT_S32_LE:
++ case SNDRV_PCM_FORMAT_U32_LE:
++ hd_audio = 1;
++ hd_align = 1;
++ break;
++ case SNDRV_PCM_FORMAT_S24_LE:
++ case SNDRV_PCM_FORMAT_U24_LE:
++ hd_audio = 1;
++ break;
++ default:
++ dev_err(afe->dev, "%s() error: unsupported format %d\n",
++ __func__, format);
++ break;
++ }
++
++ mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
++ 1, hd_audio, memif->data->hd_shift);
++
++ mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
++ 1, hd_align, memif->data->hd_align_mshift);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_format);
++
++int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
++ int id, int pbuf_size)
++{
++ const struct mtk_base_memif_data *memif_data = afe->memif[id].data;
++
++ if (memif_data->pbuf_mask == 0 || memif_data->minlen_mask == 0)
++ return 0;
++
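++ /* the same size value is programmed into both the pbuf and minlen fields */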
++ mtk_regmap_update_bits(afe->regmap, memif_data->pbuf_reg,
++ memif_data->pbuf_mask,
++ pbuf_size, memif_data->pbuf_shift);
++
++ mtk_regmap_update_bits(afe->regmap, memif_data->minlen_reg,
++ memif_data->minlen_mask,
++ pbuf_size, memif_data->minlen_shift);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(mtk_memif_set_pbuf_size);
++
+ MODULE_DESCRIPTION("Mediatek simple fe dai operator");
+ MODULE_AUTHOR("Garlic Tseng <garlic.tseng@mediatek.com>");
+ MODULE_LICENSE("GPL v2");
+--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.h
++++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.h
+@@ -34,4 +34,20 @@
+ int mtk_afe_dai_suspend(struct snd_soc_dai *dai);
+ int mtk_afe_dai_resume(struct snd_soc_dai *dai);
+
++int mtk_memif_set_enable(struct mtk_base_afe *afe, int id);
++int mtk_memif_set_disable(struct mtk_base_afe *afe, int id);
++int mtk_memif_set_addr(struct mtk_base_afe *afe, int id,
++ unsigned char *dma_area,
++ dma_addr_t dma_addr,
++ size_t dma_bytes);
++int mtk_memif_set_channel(struct mtk_base_afe *afe,
++ int id, unsigned int channel);
++int mtk_memif_set_rate(struct mtk_base_afe *afe,
++ int id, unsigned int rate);
++int mtk_memif_set_rate_substream(struct snd_pcm_substream *substream,
++ int id, unsigned int rate);
++int mtk_memif_set_format(struct mtk_base_afe *afe,
++ int id, snd_pcm_format_t format);
++int mtk_memif_set_pbuf_size(struct mtk_base_afe *afe,
++ int id, int pbuf_size);
+ #endif
+--- a/sound/soc/mediatek/common/mtk-base-afe.h
++++ b/sound/soc/mediatek/common/mtk-base-afe.h
+@@ -16,21 +16,38 @@
+ const char *name;
+ int reg_ofs_base;
+ int reg_ofs_cur;
++ int reg_ofs_end;
++ int reg_ofs_base_msb;
++ int reg_ofs_cur_msb;
++ int reg_ofs_end_msb;
+ int fs_reg;
+ int fs_shift;
+ int fs_maskbit;
+ int mono_reg;
+ int mono_shift;
++ int mono_invert;
++ int quad_ch_reg;
++ int quad_ch_mask;
++ int quad_ch_shift;
+ int enable_reg;
+ int enable_shift;
+ int hd_reg;
+- int hd_align_reg;
+ int hd_shift;
++ int hd_align_reg;
+ int hd_align_mshift;
+ int msb_reg;
+ int msb_shift;
++ int msb2_reg;
++ int msb2_shift;
+ int agent_disable_reg;
+ int agent_disable_shift;
++ /* playback memif only */
++ int pbuf_reg;
++ int pbuf_mask;
++ int pbuf_shift;
++ int minlen_reg;
++ int minlen_mask;
++ int minlen_shift;
+ };
+
+ struct mtk_base_irq_data {
+@@ -84,6 +101,12 @@
+ unsigned int rate);
+ int (*irq_fs)(struct snd_pcm_substream *substream,
+ unsigned int rate);
++ int (*get_dai_fs)(struct mtk_base_afe *afe,
++ int dai_id, unsigned int rate);
++ int (*get_memif_pbuf_size)(struct snd_pcm_substream *substream);
++
++ int (*request_dram_resource)(struct device *dev);
++ int (*release_dram_resource)(struct device *dev);
+
+ void *platform_priv;
+ };
+@@ -95,6 +118,9 @@
+ const struct mtk_base_memif_data *data;
+ int irq_usage;
+ int const_irq;
++ unsigned char *dma_area;
++ dma_addr_t dma_addr;
++ size_t dma_bytes;
+ };
+
+ struct mtk_base_afe_irq {
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0401-sound-refine-hw-params-and-hw-prepare.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0401-sound-refine-hw-params-and-hw-prepare.patch
new file mode 100644
index 0000000..3e24d51
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0401-sound-refine-hw-params-and-hw-prepare.patch
@@ -0,0 +1,221 @@
+--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
++++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
+@@ -6,11 +6,13 @@
+ * Author: Garlic Tseng <garlic.tseng@mediatek.com>
+ */
+
++#include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/regmap.h>
+ #include <sound/soc.h>
+ #include "mtk-afe-platform-driver.h"
++#include <sound/pcm_params.h>
+ #include "mtk-afe-fe-dai.h"
+ #include "mtk-base-afe.h"
+
+@@ -120,50 +122,64 @@
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+- struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+- int msb_at_bit33 = 0;
+- int ret, fs = 0;
++ int id = rtd->cpu_dai->id;
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
++ int ret;
++ unsigned int channels = params_channels(params);
++ unsigned int rate = params_rate(params);
++ snd_pcm_format_t format = params_format(params);
+
+ ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
+ if (ret < 0)
+ return ret;
+
+- msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
+- memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
+- memif->buffer_size = substream->runtime->dma_bytes;
+-
+- /* start */
+- mtk_regmap_write(afe->regmap, memif->data->reg_ofs_base,
+- memif->phys_buf_addr);
+- /* end */
+- mtk_regmap_write(afe->regmap,
+- memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
+- memif->phys_buf_addr + memif->buffer_size - 1);
+-
+- /* set MSB to 33-bit */
+- mtk_regmap_update_bits(afe->regmap, memif->data->msb_reg,
+- 1, msb_at_bit33, memif->data->msb_shift);
++ if (afe->request_dram_resource)
++ afe->request_dram_resource(afe->dev);
+
+- /* set channel */
+- if (memif->data->mono_shift >= 0) {
+- unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
++ dev_dbg(afe->dev, "%s(), %s, ch %d, rate %d, fmt %d, dma_addr %pad, dma_area %p, dma_bytes 0x%zx\n",
++ __func__, memif->data->name,
++ channels, rate, format,
++ &substream->runtime->dma_addr,
++ substream->runtime->dma_area,
++ substream->runtime->dma_bytes);
++
++ memset_io(substream->runtime->dma_area, 0,
++ substream->runtime->dma_bytes);
++
++ /* set addr */
++ ret = mtk_memif_set_addr(afe, id,
++ substream->runtime->dma_area,
++ substream->runtime->dma_addr,
++ substream->runtime->dma_bytes);
++ if (ret) {
++ dev_err(afe->dev, "%s(), error, id %d, set addr, ret %d\n",
++ __func__, id, ret);
++ return ret;
++ }
+
+- mtk_regmap_update_bits(afe->regmap, memif->data->mono_reg,
+- 1, mono, memif->data->mono_shift);
++ /* set channel */
++ ret = mtk_memif_set_channel(afe, id, channels);
++ if (ret) {
++ dev_err(afe->dev, "%s(), error, id %d, set channel %d, ret %d\n",
++ __func__, id, channels, ret);
++ return ret;
+ }
+
+ /* set rate */
+- if (memif->data->fs_shift < 0)
+- return 0;
+-
+- fs = afe->memif_fs(substream, params_rate(params));
+-
+- if (fs < 0)
+- return -EINVAL;
++ ret = mtk_memif_set_rate_substream(substream, id, rate);
++ if (ret) {
++ dev_err(afe->dev, "%s(), error, id %d, set rate %d, ret %d\n",
++ __func__, id, rate, ret);
++ return ret;
++ }
+
+- mtk_regmap_update_bits(afe->regmap, memif->data->fs_reg,
+- memif->data->fs_maskbit, fs,
+- memif->data->fs_shift);
++ /* set format */
++ ret = mtk_memif_set_format(afe, id, format);
++ if (ret) {
++ dev_err(afe->dev, "%s(), error, id %d, set format %d, ret %d\n",
++ __func__, id, format, ret);
++ return ret;
++ }
+
+ return 0;
+ }
+@@ -172,6 +188,11 @@
+ int mtk_afe_fe_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+ {
++ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
++
++ if (afe->release_dram_resource)
++ afe->release_dram_resource(afe->dev);
++
+ return snd_pcm_lib_free_pages(substream);
+ }
+ EXPORT_SYMBOL_GPL(mtk_afe_fe_hw_free);
+@@ -182,20 +203,25 @@
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_pcm_runtime * const runtime = substream->runtime;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+- struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
++ int id = rtd->cpu_dai->id;
++ struct mtk_base_afe_memif *memif = &afe->memif[id];
+ struct mtk_base_afe_irq *irqs = &afe->irqs[memif->irq_usage];
+ const struct mtk_base_irq_data *irq_data = irqs->irq_data;
+ unsigned int counter = runtime->period_size;
+ int fs;
++ int ret;
+
+ dev_dbg(afe->dev, "%s %s cmd=%d\n", __func__, memif->data->name, cmd);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+- mtk_regmap_update_bits(afe->regmap,
+- memif->data->enable_reg,
+- 1, 1, memif->data->enable_shift);
++ ret = mtk_memif_set_enable(afe, id);
++ if (ret) {
++ dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
++ __func__, id, ret);
++ return ret;
++ }
+
+ /* set irq counter */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_cnt_reg,
+@@ -219,15 +245,19 @@
+ return 0;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+- mtk_regmap_update_bits(afe->regmap, memif->data->enable_reg,
+- 1, 0, memif->data->enable_shift);
++ ret = mtk_memif_set_disable(afe, id);
++ if (ret) {
++ dev_err(afe->dev, "%s(), error, id %d, memif enable, ret %d\n",
++ __func__, id, ret);
++ }
++
+ /* disable interrupt */
+ mtk_regmap_update_bits(afe->regmap, irq_data->irq_en_reg,
+ 1, 0, irq_data->irq_en_shift);
+ /* and clear pending IRQ */
+ mtk_regmap_write(afe->regmap, irq_data->irq_clr_reg,
+ 1 << irq_data->irq_clr_shift);
+- return 0;
++ return ret;
+ default:
+ return -EINVAL;
+ }
+@@ -239,34 +269,15 @@
+ {
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai);
+- struct mtk_base_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+- int hd_audio = 0;
+- int hd_align = 0;
++ int id = rtd->cpu_dai->id;
++ int pbuf_size;
+
+- /* set hd mode */
+- switch (substream->runtime->format) {
+- case SNDRV_PCM_FORMAT_S16_LE:
+- hd_audio = 0;
+- break;
+- case SNDRV_PCM_FORMAT_S32_LE:
+- hd_audio = 1;
+- hd_align = 1;
+- break;
+- case SNDRV_PCM_FORMAT_S24_LE:
+- hd_audio = 1;
+- break;
+- default:
+- dev_err(afe->dev, "%s() error: unsupported format %d\n",
+- __func__, substream->runtime->format);
+- break;
++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
++ if (afe->get_memif_pbuf_size) {
++ pbuf_size = afe->get_memif_pbuf_size(substream);
++ mtk_memif_set_pbuf_size(afe, id, pbuf_size);
++ }
+ }
+-
+- mtk_regmap_update_bits(afe->regmap, memif->data->hd_reg,
+- 1, hd_audio, memif->data->hd_shift);
+-
+- mtk_regmap_update_bits(afe->regmap, memif->data->hd_align_reg,
+- 1, hd_align, memif->data->hd_align_mshift);
+-
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(mtk_afe_fe_prepare);
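The hunks above replace the per-register mtk_regmap_* writes in the FE DAI ops with the shared mtk_memif_set_* helpers (address, channel, rate, format, enable/disable, playback buffer size). As a rough illustrative sketch only — my_afe_configure_memif is a hypothetical caller, not part of this patch — a memif is now programmed like this:

  #include <sound/pcm_params.h>
  #include "mtk-base-afe.h"
  #include "mtk-afe-fe-dai.h"

  /* Illustrative sketch of the helper-based flow used by hw_params above. */
  static int my_afe_configure_memif(struct mtk_base_afe *afe, int id,
                                    struct snd_pcm_substream *substream,
                                    struct snd_pcm_hw_params *params)
  {
          struct snd_pcm_runtime *runtime = substream->runtime;
          int ret;

          /* program buffer base/end registers for this memif */
          ret = mtk_memif_set_addr(afe, id, runtime->dma_area,
                                   runtime->dma_addr, runtime->dma_bytes);
          if (ret)
                  return ret;

          /* channel count, sample rate and sample format */
          ret = mtk_memif_set_channel(afe, id, params_channels(params));
          if (ret)
                  return ret;

          ret = mtk_memif_set_rate_substream(substream, id,
                                             params_rate(params));
          if (ret)
                  return ret;

          return mtk_memif_set_format(afe, id, params_format(params));
  }
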
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0402-sound-add-mt7986-driver-and-slic-driver.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0402-sound-add-mt7986-driver-and-slic-driver.patch
new file mode 100644
index 0000000..ee5ea6f
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0402-sound-add-mt7986-driver-and-slic-driver.patch
@@ -0,0 +1,91 @@
+--- a/sound/soc/codecs/Kconfig
++++ b/sound/soc/codecs/Kconfig
+@@ -164,6 +164,7 @@
+ select SND_SOC_RT5677 if I2C && SPI_MASTER
+ select SND_SOC_RT5682 if I2C
+ select SND_SOC_SGTL5000 if I2C
++ select SND_SOC_SI3218X_SPI
+ select SND_SOC_SI476X if MFD_SI476X_CORE
+ select SND_SOC_SIMPLE_AMPLIFIER
+ select SND_SOC_SIRF_AUDIO_CODEC
+@@ -1484,6 +1485,14 @@
+ config SND_SOC_NAU8825
+ tristate
+
++config SND_SOC_SI3218X
++ tristate
++
++config SND_SOC_SI3218X_SPI
++ tristate "Proslic SI3218X"
++ depends on SPI
++ select SND_SOC_SI3218X
++
+ config SND_SOC_TPA6130A2
+ tristate "Texas Instruments TPA6130A2 headphone amplifier"
+ depends on I2C
+--- a/sound/soc/codecs/Makefile
++++ b/sound/soc/codecs/Makefile
+@@ -176,6 +176,7 @@
+ snd-soc-sigmadsp-objs := sigmadsp.o
+ snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
+ snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
++snd-soc-si3218x-spi-objs := si3218x-spi.o
+ snd-soc-si476x-objs := si476x.o
+ snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
+ snd-soc-spdif-tx-objs := spdif_transmitter.o
+@@ -563,3 +564,7 @@
+ obj-$(CONFIG_SND_SOC_MAX98504) += snd-soc-max98504.o
+ obj-$(CONFIG_SND_SOC_SIMPLE_AMPLIFIER) += snd-soc-simple-amplifier.o
+ obj-$(CONFIG_SND_SOC_TPA6130A2) += snd-soc-tpa6130a2.o
++
++# Proslic si3218x
++obj-$(CONFIG_SND_SOC_SI3218X) += si3218x/
++obj-$(CONFIG_SND_SOC_SI3218X_SPI) += snd-soc-si3218x-spi.o
+--- a/sound/soc/mediatek/Kconfig
++++ b/sound/soc/mediatek/Kconfig
+@@ -53,6 +53,36 @@
+ Select Y if you have such device.
+ If unsure select "N".
+
++config SND_SOC_MT79XX
++ tristate "ASoC support for Mediatek MT79XX chip"
++ depends on ARCH_MEDIATEK
++ select SND_SOC_MEDIATEK
++ help
++ This adds ASoC platform driver support for Mediatek MT79XX chip
++ that can be used with other codecs.
++ Select Y if you have such device.
++ If unsure select "N".
++
++config SND_SOC_MT79XX_WM8960
++ tristate "ASoc Audio driver for MT79XX with WM8960 codec"
++ depends on SND_SOC_MT79XX && I2C
++ select SND_SOC_WM8960
++ help
++ This adds ASoC driver for Mediatek MT79XX boards
++ with the WM8960 codecs.
++ Select Y if you have such device.
++ If unsure select "N".
++
++config SND_SOC_MT79XX_SI3218X
++ tristate "ASoc Audio driver for MT79XX with SI3218X codec"
++ depends on SND_SOC_MT79XX && SPI
++ select SND_SOC_SI3218X_SPI
++ help
++ This adds ASoC driver for Mediatek MT79XX boards
++ with the SI3218X codecs.
++ Select Y if you have such device.
++ If unsure select "N".
++
+ config SND_SOC_MT8173
+ tristate "ASoC support for Mediatek MT8173 chip"
+ depends on ARCH_MEDIATEK
+--- a/sound/soc/mediatek/Makefile
++++ b/sound/soc/mediatek/Makefile
+@@ -2,5 +2,6 @@
+ obj-$(CONFIG_SND_SOC_MEDIATEK) += common/
+ obj-$(CONFIG_SND_SOC_MT2701) += mt2701/
+ obj-$(CONFIG_SND_SOC_MT6797) += mt6797/
++obj-$(CONFIG_SND_SOC_MT79XX) += mt79xx/
+ obj-$(CONFIG_SND_SOC_MT8173) += mt8173/
+ obj-$(CONFIG_SND_SOC_MT8183) += mt8183/
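The Kconfig and Makefile hunks above only wire the new SND_SOC_MT79XX and SI3218X symbols into the build; nothing is compiled until the kernel config enables them. An illustrative config fragment (values are examples, not part of this change) could look like the following; SND_SOC_SI3218X_SPI and SND_SOC_SI3218X are pulled in automatically via 'select' once a machine driver is enabled:

  CONFIG_SND_SOC_MEDIATEK=y
  CONFIG_SND_SOC_MT79XX=y
  # enable the machine driver matching the attached codec
  CONFIG_SND_SOC_MT79XX_SI3218X=y
  CONFIG_SND_SOC_MT79XX_WM8960=m
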
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0500-v5.6-crypto-backport-inside-secure.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0500-v5.6-crypto-backport-inside-secure.patch
new file mode 100644
index 0000000..2fae90e
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0500-v5.6-crypto-backport-inside-secure.patch
@@ -0,0 +1,5464 @@
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -75,9 +75,9 @@ static void eip197_trc_cache_banksel(str
+ }
+
+ static u32 eip197_trc_cache_probe(struct safexcel_crypto_priv *priv,
+- int maxbanks, u32 probemask)
++ int maxbanks, u32 probemask, u32 stride)
+ {
+- u32 val, addrhi, addrlo, addrmid;
++ u32 val, addrhi, addrlo, addrmid, addralias, delta, marker;
+ int actbank;
+
+ /*
+@@ -87,32 +87,37 @@ static u32 eip197_trc_cache_probe(struct
+ addrhi = 1 << (16 + maxbanks);
+ addrlo = 0;
+ actbank = min(maxbanks - 1, 0);
+- while ((addrhi - addrlo) > 32) {
++ while ((addrhi - addrlo) > stride) {
+ /* write marker to lowest address in top half */
+ addrmid = (addrhi + addrlo) >> 1;
++ marker = (addrmid ^ 0xabadbabe) & probemask; /* Unique */
+ eip197_trc_cache_banksel(priv, addrmid, &actbank);
+- writel((addrmid | (addrlo << 16)) & probemask,
++ writel(marker,
+ priv->base + EIP197_CLASSIFICATION_RAMS +
+ (addrmid & 0xffff));
+
+- /* write marker to lowest address in bottom half */
+- eip197_trc_cache_banksel(priv, addrlo, &actbank);
+- writel((addrlo | (addrhi << 16)) & probemask,
+- priv->base + EIP197_CLASSIFICATION_RAMS +
+- (addrlo & 0xffff));
++ /* write invalid markers to possible aliases */
++ delta = 1 << __fls(addrmid);
++ while (delta >= stride) {
++ addralias = addrmid - delta;
++ eip197_trc_cache_banksel(priv, addralias, &actbank);
++ writel(~marker,
++ priv->base + EIP197_CLASSIFICATION_RAMS +
++ (addralias & 0xffff));
++ delta >>= 1;
++ }
+
+ /* read back marker from top half */
+ eip197_trc_cache_banksel(priv, addrmid, &actbank);
+ val = readl(priv->base + EIP197_CLASSIFICATION_RAMS +
+ (addrmid & 0xffff));
+
+- if (val == ((addrmid | (addrlo << 16)) & probemask)) {
++ if ((val & probemask) == marker)
+ /* read back correct, continue with top half */
+ addrlo = addrmid;
+- } else {
++ else
+ /* not read back correct, continue with bottom half */
+ addrhi = addrmid;
+- }
+ }
+ return addrhi;
+ }
+@@ -150,7 +155,7 @@ static void eip197_trc_cache_clear(struc
+ htable_offset + i * sizeof(u32));
+ }
+
+-static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
++static int eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+ {
+ u32 val, dsize, asize;
+ int cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;
+@@ -183,7 +188,7 @@ static void eip197_trc_cache_init(struct
+ writel(val, priv->base + EIP197_TRC_PARAMS);
+
+ /* Probed data RAM size in bytes */
+- dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff);
++ dsize = eip197_trc_cache_probe(priv, maxbanks, 0xffffffff, 32);
+
+ /*
+ * Now probe the administration RAM size pretty much the same way
+@@ -196,11 +201,18 @@ static void eip197_trc_cache_init(struct
+ writel(val, priv->base + EIP197_TRC_PARAMS);
+
+ /* Probed admin RAM size in admin words */
+- asize = eip197_trc_cache_probe(priv, 0, 0xbfffffff) >> 4;
++ asize = eip197_trc_cache_probe(priv, 0, 0x3fffffff, 16) >> 4;
+
+ /* Clear any ECC errors detected while probing! */
+ writel(0, priv->base + EIP197_TRC_ECCCTRL);
+
++ /* Sanity check probing results */
++ if (dsize < EIP197_MIN_DSIZE || asize < EIP197_MIN_ASIZE) {
++ dev_err(priv->dev, "Record cache probing failed (%d,%d).",
++ dsize, asize);
++ return -ENODEV;
++ }
++
+ /*
+ * Determine optimal configuration from RAM sizes
+ * Note that we assume that the physical RAM configuration is sane
+@@ -251,6 +263,7 @@ static void eip197_trc_cache_init(struct
+
+ dev_info(priv->dev, "TRC init: %dd,%da (%dr,%dh)\n",
+ dsize, asize, cs_rc_max, cs_ht_wc + cs_ht_wc);
++ return 0;
+ }
+
+ static void eip197_init_firmware(struct safexcel_crypto_priv *priv)
+@@ -298,13 +311,14 @@ static void eip197_init_firmware(struct
+ static int eip197_write_firmware(struct safexcel_crypto_priv *priv,
+ const struct firmware *fw)
+ {
+- const u32 *data = (const u32 *)fw->data;
++ const __be32 *data = (const __be32 *)fw->data;
+ int i;
+
+ /* Write the firmware */
+ for (i = 0; i < fw->size / sizeof(u32); i++)
+ writel(be32_to_cpu(data[i]),
+- priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
++ priv->base + EIP197_CLASSIFICATION_RAMS +
++ i * sizeof(__be32));
+
+ /* Exclude final 2 NOPs from size */
+ return i - EIP197_FW_TERMINAL_NOPS;
+@@ -471,6 +485,14 @@ static int safexcel_hw_setup_cdesc_rings
+ cd_fetch_cnt = ((1 << priv->hwconfig.hwcfsize) /
+ cd_size_rnd) - 1;
+ }
++ /*
++ * Since we're using command desc's way larger than formally specified,
++ * we need to check whether we can fit even 1 for low-end EIP196's!
++ */
++ if (!cd_fetch_cnt) {
++ dev_err(priv->dev, "Unable to fit even 1 command desc!\n");
++ return -ENODEV;
++ }
+
+ for (i = 0; i < priv->config.rings; i++) {
+ /* ring base address */
+@@ -479,12 +501,12 @@ static int safexcel_hw_setup_cdesc_rings
+ writel(upper_32_bits(priv->ring[i].cdr.base_dma),
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+- priv->config.cd_size,
++ writel(EIP197_xDR_DESC_MODE_64BIT | EIP197_CDR_DESC_MODE_ADCP |
++ (priv->config.cd_offset << 14) | priv->config.cd_size,
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+ writel(((cd_fetch_cnt *
+ (cd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
+- (cd_fetch_cnt * priv->config.cd_offset),
++ (cd_fetch_cnt * (priv->config.cd_offset / sizeof(u32))),
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+ /* Configure DMA tx control */
+@@ -527,13 +549,13 @@ static int safexcel_hw_setup_rdesc_rings
+ writel(upper_32_bits(priv->ring[i].rdr.base_dma),
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+- writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
++ writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 14) |
+ priv->config.rd_size,
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
+
+ writel(((rd_fetch_cnt *
+ (rd_size_rnd << priv->hwconfig.hwdataw)) << 16) |
+- (rd_fetch_cnt * priv->config.rd_offset),
++ (rd_fetch_cnt * (priv->config.rd_offset / sizeof(u32))),
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);
+
+ /* Configure DMA tx control */
+@@ -559,7 +581,7 @@ static int safexcel_hw_setup_rdesc_rings
+ static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
+ {
+ u32 val;
+- int i, ret, pe;
++ int i, ret, pe, opbuflo, opbufhi;
+
+ dev_dbg(priv->dev, "HW init: using %d pipe(s) and %d ring(s)\n",
+ priv->config.pes, priv->config.rings);
+@@ -595,8 +617,8 @@ static int safexcel_hw_init(struct safex
+ writel(EIP197_DxE_THR_CTRL_RESET_PE,
+ EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
+
+- if (priv->flags & SAFEXCEL_HW_EIP197)
+- /* Reset HIA input interface arbiter (EIP197 only) */
++ if (priv->flags & EIP197_PE_ARB)
++ /* Reset HIA input interface arbiter (if present) */
+ writel(EIP197_HIA_RA_PE_CTRL_RESET,
+ EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
+
+@@ -639,9 +661,16 @@ static int safexcel_hw_init(struct safex
+ ;
+
+ /* DMA transfer size to use */
++ if (priv->hwconfig.hwnumpes > 4) {
++ opbuflo = 9;
++ opbufhi = 10;
++ } else {
++ opbuflo = 7;
++ opbufhi = 8;
++ }
+ val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+- val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
+- EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
++ val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(opbuflo) |
++ EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(opbufhi);
+ val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
+ val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
+ /* FIXME: instability issues can occur for EIP97 but disabling
+@@ -655,8 +684,8 @@ static int safexcel_hw_init(struct safex
+ writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
+
+ /* Configure the procesing engine thresholds */
+- writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
+- EIP197_PE_OUT_DBUF_THRES_MAX(8),
++ writel(EIP197_PE_OUT_DBUF_THRES_MIN(opbuflo) |
++ EIP197_PE_OUT_DBUF_THRES_MAX(opbufhi),
+ EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
+
+ /* Processing Engine configuration */
+@@ -696,7 +725,7 @@ static int safexcel_hw_init(struct safex
+ writel(0,
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+- writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
++ writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset),
+ EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+ }
+
+@@ -719,7 +748,7 @@ static int safexcel_hw_init(struct safex
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
+
+ /* Ring size */
+- writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
++ writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
+ }
+
+@@ -736,19 +765,28 @@ static int safexcel_hw_init(struct safex
+ /* Clear any HIA interrupt */
+ writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);
+
+- if (priv->flags & SAFEXCEL_HW_EIP197) {
+- eip197_trc_cache_init(priv);
+- priv->flags |= EIP197_TRC_CACHE;
++ if (priv->flags & EIP197_SIMPLE_TRC) {
++ writel(EIP197_STRC_CONFIG_INIT |
++ EIP197_STRC_CONFIG_LARGE_REC(EIP197_CS_TRC_REC_WC) |
++ EIP197_STRC_CONFIG_SMALL_REC(EIP197_CS_TRC_REC_WC),
++ priv->base + EIP197_STRC_CONFIG);
++ writel(EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE,
++ EIP197_PE(priv) + EIP197_PE_EIP96_TOKEN_CTRL2(0));
++ } else if (priv->flags & SAFEXCEL_HW_EIP197) {
++ ret = eip197_trc_cache_init(priv);
++ if (ret)
++ return ret;
++ }
+
++ if (priv->flags & EIP197_ICE) {
+ ret = eip197_load_firmwares(priv);
+ if (ret)
+ return ret;
+ }
+
+- safexcel_hw_setup_cdesc_rings(priv);
+- safexcel_hw_setup_rdesc_rings(priv);
+-
+- return 0;
++ return safexcel_hw_setup_cdesc_rings(priv) ?:
++ safexcel_hw_setup_rdesc_rings(priv) ?:
++ 0;
+ }
+
+ /* Called with ring's lock taken */
+@@ -836,20 +874,24 @@ finalize:
+ spin_unlock_bh(&priv->ring[ring].lock);
+
+ /* let the RDR know we have pending descriptors */
+- writel((rdesc * priv->config.rd_offset) << 2,
++ writel((rdesc * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+
+ /* let the CDR know we have pending descriptors */
+- writel((cdesc * priv->config.cd_offset) << 2,
++ writel((cdesc * priv->config.cd_offset),
+ EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
+ }
+
+ inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+- struct safexcel_result_desc *rdesc)
++ void *rdp)
+ {
+- if (likely((!rdesc->descriptor_overflow) &&
+- (!rdesc->buffer_overflow) &&
+- (!rdesc->result_data.error_code)))
++ struct safexcel_result_desc *rdesc = rdp;
++ struct result_data_desc *result_data = rdp + priv->config.res_offset;
++
++ if (likely((!rdesc->last_seg) || /* Rest only valid if last seg! */
++ ((!rdesc->descriptor_overflow) &&
++ (!rdesc->buffer_overflow) &&
++ (!result_data->error_code))))
+ return 0;
+
+ if (rdesc->descriptor_overflow)
+@@ -858,13 +900,14 @@ inline int safexcel_rdesc_check_errors(s
+ if (rdesc->buffer_overflow)
+ dev_err(priv->dev, "Buffer overflow detected");
+
+- if (rdesc->result_data.error_code & 0x4066) {
++ if (result_data->error_code & 0x4066) {
+ /* Fatal error (bits 1,2,5,6 & 14) */
+ dev_err(priv->dev,
+ "result descriptor error (%x)",
+- rdesc->result_data.error_code);
++ result_data->error_code);
++
+ return -EIO;
+- } else if (rdesc->result_data.error_code &
++ } else if (result_data->error_code &
+ (BIT(7) | BIT(4) | BIT(3) | BIT(0))) {
+ /*
+ * Give priority over authentication fails:
+@@ -872,7 +915,7 @@ inline int safexcel_rdesc_check_errors(s
+ * something wrong with the input!
+ */
+ return -EINVAL;
+- } else if (rdesc->result_data.error_code & BIT(9)) {
++ } else if (result_data->error_code & BIT(9)) {
+ /* Authentication failed */
+ return -EBADMSG;
+ }
+@@ -931,16 +974,18 @@ int safexcel_invalidate_cache(struct cry
+ {
+ struct safexcel_command_desc *cdesc;
+ struct safexcel_result_desc *rdesc;
++ struct safexcel_token *dmmy;
+ int ret = 0;
+
+ /* Prepare command descriptor */
+- cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
++ cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma,
++ &dmmy);
+ if (IS_ERR(cdesc))
+ return PTR_ERR(cdesc);
+
+ cdesc->control_data.type = EIP197_TYPE_EXTENDED;
+ cdesc->control_data.options = 0;
+- cdesc->control_data.refresh = 0;
++ cdesc->control_data.context_lo &= ~EIP197_CONTEXT_SIZE_MASK;
+ cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
+
+ /* Prepare result descriptor */
+@@ -1003,7 +1048,7 @@ handle_results:
+ acknowledge:
+ if (i)
+ writel(EIP197_xDR_PROC_xD_PKT(i) |
+- EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
++ (tot_descs * priv->config.rd_offset),
+ EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
+
+ /* If the number of requests overflowed the counter, try to proceed more
+@@ -1171,6 +1216,44 @@ static struct safexcel_alg_template *saf
+ &safexcel_alg_xts_aes,
+ &safexcel_alg_gcm,
+ &safexcel_alg_ccm,
++ &safexcel_alg_crc32,
++ &safexcel_alg_cbcmac,
++ &safexcel_alg_xcbcmac,
++ &safexcel_alg_cmac,
++ &safexcel_alg_chacha20,
++ &safexcel_alg_chachapoly,
++ &safexcel_alg_chachapoly_esp,
++ &safexcel_alg_sm3,
++ &safexcel_alg_hmac_sm3,
++ &safexcel_alg_ecb_sm4,
++ &safexcel_alg_cbc_sm4,
++ &safexcel_alg_ofb_sm4,
++ &safexcel_alg_cfb_sm4,
++ &safexcel_alg_ctr_sm4,
++ &safexcel_alg_authenc_hmac_sha1_cbc_sm4,
++ &safexcel_alg_authenc_hmac_sm3_cbc_sm4,
++ &safexcel_alg_authenc_hmac_sha1_ctr_sm4,
++ &safexcel_alg_authenc_hmac_sm3_ctr_sm4,
++ &safexcel_alg_sha3_224,
++ &safexcel_alg_sha3_256,
++ &safexcel_alg_sha3_384,
++ &safexcel_alg_sha3_512,
++ &safexcel_alg_hmac_sha3_224,
++ &safexcel_alg_hmac_sha3_256,
++ &safexcel_alg_hmac_sha3_384,
++ &safexcel_alg_hmac_sha3_512,
++ &safexcel_alg_authenc_hmac_sha1_cbc_des,
++ &safexcel_alg_authenc_hmac_sha256_cbc_des3_ede,
++ &safexcel_alg_authenc_hmac_sha224_cbc_des3_ede,
++ &safexcel_alg_authenc_hmac_sha512_cbc_des3_ede,
++ &safexcel_alg_authenc_hmac_sha384_cbc_des3_ede,
++ &safexcel_alg_authenc_hmac_sha256_cbc_des,
++ &safexcel_alg_authenc_hmac_sha224_cbc_des,
++ &safexcel_alg_authenc_hmac_sha512_cbc_des,
++ &safexcel_alg_authenc_hmac_sha384_cbc_des,
++ &safexcel_alg_rfc4106_gcm,
++ &safexcel_alg_rfc4543_gcm,
++ &safexcel_alg_rfc4309_ccm,
+ };
+
+ static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
+@@ -1240,30 +1323,30 @@ static void safexcel_unregister_algorith
+
+ static void safexcel_configure(struct safexcel_crypto_priv *priv)
+ {
+- u32 val, mask = 0;
+-
+- val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
+-
+- /* Read number of PEs from the engine */
+- if (priv->flags & SAFEXCEL_HW_EIP197)
+- /* Wider field width for all EIP197 type engines */
+- mask = EIP197_N_PES_MASK;
+- else
+- /* Narrow field width for EIP97 type engine */
+- mask = EIP97_N_PES_MASK;
+-
+- priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;
++ u32 mask = BIT(priv->hwconfig.hwdataw) - 1;
+
+- priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
++ priv->config.pes = priv->hwconfig.hwnumpes;
++ priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings);
++ /* Cannot currently support more rings than we have ring AICs! */
++ priv->config.rings = min_t(u32, priv->config.rings,
++ priv->hwconfig.hwnumraic);
+
+- val = (val & GENMASK(27, 25)) >> 25;
+- mask = BIT(val) - 1;
+-
+- priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
++ priv->config.cd_size = EIP197_CD64_FETCH_SIZE;
+ priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
++ priv->config.cdsh_offset = (EIP197_MAX_TOKENS + mask) & ~mask;
+
+- priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
++ /* res token is behind the descr, but ofs must be rounded to buswdth */
++ priv->config.res_offset = (EIP197_RD64_FETCH_SIZE + mask) & ~mask;
++ /* now the size of the descr is this 1st part plus the result struct */
++ priv->config.rd_size = priv->config.res_offset +
++ EIP197_RD64_RESULT_SIZE;
+ priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
++
++ /* convert dwords to bytes */
++ priv->config.cd_offset *= sizeof(u32);
++ priv->config.cdsh_offset *= sizeof(u32);
++ priv->config.rd_offset *= sizeof(u32);
++ priv->config.res_offset *= sizeof(u32);
+ }
+
+ static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
+@@ -1309,7 +1392,7 @@ static int safexcel_probe_generic(void *
+ int is_pci_dev)
+ {
+ struct device *dev = priv->dev;
+- u32 peid, version, mask, val, hiaopt;
++ u32 peid, version, mask, val, hiaopt, hwopt, peopt;
+ int i, ret, hwctg;
+
+ priv->context_pool = dmam_pool_create("safexcel-context", dev,
+@@ -1371,13 +1454,16 @@ static int safexcel_probe_generic(void *
+ */
+ version = readl(EIP197_GLOBAL(priv) + EIP197_VERSION);
+ if (((priv->flags & SAFEXCEL_HW_EIP197) &&
+- (EIP197_REG_LO16(version) != EIP197_VERSION_LE)) ||
++ (EIP197_REG_LO16(version) != EIP197_VERSION_LE) &&
++ (EIP197_REG_LO16(version) != EIP196_VERSION_LE)) ||
+ ((!(priv->flags & SAFEXCEL_HW_EIP197) &&
+ (EIP197_REG_LO16(version) != EIP97_VERSION_LE)))) {
+ /*
+ * We did not find the device that matched our initial probing
+ * (or our initial probing failed) Report appropriate error.
+ */
++ dev_err(priv->dev, "Probing for EIP97/EIP19x failed - no such device (read %08x)\n",
++ version);
+ return -ENODEV;
+ }
+
+@@ -1385,6 +1471,14 @@ static int safexcel_probe_generic(void *
+ hwctg = version >> 28;
+ peid = version & 255;
+
++ /* Detect EIP206 processing pipe */
++ version = readl(EIP197_PE(priv) + + EIP197_PE_VERSION(0));
++ if (EIP197_REG_LO16(version) != EIP206_VERSION_LE) {
++ dev_err(priv->dev, "EIP%d: EIP206 not detected\n", peid);
++ return -ENODEV;
++ }
++ priv->hwconfig.ppver = EIP197_VERSION_MASK(version);
++
+ /* Detect EIP96 packet engine and version */
+ version = readl(EIP197_PE(priv) + EIP197_PE_EIP96_VERSION(0));
+ if (EIP197_REG_LO16(version) != EIP96_VERSION_LE) {
+@@ -1393,10 +1487,13 @@ static int safexcel_probe_generic(void *
+ }
+ priv->hwconfig.pever = EIP197_VERSION_MASK(version);
+
++ hwopt = readl(EIP197_GLOBAL(priv) + EIP197_OPTIONS);
+ hiaopt = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_OPTIONS);
+
+ if (priv->flags & SAFEXCEL_HW_EIP197) {
+ /* EIP197 */
++ peopt = readl(EIP197_PE(priv) + EIP197_PE_OPTIONS(0));
++
+ priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
+ EIP197_HWDATAW_MASK;
+ priv->hwconfig.hwcfsize = ((hiaopt >> EIP197_CFSIZE_OFFSET) &
+@@ -1405,6 +1502,19 @@ static int safexcel_probe_generic(void *
+ priv->hwconfig.hwrfsize = ((hiaopt >> EIP197_RFSIZE_OFFSET) &
+ EIP197_RFSIZE_MASK) +
+ EIP197_RFSIZE_ADJUST;
++ priv->hwconfig.hwnumpes = (hiaopt >> EIP197_N_PES_OFFSET) &
++ EIP197_N_PES_MASK;
++ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
++ EIP197_N_RINGS_MASK;
++ if (hiaopt & EIP197_HIA_OPT_HAS_PE_ARB)
++ priv->flags |= EIP197_PE_ARB;
++ if (EIP206_OPT_ICE_TYPE(peopt) == 1)
++ priv->flags |= EIP197_ICE;
++ /* If not a full TRC, then assume simple TRC */
++ if (!(hwopt & EIP197_OPT_HAS_TRC))
++ priv->flags |= EIP197_SIMPLE_TRC;
++ /* EIP197 always has SOME form of TRC */
++ priv->flags |= EIP197_TRC_CACHE;
+ } else {
+ /* EIP97 */
+ priv->hwconfig.hwdataw = (hiaopt >> EIP197_HWDATAW_OFFSET) &
+@@ -1413,6 +1523,23 @@ static int safexcel_probe_generic(void *
+ EIP97_CFSIZE_MASK;
+ priv->hwconfig.hwrfsize = (hiaopt >> EIP97_RFSIZE_OFFSET) &
+ EIP97_RFSIZE_MASK;
++ priv->hwconfig.hwnumpes = 1; /* by definition */
++ priv->hwconfig.hwnumrings = (hiaopt >> EIP197_N_RINGS_OFFSET) &
++ EIP197_N_RINGS_MASK;
++ }
++
++ /* Scan for ring AIC's */
++ for (i = 0; i < EIP197_MAX_RING_AIC; i++) {
++ version = readl(EIP197_HIA_AIC_R(priv) +
++ EIP197_HIA_AIC_R_VERSION(i));
++ if (EIP197_REG_LO16(version) != EIP201_VERSION_LE)
++ break;
++ }
++ priv->hwconfig.hwnumraic = i;
++ /* Low-end EIP196 may not have any ring AIC's ... */
++ if (!priv->hwconfig.hwnumraic) {
++ dev_err(priv->dev, "No ring interrupt controller present!\n");
++ return -ENODEV;
+ }
+
+ /* Get supported algorithms from EIP96 transform engine */
+@@ -1420,10 +1547,12 @@ static int safexcel_probe_generic(void *
+ EIP197_PE_EIP96_OPTIONS(0));
+
+ /* Print single info line describing what we just detected */
+- dev_info(priv->dev, "EIP%d:%x(%d)-HIA:%x(%d,%d,%d),PE:%x,alg:%08x\n",
+- peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hiaver,
+- priv->hwconfig.hwdataw, priv->hwconfig.hwcfsize,
+- priv->hwconfig.hwrfsize, priv->hwconfig.pever,
++ dev_info(priv->dev, "EIP%d:%x(%d,%d,%d,%d)-HIA:%x(%d,%d,%d),PE:%x/%x,alg:%08x\n",
++ peid, priv->hwconfig.hwver, hwctg, priv->hwconfig.hwnumpes,
++ priv->hwconfig.hwnumrings, priv->hwconfig.hwnumraic,
++ priv->hwconfig.hiaver, priv->hwconfig.hwdataw,
++ priv->hwconfig.hwcfsize, priv->hwconfig.hwrfsize,
++ priv->hwconfig.ppver, priv->hwconfig.pever,
+ priv->hwconfig.algo_flags);
+
+ safexcel_configure(priv);
+@@ -1547,7 +1676,6 @@ static void safexcel_hw_reset_rings(stru
+ }
+ }
+
+-#if IS_ENABLED(CONFIG_OF)
+ /* for Device Tree platform driver */
+
+ static int safexcel_probe(struct platform_device *pdev)
+@@ -1625,6 +1753,7 @@ static int safexcel_remove(struct platfo
+ safexcel_unregister_algorithms(priv);
+ safexcel_hw_reset_rings(priv);
+
++ clk_disable_unprepare(priv->reg_clk);
+ clk_disable_unprepare(priv->clk);
+
+ for (i = 0; i < priv->config.rings; i++)
+@@ -1666,9 +1795,7 @@ static struct platform_driver crypto_sa
+ .of_match_table = safexcel_of_match_table,
+ },
+ };
+-#endif
+
+-#if IS_ENABLED(CONFIG_PCI)
+ /* PCIE devices - i.e. Inside Secure development boards */
+
+ static int safexcel_pci_probe(struct pci_dev *pdev,
+@@ -1759,7 +1886,7 @@ static int safexcel_pci_probe(struct pci
+ return rc;
+ }
+
+-void safexcel_pci_remove(struct pci_dev *pdev)
++static void safexcel_pci_remove(struct pci_dev *pdev)
+ {
+ struct safexcel_crypto_priv *priv = pci_get_drvdata(pdev);
+ int i;
+@@ -1789,54 +1916,32 @@ static struct pci_driver safexcel_pci_dr
+ .probe = safexcel_pci_probe,
+ .remove = safexcel_pci_remove,
+ };
+-#endif
+-
+-/* Unfortunately, we have to resort to global variables here */
+-#if IS_ENABLED(CONFIG_PCI)
+-int pcireg_rc = -EINVAL; /* Default safe value */
+-#endif
+-#if IS_ENABLED(CONFIG_OF)
+-int ofreg_rc = -EINVAL; /* Default safe value */
+-#endif
+
+ static int __init safexcel_init(void)
+ {
+-#if IS_ENABLED(CONFIG_PCI)
++ int ret;
++
+ /* Register PCI driver */
+- pcireg_rc = pci_register_driver(&safexcel_pci_driver);
+-#endif
++ ret = pci_register_driver(&safexcel_pci_driver);
+
+-#if IS_ENABLED(CONFIG_OF)
+ /* Register platform driver */
+- ofreg_rc = platform_driver_register(&crypto_safexcel);
+- #if IS_ENABLED(CONFIG_PCI)
+- /* Return success if either PCI or OF registered OK */
+- return pcireg_rc ? ofreg_rc : 0;
+- #else
+- return ofreg_rc;
+- #endif
+-#else
+- #if IS_ENABLED(CONFIG_PCI)
+- return pcireg_rc;
+- #else
+- return -EINVAL;
+- #endif
+-#endif
++ if (IS_ENABLED(CONFIG_OF) && !ret) {
++ ret = platform_driver_register(&crypto_safexcel);
++ if (ret)
++ pci_unregister_driver(&safexcel_pci_driver);
++ }
++
++ return ret;
+ }
+
+ static void __exit safexcel_exit(void)
+ {
+-#if IS_ENABLED(CONFIG_OF)
+ /* Unregister platform driver */
+- if (!ofreg_rc)
++ if (IS_ENABLED(CONFIG_OF))
+ platform_driver_unregister(&crypto_safexcel);
+-#endif
+
+-#if IS_ENABLED(CONFIG_PCI)
+ /* Unregister PCI driver if successfully registered before */
+- if (!pcireg_rc)
+- pci_unregister_driver(&safexcel_pci_driver);
+-#endif
++ pci_unregister_driver(&safexcel_pci_driver);
+ }
+
+ module_init(safexcel_init);
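+For readability, the net effect of the module init/exit rework in the hunk above, reconstructed from its '+' lines without diff markers, is:
+
+  static int __init safexcel_init(void)
+  {
+          int ret;
+
+          /* Register PCI driver */
+          ret = pci_register_driver(&safexcel_pci_driver);
+
+          /* Register platform driver; unwind PCI registration on failure */
+          if (IS_ENABLED(CONFIG_OF) && !ret) {
+                  ret = platform_driver_register(&crypto_safexcel);
+                  if (ret)
+                          pci_unregister_driver(&safexcel_pci_driver);
+          }
+
+          return ret;
+  }
+
+  static void __exit safexcel_exit(void)
+  {
+          /* Unregister platform driver */
+          if (IS_ENABLED(CONFIG_OF))
+                  platform_driver_unregister(&crypto_safexcel);
+
+          /* Unregister PCI driver if successfully registered before */
+          pci_unregister_driver(&safexcel_pci_driver);
+  }
+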
+--- a/drivers/crypto/inside-secure/safexcel_cipher.c
++++ b/drivers/crypto/inside-secure/safexcel_cipher.c
+@@ -5,18 +5,22 @@
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
++#include <asm/unaligned.h>
+ #include <linux/device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmapool.h>
+-
+ #include <crypto/aead.h>
+ #include <crypto/aes.h>
+ #include <crypto/authenc.h>
++#include <crypto/chacha.h>
+ #include <crypto/ctr.h>
+ #include <crypto/internal/des.h>
+ #include <crypto/gcm.h>
+ #include <crypto/ghash.h>
++#include <crypto/poly1305.h>
+ #include <crypto/sha.h>
++#include <crypto/sm3.h>
++#include <crypto/sm4.h>
+ #include <crypto/xts.h>
+ #include <crypto/skcipher.h>
+ #include <crypto/internal/aead.h>
+@@ -33,6 +37,8 @@ enum safexcel_cipher_alg {
+ SAFEXCEL_DES,
+ SAFEXCEL_3DES,
+ SAFEXCEL_AES,
++ SAFEXCEL_CHACHA20,
++ SAFEXCEL_SM4,
+ };
+
+ struct safexcel_cipher_ctx {
+@@ -41,8 +47,12 @@ struct safexcel_cipher_ctx {
+
+ u32 mode;
+ enum safexcel_cipher_alg alg;
+- bool aead;
+- int xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
++ u8 aead; /* !=0=AEAD, 2=IPSec ESP AEAD, 3=IPsec ESP GMAC */
++ u8 xcm; /* 0=authenc, 1=GCM, 2 reserved for CCM */
++ u8 aadskip;
++ u8 blocksz;
++ u32 ivmask;
++ u32 ctrinit;
+
+ __le32 key[16];
+ u32 nonce;
+@@ -51,10 +61,11 @@ struct safexcel_cipher_ctx {
+ /* All the below is AEAD specific */
+ u32 hash_alg;
+ u32 state_sz;
+- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
++ __be32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
++ __be32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
+
+ struct crypto_cipher *hkaes;
++ struct crypto_aead *fback;
+ };
+
+ struct safexcel_cipher_req {
+@@ -65,206 +76,298 @@ struct safexcel_cipher_req {
+ int nr_src, nr_dst;
+ };
+
+-static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+- struct safexcel_command_desc *cdesc)
++static int safexcel_skcipher_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
++ struct safexcel_command_desc *cdesc)
+ {
+- u32 block_sz = 0;
+-
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+-
+ /* 32 bit nonce */
+ cdesc->control_data.token[0] = ctx->nonce;
+ /* 64 bit IV part */
+ memcpy(&cdesc->control_data.token[1], iv, 8);
+- /* 32 bit counter, start at 1 (big endian!) */
+- cdesc->control_data.token[3] = cpu_to_be32(1);
+-
+- return;
+- } else if (ctx->xcm == EIP197_XCM_MODE_GCM) {
+- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+-
+- /* 96 bit IV part */
+- memcpy(&cdesc->control_data.token[0], iv, 12);
+- /* 32 bit counter, start at 1 (big endian!) */
+- cdesc->control_data.token[3] = cpu_to_be32(1);
+-
+- return;
+- } else if (ctx->xcm == EIP197_XCM_MODE_CCM) {
++ /* 32 bit counter, start at 0 or 1 (big endian!) */
++ cdesc->control_data.token[3] =
++ (__force u32)cpu_to_be32(ctx->ctrinit);
++ return 4;
++ }
++ if (ctx->alg == SAFEXCEL_CHACHA20) {
+ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+-
+- /* Variable length IV part */
+- memcpy(&cdesc->control_data.token[0], iv, 15 - iv[0]);
+- /* Start variable length counter at 0 */
+- memset((u8 *)&cdesc->control_data.token[0] + 15 - iv[0],
+- 0, iv[0] + 1);
+-
+- return;
++ /* 96 bit nonce part */
++ memcpy(&cdesc->control_data.token[0], &iv[4], 12);
++ /* 32 bit counter */
++ cdesc->control_data.token[3] = *(u32 *)iv;
++ return 4;
+ }
+
+- if (ctx->mode != CONTEXT_CONTROL_CRYPTO_MODE_ECB) {
+- switch (ctx->alg) {
+- case SAFEXCEL_DES:
+- block_sz = DES_BLOCK_SIZE;
+- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+- break;
+- case SAFEXCEL_3DES:
+- block_sz = DES3_EDE_BLOCK_SIZE;
+- cdesc->control_data.options |= EIP197_OPTION_2_TOKEN_IV_CMD;
+- break;
+- case SAFEXCEL_AES:
+- block_sz = AES_BLOCK_SIZE;
+- cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+- break;
+- }
+- memcpy(cdesc->control_data.token, iv, block_sz);
+- }
++ cdesc->control_data.options |= ctx->ivmask;
++ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
++ return ctx->blocksz / sizeof(u32);
+ }
+
+ static void safexcel_skcipher_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc,
++ struct safexcel_token *atoken,
+ u32 length)
+ {
+ struct safexcel_token *token;
++ int ivlen;
+
+- safexcel_cipher_token(ctx, iv, cdesc);
+-
+- /* skip over worst case IV of 4 dwords, no need to be exact */
+- token = (struct safexcel_token *)(cdesc->control_data.token + 4);
++ ivlen = safexcel_skcipher_iv(ctx, iv, cdesc);
++ if (ivlen == 4) {
++ /* No space in cdesc, instruction moves to atoken */
++ cdesc->additional_cdata_size = 1;
++ token = atoken;
++ } else {
++ /* Everything fits in cdesc */
++ token = (struct safexcel_token *)(cdesc->control_data.token + 2);
++ /* Need to pad with NOP */
++ eip197_noop_token(&token[1]);
++ }
++
++ token->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
++ token->packet_length = length;
++ token->stat = EIP197_TOKEN_STAT_LAST_PACKET |
++ EIP197_TOKEN_STAT_LAST_HASH;
++ token->instructions = EIP197_TOKEN_INS_LAST |
++ EIP197_TOKEN_INS_TYPE_CRYPTO |
++ EIP197_TOKEN_INS_TYPE_OUTPUT;
++}
+
+- token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+- token[0].packet_length = length;
+- token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET |
+- EIP197_TOKEN_STAT_LAST_HASH;
+- token[0].instructions = EIP197_TOKEN_INS_LAST |
+- EIP197_TOKEN_INS_TYPE_CRYPTO |
+- EIP197_TOKEN_INS_TYPE_OUTPUT;
++static void safexcel_aead_iv(struct safexcel_cipher_ctx *ctx, u8 *iv,
++ struct safexcel_command_desc *cdesc)
++{
++ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD ||
++ ctx->aead & EIP197_AEAD_TYPE_IPSEC_ESP) { /* _ESP and _ESP_GMAC */
++ /* 32 bit nonce */
++ cdesc->control_data.token[0] = ctx->nonce;
++ /* 64 bit IV part */
++ memcpy(&cdesc->control_data.token[1], iv, 8);
++ /* 32 bit counter, start at 0 or 1 (big endian!) */
++ cdesc->control_data.token[3] =
++ (__force u32)cpu_to_be32(ctx->ctrinit);
++ return;
++ }
++ if (ctx->xcm == EIP197_XCM_MODE_GCM || ctx->alg == SAFEXCEL_CHACHA20) {
++ /* 96 bit IV part */
++ memcpy(&cdesc->control_data.token[0], iv, 12);
++ /* 32 bit counter, start at 0 or 1 (big endian!) */
++ cdesc->control_data.token[3] =
++ (__force u32)cpu_to_be32(ctx->ctrinit);
++ return;
++ }
++ /* CBC */
++ memcpy(cdesc->control_data.token, iv, ctx->blocksz);
+ }
+
+ static void safexcel_aead_token(struct safexcel_cipher_ctx *ctx, u8 *iv,
+ struct safexcel_command_desc *cdesc,
++ struct safexcel_token *atoken,
+ enum safexcel_cipher_direction direction,
+ u32 cryptlen, u32 assoclen, u32 digestsize)
+ {
+- struct safexcel_token *token;
++ struct safexcel_token *aadref;
++ int atoksize = 2; /* Start with minimum size */
++ int assocadj = assoclen - ctx->aadskip, aadalign;
+
+- safexcel_cipher_token(ctx, iv, cdesc);
++ /* Always 4 dwords of embedded IV for AEAD modes */
++ cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+
+- if (direction == SAFEXCEL_ENCRYPT) {
+- /* align end of instruction sequence to end of token */
+- token = (struct safexcel_token *)(cdesc->control_data.token +
+- EIP197_MAX_TOKENS - 13);
+-
+- token[12].opcode = EIP197_TOKEN_OPCODE_INSERT;
+- token[12].packet_length = digestsize;
+- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+- EIP197_TOKEN_STAT_LAST_PACKET;
+- token[12].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+- EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+- } else {
++ if (direction == SAFEXCEL_DECRYPT)
+ cryptlen -= digestsize;
+
+- /* align end of instruction sequence to end of token */
+- token = (struct safexcel_token *)(cdesc->control_data.token +
+- EIP197_MAX_TOKENS - 14);
+-
+- token[12].opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
+- token[12].packet_length = digestsize;
+- token[12].stat = EIP197_TOKEN_STAT_LAST_HASH |
+- EIP197_TOKEN_STAT_LAST_PACKET;
+- token[12].instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+-
+- token[13].opcode = EIP197_TOKEN_OPCODE_VERIFY;
+- token[13].packet_length = digestsize |
+- EIP197_TOKEN_HASH_RESULT_VERIFY;
+- token[13].stat = EIP197_TOKEN_STAT_LAST_HASH |
+- EIP197_TOKEN_STAT_LAST_PACKET;
+- token[13].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
+- }
+-
+- token[6].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+- token[6].packet_length = assoclen;
+-
+- if (likely(cryptlen)) {
+- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+-
+- token[10].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+- token[10].packet_length = cryptlen;
+- token[10].stat = EIP197_TOKEN_STAT_LAST_HASH;
+- token[10].instructions = EIP197_TOKEN_INS_LAST |
+- EIP197_TOKEN_INS_TYPE_CRYPTO |
+- EIP197_TOKEN_INS_TYPE_HASH |
+- EIP197_TOKEN_INS_TYPE_OUTPUT;
+- } else if (ctx->xcm != EIP197_XCM_MODE_CCM) {
+- token[6].stat = EIP197_TOKEN_STAT_LAST_HASH;
+- token[6].instructions = EIP197_TOKEN_INS_LAST |
+- EIP197_TOKEN_INS_TYPE_HASH;
+- }
+-
+- if (!ctx->xcm)
+- return;
+-
+- token[8].opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
+- token[8].packet_length = 0;
+- token[8].instructions = AES_BLOCK_SIZE;
+-
+- token[9].opcode = EIP197_TOKEN_OPCODE_INSERT;
+- token[9].packet_length = AES_BLOCK_SIZE;
+- token[9].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+- EIP197_TOKEN_INS_TYPE_CRYPTO;
+-
+- if (ctx->xcm == EIP197_XCM_MODE_GCM) {
+- token[6].instructions = EIP197_TOKEN_INS_LAST |
+- EIP197_TOKEN_INS_TYPE_HASH;
+- } else {
+- u8 *cbcmaciv = (u8 *)&token[1];
+- u32 *aadlen = (u32 *)&token[5];
+-
++ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM)) {
+ /* Construct IV block B0 for the CBC-MAC */
+- token[0].opcode = EIP197_TOKEN_OPCODE_INSERT;
+- token[0].packet_length = AES_BLOCK_SIZE +
+- ((assoclen > 0) << 1);
+- token[0].instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
+- EIP197_TOKEN_INS_TYPE_HASH;
+- /* Variable length IV part */
+- memcpy(cbcmaciv, iv, 15 - iv[0]);
+- /* fixup flags byte */
+- cbcmaciv[0] |= ((assoclen > 0) << 6) | ((digestsize - 2) << 2);
+- /* Clear upper bytes of variable message length to 0 */
+- memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
+- /* insert lower 2 bytes of message length */
+- cbcmaciv[14] = cryptlen >> 8;
+- cbcmaciv[15] = cryptlen & 255;
+-
+- if (assoclen) {
+- *aadlen = cpu_to_le32(cpu_to_be16(assoclen));
+- assoclen += 2;
++ u8 *final_iv = (u8 *)cdesc->control_data.token;
++ u8 *cbcmaciv = (u8 *)&atoken[1];
++ __le32 *aadlen = (__le32 *)&atoken[5];
++
++ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
++ /* Length + nonce */
++ cdesc->control_data.token[0] = ctx->nonce;
++ /* Fixup flags byte */
++ *(__le32 *)cbcmaciv =
++ cpu_to_le32(ctx->nonce |
++ ((assocadj > 0) << 6) |
++ ((digestsize - 2) << 2));
++ /* 64 bit IV part */
++ memcpy(&cdesc->control_data.token[1], iv, 8);
++ memcpy(cbcmaciv + 4, iv, 8);
++ /* Start counter at 0 */
++ cdesc->control_data.token[3] = 0;
++ /* Message length */
++ *(__be32 *)(cbcmaciv + 12) = cpu_to_be32(cryptlen);
++ } else {
++ /* Variable length IV part */
++ memcpy(final_iv, iv, 15 - iv[0]);
++ memcpy(cbcmaciv, iv, 15 - iv[0]);
++ /* Start variable length counter at 0 */
++ memset(final_iv + 15 - iv[0], 0, iv[0] + 1);
++ memset(cbcmaciv + 15 - iv[0], 0, iv[0] - 1);
++ /* fixup flags byte */
++ cbcmaciv[0] |= ((assocadj > 0) << 6) |
++ ((digestsize - 2) << 2);
++ /* insert lower 2 bytes of message length */
++ cbcmaciv[14] = cryptlen >> 8;
++ cbcmaciv[15] = cryptlen & 255;
++ }
++
++ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
++ atoken->packet_length = AES_BLOCK_SIZE +
++ ((assocadj > 0) << 1);
++ atoken->stat = 0;
++ atoken->instructions = EIP197_TOKEN_INS_ORIGIN_TOKEN |
++ EIP197_TOKEN_INS_TYPE_HASH;
++
++ if (likely(assocadj)) {
++ *aadlen = cpu_to_le32((assocadj >> 8) |
++ (assocadj & 255) << 8);
++ atoken += 6;
++ atoksize += 7;
++ } else {
++ atoken += 5;
++ atoksize += 6;
+ }
+
+- token[6].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+-
+- /* Align AAD data towards hash engine */
+- token[7].opcode = EIP197_TOKEN_OPCODE_INSERT;
+- assoclen &= 15;
+- token[7].packet_length = assoclen ? 16 - assoclen : 0;
+-
++ /* Process AAD data */
++ aadref = atoken;
++ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
++ atoken->packet_length = assocadj;
++ atoken->stat = 0;
++ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
++ atoken++;
++
++ /* For CCM only, align AAD data towards hash engine */
++ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
++ aadalign = (assocadj + 2) & 15;
++ atoken->packet_length = assocadj && aadalign ?
++ 16 - aadalign :
++ 0;
+ if (likely(cryptlen)) {
+- token[7].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+-
+- /* Align crypto data towards hash engine */
+- token[10].stat = 0;
++ atoken->stat = 0;
++ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
++ } else {
++ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
++ atoken->instructions = EIP197_TOKEN_INS_LAST |
++ EIP197_TOKEN_INS_TYPE_HASH;
++ }
++ } else {
++ safexcel_aead_iv(ctx, iv, cdesc);
+
+- token[11].opcode = EIP197_TOKEN_OPCODE_INSERT;
+- cryptlen &= 15;
+- token[11].packet_length = cryptlen ? 16 - cryptlen : 0;
+- token[11].stat = EIP197_TOKEN_STAT_LAST_HASH;
+- token[11].instructions = EIP197_TOKEN_INS_TYPE_HASH;
++ /* Process AAD data */
++ aadref = atoken;
++ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
++ atoken->packet_length = assocadj;
++ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
++ atoken->instructions = EIP197_TOKEN_INS_LAST |
++ EIP197_TOKEN_INS_TYPE_HASH;
++ }
++ atoken++;
++
++ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
++ /* For ESP mode (and not GMAC), skip over the IV */
++ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
++ atoken->packet_length = EIP197_AEAD_IPSEC_IV_SIZE;
++ atoken->stat = 0;
++ atoken->instructions = 0;
++ atoken++;
++ atoksize++;
++ } else if (unlikely(ctx->alg == SAFEXCEL_CHACHA20 &&
++ direction == SAFEXCEL_DECRYPT)) {
++ /* Poly-chacha decryption needs a dummy NOP here ... */
++ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
++ atoken->packet_length = 16; /* According to Op Manual */
++ atoken->stat = 0;
++ atoken->instructions = 0;
++ atoken++;
++ atoksize++;
++ }
++
++ if (ctx->xcm) {
++ /* For GCM and CCM, obtain enc(Y0) */
++ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT_REMRES;
++ atoken->packet_length = 0;
++ atoken->stat = 0;
++ atoken->instructions = AES_BLOCK_SIZE;
++ atoken++;
++
++ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
++ atoken->packet_length = AES_BLOCK_SIZE;
++ atoken->stat = 0;
++ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
++ EIP197_TOKEN_INS_TYPE_CRYPTO;
++ atoken++;
++ atoksize += 2;
++ }
++
++ if (likely(cryptlen || ctx->alg == SAFEXCEL_CHACHA20)) {
++ /* Fixup stat field for AAD direction instruction */
++ aadref->stat = 0;
++
++ /* Process crypto data */
++ atoken->opcode = EIP197_TOKEN_OPCODE_DIRECTION;
++ atoken->packet_length = cryptlen;
++
++ if (unlikely(ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC)) {
++ /* Fixup instruction field for AAD dir instruction */
++ aadref->instructions = EIP197_TOKEN_INS_TYPE_HASH;
++
++ /* Do not send to crypt engine in case of GMAC */
++ atoken->instructions = EIP197_TOKEN_INS_LAST |
++ EIP197_TOKEN_INS_TYPE_HASH |
++ EIP197_TOKEN_INS_TYPE_OUTPUT;
++ } else {
++ atoken->instructions = EIP197_TOKEN_INS_LAST |
++ EIP197_TOKEN_INS_TYPE_CRYPTO |
++ EIP197_TOKEN_INS_TYPE_HASH |
++ EIP197_TOKEN_INS_TYPE_OUTPUT;
++ }
++
++ cryptlen &= 15;
++ if (unlikely(ctx->xcm == EIP197_XCM_MODE_CCM && cryptlen)) {
++ atoken->stat = 0;
++ /* For CCM only, pad crypto data to the hash engine */
++ atoken++;
++ atoksize++;
++ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
++ atoken->packet_length = 16 - cryptlen;
++ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
++ atoken->instructions = EIP197_TOKEN_INS_TYPE_HASH;
+ } else {
+- token[7].stat = EIP197_TOKEN_STAT_LAST_HASH;
+- token[7].instructions = EIP197_TOKEN_INS_LAST |
+- EIP197_TOKEN_INS_TYPE_HASH;
++ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH;
+ }
++ atoken++;
++ atoksize++;
+ }
++
++ if (direction == SAFEXCEL_ENCRYPT) {
++ /* Append ICV */
++ atoken->opcode = EIP197_TOKEN_OPCODE_INSERT;
++ atoken->packet_length = digestsize;
++ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
++ EIP197_TOKEN_STAT_LAST_PACKET;
++ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
++ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
++ } else {
++ /* Extract ICV */
++ atoken->opcode = EIP197_TOKEN_OPCODE_RETRIEVE;
++ atoken->packet_length = digestsize;
++ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
++ EIP197_TOKEN_STAT_LAST_PACKET;
++ atoken->instructions = EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
++ atoken++;
++ atoksize++;
++
++ /* Verify ICV */
++ atoken->opcode = EIP197_TOKEN_OPCODE_VERIFY;
++ atoken->packet_length = digestsize |
++ EIP197_TOKEN_HASH_RESULT_VERIFY;
++ atoken->stat = EIP197_TOKEN_STAT_LAST_HASH |
++ EIP197_TOKEN_STAT_LAST_PACKET;
++ atoken->instructions = EIP197_TOKEN_INS_TYPE_OUTPUT;
++ }
++
++ /* Fixup length of the token in the command descriptor */
++ cdesc->additional_cdata_size = atoksize;
+ }
+
+ static int safexcel_skcipher_aes_setkey(struct crypto_skcipher *ctfm,
+@@ -277,14 +380,12 @@ static int safexcel_skcipher_aes_setkey(
+ int ret, i;
+
+ ret = aes_expandkey(&aes, key, len);
+- if (ret) {
+- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ if (ret)
+ return ret;
+- }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
++ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+@@ -309,43 +410,57 @@ static int safexcel_aead_setkey(struct c
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ struct crypto_authenc_keys keys;
+ struct crypto_aes_ctx aes;
+- int err = -EINVAL;
++ int err = -EINVAL, i;
+
+- if (crypto_authenc_extractkeys(&keys, key, len) != 0)
++ if (unlikely(crypto_authenc_extractkeys(&keys, key, len)))
+ goto badkey;
+
+ if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD) {
+- /* Minimum keysize is minimum AES key size + nonce size */
+- if (keys.enckeylen < (AES_MIN_KEY_SIZE +
+- CTR_RFC3686_NONCE_SIZE))
++ /* Must have at least space for the nonce here */
++ if (unlikely(keys.enckeylen < CTR_RFC3686_NONCE_SIZE))
+ goto badkey;
+ /* last 4 bytes of key are the nonce! */
+ ctx->nonce = *(u32 *)(keys.enckey + keys.enckeylen -
+ CTR_RFC3686_NONCE_SIZE);
+ /* exclude the nonce here */
+- keys.enckeylen -= CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
++ keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ /* Encryption key */
+ switch (ctx->alg) {
++ case SAFEXCEL_DES:
++ err = verify_aead_des_key(ctfm, keys.enckey, keys.enckeylen);
++ if (unlikely(err))
++ goto badkey;
++ break;
+ case SAFEXCEL_3DES:
+ err = verify_aead_des3_key(ctfm, keys.enckey, keys.enckeylen);
+ if (unlikely(err))
+- goto badkey_expflags;
++ goto badkey;
+ break;
+ case SAFEXCEL_AES:
+ err = aes_expandkey(&aes, keys.enckey, keys.enckeylen);
+ if (unlikely(err))
+ goto badkey;
+ break;
++ case SAFEXCEL_SM4:
++ if (unlikely(keys.enckeylen != SM4_KEY_SIZE))
++ goto badkey;
++ break;
+ default:
+ dev_err(priv->dev, "aead: unsupported cipher algorithm\n");
+ goto badkey;
+ }
+
+- if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+- memcmp(ctx->key, keys.enckey, keys.enckeylen))
+- ctx->base.needs_inv = true;
++ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
++ for (i = 0; i < keys.enckeylen / sizeof(u32); i++) {
++ if (le32_to_cpu(ctx->key[i]) !=
++ ((u32 *)keys.enckey)[i]) {
++ ctx->base.needs_inv = true;
++ break;
++ }
++ }
++ }
+
+ /* Auth key */
+ switch (ctx->hash_alg) {
+@@ -374,21 +489,24 @@ static int safexcel_aead_setkey(struct c
+ keys.authkeylen, &istate, &ostate))
+ goto badkey;
+ break;
++ case CONTEXT_CONTROL_CRYPTO_ALG_SM3:
++ if (safexcel_hmac_setkey("safexcel-sm3", keys.authkey,
++ keys.authkeylen, &istate, &ostate))
++ goto badkey;
++ break;
+ default:
+ dev_err(priv->dev, "aead: unsupported hash algorithm\n");
+ goto badkey;
+ }
+
+- crypto_aead_set_flags(ctfm, crypto_aead_get_flags(ctfm) &
+- CRYPTO_TFM_RES_MASK);
+-
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma &&
+ (memcmp(ctx->ipad, istate.state, ctx->state_sz) ||
+ memcmp(ctx->opad, ostate.state, ctx->state_sz)))
+ ctx->base.needs_inv = true;
+
+ /* Now copy the keys into the context */
+- memcpy(ctx->key, keys.enckey, keys.enckeylen);
++ for (i = 0; i < keys.enckeylen / sizeof(u32); i++)
++ ctx->key[i] = cpu_to_le32(((u32 *)keys.enckey)[i]);
+ ctx->key_len = keys.enckeylen;
+
+ memcpy(ctx->ipad, &istate.state, ctx->state_sz);
+@@ -398,8 +516,6 @@ static int safexcel_aead_setkey(struct c
+ return 0;
+
+ badkey:
+- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+-badkey_expflags:
+ memzero_explicit(&keys, sizeof(keys));
+ return err;
+ }
+@@ -423,6 +539,17 @@ static int safexcel_context_control(stru
+ CONTEXT_CONTROL_DIGEST_XCM |
+ ctx->hash_alg |
+ CONTEXT_CONTROL_SIZE(ctrl_size);
++ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
++ /* Chacha20-Poly1305 */
++ cdesc->control_data.control0 =
++ CONTEXT_CONTROL_KEY_EN |
++ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 |
++ (sreq->direction == SAFEXCEL_ENCRYPT ?
++ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT :
++ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN) |
++ ctx->hash_alg |
++ CONTEXT_CONTROL_SIZE(ctrl_size);
++ return 0;
+ } else {
+ ctrl_size += ctx->state_sz / sizeof(u32) * 2;
+ cdesc->control_data.control0 =
+@@ -431,17 +558,21 @@ static int safexcel_context_control(stru
+ ctx->hash_alg |
+ CONTEXT_CONTROL_SIZE(ctrl_size);
+ }
+- if (sreq->direction == SAFEXCEL_ENCRYPT)
+- cdesc->control_data.control0 |=
+- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
+- CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT :
+- CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
+
++ if (sreq->direction == SAFEXCEL_ENCRYPT &&
++ (ctx->xcm == EIP197_XCM_MODE_CCM ||
++ ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP_GMAC))
++ cdesc->control_data.control0 |=
++ CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT;
++ else if (sreq->direction == SAFEXCEL_ENCRYPT)
++ cdesc->control_data.control0 |=
++ CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT;
++ else if (ctx->xcm == EIP197_XCM_MODE_CCM)
++ cdesc->control_data.control0 |=
++ CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN;
+ else
+ cdesc->control_data.control0 |=
+- (ctx->xcm == EIP197_XCM_MODE_CCM) ?
+- CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN :
+- CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
++ CONTEXT_CONTROL_TYPE_HASH_DECRYPT_IN;
+ } else {
+ if (sreq->direction == SAFEXCEL_ENCRYPT)
+ cdesc->control_data.control0 =
+@@ -480,6 +611,12 @@ static int safexcel_context_control(stru
+ ctx->key_len >> ctx->xts);
+ return -EINVAL;
+ }
++ } else if (ctx->alg == SAFEXCEL_CHACHA20) {
++ cdesc->control_data.control0 |=
++ CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20;
++ } else if (ctx->alg == SAFEXCEL_SM4) {
++ cdesc->control_data.control0 |=
++ CONTEXT_CONTROL_CRYPTO_ALG_SM4;
+ }
+
+ return 0;
+@@ -563,6 +700,7 @@ static int safexcel_send_req(struct cryp
+ unsigned int totlen;
+ unsigned int totlen_src = cryptlen + assoclen;
+ unsigned int totlen_dst = totlen_src;
++ struct safexcel_token *atoken;
+ int n_cdesc = 0, n_rdesc = 0;
+ int queued, i, ret = 0;
+ bool first = true;
+@@ -637,56 +775,60 @@ static int safexcel_send_req(struct cryp
+
+ memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
+
+- /* The EIP cannot deal with zero length input packets! */
+- if (totlen == 0)
+- totlen = 1;
++ if (!totlen) {
++ /*
++ * The EIP97 cannot deal with zero length input packets!
++ * So stuff a dummy command descriptor indicating a 1 byte
++ * (dummy) input packet, using the context record as source.
++ */
++ first_cdesc = safexcel_add_cdesc(priv, ring,
++ 1, 1, ctx->base.ctxr_dma,
++ 1, 1, ctx->base.ctxr_dma,
++ &atoken);
++ if (IS_ERR(first_cdesc)) {
++ /* No space left in the command descriptor ring */
++ ret = PTR_ERR(first_cdesc);
++ goto cdesc_rollback;
++ }
++ n_cdesc = 1;
++ goto skip_cdesc;
++ }
+
+ /* command descriptors */
+ for_each_sg(src, sg, sreq->nr_src, i) {
+ int len = sg_dma_len(sg);
+
+ /* Do not overflow the request */
+- if (queued - len < 0)
++ if (queued < len)
+ len = queued;
+
+ cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+ !(queued - len),
+ sg_dma_address(sg), len, totlen,
+- ctx->base.ctxr_dma);
++ ctx->base.ctxr_dma, &atoken);
+ if (IS_ERR(cdesc)) {
+ /* No space left in the command descriptor ring */
+ ret = PTR_ERR(cdesc);
+ goto cdesc_rollback;
+ }
+- n_cdesc++;
+
+- if (n_cdesc == 1) {
++ if (!n_cdesc)
+ first_cdesc = cdesc;
+- }
+
++ n_cdesc++;
+ queued -= len;
+ if (!queued)
+ break;
+ }
+-
+- if (unlikely(!n_cdesc)) {
+- /*
+- * Special case: zero length input buffer.
+- * The engine always needs the 1st command descriptor, however!
+- */
+- first_cdesc = safexcel_add_cdesc(priv, ring, 1, 1, 0, 0, totlen,
+- ctx->base.ctxr_dma);
+- n_cdesc = 1;
+- }
+-
++skip_cdesc:
+ /* Add context control words and token to first command descriptor */
+ safexcel_context_control(ctx, base, sreq, first_cdesc);
+ if (ctx->aead)
+- safexcel_aead_token(ctx, iv, first_cdesc,
++ safexcel_aead_token(ctx, iv, first_cdesc, atoken,
+ sreq->direction, cryptlen,
+ assoclen, digestsize);
+ else
+- safexcel_skcipher_token(ctx, iv, first_cdesc,
++ safexcel_skcipher_token(ctx, iv, first_cdesc, atoken,
+ cryptlen);
+
+ /* result descriptors */
+@@ -1073,6 +1215,8 @@ static int safexcel_skcipher_cra_init(st
+
+ ctx->base.send = safexcel_skcipher_send;
+ ctx->base.handle_result = safexcel_skcipher_handle_result;
++ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
++ ctx->ctrinit = 1;
+ return 0;
+ }
+
+@@ -1137,6 +1281,8 @@ static int safexcel_skcipher_aes_ecb_cra
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_AES;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
++ ctx->blocksz = 0;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+ }
+
+@@ -1171,6 +1317,7 @@ static int safexcel_skcipher_aes_cbc_cra
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_AES;
++ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+ return 0;
+ }
+@@ -1207,6 +1354,7 @@ static int safexcel_skcipher_aes_cfb_cra
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_AES;
++ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
+ return 0;
+ }
+@@ -1243,6 +1391,7 @@ static int safexcel_skcipher_aes_ofb_cra
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_AES;
++ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
+ return 0;
+ }
+@@ -1288,14 +1437,12 @@ static int safexcel_skcipher_aesctr_setk
+ /* exclude the nonce here */
+ keylen = len - CTR_RFC3686_NONCE_SIZE;
+ ret = aes_expandkey(&aes, key, keylen);
+- if (ret) {
+- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ if (ret)
+ return ret;
+- }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < keylen / sizeof(u32); i++) {
+- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
++ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+@@ -1317,6 +1464,7 @@ static int safexcel_skcipher_aes_ctr_cra
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_AES;
++ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
+ return 0;
+ }
+@@ -1352,6 +1500,7 @@ static int safexcel_des_setkey(struct cr
+ unsigned int len)
+ {
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
++ struct safexcel_crypto_priv *priv = ctx->priv;
+ int ret;
+
+ ret = verify_skcipher_des_key(ctfm, key);
+@@ -1359,7 +1508,7 @@ static int safexcel_des_setkey(struct cr
+ return ret;
+
+	/* if context exists and key changed, need to invalidate it */
+- if (ctx->base.ctxr_dma)
++ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+
+@@ -1375,6 +1524,8 @@ static int safexcel_skcipher_des_cbc_cra
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES;
++ ctx->blocksz = DES_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+ return 0;
+ }
+@@ -1412,6 +1563,8 @@ static int safexcel_skcipher_des_ecb_cra
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_DES;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
++ ctx->blocksz = 0;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+ }
+
+@@ -1444,6 +1597,7 @@ static int safexcel_des3_ede_setkey(stru
+ const u8 *key, unsigned int len)
+ {
+ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
++ struct safexcel_crypto_priv *priv = ctx->priv;
+ int err;
+
+ err = verify_skcipher_des3_key(ctfm, key);
+@@ -1451,13 +1605,11 @@ static int safexcel_des3_ede_setkey(stru
+ return err;
+
+	/* if context exists and key changed, need to invalidate it */
+- if (ctx->base.ctxr_dma) {
++ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
+ if (memcmp(ctx->key, key, len))
+ ctx->base.needs_inv = true;
+- }
+
+ memcpy(ctx->key, key, len);
+-
+ ctx->key_len = len;
+
+ return 0;
+@@ -1469,6 +1621,8 @@ static int safexcel_skcipher_des3_cbc_cr
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES;
++ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
+ return 0;
+ }
+@@ -1506,6 +1660,8 @@ static int safexcel_skcipher_des3_ecb_cr
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
++ ctx->blocksz = 0;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+ }
+
+@@ -1561,6 +1717,9 @@ static int safexcel_aead_cra_init(struct
+ ctx->priv = tmpl->priv;
+
+ ctx->alg = SAFEXCEL_AES; /* default */
++ ctx->blocksz = AES_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_4_TOKEN_IV_CMD;
++ ctx->ctrinit = 1;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC; /* default */
+ ctx->aead = true;
+ ctx->base.send = safexcel_aead_send;
+@@ -1749,6 +1908,8 @@ static int safexcel_aead_sha1_des3_cra_i
+
+ safexcel_aead_sha1_cra_init(tfm);
+ ctx->alg = SAFEXCEL_3DES; /* override default */
++ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
+ return 0;
+ }
+
+@@ -1777,6 +1938,330 @@ struct safexcel_alg_template safexcel_al
+ },
+ };
+
++static int safexcel_aead_sha256_des3_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha256_cra_init(tfm);
++ ctx->alg = SAFEXCEL_3DES; /* override default */
++ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des3_ede",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha256_des3_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha224_des3_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha224_cra_init(tfm);
++ ctx->alg = SAFEXCEL_3DES; /* override default */
++ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des3_ede",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha224_des3_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha512_des3_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha512_cra_init(tfm);
++ ctx->alg = SAFEXCEL_3DES; /* override default */
++ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des3_ede",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha512_des3_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha384_des3_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha384_cra_init(tfm);
++ ctx->alg = SAFEXCEL_3DES; /* override default */
++ ctx->blocksz = DES3_EDE_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES3_EDE_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des3_ede",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha384_des3_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha1_des_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha1_cra_init(tfm);
++ ctx->alg = SAFEXCEL_DES; /* override default */
++ ctx->blocksz = DES_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA1,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha1),cbc(des))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-des",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha1_des_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha256_des_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha256_cra_init(tfm);
++ ctx->alg = SAFEXCEL_DES; /* override default */
++ ctx->blocksz = DES_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA256_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha256),cbc(des))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha256-cbc-des",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha256_des_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha224_des_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha224_cra_init(tfm);
++ ctx->alg = SAFEXCEL_DES; /* override default */
++ ctx->blocksz = DES_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_256,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA224_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha224),cbc(des))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha224-cbc-des",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha224_des_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha512_des_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha512_cra_init(tfm);
++ ctx->alg = SAFEXCEL_DES; /* override default */
++ ctx->blocksz = DES_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA512_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha512),cbc(des))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha512-cbc-des",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha512_des_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sha384_des_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sha384_cra_init(tfm);
++ ctx->alg = SAFEXCEL_DES; /* override default */
++ ctx->blocksz = DES_BLOCK_SIZE;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_DES | SAFEXCEL_ALG_SHA2_512,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = DES_BLOCK_SIZE,
++ .maxauthsize = SHA384_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha384),cbc(des))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha384-cbc-des",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = DES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sha384_des_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
+ static int safexcel_aead_sha1_ctr_cra_init(struct crypto_tfm *tfm)
+ {
+ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+@@ -1965,14 +2450,12 @@ static int safexcel_skcipher_aesxts_setk
+ /* Only half of the key data is cipher key */
+ keylen = (len >> 1);
+ ret = aes_expandkey(&aes, key, keylen);
+- if (ret) {
+- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ if (ret)
+ return ret;
+- }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < keylen / sizeof(u32); i++) {
+- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
++ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+@@ -1984,15 +2467,13 @@ static int safexcel_skcipher_aesxts_setk
+
+ /* The other half is the tweak key */
+ ret = aes_expandkey(&aes, (u8 *)(key + keylen), keylen);
+- if (ret) {
+- crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
++ if (ret)
+ return ret;
+- }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < keylen / sizeof(u32); i++) {
+- if (ctx->key[i + keylen / sizeof(u32)] !=
+- cpu_to_le32(aes.key_enc[i])) {
++ if (le32_to_cpu(ctx->key[i + keylen / sizeof(u32)]) !=
++ aes.key_enc[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+@@ -2015,6 +2496,7 @@ static int safexcel_skcipher_aes_xts_cra
+
+ safexcel_skcipher_cra_init(tfm);
+ ctx->alg = SAFEXCEL_AES;
++ ctx->blocksz = AES_BLOCK_SIZE;
+ ctx->xts = 1;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XTS;
+ return 0;
+@@ -2075,14 +2557,13 @@ static int safexcel_aead_gcm_setkey(stru
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&aes, sizeof(aes));
+ return ret;
+ }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
++ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+@@ -2099,8 +2580,6 @@ static int safexcel_aead_gcm_setkey(stru
+ crypto_cipher_set_flags(ctx->hkaes, crypto_aead_get_flags(ctfm) &
+ CRYPTO_TFM_REQ_MASK);
+ ret = crypto_cipher_setkey(ctx->hkaes, key, len);
+- crypto_aead_set_flags(ctfm, crypto_cipher_get_flags(ctx->hkaes) &
+- CRYPTO_TFM_RES_MASK);
+ if (ret)
+ return ret;
+
+@@ -2109,7 +2588,7 @@ static int safexcel_aead_gcm_setkey(stru
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++) {
+- if (ctx->ipad[i] != cpu_to_be32(hashkey[i])) {
++ if (be32_to_cpu(ctx->ipad[i]) != hashkey[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+@@ -2135,10 +2614,7 @@ static int safexcel_aead_gcm_cra_init(st
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
+
+ ctx->hkaes = crypto_alloc_cipher("aes", 0, 0);
+- if (IS_ERR(ctx->hkaes))
+- return PTR_ERR(ctx->hkaes);
+-
+- return 0;
++ return PTR_ERR_OR_ZERO(ctx->hkaes);
+ }
+
+ static void safexcel_aead_gcm_cra_exit(struct crypto_tfm *tfm)
+@@ -2192,14 +2668,13 @@ static int safexcel_aead_ccm_setkey(stru
+
+ ret = aes_expandkey(&aes, key, len);
+ if (ret) {
+- crypto_aead_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&aes, sizeof(aes));
+ return ret;
+ }
+
+ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma) {
+ for (i = 0; i < len / sizeof(u32); i++) {
+- if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
++ if (le32_to_cpu(ctx->key[i]) != aes.key_enc[i]) {
+ ctx->base.needs_inv = true;
+ break;
+ }
+@@ -2235,6 +2710,7 @@ static int safexcel_aead_ccm_cra_init(st
+ ctx->state_sz = 3 * AES_BLOCK_SIZE;
+ ctx->xcm = EIP197_XCM_MODE_CCM;
+ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_XCM; /* override default */
++ ctx->ctrinit = 0;
+ return 0;
+ }
+
+@@ -2301,5 +2777,949 @@ struct safexcel_alg_template safexcel_al
+ .cra_exit = safexcel_aead_cra_exit,
+ .cra_module = THIS_MODULE,
+ },
++ },
++};
++
++static void safexcel_chacha20_setkey(struct safexcel_cipher_ctx *ctx,
++ const u8 *key)
++{
++ struct safexcel_crypto_priv *priv = ctx->priv;
++
++ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
++ if (memcmp(ctx->key, key, CHACHA_KEY_SIZE))
++ ctx->base.needs_inv = true;
++
++ memcpy(ctx->key, key, CHACHA_KEY_SIZE);
++ ctx->key_len = CHACHA_KEY_SIZE;
++}
++
++static int safexcel_skcipher_chacha20_setkey(struct crypto_skcipher *ctfm,
++ const u8 *key, unsigned int len)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_skcipher_ctx(ctfm);
++
++ if (len != CHACHA_KEY_SIZE)
++ return -EINVAL;
++
++ safexcel_chacha20_setkey(ctx, key);
++
++ return 0;
++}
++
++static int safexcel_skcipher_chacha20_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_skcipher_cra_init(tfm);
++ ctx->alg = SAFEXCEL_CHACHA20;
++ ctx->ctrinit = 0;
++ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_chacha20 = {
++ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
++ .algo_mask = SAFEXCEL_ALG_CHACHA20,
++ .alg.skcipher = {
++ .setkey = safexcel_skcipher_chacha20_setkey,
++ .encrypt = safexcel_encrypt,
++ .decrypt = safexcel_decrypt,
++ .min_keysize = CHACHA_KEY_SIZE,
++ .max_keysize = CHACHA_KEY_SIZE,
++ .ivsize = CHACHA_IV_SIZE,
++ .base = {
++ .cra_name = "chacha20",
++ .cra_driver_name = "safexcel-chacha20",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_skcipher_chacha20_cra_init,
++ .cra_exit = safexcel_skcipher_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_chachapoly_setkey(struct crypto_aead *ctfm,
++ const u8 *key, unsigned int len)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_aead_ctx(ctfm);
++
++ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP &&
++ len > EIP197_AEAD_IPSEC_NONCE_SIZE) {
++ /* ESP variant has nonce appended to key */
++ len -= EIP197_AEAD_IPSEC_NONCE_SIZE;
++ ctx->nonce = *(u32 *)(key + len);
++ }
++ if (len != CHACHA_KEY_SIZE)
++ return -EINVAL;
++
++ safexcel_chacha20_setkey(ctx, key);
++
++ return 0;
++}
++
++static int safexcel_aead_chachapoly_setauthsize(struct crypto_aead *tfm,
++ unsigned int authsize)
++{
++ if (authsize != POLY1305_DIGEST_SIZE)
++ return -EINVAL;
++ return 0;
++}
++
++static int safexcel_aead_chachapoly_crypt(struct aead_request *req,
++ enum safexcel_cipher_direction dir)
++{
++ struct safexcel_cipher_req *creq = aead_request_ctx(req);
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct aead_request *subreq = aead_request_ctx(req);
++ u32 key[CHACHA_KEY_SIZE / sizeof(u32) + 1];
++ int ret = 0;
++
++ /*
++ * Instead of wasting time detecting umpteen silly corner cases,
++ * just dump all "small" requests to the fallback implementation.
++ * HW would not be faster on such small requests anyway.
++ */
++ if (likely((ctx->aead != EIP197_AEAD_TYPE_IPSEC_ESP ||
++ req->assoclen >= EIP197_AEAD_IPSEC_IV_SIZE) &&
++ req->cryptlen > POLY1305_DIGEST_SIZE)) {
++ return safexcel_queue_req(&req->base, creq, dir);
++ }
++
++ /* HW cannot do full (AAD+payload) zero length, use fallback */
++ memcpy(key, ctx->key, CHACHA_KEY_SIZE);
++ if (ctx->aead == EIP197_AEAD_TYPE_IPSEC_ESP) {
++ /* ESP variant has nonce appended to the key */
++ key[CHACHA_KEY_SIZE / sizeof(u32)] = ctx->nonce;
++ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
++ CHACHA_KEY_SIZE +
++ EIP197_AEAD_IPSEC_NONCE_SIZE);
++ } else {
++ ret = crypto_aead_setkey(ctx->fback, (u8 *)key,
++ CHACHA_KEY_SIZE);
++ }
++ if (ret) {
++ crypto_aead_clear_flags(aead, CRYPTO_TFM_REQ_MASK);
++ crypto_aead_set_flags(aead, crypto_aead_get_flags(ctx->fback) &
++ CRYPTO_TFM_REQ_MASK);
++ return ret;
++ }
++
++ aead_request_set_tfm(subreq, ctx->fback);
++ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
++ req->base.data);
++ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
++ req->iv);
++ aead_request_set_ad(subreq, req->assoclen);
++
++ return (dir == SAFEXCEL_ENCRYPT) ?
++ crypto_aead_encrypt(subreq) :
++ crypto_aead_decrypt(subreq);
++}
++
++static int safexcel_aead_chachapoly_encrypt(struct aead_request *req)
++{
++ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_ENCRYPT);
++}
++
++static int safexcel_aead_chachapoly_decrypt(struct aead_request *req)
++{
++ return safexcel_aead_chachapoly_crypt(req, SAFEXCEL_DECRYPT);
++}
++
++static int safexcel_aead_fallback_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_aead *aead = __crypto_aead_cast(tfm);
++ struct aead_alg *alg = crypto_aead_alg(aead);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_cra_init(tfm);
++
++ /* Allocate fallback implementation */
++ ctx->fback = crypto_alloc_aead(alg->base.cra_name, 0,
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(ctx->fback))
++ return PTR_ERR(ctx->fback);
++
++ crypto_aead_set_reqsize(aead, max(sizeof(struct safexcel_cipher_req),
++ sizeof(struct aead_request) +
++ crypto_aead_reqsize(ctx->fback)));
++
++ return 0;
++}
++
++static int safexcel_aead_chachapoly_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_fallback_cra_init(tfm);
++ ctx->alg = SAFEXCEL_CHACHA20;
++ ctx->mode = CONTEXT_CONTROL_CHACHA20_MODE_256_32 |
++ CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK;
++ ctx->ctrinit = 0;
++ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_POLY1305;
++ ctx->state_sz = 0; /* Precomputed by HW */
++ return 0;
++}
++
++static void safexcel_aead_fallback_cra_exit(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ crypto_free_aead(ctx->fback);
++ safexcel_aead_cra_exit(tfm);
++}
++
++struct safexcel_alg_template safexcel_alg_chachapoly = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
++ .alg.aead = {
++ .setkey = safexcel_aead_chachapoly_setkey,
++ .setauthsize = safexcel_aead_chachapoly_setauthsize,
++ .encrypt = safexcel_aead_chachapoly_encrypt,
++ .decrypt = safexcel_aead_chachapoly_decrypt,
++ .ivsize = CHACHAPOLY_IV_SIZE,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ .base = {
++ .cra_name = "rfc7539(chacha20,poly1305)",
++ .cra_driver_name = "safexcel-chacha20-poly1305",
++ /* +1 to put it above HW chacha + SW poly */
++ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_chachapoly_cra_init,
++ .cra_exit = safexcel_aead_fallback_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_chachapolyesp_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret;
++
++ ret = safexcel_aead_chachapoly_cra_init(tfm);
++ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
++ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
++ return ret;
++}
++
++struct safexcel_alg_template safexcel_alg_chachapoly_esp = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_CHACHA20 | SAFEXCEL_ALG_POLY1305,
++ .alg.aead = {
++ .setkey = safexcel_aead_chachapoly_setkey,
++ .setauthsize = safexcel_aead_chachapoly_setauthsize,
++ .encrypt = safexcel_aead_chachapoly_encrypt,
++ .decrypt = safexcel_aead_chachapoly_decrypt,
++ .ivsize = CHACHAPOLY_IV_SIZE - EIP197_AEAD_IPSEC_NONCE_SIZE,
++ .maxauthsize = POLY1305_DIGEST_SIZE,
++ .base = {
++ .cra_name = "rfc7539esp(chacha20,poly1305)",
++ .cra_driver_name = "safexcel-chacha20-poly1305-esp",
++ /* +1 to put it above HW chacha + SW poly */
++ .cra_priority = SAFEXCEL_CRA_PRIORITY + 1,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_chachapolyesp_cra_init,
++ .cra_exit = safexcel_aead_fallback_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_skcipher_sm4_setkey(struct crypto_skcipher *ctfm,
++ const u8 *key, unsigned int len)
++{
++ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct safexcel_crypto_priv *priv = ctx->priv;
++
++ if (len != SM4_KEY_SIZE)
++ return -EINVAL;
++
++ if (priv->flags & EIP197_TRC_CACHE && ctx->base.ctxr_dma)
++ if (memcmp(ctx->key, key, SM4_KEY_SIZE))
++ ctx->base.needs_inv = true;
++
++ memcpy(ctx->key, key, SM4_KEY_SIZE);
++ ctx->key_len = SM4_KEY_SIZE;
++
++ return 0;
++}
++
++static int safexcel_sm4_blk_encrypt(struct skcipher_request *req)
++{
++ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
++ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
++ return -EINVAL;
++ else
++ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
++ SAFEXCEL_ENCRYPT);
++}
++
++static int safexcel_sm4_blk_decrypt(struct skcipher_request *req)
++{
++ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
++ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
++ return -EINVAL;
++ else
++ return safexcel_queue_req(&req->base, skcipher_request_ctx(req),
++ SAFEXCEL_DECRYPT);
++}
++
++static int safexcel_skcipher_sm4_ecb_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_skcipher_cra_init(tfm);
++ ctx->alg = SAFEXCEL_SM4;
++ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_ECB;
++ ctx->blocksz = 0;
++ ctx->ivmask = EIP197_OPTION_2_TOKEN_IV_CMD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_ecb_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
++ .algo_mask = SAFEXCEL_ALG_SM4,
++ .alg.skcipher = {
++ .setkey = safexcel_skcipher_sm4_setkey,
++ .encrypt = safexcel_sm4_blk_encrypt,
++ .decrypt = safexcel_sm4_blk_decrypt,
++ .min_keysize = SM4_KEY_SIZE,
++ .max_keysize = SM4_KEY_SIZE,
++ .base = {
++ .cra_name = "ecb(sm4)",
++ .cra_driver_name = "safexcel-ecb-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SM4_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_skcipher_sm4_ecb_cra_init,
++ .cra_exit = safexcel_skcipher_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_skcipher_sm4_cbc_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_skcipher_cra_init(tfm);
++ ctx->alg = SAFEXCEL_SM4;
++ ctx->blocksz = SM4_BLOCK_SIZE;
++ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CBC;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_cbc_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
++ .algo_mask = SAFEXCEL_ALG_SM4,
++ .alg.skcipher = {
++ .setkey = safexcel_skcipher_sm4_setkey,
++ .encrypt = safexcel_sm4_blk_encrypt,
++ .decrypt = safexcel_sm4_blk_decrypt,
++ .min_keysize = SM4_KEY_SIZE,
++ .max_keysize = SM4_KEY_SIZE,
++ .ivsize = SM4_BLOCK_SIZE,
++ .base = {
++ .cra_name = "cbc(sm4)",
++ .cra_driver_name = "safexcel-cbc-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SM4_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_skcipher_sm4_cbc_cra_init,
++ .cra_exit = safexcel_skcipher_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_skcipher_sm4_ofb_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_skcipher_cra_init(tfm);
++ ctx->alg = SAFEXCEL_SM4;
++ ctx->blocksz = SM4_BLOCK_SIZE;
++ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_OFB;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_ofb_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
++ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
++ .alg.skcipher = {
++ .setkey = safexcel_skcipher_sm4_setkey,
++ .encrypt = safexcel_encrypt,
++ .decrypt = safexcel_decrypt,
++ .min_keysize = SM4_KEY_SIZE,
++ .max_keysize = SM4_KEY_SIZE,
++ .ivsize = SM4_BLOCK_SIZE,
++ .base = {
++ .cra_name = "ofb(sm4)",
++ .cra_driver_name = "safexcel-ofb-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_skcipher_sm4_ofb_cra_init,
++ .cra_exit = safexcel_skcipher_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_skcipher_sm4_cfb_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_skcipher_cra_init(tfm);
++ ctx->alg = SAFEXCEL_SM4;
++ ctx->blocksz = SM4_BLOCK_SIZE;
++ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CFB;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_cfb_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
++ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_AES_XFB,
++ .alg.skcipher = {
++ .setkey = safexcel_skcipher_sm4_setkey,
++ .encrypt = safexcel_encrypt,
++ .decrypt = safexcel_decrypt,
++ .min_keysize = SM4_KEY_SIZE,
++ .max_keysize = SM4_KEY_SIZE,
++ .ivsize = SM4_BLOCK_SIZE,
++ .base = {
++ .cra_name = "cfb(sm4)",
++ .cra_driver_name = "safexcel-cfb-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_skcipher_sm4_cfb_cra_init,
++ .cra_exit = safexcel_skcipher_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_skcipher_sm4ctr_setkey(struct crypto_skcipher *ctfm,
++ const u8 *key, unsigned int len)
++{
++ struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ /* last 4 bytes of key are the nonce! */
++ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
++ /* exclude the nonce here */
++ len -= CTR_RFC3686_NONCE_SIZE;
++
++ return safexcel_skcipher_sm4_setkey(ctfm, key, len);
++}
++
++static int safexcel_skcipher_sm4_ctr_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_skcipher_cra_init(tfm);
++ ctx->alg = SAFEXCEL_SM4;
++ ctx->blocksz = SM4_BLOCK_SIZE;
++ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_ctr_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
++ .algo_mask = SAFEXCEL_ALG_SM4,
++ .alg.skcipher = {
++ .setkey = safexcel_skcipher_sm4ctr_setkey,
++ .encrypt = safexcel_encrypt,
++ .decrypt = safexcel_decrypt,
++ /* Add nonce size */
++ .min_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
++ .max_keysize = SM4_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .base = {
++ .cra_name = "rfc3686(ctr(sm4))",
++ .cra_driver_name = "safexcel-ctr-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_skcipher_sm4_ctr_cra_init,
++ .cra_exit = safexcel_skcipher_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sm4_blk_encrypt(struct aead_request *req)
++{
++ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
++ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
++ return -EINVAL;
++
++ return safexcel_queue_req(&req->base, aead_request_ctx(req),
++ SAFEXCEL_ENCRYPT);
++}
++
++static int safexcel_aead_sm4_blk_decrypt(struct aead_request *req)
++{
++ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
++
++ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
++ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
++ return -EINVAL;
++
++ return safexcel_queue_req(&req->base, aead_request_ctx(req),
++ SAFEXCEL_DECRYPT);
++}
++
++static int safexcel_aead_sm4cbc_sha1_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_cra_init(tfm);
++ ctx->alg = SAFEXCEL_SM4;
++ ctx->blocksz = SM4_BLOCK_SIZE;
++ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
++ ctx->state_sz = SHA1_DIGEST_SIZE;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_sm4_blk_encrypt,
++ .decrypt = safexcel_aead_sm4_blk_decrypt,
++ .ivsize = SM4_BLOCK_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha1),cbc(sm4))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha1-cbc-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SM4_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sm4cbc_sha1_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_fallback_setkey(struct crypto_aead *ctfm,
++ const u8 *key, unsigned int len)
++{
++ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ /* Keep fallback cipher synchronized */
++ return crypto_aead_setkey(ctx->fback, (u8 *)key, len) ?:
++ safexcel_aead_setkey(ctfm, key, len);
++}
++
++static int safexcel_aead_fallback_setauthsize(struct crypto_aead *ctfm,
++ unsigned int authsize)
++{
++ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ /* Keep fallback cipher synchronized */
++ return crypto_aead_setauthsize(ctx->fback, authsize);
++}
++
++static int safexcel_aead_fallback_crypt(struct aead_request *req,
++ enum safexcel_cipher_direction dir)
++{
++ struct crypto_aead *aead = crypto_aead_reqtfm(req);
++ struct crypto_tfm *tfm = crypto_aead_tfm(aead);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++ struct aead_request *subreq = aead_request_ctx(req);
++
++ aead_request_set_tfm(subreq, ctx->fback);
++ aead_request_set_callback(subreq, req->base.flags, req->base.complete,
++ req->base.data);
++ aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
++ req->iv);
++ aead_request_set_ad(subreq, req->assoclen);
++
++ return (dir == SAFEXCEL_ENCRYPT) ?
++ crypto_aead_encrypt(subreq) :
++ crypto_aead_decrypt(subreq);
++}
++
++static int safexcel_aead_sm4cbc_sm3_encrypt(struct aead_request *req)
++{
++ struct safexcel_cipher_req *creq = aead_request_ctx(req);
++
++ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
++ if (req->cryptlen & (SM4_BLOCK_SIZE - 1))
++ return -EINVAL;
++ else if (req->cryptlen || req->assoclen) /* If input length > 0 only */
++ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
++
++ /* HW cannot do full (AAD+payload) zero length, use fallback */
++ return safexcel_aead_fallback_crypt(req, SAFEXCEL_ENCRYPT);
++}
++
++static int safexcel_aead_sm4cbc_sm3_decrypt(struct aead_request *req)
++{
++ struct safexcel_cipher_req *creq = aead_request_ctx(req);
++ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
++
++ /* Workaround for HW bug: EIP96 4.3 does not report blocksize error */
++ if ((req->cryptlen - crypto_aead_authsize(tfm)) & (SM4_BLOCK_SIZE - 1))
++ return -EINVAL;
++ else if (req->cryptlen > crypto_aead_authsize(tfm) || req->assoclen)
++ /* If input length > 0 only */
++ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
++
++ /* HW cannot do full (AAD+payload) zero length, use fallback */
++ return safexcel_aead_fallback_crypt(req, SAFEXCEL_DECRYPT);
++}
++
++static int safexcel_aead_sm4cbc_sm3_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_fallback_cra_init(tfm);
++ ctx->alg = SAFEXCEL_SM4;
++ ctx->blocksz = SM4_BLOCK_SIZE;
++ ctx->hash_alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
++ ctx->state_sz = SM3_DIGEST_SIZE;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
++ .alg.aead = {
++ .setkey = safexcel_aead_fallback_setkey,
++ .setauthsize = safexcel_aead_fallback_setauthsize,
++ .encrypt = safexcel_aead_sm4cbc_sm3_encrypt,
++ .decrypt = safexcel_aead_sm4cbc_sm3_decrypt,
++ .ivsize = SM4_BLOCK_SIZE,
++ .maxauthsize = SM3_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sm3),cbc(sm4))",
++ .cra_driver_name = "safexcel-authenc-hmac-sm3-cbc-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SM4_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sm4cbc_sm3_cra_init,
++ .cra_exit = safexcel_aead_fallback_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sm4ctr_sha1_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sm4cbc_sha1_cra_init(tfm);
++ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SHA1,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SHA1_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sha1),rfc3686(ctr(sm4)))",
++ .cra_driver_name = "safexcel-authenc-hmac-sha1-ctr-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sm4ctr_sha1_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_aead_sm4ctr_sm3_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_aead_sm4cbc_sm3_cra_init(tfm);
++ ctx->mode = CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD;
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4 = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_SM4 | SAFEXCEL_ALG_SM3,
++ .alg.aead = {
++ .setkey = safexcel_aead_setkey,
++ .encrypt = safexcel_aead_encrypt,
++ .decrypt = safexcel_aead_decrypt,
++ .ivsize = CTR_RFC3686_IV_SIZE,
++ .maxauthsize = SM3_DIGEST_SIZE,
++ .base = {
++ .cra_name = "authenc(hmac(sm3),rfc3686(ctr(sm4)))",
++ .cra_driver_name = "safexcel-authenc-hmac-sm3-ctr-sm4",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_aead_sm4ctr_sm3_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++};
++
++static int safexcel_rfc4106_gcm_setkey(struct crypto_aead *ctfm, const u8 *key,
++ unsigned int len)
++{
++ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ /* last 4 bytes of key are the nonce! */
++ ctx->nonce = *(u32 *)(key + len - CTR_RFC3686_NONCE_SIZE);
++
++ len -= CTR_RFC3686_NONCE_SIZE;
++ return safexcel_aead_gcm_setkey(ctfm, key, len);
++}
++
++static int safexcel_rfc4106_gcm_setauthsize(struct crypto_aead *tfm,
++ unsigned int authsize)
++{
++ return crypto_rfc4106_check_authsize(authsize);
++}
++
++static int safexcel_rfc4106_encrypt(struct aead_request *req)
++{
++ return crypto_ipsec_check_assoclen(req->assoclen) ?:
++ safexcel_aead_encrypt(req);
++}
++
++static int safexcel_rfc4106_decrypt(struct aead_request *req)
++{
++ return crypto_ipsec_check_assoclen(req->assoclen) ?:
++ safexcel_aead_decrypt(req);
++}
++
++static int safexcel_rfc4106_gcm_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret;
++
++ ret = safexcel_aead_gcm_cra_init(tfm);
++ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
++ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
++ return ret;
++}
++
++struct safexcel_alg_template safexcel_alg_rfc4106_gcm = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
++ .alg.aead = {
++ .setkey = safexcel_rfc4106_gcm_setkey,
++ .setauthsize = safexcel_rfc4106_gcm_setauthsize,
++ .encrypt = safexcel_rfc4106_encrypt,
++ .decrypt = safexcel_rfc4106_decrypt,
++ .ivsize = GCM_RFC4106_IV_SIZE,
++ .maxauthsize = GHASH_DIGEST_SIZE,
++ .base = {
++ .cra_name = "rfc4106(gcm(aes))",
++ .cra_driver_name = "safexcel-rfc4106-gcm-aes",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_rfc4106_gcm_cra_init,
++ .cra_exit = safexcel_aead_gcm_cra_exit,
++ },
++ },
++};
++
++static int safexcel_rfc4543_gcm_setauthsize(struct crypto_aead *tfm,
++ unsigned int authsize)
++{
++ if (authsize != GHASH_DIGEST_SIZE)
++ return -EINVAL;
++
++ return 0;
++}
++
++static int safexcel_rfc4543_gcm_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret;
++
++ ret = safexcel_aead_gcm_cra_init(tfm);
++ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP_GMAC;
++ return ret;
++}
++
++struct safexcel_alg_template safexcel_alg_rfc4543_gcm = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_GHASH,
++ .alg.aead = {
++ .setkey = safexcel_rfc4106_gcm_setkey,
++ .setauthsize = safexcel_rfc4543_gcm_setauthsize,
++ .encrypt = safexcel_rfc4106_encrypt,
++ .decrypt = safexcel_rfc4106_decrypt,
++ .ivsize = GCM_RFC4543_IV_SIZE,
++ .maxauthsize = GHASH_DIGEST_SIZE,
++ .base = {
++ .cra_name = "rfc4543(gcm(aes))",
++ .cra_driver_name = "safexcel-rfc4543-gcm-aes",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_rfc4543_gcm_cra_init,
++ .cra_exit = safexcel_aead_gcm_cra_exit,
++ },
++ },
++};
++
++static int safexcel_rfc4309_ccm_setkey(struct crypto_aead *ctfm, const u8 *key,
++ unsigned int len)
++{
++ struct crypto_tfm *tfm = crypto_aead_tfm(ctfm);
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ /* First byte of the nonce = L = always 3 for RFC4309 (4 byte ctr) */
++ *(u8 *)&ctx->nonce = EIP197_AEAD_IPSEC_COUNTER_SIZE - 1;
++ /* last 3 bytes of key are the nonce! */
++ memcpy((u8 *)&ctx->nonce + 1, key + len -
++ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE,
++ EIP197_AEAD_IPSEC_CCM_NONCE_SIZE);
++
++ len -= EIP197_AEAD_IPSEC_CCM_NONCE_SIZE;
++ return safexcel_aead_ccm_setkey(ctfm, key, len);
++}
++
++static int safexcel_rfc4309_ccm_setauthsize(struct crypto_aead *tfm,
++ unsigned int authsize)
++{
++ /* Borrowed from crypto/ccm.c */
++ switch (authsize) {
++ case 8:
++ case 12:
++ case 16:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int safexcel_rfc4309_ccm_encrypt(struct aead_request *req)
++{
++ struct safexcel_cipher_req *creq = aead_request_ctx(req);
++
++ /* Borrowed from crypto/ccm.c */
++ if (req->assoclen != 16 && req->assoclen != 20)
++ return -EINVAL;
++
++ return safexcel_queue_req(&req->base, creq, SAFEXCEL_ENCRYPT);
++}
++
++static int safexcel_rfc4309_ccm_decrypt(struct aead_request *req)
++{
++ struct safexcel_cipher_req *creq = aead_request_ctx(req);
++
++ /* Borrowed from crypto/ccm.c */
++ if (req->assoclen != 16 && req->assoclen != 20)
++ return -EINVAL;
++
++ return safexcel_queue_req(&req->base, creq, SAFEXCEL_DECRYPT);
++}
++
++static int safexcel_rfc4309_ccm_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret;
++
++ ret = safexcel_aead_ccm_cra_init(tfm);
++ ctx->aead = EIP197_AEAD_TYPE_IPSEC_ESP;
++ ctx->aadskip = EIP197_AEAD_IPSEC_IV_SIZE;
++ return ret;
++}
++
++struct safexcel_alg_template safexcel_alg_rfc4309_ccm = {
++ .type = SAFEXCEL_ALG_TYPE_AEAD,
++ .algo_mask = SAFEXCEL_ALG_AES | SAFEXCEL_ALG_CBC_MAC_ALL,
++ .alg.aead = {
++ .setkey = safexcel_rfc4309_ccm_setkey,
++ .setauthsize = safexcel_rfc4309_ccm_setauthsize,
++ .encrypt = safexcel_rfc4309_ccm_encrypt,
++ .decrypt = safexcel_rfc4309_ccm_decrypt,
++ .ivsize = EIP197_AEAD_IPSEC_IV_SIZE,
++ .maxauthsize = AES_BLOCK_SIZE,
++ .base = {
++ .cra_name = "rfc4309(ccm(aes))",
++ .cra_driver_name = "safexcel-rfc4309-ccm-aes",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
++ .cra_alignmask = 0,
++ .cra_init = safexcel_rfc4309_ccm_cra_init,
++ .cra_exit = safexcel_aead_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
+ },
+ };
+--- a/drivers/crypto/inside-secure/safexcel.h
++++ b/drivers/crypto/inside-secure/safexcel.h
+@@ -17,8 +17,11 @@
+ #define EIP197_HIA_VERSION_BE 0xca35
+ #define EIP197_HIA_VERSION_LE 0x35ca
+ #define EIP97_VERSION_LE 0x9e61
++#define EIP196_VERSION_LE 0x3bc4
+ #define EIP197_VERSION_LE 0x3ac5
+ #define EIP96_VERSION_LE 0x9f60
++#define EIP201_VERSION_LE 0x36c9
++#define EIP206_VERSION_LE 0x31ce
+ #define EIP197_REG_LO16(reg) (reg & 0xffff)
+ #define EIP197_REG_HI16(reg) ((reg >> 16) & 0xffff)
+ #define EIP197_VERSION_MASK(reg) ((reg >> 16) & 0xfff)
+@@ -26,12 +29,23 @@
+ ((reg >> 4) & 0xf0) | \
+ ((reg >> 12) & 0xf))
+
++/* EIP197 HIA OPTIONS ENCODING */
++#define EIP197_HIA_OPT_HAS_PE_ARB BIT(29)
++
++/* EIP206 OPTIONS ENCODING */
++#define EIP206_OPT_ICE_TYPE(n) ((n>>8)&3)
++
++/* EIP197 OPTIONS ENCODING */
++#define EIP197_OPT_HAS_TRC BIT(31)
++
+ /* Static configuration */
+ #define EIP197_DEFAULT_RING_SIZE 400
+-#define EIP197_MAX_TOKENS 18
++#define EIP197_EMB_TOKENS 4 /* Pad CD to 16 dwords */
++#define EIP197_MAX_TOKENS 16
+ #define EIP197_MAX_RINGS 4
+ #define EIP197_FETCH_DEPTH 2
+ #define EIP197_MAX_BATCH_SZ 64
++#define EIP197_MAX_RING_AIC 14
+
+ #define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
+ GFP_KERNEL : GFP_ATOMIC)
+@@ -138,6 +152,7 @@
+ #define EIP197_HIA_AIC_R_ENABLED_STAT(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+ #define EIP197_HIA_AIC_R_ACK(r) (0xe010 - EIP197_HIA_AIC_R_OFF(r))
+ #define EIP197_HIA_AIC_R_ENABLE_CLR(r) (0xe014 - EIP197_HIA_AIC_R_OFF(r))
++#define EIP197_HIA_AIC_R_VERSION(r) (0xe01c - EIP197_HIA_AIC_R_OFF(r))
+ #define EIP197_HIA_AIC_G_ENABLE_CTRL 0xf808
+ #define EIP197_HIA_AIC_G_ENABLED_STAT 0xf810
+ #define EIP197_HIA_AIC_G_ACK 0xf810
+@@ -157,12 +172,16 @@
+ #define EIP197_PE_EIP96_FUNCTION_EN(n) (0x1004 + (0x2000 * (n)))
+ #define EIP197_PE_EIP96_CONTEXT_CTRL(n) (0x1008 + (0x2000 * (n)))
+ #define EIP197_PE_EIP96_CONTEXT_STAT(n) (0x100c + (0x2000 * (n)))
++#define EIP197_PE_EIP96_TOKEN_CTRL2(n) (0x102c + (0x2000 * (n)))
+ #define EIP197_PE_EIP96_FUNCTION2_EN(n) (0x1030 + (0x2000 * (n)))
+ #define EIP197_PE_EIP96_OPTIONS(n) (0x13f8 + (0x2000 * (n)))
+ #define EIP197_PE_EIP96_VERSION(n) (0x13fc + (0x2000 * (n)))
+ #define EIP197_PE_OUT_DBUF_THRES(n) (0x1c00 + (0x2000 * (n)))
+ #define EIP197_PE_OUT_TBUF_THRES(n) (0x1d00 + (0x2000 * (n)))
++#define EIP197_PE_OPTIONS(n) (0x1ff8 + (0x2000 * (n)))
++#define EIP197_PE_VERSION(n) (0x1ffc + (0x2000 * (n)))
+ #define EIP197_MST_CTRL 0xfff4
++#define EIP197_OPTIONS 0xfff8
+ #define EIP197_VERSION 0xfffc
+
+ /* EIP197-specific registers, no indirection */
+@@ -178,6 +197,7 @@
+ #define EIP197_TRC_ECCADMINSTAT 0xf0838
+ #define EIP197_TRC_ECCDATASTAT 0xf083c
+ #define EIP197_TRC_ECCDATA 0xf0840
++#define EIP197_STRC_CONFIG 0xf43f0
+ #define EIP197_FLUE_CACHEBASE_LO(n) (0xf6000 + (32 * (n)))
+ #define EIP197_FLUE_CACHEBASE_HI(n) (0xf6004 + (32 * (n)))
+ #define EIP197_FLUE_CONFIG(n) (0xf6010 + (32 * (n)))
+@@ -188,6 +208,7 @@
+
+ /* EIP197_HIA_xDR_DESC_SIZE */
+ #define EIP197_xDR_DESC_MODE_64BIT BIT(31)
++#define EIP197_CDR_DESC_MODE_ADCP BIT(30)
+
+ /* EIP197_HIA_xDR_DMA_CFG */
+ #define EIP197_HIA_xDR_WR_RES_BUF BIT(22)
+@@ -213,7 +234,6 @@
+ /* EIP197_HIA_xDR_PROC_COUNT */
+ #define EIP197_xDR_PROC_xD_PKT_OFFSET 24
+ #define EIP197_xDR_PROC_xD_PKT_MASK GENMASK(6, 0)
+-#define EIP197_xDR_PROC_xD_COUNT(n) ((n) << 2)
+ #define EIP197_xDR_PROC_xD_PKT(n) ((n) << 24)
+ #define EIP197_xDR_PROC_CLR_COUNT BIT(31)
+
+@@ -228,6 +248,8 @@
+ #define EIP197_HIA_RA_PE_CTRL_EN BIT(30)
+
+ /* EIP197_HIA_OPTIONS */
++#define EIP197_N_RINGS_OFFSET 0
++#define EIP197_N_RINGS_MASK GENMASK(3, 0)
+ #define EIP197_N_PES_OFFSET 4
+ #define EIP197_N_PES_MASK GENMASK(4, 0)
+ #define EIP97_N_PES_MASK GENMASK(2, 0)
+@@ -237,13 +259,13 @@
+ #define EIP197_CFSIZE_OFFSET 9
+ #define EIP197_CFSIZE_ADJUST 4
+ #define EIP97_CFSIZE_OFFSET 8
+-#define EIP197_CFSIZE_MASK GENMASK(3, 0)
+-#define EIP97_CFSIZE_MASK GENMASK(4, 0)
++#define EIP197_CFSIZE_MASK GENMASK(2, 0)
++#define EIP97_CFSIZE_MASK GENMASK(3, 0)
+ #define EIP197_RFSIZE_OFFSET 12
+ #define EIP197_RFSIZE_ADJUST 4
+ #define EIP97_RFSIZE_OFFSET 12
+-#define EIP197_RFSIZE_MASK GENMASK(3, 0)
+-#define EIP97_RFSIZE_MASK GENMASK(4, 0)
++#define EIP197_RFSIZE_MASK GENMASK(2, 0)
++#define EIP97_RFSIZE_MASK GENMASK(3, 0)
+
+ /* EIP197_HIA_AIC_R_ENABLE_CTRL */
+ #define EIP197_CDR_IRQ(n) BIT((n) * 2)
+@@ -257,9 +279,9 @@
+ #define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n) ((n) << 16)
+ #define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n) (((n) & 0x7) << 20)
+ #define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n) ((n) << 24)
+-#define EIP197_HIA_DFE_CFG_DIS_DEBUG (BIT(31) | BIT(29))
++#define EIP197_HIA_DFE_CFG_DIS_DEBUG GENMASK(31, 29)
+ #define EIP197_HIA_DSE_CFG_EN_SINGLE_WR BIT(29)
+-#define EIP197_HIA_DSE_CFG_DIS_DEBUG BIT(31)
++#define EIP197_HIA_DSE_CFG_DIS_DEBUG GENMASK(31, 30)
+
+ /* EIP197_HIA_DFE/DSE_THR_CTRL */
+ #define EIP197_DxE_THR_CTRL_EN BIT(30)
+@@ -327,13 +349,21 @@
+ #define EIP197_ADDRESS_MODE BIT(8)
+ #define EIP197_CONTROL_MODE BIT(9)
+
++/* EIP197_PE_EIP96_TOKEN_CTRL2 */
++#define EIP197_PE_EIP96_TOKEN_CTRL2_CTX_DONE BIT(3)
++
++/* EIP197_STRC_CONFIG */
++#define EIP197_STRC_CONFIG_INIT BIT(31)
++#define EIP197_STRC_CONFIG_LARGE_REC(s) (s<<8)
++#define EIP197_STRC_CONFIG_SMALL_REC(s) (s<<0)
++
+ /* EIP197_FLUE_CONFIG */
+ #define EIP197_FLUE_CONFIG_MAGIC 0xc7000004
+
+ /* Context Control */
+ struct safexcel_context_record {
+- u32 control0;
+- u32 control1;
++ __le32 control0;
++ __le32 control1;
+
+ __le32 data[40];
+ } __packed;
+@@ -358,10 +388,14 @@ struct safexcel_context_record {
+ #define CONTEXT_CONTROL_CRYPTO_ALG_AES128 (0x5 << 17)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_AES192 (0x6 << 17)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_AES256 (0x7 << 17)
++#define CONTEXT_CONTROL_CRYPTO_ALG_CHACHA20 (0x8 << 17)
++#define CONTEXT_CONTROL_CRYPTO_ALG_SM4 (0xd << 17)
++#define CONTEXT_CONTROL_DIGEST_INITIAL (0x0 << 21)
+ #define CONTEXT_CONTROL_DIGEST_PRECOMPUTED (0x1 << 21)
+ #define CONTEXT_CONTROL_DIGEST_XCM (0x2 << 21)
+ #define CONTEXT_CONTROL_DIGEST_HMAC (0x3 << 21)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_MD5 (0x0 << 23)
++#define CONTEXT_CONTROL_CRYPTO_ALG_CRC32 (0x0 << 23)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_SHA1 (0x2 << 23)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_SHA224 (0x4 << 23)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_SHA256 (0x3 << 23)
+@@ -371,17 +405,25 @@ struct safexcel_context_record {
+ #define CONTEXT_CONTROL_CRYPTO_ALG_XCBC128 (0x1 << 23)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_XCBC192 (0x2 << 23)
+ #define CONTEXT_CONTROL_CRYPTO_ALG_XCBC256 (0x3 << 23)
++#define CONTEXT_CONTROL_CRYPTO_ALG_SM3 (0x7 << 23)
++#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256 (0xb << 23)
++#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224 (0xc << 23)
++#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512 (0xd << 23)
++#define CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384 (0xe << 23)
++#define CONTEXT_CONTROL_CRYPTO_ALG_POLY1305 (0xf << 23)
+ #define CONTEXT_CONTROL_INV_FR (0x5 << 24)
+ #define CONTEXT_CONTROL_INV_TR (0x6 << 24)
+
+ /* control1 */
+ #define CONTEXT_CONTROL_CRYPTO_MODE_ECB (0 << 0)
+ #define CONTEXT_CONTROL_CRYPTO_MODE_CBC (1 << 0)
++#define CONTEXT_CONTROL_CHACHA20_MODE_256_32 (2 << 0)
+ #define CONTEXT_CONTROL_CRYPTO_MODE_OFB (4 << 0)
+ #define CONTEXT_CONTROL_CRYPTO_MODE_CFB (5 << 0)
+ #define CONTEXT_CONTROL_CRYPTO_MODE_CTR_LOAD (6 << 0)
+ #define CONTEXT_CONTROL_CRYPTO_MODE_XTS (7 << 0)
+ #define CONTEXT_CONTROL_CRYPTO_MODE_XCM ((6 << 0) | BIT(17))
++#define CONTEXT_CONTROL_CHACHA20_MODE_CALC_OTK (12 << 0)
+ #define CONTEXT_CONTROL_IV0 BIT(5)
+ #define CONTEXT_CONTROL_IV1 BIT(6)
+ #define CONTEXT_CONTROL_IV2 BIT(7)
+@@ -394,6 +436,13 @@ struct safexcel_context_record {
+ #define EIP197_XCM_MODE_GCM 1
+ #define EIP197_XCM_MODE_CCM 2
+
++#define EIP197_AEAD_TYPE_IPSEC_ESP 2
++#define EIP197_AEAD_TYPE_IPSEC_ESP_GMAC 3
++#define EIP197_AEAD_IPSEC_IV_SIZE 8
++#define EIP197_AEAD_IPSEC_NONCE_SIZE 4
++#define EIP197_AEAD_IPSEC_COUNTER_SIZE 4
++#define EIP197_AEAD_IPSEC_CCM_NONCE_SIZE 3
++
+ /* The hash counter given to the engine in the context has a granularity of
+ * 64 bits.
+ */
+@@ -423,6 +472,8 @@ struct safexcel_context_record {
+ #define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n) ((n) << 18)
+
+ /* Cache helpers */
++#define EIP197_MIN_DSIZE 1024
++#define EIP197_MIN_ASIZE 8
+ #define EIP197_CS_TRC_REC_WC 64
+ #define EIP197_CS_RC_SIZE (4 * sizeof(u32))
+ #define EIP197_CS_RC_NEXT(x) (x)
+@@ -447,7 +498,7 @@ struct result_data_desc {
+ u16 application_id;
+ u16 rsvd1;
+
+- u32 rsvd2;
++ u32 rsvd2[5];
+ } __packed;
+
+
+@@ -465,16 +516,15 @@ struct safexcel_result_desc {
+
+ u32 data_lo;
+ u32 data_hi;
+-
+- struct result_data_desc result_data;
+ } __packed;
+
+ /*
+ * The EIP(1)97 only needs to fetch the descriptor part of
+ * the result descriptor, not the result token part!
+ */
+-#define EIP197_RD64_FETCH_SIZE ((sizeof(struct safexcel_result_desc) -\
+- sizeof(struct result_data_desc)) /\
++#define EIP197_RD64_FETCH_SIZE (sizeof(struct safexcel_result_desc) /\
++ sizeof(u32))
++#define EIP197_RD64_RESULT_SIZE (sizeof(struct result_data_desc) /\
+ sizeof(u32))
+
+ struct safexcel_token {
+@@ -505,6 +555,8 @@ static inline void eip197_noop_token(str
+ {
+ token->opcode = EIP197_TOKEN_OPCODE_NOOP;
+ token->packet_length = BIT(2);
++ token->stat = 0;
++ token->instructions = 0;
+ }
+
+ /* Instructions */
+@@ -526,14 +578,13 @@ struct safexcel_control_data_desc {
+ u16 application_id;
+ u16 rsvd;
+
+- u8 refresh:2;
+- u32 context_lo:30;
++ u32 context_lo;
+ u32 context_hi;
+
+ u32 control0;
+ u32 control1;
+
+- u32 token[EIP197_MAX_TOKENS];
++ u32 token[EIP197_EMB_TOKENS];
+ } __packed;
+
+ #define EIP197_OPTION_MAGIC_VALUE BIT(0)
+@@ -543,7 +594,10 @@ struct safexcel_control_data_desc {
+ #define EIP197_OPTION_2_TOKEN_IV_CMD GENMASK(11, 10)
+ #define EIP197_OPTION_4_TOKEN_IV_CMD GENMASK(11, 9)
+
++#define EIP197_TYPE_BCLA 0x0
+ #define EIP197_TYPE_EXTENDED 0x3
++#define EIP197_CONTEXT_SMALL 0x2
++#define EIP197_CONTEXT_SIZE_MASK 0x3
+
+ /* Basic Command Descriptor format */
+ struct safexcel_command_desc {
+@@ -551,16 +605,22 @@ struct safexcel_command_desc {
+ u8 rsvd0:5;
+ u8 last_seg:1;
+ u8 first_seg:1;
+- u16 additional_cdata_size:8;
++ u8 additional_cdata_size:8;
+
+ u32 rsvd1;
+
+ u32 data_lo;
+ u32 data_hi;
+
++ u32 atok_lo;
++ u32 atok_hi;
++
+ struct safexcel_control_data_desc control_data;
+ } __packed;
+
++#define EIP197_CD64_FETCH_SIZE (sizeof(struct safexcel_command_desc) /\
++ sizeof(u32))
++
+ /*
+ * Internal structures & functions
+ */
+@@ -578,15 +638,20 @@ enum eip197_fw {
+
+ struct safexcel_desc_ring {
+ void *base;
++ void *shbase;
+ void *base_end;
++ void *shbase_end;
+ dma_addr_t base_dma;
++ dma_addr_t shbase_dma;
+
+ /* write and read pointers */
+ void *write;
++ void *shwrite;
+ void *read;
+
+ /* descriptor element offset */
+- unsigned offset;
++ unsigned int offset;
++ unsigned int shoffset;
+ };
+
+ enum safexcel_alg_type {
+@@ -601,9 +666,11 @@ struct safexcel_config {
+
+ u32 cd_size;
+ u32 cd_offset;
++ u32 cdsh_offset;
+
+ u32 rd_size;
+ u32 rd_offset;
++ u32 res_offset;
+ };
+
+ struct safexcel_work_data {
+@@ -654,6 +721,12 @@ enum safexcel_eip_version {
+ /* Priority we use for advertising our algorithms */
+ #define SAFEXCEL_CRA_PRIORITY 300
+
++/* SM3 digest result for zero length message */
++#define EIP197_SM3_ZEROM_HASH "\x1A\xB2\x1D\x83\x55\xCF\xA1\x7F" \
++ "\x8E\x61\x19\x48\x31\xE8\x1A\x8F" \
++ "\x22\xBE\xC8\xC7\x28\xFE\xFB\x74" \
++ "\x7E\xD0\x35\xEB\x50\x82\xAA\x2B"
++
+ /* EIP algorithm presence flags */
+ enum safexcel_eip_algorithms {
+ SAFEXCEL_ALG_BC0 = BIT(5),
+@@ -697,16 +770,23 @@ struct safexcel_register_offsets {
+ enum safexcel_flags {
+ EIP197_TRC_CACHE = BIT(0),
+ SAFEXCEL_HW_EIP197 = BIT(1),
++ EIP197_PE_ARB = BIT(2),
++ EIP197_ICE = BIT(3),
++ EIP197_SIMPLE_TRC = BIT(4),
+ };
+
+ struct safexcel_hwconfig {
+ enum safexcel_eip_algorithms algo_flags;
+ int hwver;
+ int hiaver;
++ int ppver;
+ int pever;
+ int hwdataw;
+ int hwcfsize;
+ int hwrfsize;
++ int hwnumpes;
++ int hwnumrings;
++ int hwnumraic;
+ };
+
+ struct safexcel_crypto_priv {
+@@ -778,7 +858,7 @@ struct safexcel_inv_result {
+
+ void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
+ int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
+- struct safexcel_result_desc *rdesc);
++ void *rdp);
+ void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
+ int safexcel_invalidate_cache(struct crypto_async_request *async,
+ struct safexcel_crypto_priv *priv,
+@@ -797,7 +877,8 @@ struct safexcel_command_desc *safexcel_a
+ bool first, bool last,
+ dma_addr_t data, u32 len,
+ u32 full_data_len,
+- dma_addr_t context);
++ dma_addr_t context,
++ struct safexcel_token **atoken);
+ struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+ int ring_id,
+ bool first, bool last,
+@@ -853,5 +934,43 @@ extern struct safexcel_alg_template safe
+ extern struct safexcel_alg_template safexcel_alg_xts_aes;
+ extern struct safexcel_alg_template safexcel_alg_gcm;
+ extern struct safexcel_alg_template safexcel_alg_ccm;
++extern struct safexcel_alg_template safexcel_alg_crc32;
++extern struct safexcel_alg_template safexcel_alg_cbcmac;
++extern struct safexcel_alg_template safexcel_alg_xcbcmac;
++extern struct safexcel_alg_template safexcel_alg_cmac;
++extern struct safexcel_alg_template safexcel_alg_chacha20;
++extern struct safexcel_alg_template safexcel_alg_chachapoly;
++extern struct safexcel_alg_template safexcel_alg_chachapoly_esp;
++extern struct safexcel_alg_template safexcel_alg_sm3;
++extern struct safexcel_alg_template safexcel_alg_hmac_sm3;
++extern struct safexcel_alg_template safexcel_alg_ecb_sm4;
++extern struct safexcel_alg_template safexcel_alg_cbc_sm4;
++extern struct safexcel_alg_template safexcel_alg_ofb_sm4;
++extern struct safexcel_alg_template safexcel_alg_cfb_sm4;
++extern struct safexcel_alg_template safexcel_alg_ctr_sm4;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_sm4;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_cbc_sm4;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_ctr_sm4;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sm3_ctr_sm4;
++extern struct safexcel_alg_template safexcel_alg_sha3_224;
++extern struct safexcel_alg_template safexcel_alg_sha3_256;
++extern struct safexcel_alg_template safexcel_alg_sha3_384;
++extern struct safexcel_alg_template safexcel_alg_sha3_512;
++extern struct safexcel_alg_template safexcel_alg_hmac_sha3_224;
++extern struct safexcel_alg_template safexcel_alg_hmac_sha3_256;
++extern struct safexcel_alg_template safexcel_alg_hmac_sha3_384;
++extern struct safexcel_alg_template safexcel_alg_hmac_sha3_512;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha1_cbc_des;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des3_ede;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des3_ede;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des3_ede;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des3_ede;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha256_cbc_des;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha224_cbc_des;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha512_cbc_des;
++extern struct safexcel_alg_template safexcel_alg_authenc_hmac_sha384_cbc_des;
++extern struct safexcel_alg_template safexcel_alg_rfc4106_gcm;
++extern struct safexcel_alg_template safexcel_alg_rfc4543_gcm;
++extern struct safexcel_alg_template safexcel_alg_rfc4309_ccm;
+
+ #endif
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -5,9 +5,13 @@
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ */
+
++#include <crypto/aes.h>
+ #include <crypto/hmac.h>
+ #include <crypto/md5.h>
+ #include <crypto/sha.h>
++#include <crypto/sha3.h>
++#include <crypto/skcipher.h>
++#include <crypto/sm3.h>
+ #include <linux/device.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/dmapool.h>
+@@ -19,9 +23,19 @@ struct safexcel_ahash_ctx {
+ struct safexcel_crypto_priv *priv;
+
+ u32 alg;
+-
+- u32 ipad[SHA512_DIGEST_SIZE / sizeof(u32)];
+- u32 opad[SHA512_DIGEST_SIZE / sizeof(u32)];
++ u8 key_sz;
++ bool cbcmac;
++ bool do_fallback;
++ bool fb_init_done;
++ bool fb_do_setkey;
++
++ __le32 ipad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
++ __le32 opad[SHA3_512_BLOCK_SIZE / sizeof(__le32)];
++
++ struct crypto_cipher *kaes;
++ struct crypto_ahash *fback;
++ struct crypto_shash *shpre;
++ struct shash_desc *shdesc;
+ };
+
+ struct safexcel_ahash_req {
+@@ -31,6 +45,8 @@ struct safexcel_ahash_req {
+ bool needs_inv;
+ bool hmac_zlen;
+ bool len_is_le;
++ bool not_first;
++ bool xcbcmac;
+
+ int nents;
+ dma_addr_t result_dma;
+@@ -39,7 +55,9 @@ struct safexcel_ahash_req {
+
+ u8 state_sz; /* expected state size, only set once */
+ u8 block_sz; /* block size, only set once */
+- u32 state[SHA512_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
++ u8 digest_sz; /* output digest size, only set once */
++ __le32 state[SHA3_512_BLOCK_SIZE /
++ sizeof(__le32)] __aligned(sizeof(__le32));
+
+ u64 len;
+ u64 processed;
+@@ -57,22 +75,36 @@ static inline u64 safexcel_queued_len(st
+ }
+
+ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
+- u32 input_length, u32 result_length)
++ u32 input_length, u32 result_length,
++ bool cbcmac)
+ {
+ struct safexcel_token *token =
+ (struct safexcel_token *)cdesc->control_data.token;
+
+ token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+ token[0].packet_length = input_length;
+- token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+ token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+- token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+- token[1].packet_length = result_length;
+- token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
++ input_length &= 15;
++ if (unlikely(cbcmac && input_length)) {
++ token[0].stat = 0;
++ token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
++ token[1].packet_length = 16 - input_length;
++ token[1].stat = EIP197_TOKEN_STAT_LAST_HASH;
++ token[1].instructions = EIP197_TOKEN_INS_TYPE_HASH;
++ } else {
++ token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
++ eip197_noop_token(&token[1]);
++ }
++
++ token[2].opcode = EIP197_TOKEN_OPCODE_INSERT;
++ token[2].stat = EIP197_TOKEN_STAT_LAST_HASH |
+ EIP197_TOKEN_STAT_LAST_PACKET;
+- token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
++ token[2].packet_length = result_length;
++ token[2].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+ EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
++
++ eip197_noop_token(&token[3]);
+ }
+
+ static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+@@ -82,29 +114,49 @@ static void safexcel_context_control(str
+ struct safexcel_crypto_priv *priv = ctx->priv;
+ u64 count = 0;
+
+- cdesc->control_data.control0 |= ctx->alg;
++ cdesc->control_data.control0 = ctx->alg;
++ cdesc->control_data.control1 = 0;
+
+ /*
+ * Copy the input digest if needed, and setup the context
+ * fields. Do this now as we need it to setup the first command
+ * descriptor.
+ */
+- if (!req->processed) {
+- /* First - and possibly only - block of basic hash only */
+- if (req->finish) {
++ if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
++ if (req->xcbcmac)
++ memcpy(ctx->base.ctxr->data, ctx->ipad, ctx->key_sz);
++ else
++ memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
++
++ if (!req->finish && req->xcbcmac)
++ cdesc->control_data.control0 |=
++ CONTEXT_CONTROL_DIGEST_XCM |
++ CONTEXT_CONTROL_TYPE_HASH_OUT |
++ CONTEXT_CONTROL_NO_FINISH_HASH |
++ CONTEXT_CONTROL_SIZE(req->state_sz /
++ sizeof(u32));
++ else
+ cdesc->control_data.control0 |=
++ CONTEXT_CONTROL_DIGEST_XCM |
++ CONTEXT_CONTROL_TYPE_HASH_OUT |
++ CONTEXT_CONTROL_SIZE(req->state_sz /
++ sizeof(u32));
++ return;
++ } else if (!req->processed) {
++ /* First - and possibly only - block of basic hash only */
++ if (req->finish)
++ cdesc->control_data.control0 |= req->digest |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_RESTART_HASH |
+ /* ensure its not 0! */
+ CONTEXT_CONTROL_SIZE(1);
+- } else {
+- cdesc->control_data.control0 |=
++ else
++ cdesc->control_data.control0 |= req->digest |
+ CONTEXT_CONTROL_TYPE_HASH_OUT |
+ CONTEXT_CONTROL_RESTART_HASH |
+ CONTEXT_CONTROL_NO_FINISH_HASH |
+ /* ensure its not 0! */
+ CONTEXT_CONTROL_SIZE(1);
+- }
+ return;
+ }
+
+@@ -204,7 +256,7 @@ static int safexcel_handle_req_result(st
+ }
+
+ if (sreq->result_dma) {
+- dma_unmap_single(priv->dev, sreq->result_dma, sreq->state_sz,
++ dma_unmap_single(priv->dev, sreq->result_dma, sreq->digest_sz,
+ DMA_FROM_DEVICE);
+ sreq->result_dma = 0;
+ }
+@@ -223,14 +275,15 @@ static int safexcel_handle_req_result(st
+ memcpy(sreq->cache, sreq->state,
+ crypto_ahash_digestsize(ahash));
+
+- memcpy(sreq->state, ctx->opad, sreq->state_sz);
++ memcpy(sreq->state, ctx->opad, sreq->digest_sz);
+
+ sreq->len = sreq->block_sz +
+ crypto_ahash_digestsize(ahash);
+ sreq->processed = sreq->block_sz;
+ sreq->hmac = 0;
+
+- ctx->base.needs_inv = true;
++ if (priv->flags & EIP197_TRC_CACHE)
++ ctx->base.needs_inv = true;
+ areq->nbytes = 0;
+ safexcel_ahash_enqueue(areq);
+
+@@ -238,8 +291,14 @@ static int safexcel_handle_req_result(st
+ return 1;
+ }
+
+- memcpy(areq->result, sreq->state,
+- crypto_ahash_digestsize(ahash));
++ if (unlikely(sreq->digest == CONTEXT_CONTROL_DIGEST_XCM &&
++ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_CRC32)) {
++ /* Undo final XOR with 0xffffffff ...*/
++ *(__le32 *)areq->result = ~sreq->state[0];
++ } else {
++ memcpy(areq->result, sreq->state,
++ crypto_ahash_digestsize(ahash));
++ }
+ }
+
+ cache_len = safexcel_queued_len(sreq);
+@@ -261,10 +320,11 @@ static int safexcel_ahash_send_req(struc
+ struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
+ struct safexcel_result_desc *rdesc;
+ struct scatterlist *sg;
+- int i, extra = 0, n_cdesc = 0, ret = 0;
+- u64 queued, len, cache_len;
++ struct safexcel_token *dmmy;
++ int i, extra = 0, n_cdesc = 0, ret = 0, cache_len, skip = 0;
++ u64 queued, len;
+
+- queued = len = safexcel_queued_len(req);
++ queued = safexcel_queued_len(req);
+ if (queued <= HASH_CACHE_SIZE)
+ cache_len = queued;
+ else
+@@ -287,15 +347,52 @@ static int safexcel_ahash_send_req(struc
+ areq->nbytes - extra);
+
+ queued -= extra;
+- len -= extra;
+
+ if (!queued) {
+ *commands = 0;
+ *results = 0;
+ return 0;
+ }
++
++ extra = 0;
++ }
++
++ if (unlikely(req->xcbcmac && req->processed > AES_BLOCK_SIZE)) {
++ if (unlikely(cache_len < AES_BLOCK_SIZE)) {
++ /*
++ * Cache contains less than 1 full block, complete.
++ */
++ extra = AES_BLOCK_SIZE - cache_len;
++ if (queued > cache_len) {
++ /* More data follows: borrow bytes */
++ u64 tmp = queued - cache_len;
++
++ skip = min_t(u64, tmp, extra);
++ sg_pcopy_to_buffer(areq->src,
++ sg_nents(areq->src),
++ req->cache + cache_len,
++ skip, 0);
++ }
++ extra -= skip;
++ memset(req->cache + cache_len + skip, 0, extra);
++ if (!ctx->cbcmac && extra) {
++ // 10- padding for XCBCMAC & CMAC
++ req->cache[cache_len + skip] = 0x80;
++ // HW will use K2 iso K3 - compensate!
++ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
++ ((__be32 *)req->cache)[i] ^=
++ cpu_to_be32(le32_to_cpu(
++ ctx->ipad[i] ^ ctx->ipad[i + 4]));
++ }
++ cache_len = AES_BLOCK_SIZE;
++ queued = queued + extra;
++ }
++
++ /* XCBC continue: XOR previous result into 1st word */
++ crypto_xor(req->cache, (const u8 *)req->state, AES_BLOCK_SIZE);
+ }
+
++ len = queued;
+ /* Add a command descriptor for the cached data, if any */
+ if (cache_len) {
+ req->cache_dma = dma_map_single(priv->dev, req->cache,
+@@ -306,8 +403,9 @@ static int safexcel_ahash_send_req(struc
+ req->cache_sz = cache_len;
+ first_cdesc = safexcel_add_cdesc(priv, ring, 1,
+ (cache_len == len),
+- req->cache_dma, cache_len, len,
+- ctx->base.ctxr_dma);
++ req->cache_dma, cache_len,
++ len, ctx->base.ctxr_dma,
++ &dmmy);
+ if (IS_ERR(first_cdesc)) {
+ ret = PTR_ERR(first_cdesc);
+ goto unmap_cache;
+@@ -319,10 +417,6 @@ static int safexcel_ahash_send_req(struc
+ goto send_command;
+ }
+
+- /* Skip descriptor generation for zero-length requests */
+- if (!areq->nbytes)
+- goto send_command;
+-
+ /* Now handle the current ahash request buffer(s) */
+ req->nents = dma_map_sg(priv->dev, areq->src,
+ sg_nents_for_len(areq->src,
+@@ -336,26 +430,34 @@ static int safexcel_ahash_send_req(struc
+ for_each_sg(areq->src, sg, req->nents, i) {
+ int sglen = sg_dma_len(sg);
+
++ if (unlikely(sglen <= skip)) {
++ skip -= sglen;
++ continue;
++ }
++
+ /* Do not overflow the request */
+- if (queued < sglen)
++ if ((queued + skip) <= sglen)
+ sglen = queued;
++ else
++ sglen -= skip;
+
+ cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+ !(queued - sglen),
+- sg_dma_address(sg),
+- sglen, len, ctx->base.ctxr_dma);
++ sg_dma_address(sg) + skip, sglen,
++ len, ctx->base.ctxr_dma, &dmmy);
+ if (IS_ERR(cdesc)) {
+ ret = PTR_ERR(cdesc);
+ goto unmap_sg;
+ }
+- n_cdesc++;
+
+- if (n_cdesc == 1)
++ if (!n_cdesc)
+ first_cdesc = cdesc;
++ n_cdesc++;
+
+ queued -= sglen;
+ if (!queued)
+ break;
++ skip = 0;
+ }
+
+ send_command:
+@@ -363,9 +465,9 @@ send_command:
+ safexcel_context_control(ctx, req, first_cdesc);
+
+ /* Add the token */
+- safexcel_hash_token(first_cdesc, len, req->state_sz);
++ safexcel_hash_token(first_cdesc, len, req->digest_sz, ctx->cbcmac);
+
+- req->result_dma = dma_map_single(priv->dev, req->state, req->state_sz,
++ req->result_dma = dma_map_single(priv->dev, req->state, req->digest_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, req->result_dma)) {
+ ret = -EINVAL;
+@@ -374,7 +476,7 @@ send_command:
+
+ /* Add a result descriptor */
+ rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
+- req->state_sz);
++ req->digest_sz);
+ if (IS_ERR(rdesc)) {
+ ret = PTR_ERR(rdesc);
+ goto unmap_result;
+@@ -382,17 +484,20 @@ send_command:
+
+ safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
+
+- req->processed += len;
++ req->processed += len - extra;
+
+ *commands = n_cdesc;
+ *results = 1;
+ return 0;
+
+ unmap_result:
+- dma_unmap_single(priv->dev, req->result_dma, req->state_sz,
++ dma_unmap_single(priv->dev, req->result_dma, req->digest_sz,
+ DMA_FROM_DEVICE);
+ unmap_sg:
+- dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
++ if (req->nents) {
++ dma_unmap_sg(priv->dev, areq->src, req->nents, DMA_TO_DEVICE);
++ req->nents = 0;
++ }
+ cdesc_rollback:
+ for (i = 0; i < n_cdesc; i++)
+ safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+@@ -590,16 +695,12 @@ static int safexcel_ahash_enqueue(struct
+
+ if (ctx->base.ctxr) {
+ if (priv->flags & EIP197_TRC_CACHE && !ctx->base.needs_inv &&
+- req->processed &&
+- (/* invalidate for basic hash continuation finish */
+- (req->finish &&
+- (req->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)) ||
++ /* invalidate for *any* non-XCBC continuation */
++ ((req->not_first && !req->xcbcmac) ||
+ /* invalidate if (i)digest changed */
+ memcmp(ctx->base.ctxr->data, req->state, req->state_sz) ||
+- /* invalidate for HMAC continuation finish */
+- (req->finish && (req->processed != req->block_sz)) ||
+ /* invalidate for HMAC finish with odigest changed */
+- (req->finish &&
++ (req->finish && req->hmac &&
+ memcmp(ctx->base.ctxr->data + (req->state_sz>>2),
+ ctx->opad, req->state_sz))))
+ /*
+@@ -622,6 +723,7 @@ static int safexcel_ahash_enqueue(struct
+ if (!ctx->base.ctxr)
+ return -ENOMEM;
+ }
++ req->not_first = true;
+
+ ring = ctx->base.ring;
+
+@@ -691,8 +793,34 @@ static int safexcel_ahash_final(struct a
+ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA512)
+ memcpy(areq->result, sha512_zero_message_hash,
+ SHA512_DIGEST_SIZE);
++ else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SM3) {
++ memcpy(areq->result,
++ EIP197_SM3_ZEROM_HASH, SM3_DIGEST_SIZE);
++ }
+
+ return 0;
++ } else if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM &&
++ ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_MD5 &&
++ req->len == sizeof(u32) && !areq->nbytes)) {
++ /* Zero length CRC32 */
++ memcpy(areq->result, ctx->ipad, sizeof(u32));
++ return 0;
++ } else if (unlikely(ctx->cbcmac && req->len == AES_BLOCK_SIZE &&
++ !areq->nbytes)) {
++ /* Zero length CBC MAC */
++ memset(areq->result, 0, AES_BLOCK_SIZE);
++ return 0;
++ } else if (unlikely(req->xcbcmac && req->len == AES_BLOCK_SIZE &&
++ !areq->nbytes)) {
++ /* Zero length (X)CBC/CMAC */
++ int i;
++
++ for (i = 0; i < AES_BLOCK_SIZE / sizeof(u32); i++)
++ ((__be32 *)areq->result)[i] =
++ cpu_to_be32(le32_to_cpu(ctx->ipad[i + 4]));//K3
++ areq->result[0] ^= 0x80; // 10- padding
++ crypto_cipher_encrypt_one(ctx->kaes, areq->result, areq->result);
++ return 0;
+ } else if (unlikely(req->hmac &&
+ (req->len == req->block_sz) &&
+ !areq->nbytes)) {
+@@ -792,6 +920,7 @@ static int safexcel_ahash_cra_init(struc
+ ctx->priv = tmpl->priv;
+ ctx->base.send = safexcel_ahash_send;
+ ctx->base.handle_result = safexcel_handle_result;
++ ctx->fb_do_setkey = false;
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct safexcel_ahash_req));
+@@ -808,6 +937,7 @@ static int safexcel_sha1_init(struct aha
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA1_DIGEST_SIZE;
++ req->digest_sz = SHA1_DIGEST_SIZE;
+ req->block_sz = SHA1_BLOCK_SIZE;
+
+ return 0;
+@@ -889,6 +1019,7 @@ static int safexcel_hmac_sha1_init(struc
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA1_DIGEST_SIZE;
++ req->digest_sz = SHA1_DIGEST_SIZE;
+ req->block_sz = SHA1_BLOCK_SIZE;
+ req->hmac = true;
+
+@@ -1125,6 +1256,7 @@ static int safexcel_sha256_init(struct a
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA256_DIGEST_SIZE;
++ req->digest_sz = SHA256_DIGEST_SIZE;
+ req->block_sz = SHA256_BLOCK_SIZE;
+
+ return 0;
+@@ -1180,6 +1312,7 @@ static int safexcel_sha224_init(struct a
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA256_DIGEST_SIZE;
++ req->digest_sz = SHA256_DIGEST_SIZE;
+ req->block_sz = SHA256_BLOCK_SIZE;
+
+ return 0;
+@@ -1248,6 +1381,7 @@ static int safexcel_hmac_sha224_init(str
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA256_DIGEST_SIZE;
++ req->digest_sz = SHA256_DIGEST_SIZE;
+ req->block_sz = SHA256_BLOCK_SIZE;
+ req->hmac = true;
+
+@@ -1318,6 +1452,7 @@ static int safexcel_hmac_sha256_init(str
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA256_DIGEST_SIZE;
++ req->digest_sz = SHA256_DIGEST_SIZE;
+ req->block_sz = SHA256_BLOCK_SIZE;
+ req->hmac = true;
+
+@@ -1375,6 +1510,7 @@ static int safexcel_sha512_init(struct a
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
++ req->digest_sz = SHA512_DIGEST_SIZE;
+ req->block_sz = SHA512_BLOCK_SIZE;
+
+ return 0;
+@@ -1430,6 +1566,7 @@ static int safexcel_sha384_init(struct a
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
++ req->digest_sz = SHA512_DIGEST_SIZE;
+ req->block_sz = SHA512_BLOCK_SIZE;
+
+ return 0;
+@@ -1498,6 +1635,7 @@ static int safexcel_hmac_sha512_init(str
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA512;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
++ req->digest_sz = SHA512_DIGEST_SIZE;
+ req->block_sz = SHA512_BLOCK_SIZE;
+ req->hmac = true;
+
+@@ -1568,6 +1706,7 @@ static int safexcel_hmac_sha384_init(str
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA384;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = SHA512_DIGEST_SIZE;
++ req->digest_sz = SHA512_DIGEST_SIZE;
+ req->block_sz = SHA512_BLOCK_SIZE;
+ req->hmac = true;
+
+@@ -1625,6 +1764,7 @@ static int safexcel_md5_init(struct ahas
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = MD5_DIGEST_SIZE;
++ req->digest_sz = MD5_DIGEST_SIZE;
+ req->block_sz = MD5_HMAC_BLOCK_SIZE;
+
+ return 0;
+@@ -1686,6 +1826,7 @@ static int safexcel_hmac_md5_init(struct
+ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_MD5;
+ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+ req->state_sz = MD5_DIGEST_SIZE;
++ req->digest_sz = MD5_DIGEST_SIZE;
+ req->block_sz = MD5_HMAC_BLOCK_SIZE;
+ req->len_is_le = true; /* MD5 is little endian! ... */
+ req->hmac = true;
+@@ -1738,5 +1879,1235 @@ struct safexcel_alg_template safexcel_al
+ .cra_module = THIS_MODULE,
+ },
+ },
++ },
++};
++
++static int safexcel_crc32_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret = safexcel_ahash_cra_init(tfm);
++
++ /* Default 'key' is all zeroes */
++ memset(ctx->ipad, 0, sizeof(u32));
++ return ret;
++}
++
++static int safexcel_crc32_init(struct ahash_request *areq)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ /* Start from loaded key */
++ req->state[0] = (__force __le32)le32_to_cpu(~ctx->ipad[0]);
++ /* Set processed to non-zero to enable invalidation detection */
++ req->len = sizeof(u32);
++ req->processed = sizeof(u32);
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_CRC32;
++ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
++ req->state_sz = sizeof(u32);
++ req->digest_sz = sizeof(u32);
++ req->block_sz = sizeof(u32);
++
++ return 0;
++}
++
++static int safexcel_crc32_setkey(struct crypto_ahash *tfm, const u8 *key,
++ unsigned int keylen)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
++
++ if (keylen != sizeof(u32))
++ return -EINVAL;
++
++ memcpy(ctx->ipad, key, sizeof(u32));
++ return 0;
++}
++
++static int safexcel_crc32_digest(struct ahash_request *areq)
++{
++ return safexcel_crc32_init(areq) ?: safexcel_ahash_finup(areq);
++}
++
++struct safexcel_alg_template safexcel_alg_crc32 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = 0,
++ .alg.ahash = {
++ .init = safexcel_crc32_init,
++ .update = safexcel_ahash_update,
++ .final = safexcel_ahash_final,
++ .finup = safexcel_ahash_finup,
++ .digest = safexcel_crc32_digest,
++ .setkey = safexcel_crc32_setkey,
++ .export = safexcel_ahash_export,
++ .import = safexcel_ahash_import,
++ .halg = {
++ .digestsize = sizeof(u32),
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "crc32",
++ .cra_driver_name = "safexcel-crc32",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_OPTIONAL_KEY |
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_crc32_cra_init,
++ .cra_exit = safexcel_ahash_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_cbcmac_init(struct ahash_request *areq)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ /* Start from loaded keys */
++ memcpy(req->state, ctx->ipad, ctx->key_sz);
++ /* Set processed to non-zero to enable invalidation detection */
++ req->len = AES_BLOCK_SIZE;
++ req->processed = AES_BLOCK_SIZE;
++
++ req->digest = CONTEXT_CONTROL_DIGEST_XCM;
++ req->state_sz = ctx->key_sz;
++ req->digest_sz = AES_BLOCK_SIZE;
++ req->block_sz = AES_BLOCK_SIZE;
++ req->xcbcmac = true;
++
++ return 0;
++}
++
++static int safexcel_cbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
++ unsigned int len)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
++ struct crypto_aes_ctx aes;
++ int ret, i;
++
++ ret = aes_expandkey(&aes, key, len);
++ if (ret)
++ return ret;
++
++ memset(ctx->ipad, 0, 2 * AES_BLOCK_SIZE);
++ for (i = 0; i < len / sizeof(u32); i++)
++ ctx->ipad[i + 8] = (__force __le32)cpu_to_be32(aes.key_enc[i]);
++
++ if (len == AES_KEYSIZE_192) {
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
++ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
++ } else if (len == AES_KEYSIZE_256) {
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
++ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
++ } else {
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
++ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
++ }
++ ctx->cbcmac = true;
++
++ memzero_explicit(&aes, sizeof(aes));
++ return 0;
++}
++
++static int safexcel_cbcmac_digest(struct ahash_request *areq)
++{
++ return safexcel_cbcmac_init(areq) ?: safexcel_ahash_finup(areq);
++}
++
++struct safexcel_alg_template safexcel_alg_cbcmac = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = 0,
++ .alg.ahash = {
++ .init = safexcel_cbcmac_init,
++ .update = safexcel_ahash_update,
++ .final = safexcel_ahash_final,
++ .finup = safexcel_ahash_finup,
++ .digest = safexcel_cbcmac_digest,
++ .setkey = safexcel_cbcmac_setkey,
++ .export = safexcel_ahash_export,
++ .import = safexcel_ahash_import,
++ .halg = {
++ .digestsize = AES_BLOCK_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "cbcmac(aes)",
++ .cra_driver_name = "safexcel-cbcmac-aes",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = 1,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_ahash_cra_init,
++ .cra_exit = safexcel_ahash_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_xcbcmac_setkey(struct crypto_ahash *tfm, const u8 *key,
++ unsigned int len)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
++ struct crypto_aes_ctx aes;
++ u32 key_tmp[3 * AES_BLOCK_SIZE / sizeof(u32)];
++ int ret, i;
++
++ ret = aes_expandkey(&aes, key, len);
++ if (ret)
++ return ret;
++
++ /* precompute the XCBC key material */
++ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
++ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
++ CRYPTO_TFM_REQ_MASK);
++ ret = crypto_cipher_setkey(ctx->kaes, key, len);
++ if (ret)
++ return ret;
++
++ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
++ "\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1\x1");
++ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp,
++ "\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2\x2");
++ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)key_tmp + AES_BLOCK_SIZE,
++ "\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3\x3");
++ for (i = 0; i < 3 * AES_BLOCK_SIZE / sizeof(u32); i++)
++ ctx->ipad[i] =
++ cpu_to_le32((__force u32)cpu_to_be32(key_tmp[i]));
++
++ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
++ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
++ CRYPTO_TFM_REQ_MASK);
++ ret = crypto_cipher_setkey(ctx->kaes,
++ (u8 *)key_tmp + 2 * AES_BLOCK_SIZE,
++ AES_MIN_KEY_SIZE);
++ if (ret)
++ return ret;
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
++ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
++ ctx->cbcmac = false;
++
++ memzero_explicit(&aes, sizeof(aes));
++ return 0;
++}
++
++static int safexcel_xcbcmac_cra_init(struct crypto_tfm *tfm)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_ahash_cra_init(tfm);
++ ctx->kaes = crypto_alloc_cipher("aes", 0, 0);
++ return PTR_ERR_OR_ZERO(ctx->kaes);
++}
++
++static void safexcel_xcbcmac_cra_exit(struct crypto_tfm *tfm)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ crypto_free_cipher(ctx->kaes);
++ safexcel_ahash_cra_exit(tfm);
++}
++
++struct safexcel_alg_template safexcel_alg_xcbcmac = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = 0,
++ .alg.ahash = {
++ .init = safexcel_cbcmac_init,
++ .update = safexcel_ahash_update,
++ .final = safexcel_ahash_final,
++ .finup = safexcel_ahash_finup,
++ .digest = safexcel_cbcmac_digest,
++ .setkey = safexcel_xcbcmac_setkey,
++ .export = safexcel_ahash_export,
++ .import = safexcel_ahash_import,
++ .halg = {
++ .digestsize = AES_BLOCK_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "xcbc(aes)",
++ .cra_driver_name = "safexcel-xcbc-aes",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_xcbcmac_cra_init,
++ .cra_exit = safexcel_xcbcmac_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
++ unsigned int len)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
++ struct crypto_aes_ctx aes;
++ __be64 consts[4];
++ u64 _const[2];
++ u8 msb_mask, gfmask;
++ int ret, i;
++
++ ret = aes_expandkey(&aes, key, len);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < len / sizeof(u32); i++)
++ ctx->ipad[i + 8] =
++ cpu_to_le32((__force u32)cpu_to_be32(aes.key_enc[i]));
++
++ /* precompute the CMAC key material */
++ crypto_cipher_clear_flags(ctx->kaes, CRYPTO_TFM_REQ_MASK);
++ crypto_cipher_set_flags(ctx->kaes, crypto_ahash_get_flags(tfm) &
++ CRYPTO_TFM_REQ_MASK);
++ ret = crypto_cipher_setkey(ctx->kaes, key, len);
++ if (ret)
++ return ret;
++
++ /* code below borrowed from crypto/cmac.c */
++ /* encrypt the zero block */
++ memset(consts, 0, AES_BLOCK_SIZE);
++ crypto_cipher_encrypt_one(ctx->kaes, (u8 *)consts, (u8 *)consts);
++
++ gfmask = 0x87;
++ _const[0] = be64_to_cpu(consts[1]);
++ _const[1] = be64_to_cpu(consts[0]);
++
++ /* gf(2^128) multiply zero-ciphertext with u and u^2 */
++ for (i = 0; i < 4; i += 2) {
++ msb_mask = ((s64)_const[1] >> 63) & gfmask;
++ _const[1] = (_const[1] << 1) | (_const[0] >> 63);
++ _const[0] = (_const[0] << 1) ^ msb_mask;
++
++ consts[i + 0] = cpu_to_be64(_const[1]);
++ consts[i + 1] = cpu_to_be64(_const[0]);
++ }
++ /* end of code borrowed from crypto/cmac.c */
++
++ for (i = 0; i < 2 * AES_BLOCK_SIZE / sizeof(u32); i++)
++ ctx->ipad[i] = (__force __le32)cpu_to_be32(((u32 *)consts)[i]);
++
++ if (len == AES_KEYSIZE_192) {
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC192;
++ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
++ } else if (len == AES_KEYSIZE_256) {
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC256;
++ ctx->key_sz = AES_MAX_KEY_SIZE + 2 * AES_BLOCK_SIZE;
++ } else {
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_XCBC128;
++ ctx->key_sz = AES_MIN_KEY_SIZE + 2 * AES_BLOCK_SIZE;
++ }
++ ctx->cbcmac = false;
++
++ memzero_explicit(&aes, sizeof(aes));
++ return 0;
++}
++
++struct safexcel_alg_template safexcel_alg_cmac = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = 0,
++ .alg.ahash = {
++ .init = safexcel_cbcmac_init,
++ .update = safexcel_ahash_update,
++ .final = safexcel_ahash_final,
++ .finup = safexcel_ahash_finup,
++ .digest = safexcel_cbcmac_digest,
++ .setkey = safexcel_cmac_setkey,
++ .export = safexcel_ahash_export,
++ .import = safexcel_ahash_import,
++ .halg = {
++ .digestsize = AES_BLOCK_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "cmac(aes)",
++ .cra_driver_name = "safexcel-cmac-aes",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = AES_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_xcbcmac_cra_init,
++ .cra_exit = safexcel_xcbcmac_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_sm3_init(struct ahash_request *areq)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
++ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++ req->state_sz = SM3_DIGEST_SIZE;
++ req->digest_sz = SM3_DIGEST_SIZE;
++ req->block_sz = SM3_BLOCK_SIZE;
++
++ return 0;
++}
++
++static int safexcel_sm3_digest(struct ahash_request *areq)
++{
++ int ret = safexcel_sm3_init(areq);
++
++ if (ret)
++ return ret;
++
++ return safexcel_ahash_finup(areq);
++}
++
++struct safexcel_alg_template safexcel_alg_sm3 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SM3,
++ .alg.ahash = {
++ .init = safexcel_sm3_init,
++ .update = safexcel_ahash_update,
++ .final = safexcel_ahash_final,
++ .finup = safexcel_ahash_finup,
++ .digest = safexcel_sm3_digest,
++ .export = safexcel_ahash_export,
++ .import = safexcel_ahash_import,
++ .halg = {
++ .digestsize = SM3_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "sm3",
++ .cra_driver_name = "safexcel-sm3",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SM3_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_ahash_cra_init,
++ .cra_exit = safexcel_ahash_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_hmac_sm3_setkey(struct crypto_ahash *tfm, const u8 *key,
++ unsigned int keylen)
++{
++ return safexcel_hmac_alg_setkey(tfm, key, keylen, "safexcel-sm3",
++ SM3_DIGEST_SIZE);
++}
++
++static int safexcel_hmac_sm3_init(struct ahash_request *areq)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ /* Start from ipad precompute */
++ memcpy(req->state, ctx->ipad, SM3_DIGEST_SIZE);
++ /* Already processed the key^ipad part now! */
++ req->len = SM3_BLOCK_SIZE;
++ req->processed = SM3_BLOCK_SIZE;
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SM3;
++ req->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
++ req->state_sz = SM3_DIGEST_SIZE;
++ req->digest_sz = SM3_DIGEST_SIZE;
++ req->block_sz = SM3_BLOCK_SIZE;
++ req->hmac = true;
++
++ return 0;
++}
++
++static int safexcel_hmac_sm3_digest(struct ahash_request *areq)
++{
++ int ret = safexcel_hmac_sm3_init(areq);
++
++ if (ret)
++ return ret;
++
++ return safexcel_ahash_finup(areq);
++}
++
++struct safexcel_alg_template safexcel_alg_hmac_sm3 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SM3,
++ .alg.ahash = {
++ .init = safexcel_hmac_sm3_init,
++ .update = safexcel_ahash_update,
++ .final = safexcel_ahash_final,
++ .finup = safexcel_ahash_finup,
++ .digest = safexcel_hmac_sm3_digest,
++ .setkey = safexcel_hmac_sm3_setkey,
++ .export = safexcel_ahash_export,
++ .import = safexcel_ahash_import,
++ .halg = {
++ .digestsize = SM3_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "hmac(sm3)",
++ .cra_driver_name = "safexcel-hmac-sm3",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SM3_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_ahash_cra_init,
++ .cra_exit = safexcel_ahash_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_sha3_224_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
++ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
++ req->state_sz = SHA3_224_DIGEST_SIZE;
++ req->digest_sz = SHA3_224_DIGEST_SIZE;
++ req->block_sz = SHA3_224_BLOCK_SIZE;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_sha3_fbcheck(struct ahash_request *req)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct ahash_request *subreq = ahash_request_ctx(req);
++ int ret = 0;
++
++ if (ctx->do_fallback) {
++ ahash_request_set_tfm(subreq, ctx->fback);
++ ahash_request_set_callback(subreq, req->base.flags,
++ req->base.complete, req->base.data);
++ ahash_request_set_crypt(subreq, req->src, req->result,
++ req->nbytes);
++ if (!ctx->fb_init_done) {
++ if (ctx->fb_do_setkey) {
++ /* Set fallback cipher HMAC key */
++ u8 key[SHA3_224_BLOCK_SIZE];
++
++ memcpy(key, ctx->ipad,
++ crypto_ahash_blocksize(ctx->fback) / 2);
++ memcpy(key +
++ crypto_ahash_blocksize(ctx->fback) / 2,
++ ctx->opad,
++ crypto_ahash_blocksize(ctx->fback) / 2);
++ ret = crypto_ahash_setkey(ctx->fback, key,
++ crypto_ahash_blocksize(ctx->fback));
++ memzero_explicit(key,
++ crypto_ahash_blocksize(ctx->fback));
++ ctx->fb_do_setkey = false;
++ }
++ ret = ret ?: crypto_ahash_init(subreq);
++ ctx->fb_init_done = true;
++ }
++ }
++ return ret;
++}
++
++static int safexcel_sha3_update(struct ahash_request *req)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct ahash_request *subreq = ahash_request_ctx(req);
++
++ ctx->do_fallback = true;
++ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_update(subreq);
++}
++
++static int safexcel_sha3_final(struct ahash_request *req)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct ahash_request *subreq = ahash_request_ctx(req);
++
++ ctx->do_fallback = true;
++ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_final(subreq);
++}
++
++static int safexcel_sha3_finup(struct ahash_request *req)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct ahash_request *subreq = ahash_request_ctx(req);
++
++ ctx->do_fallback |= !req->nbytes;
++ if (ctx->do_fallback)
++ /* Update or ex/import happened or len 0, cannot use the HW */
++ return safexcel_sha3_fbcheck(req) ?:
++ crypto_ahash_finup(subreq);
++ else
++ return safexcel_ahash_finup(req);
++}
++
++static int safexcel_sha3_digest_fallback(struct ahash_request *req)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct ahash_request *subreq = ahash_request_ctx(req);
++
++ ctx->do_fallback = true;
++ ctx->fb_init_done = false;
++ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_finup(subreq);
++}
++
++static int safexcel_sha3_224_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_sha3_224_init(req) ?: safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length hash, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++static int safexcel_sha3_export(struct ahash_request *req, void *out)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct ahash_request *subreq = ahash_request_ctx(req);
++
++ ctx->do_fallback = true;
++ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_export(subreq, out);
++}
++
++static int safexcel_sha3_import(struct ahash_request *req, const void *in)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct ahash_request *subreq = ahash_request_ctx(req);
++
++ ctx->do_fallback = true;
++ return safexcel_sha3_fbcheck(req) ?: crypto_ahash_import(subreq, in);
++ // return safexcel_ahash_import(req, in);
++}
++
++static int safexcel_sha3_cra_init(struct crypto_tfm *tfm)
++{
++ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ safexcel_ahash_cra_init(tfm);
++
++ /* Allocate fallback implementation */
++ ctx->fback = crypto_alloc_ahash(crypto_tfm_alg_name(tfm), 0,
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(ctx->fback))
++ return PTR_ERR(ctx->fback);
++
++ /* Update statesize from fallback algorithm! */
++ crypto_hash_alg_common(ahash)->statesize =
++ crypto_ahash_statesize(ctx->fback);
++ crypto_ahash_set_reqsize(ahash, max(sizeof(struct safexcel_ahash_req),
++ sizeof(struct ahash_request) +
++ crypto_ahash_reqsize(ctx->fback)));
++ return 0;
++}
++
++static void safexcel_sha3_cra_exit(struct crypto_tfm *tfm)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ crypto_free_ahash(ctx->fback);
++ safexcel_ahash_cra_exit(tfm);
++}
++
++struct safexcel_alg_template safexcel_alg_sha3_224 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_sha3_224_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_sha3_224_digest,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_224_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "sha3-224",
++ .cra_driver_name = "safexcel-sha3-224",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_224_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_sha3_cra_init,
++ .cra_exit = safexcel_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_sha3_256_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
++ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
++ req->state_sz = SHA3_256_DIGEST_SIZE;
++ req->digest_sz = SHA3_256_DIGEST_SIZE;
++ req->block_sz = SHA3_256_BLOCK_SIZE;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_sha3_256_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_sha3_256_init(req) ?: safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length hash, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++struct safexcel_alg_template safexcel_alg_sha3_256 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_sha3_256_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_sha3_256_digest,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_256_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "sha3-256",
++ .cra_driver_name = "safexcel-sha3-256",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_256_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_sha3_cra_init,
++ .cra_exit = safexcel_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_sha3_384_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
++ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
++ req->state_sz = SHA3_384_DIGEST_SIZE;
++ req->digest_sz = SHA3_384_DIGEST_SIZE;
++ req->block_sz = SHA3_384_BLOCK_SIZE;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_sha3_384_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_sha3_384_init(req) ?: safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length hash, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++struct safexcel_alg_template safexcel_alg_sha3_384 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_sha3_384_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_sha3_384_digest,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_384_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "sha3-384",
++ .cra_driver_name = "safexcel-sha3-384",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_384_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_sha3_cra_init,
++ .cra_exit = safexcel_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_sha3_512_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
++ req->digest = CONTEXT_CONTROL_DIGEST_INITIAL;
++ req->state_sz = SHA3_512_DIGEST_SIZE;
++ req->digest_sz = SHA3_512_DIGEST_SIZE;
++ req->block_sz = SHA3_512_BLOCK_SIZE;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_sha3_512_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_sha3_512_init(req) ?: safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length hash, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++struct safexcel_alg_template safexcel_alg_sha3_512 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_sha3_512_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_sha3_512_digest,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_512_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "sha3-512",
++ .cra_driver_name = "safexcel-sha3-512",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_512_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_sha3_cra_init,
++ .cra_exit = safexcel_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_hmac_sha3_cra_init(struct crypto_tfm *tfm, const char *alg)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
++ int ret;
++
++ ret = safexcel_sha3_cra_init(tfm);
++ if (ret)
++ return ret;
++
++ /* Allocate precalc basic digest implementation */
++ ctx->shpre = crypto_alloc_shash(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(ctx->shpre))
++ return PTR_ERR(ctx->shpre);
++
++ ctx->shdesc = kmalloc(sizeof(*ctx->shdesc) +
++ crypto_shash_descsize(ctx->shpre), GFP_KERNEL);
++ if (!ctx->shdesc) {
++ crypto_free_shash(ctx->shpre);
++ return -ENOMEM;
++ }
++ ctx->shdesc->tfm = ctx->shpre;
++ return 0;
++}
++
++static void safexcel_hmac_sha3_cra_exit(struct crypto_tfm *tfm)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ crypto_free_ahash(ctx->fback);
++ crypto_free_shash(ctx->shpre);
++ kfree(ctx->shdesc);
++ safexcel_ahash_cra_exit(tfm);
++}
++
++static int safexcel_hmac_sha3_setkey(struct crypto_ahash *tfm, const u8 *key,
++ unsigned int keylen)
++{
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ int ret = 0;
++
++ if (keylen > crypto_ahash_blocksize(tfm)) {
++ /*
++ * If the key is larger than the blocksize, then hash it
++ * first using our fallback cipher
++ */
++ ret = crypto_shash_digest(ctx->shdesc, key, keylen,
++ (u8 *)ctx->ipad);
++ keylen = crypto_shash_digestsize(ctx->shpre);
++
++ /*
++ * If the digest is larger than half the blocksize, we need to
++ * move the rest to opad due to the way our HMAC infra works.
++ */
++ if (keylen > crypto_ahash_blocksize(tfm) / 2)
++ /* Buffers overlap, need to use memmove iso memcpy! */
++ memmove(ctx->opad,
++ (u8 *)ctx->ipad +
++ crypto_ahash_blocksize(tfm) / 2,
++ keylen - crypto_ahash_blocksize(tfm) / 2);
++ } else {
++ /*
++ * Copy the key to our ipad & opad buffers
++ * Note that ipad and opad each contain one half of the key,
++ * to match the existing HMAC driver infrastructure.
++ */
++ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
++ memcpy(ctx->ipad, key, keylen);
++ } else {
++ memcpy(ctx->ipad, key,
++ crypto_ahash_blocksize(tfm) / 2);
++ memcpy(ctx->opad,
++ key + crypto_ahash_blocksize(tfm) / 2,
++ keylen - crypto_ahash_blocksize(tfm) / 2);
++ }
++ }
++
++ /* Pad key with zeroes */
++ if (keylen <= crypto_ahash_blocksize(tfm) / 2) {
++ memset((u8 *)ctx->ipad + keylen, 0,
++ crypto_ahash_blocksize(tfm) / 2 - keylen);
++ memset(ctx->opad, 0, crypto_ahash_blocksize(tfm) / 2);
++ } else {
++ memset((u8 *)ctx->opad + keylen -
++ crypto_ahash_blocksize(tfm) / 2, 0,
++ crypto_ahash_blocksize(tfm) - keylen);
++ }
++
++ /* If doing fallback, still need to set the new key! */
++ ctx->fb_do_setkey = true;
++ return ret;
++}
++
++static int safexcel_hmac_sha3_224_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ /* Copy (half of) the key */
++ memcpy(req->state, ctx->ipad, SHA3_224_BLOCK_SIZE / 2);
++ /* Start of HMAC should have len == processed == blocksize */
++ req->len = SHA3_224_BLOCK_SIZE;
++ req->processed = SHA3_224_BLOCK_SIZE;
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_224;
++ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
++ req->state_sz = SHA3_224_BLOCK_SIZE / 2;
++ req->digest_sz = SHA3_224_DIGEST_SIZE;
++ req->block_sz = SHA3_224_BLOCK_SIZE;
++ req->hmac = true;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_hmac_sha3_224_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_hmac_sha3_224_init(req) ?:
++ safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length HMAC, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++static int safexcel_hmac_sha3_224_cra_init(struct crypto_tfm *tfm)
++{
++ return safexcel_hmac_sha3_cra_init(tfm, "sha3-224");
++}
++
++struct safexcel_alg_template safexcel_alg_hmac_sha3_224 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_hmac_sha3_224_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_hmac_sha3_224_digest,
++ .setkey = safexcel_hmac_sha3_setkey,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_224_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "hmac(sha3-224)",
++ .cra_driver_name = "safexcel-hmac-sha3-224",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_224_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_hmac_sha3_224_cra_init,
++ .cra_exit = safexcel_hmac_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_hmac_sha3_256_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ /* Copy (half of) the key */
++ memcpy(req->state, ctx->ipad, SHA3_256_BLOCK_SIZE / 2);
++ /* Start of HMAC should have len == processed == blocksize */
++ req->len = SHA3_256_BLOCK_SIZE;
++ req->processed = SHA3_256_BLOCK_SIZE;
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_256;
++ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
++ req->state_sz = SHA3_256_BLOCK_SIZE / 2;
++ req->digest_sz = SHA3_256_DIGEST_SIZE;
++ req->block_sz = SHA3_256_BLOCK_SIZE;
++ req->hmac = true;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_hmac_sha3_256_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_hmac_sha3_256_init(req) ?:
++ safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length HMAC, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++static int safexcel_hmac_sha3_256_cra_init(struct crypto_tfm *tfm)
++{
++ return safexcel_hmac_sha3_cra_init(tfm, "sha3-256");
++}
++
++struct safexcel_alg_template safexcel_alg_hmac_sha3_256 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_hmac_sha3_256_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_hmac_sha3_256_digest,
++ .setkey = safexcel_hmac_sha3_setkey,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_256_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "hmac(sha3-256)",
++ .cra_driver_name = "safexcel-hmac-sha3-256",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_256_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_hmac_sha3_256_cra_init,
++ .cra_exit = safexcel_hmac_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_hmac_sha3_384_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ /* Copy (half of) the key */
++ memcpy(req->state, ctx->ipad, SHA3_384_BLOCK_SIZE / 2);
++ /* Start of HMAC should have len == processed == blocksize */
++ req->len = SHA3_384_BLOCK_SIZE;
++ req->processed = SHA3_384_BLOCK_SIZE;
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_384;
++ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
++ req->state_sz = SHA3_384_BLOCK_SIZE / 2;
++ req->digest_sz = SHA3_384_DIGEST_SIZE;
++ req->block_sz = SHA3_384_BLOCK_SIZE;
++ req->hmac = true;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_hmac_sha3_384_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_hmac_sha3_384_init(req) ?:
++ safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length HMAC, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++static int safexcel_hmac_sha3_384_cra_init(struct crypto_tfm *tfm)
++{
++ return safexcel_hmac_sha3_cra_init(tfm, "sha3-384");
++}
++
++struct safexcel_alg_template safexcel_alg_hmac_sha3_384 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_hmac_sha3_384_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_hmac_sha3_384_digest,
++ .setkey = safexcel_hmac_sha3_setkey,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_384_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "hmac(sha3-384)",
++ .cra_driver_name = "safexcel-hmac-sha3-384",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_384_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_hmac_sha3_384_cra_init,
++ .cra_exit = safexcel_hmac_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
++ },
++};
++
++static int safexcel_hmac_sha3_512_init(struct ahash_request *areq)
++{
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
++ struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
++ struct safexcel_ahash_req *req = ahash_request_ctx(areq);
++
++ memset(req, 0, sizeof(*req));
++
++ /* Copy (half of) the key */
++ memcpy(req->state, ctx->ipad, SHA3_512_BLOCK_SIZE / 2);
++ /* Start of HMAC should have len == processed == blocksize */
++ req->len = SHA3_512_BLOCK_SIZE;
++ req->processed = SHA3_512_BLOCK_SIZE;
++ ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA3_512;
++ req->digest = CONTEXT_CONTROL_DIGEST_HMAC;
++ req->state_sz = SHA3_512_BLOCK_SIZE / 2;
++ req->digest_sz = SHA3_512_DIGEST_SIZE;
++ req->block_sz = SHA3_512_BLOCK_SIZE;
++ req->hmac = true;
++ ctx->do_fallback = false;
++ ctx->fb_init_done = false;
++ return 0;
++}
++
++static int safexcel_hmac_sha3_512_digest(struct ahash_request *req)
++{
++ if (req->nbytes)
++ return safexcel_hmac_sha3_512_init(req) ?:
++ safexcel_ahash_finup(req);
++
++ /* HW cannot do zero length HMAC, use fallback instead */
++ return safexcel_sha3_digest_fallback(req);
++}
++
++static int safexcel_hmac_sha3_512_cra_init(struct crypto_tfm *tfm)
++{
++ return safexcel_hmac_sha3_cra_init(tfm, "sha3-512");
++}
++struct safexcel_alg_template safexcel_alg_hmac_sha3_512 = {
++ .type = SAFEXCEL_ALG_TYPE_AHASH,
++ .algo_mask = SAFEXCEL_ALG_SHA3,
++ .alg.ahash = {
++ .init = safexcel_hmac_sha3_512_init,
++ .update = safexcel_sha3_update,
++ .final = safexcel_sha3_final,
++ .finup = safexcel_sha3_finup,
++ .digest = safexcel_hmac_sha3_512_digest,
++ .setkey = safexcel_hmac_sha3_setkey,
++ .export = safexcel_sha3_export,
++ .import = safexcel_sha3_import,
++ .halg = {
++ .digestsize = SHA3_512_DIGEST_SIZE,
++ .statesize = sizeof(struct safexcel_ahash_export_state),
++ .base = {
++ .cra_name = "hmac(sha3-512)",
++ .cra_driver_name = "safexcel-hmac-sha3-512",
++ .cra_priority = SAFEXCEL_CRA_PRIORITY,
++ .cra_flags = CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_blocksize = SHA3_512_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
++ .cra_init = safexcel_hmac_sha3_512_cra_init,
++ .cra_exit = safexcel_hmac_sha3_cra_exit,
++ .cra_module = THIS_MODULE,
++ },
++ },
+ },
+ };
+--- a/drivers/crypto/inside-secure/safexcel_ring.c
++++ b/drivers/crypto/inside-secure/safexcel_ring.c
+@@ -14,7 +14,12 @@ int safexcel_init_ring_descriptors(struc
+ struct safexcel_desc_ring *cdr,
+ struct safexcel_desc_ring *rdr)
+ {
+- cdr->offset = sizeof(u32) * priv->config.cd_offset;
++ int i;
++ struct safexcel_command_desc *cdesc;
++ dma_addr_t atok;
++
++ /* Actual command descriptor ring */
++ cdr->offset = priv->config.cd_offset;
+ cdr->base = dmam_alloc_coherent(priv->dev,
+ cdr->offset * EIP197_DEFAULT_RING_SIZE,
+ &cdr->base_dma, GFP_KERNEL);
+@@ -24,7 +29,34 @@ int safexcel_init_ring_descriptors(struc
+ cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
+ cdr->read = cdr->base;
+
+- rdr->offset = sizeof(u32) * priv->config.rd_offset;
++ /* Command descriptor shadow ring for storing additional token data */
++ cdr->shoffset = priv->config.cdsh_offset;
++ cdr->shbase = dmam_alloc_coherent(priv->dev,
++ cdr->shoffset *
++ EIP197_DEFAULT_RING_SIZE,
++ &cdr->shbase_dma, GFP_KERNEL);
++ if (!cdr->shbase)
++ return -ENOMEM;
++ cdr->shwrite = cdr->shbase;
++ cdr->shbase_end = cdr->shbase + cdr->shoffset *
++ (EIP197_DEFAULT_RING_SIZE - 1);
++
++ /*
++ * Populate command descriptors with physical pointers to shadow descs.
++ * Note that we only need to do this once if we don't overwrite them.
++ */
++ cdesc = cdr->base;
++ atok = cdr->shbase_dma;
++ for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
++ cdesc->atok_lo = lower_32_bits(atok);
++ cdesc->atok_hi = upper_32_bits(atok);
++ cdesc = (void *)cdesc + cdr->offset;
++ atok += cdr->shoffset;
++ }
++
++ rdr->offset = priv->config.rd_offset;
++ /* Use shoffset for result token offset here */
++ rdr->shoffset = priv->config.res_offset;
+ rdr->base = dmam_alloc_coherent(priv->dev,
+ rdr->offset * EIP197_DEFAULT_RING_SIZE,
+ &rdr->base_dma, GFP_KERNEL);
+@@ -42,11 +74,40 @@ inline int safexcel_select_ring(struct s
+ return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
+ }
+
+-static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
+- struct safexcel_desc_ring *ring)
++static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
++ struct safexcel_desc_ring *ring,
++ bool first,
++ struct safexcel_token **atoken)
+ {
+ void *ptr = ring->write;
+
++ if (first)
++ *atoken = ring->shwrite;
++
++ if ((ring->write == ring->read - ring->offset) ||
++ (ring->read == ring->base && ring->write == ring->base_end))
++ return ERR_PTR(-ENOMEM);
++
++ if (ring->write == ring->base_end) {
++ ring->write = ring->base;
++ ring->shwrite = ring->shbase;
++ } else {
++ ring->write += ring->offset;
++ ring->shwrite += ring->shoffset;
++ }
++
++ return ptr;
++}
++
++static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
++ struct safexcel_desc_ring *ring,
++ struct result_data_desc **rtoken)
++{
++ void *ptr = ring->write;
++
++ /* Result token at relative offset shoffset */
++ *rtoken = ring->write + ring->shoffset;
++
+ if ((ring->write == ring->read - ring->offset) ||
+ (ring->read == ring->base && ring->write == ring->base_end))
+ return ERR_PTR(-ENOMEM);
+@@ -106,10 +167,13 @@ void safexcel_ring_rollback_wptr(struct
+ if (ring->write == ring->read)
+ return;
+
+- if (ring->write == ring->base)
++ if (ring->write == ring->base) {
+ ring->write = ring->base_end;
+- else
++ ring->shwrite = ring->shbase_end;
++ } else {
+ ring->write -= ring->offset;
++ ring->shwrite -= ring->shoffset;
++ }
+ }
+
+ struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+@@ -117,26 +181,26 @@ struct safexcel_command_desc *safexcel_a
+ bool first, bool last,
+ dma_addr_t data, u32 data_len,
+ u32 full_data_len,
+- dma_addr_t context) {
++ dma_addr_t context,
++ struct safexcel_token **atoken)
++{
+ struct safexcel_command_desc *cdesc;
+- int i;
+
+- cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
++ cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
++ first, atoken);
+ if (IS_ERR(cdesc))
+ return cdesc;
+
+- memset(cdesc, 0, sizeof(struct safexcel_command_desc));
+-
+- cdesc->first_seg = first;
+- cdesc->last_seg = last;
+ cdesc->particle_size = data_len;
++ cdesc->rsvd0 = 0;
++ cdesc->last_seg = last;
++ cdesc->first_seg = first;
++ cdesc->additional_cdata_size = 0;
++ cdesc->rsvd1 = 0;
+ cdesc->data_lo = lower_32_bits(data);
+ cdesc->data_hi = upper_32_bits(data);
+
+- if (first && context) {
+- struct safexcel_token *token =
+- (struct safexcel_token *)cdesc->control_data.token;
+-
++ if (first) {
+ /*
+ * Note that the length here MUST be >0 or else the EIP(1)97
+ * may hang. Newer EIP197 firmware actually incorporates this
+@@ -146,20 +210,12 @@ struct safexcel_command_desc *safexcel_a
+ cdesc->control_data.packet_length = full_data_len ?: 1;
+ cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
+ EIP197_OPTION_64BIT_CTX |
+- EIP197_OPTION_CTX_CTRL_IN_CMD;
+- cdesc->control_data.context_lo =
+- (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
++ EIP197_OPTION_CTX_CTRL_IN_CMD |
++ EIP197_OPTION_RC_AUTO;
++ cdesc->control_data.type = EIP197_TYPE_BCLA;
++ cdesc->control_data.context_lo = lower_32_bits(context) |
++ EIP197_CONTEXT_SMALL;
+ cdesc->control_data.context_hi = upper_32_bits(context);
+-
+- if (priv->version == EIP197B_MRVL ||
+- priv->version == EIP197D_MRVL)
+- cdesc->control_data.options |= EIP197_OPTION_RC_AUTO;
+-
+- /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
+- cdesc->control_data.refresh = 2;
+-
+- for (i = 0; i < EIP197_MAX_TOKENS; i++)
+- eip197_noop_token(&token[i]);
+ }
+
+ return cdesc;
+@@ -171,18 +227,27 @@ struct safexcel_result_desc *safexcel_ad
+ dma_addr_t data, u32 len)
+ {
+ struct safexcel_result_desc *rdesc;
++ struct result_data_desc *rtoken;
+
+- rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
++ rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
++ &rtoken);
+ if (IS_ERR(rdesc))
+ return rdesc;
+
+- memset(rdesc, 0, sizeof(struct safexcel_result_desc));
+-
+- rdesc->first_seg = first;
+- rdesc->last_seg = last;
+ rdesc->particle_size = len;
++ rdesc->rsvd0 = 0;
++ rdesc->descriptor_overflow = 0;
++ rdesc->buffer_overflow = 0;
++ rdesc->last_seg = last;
++ rdesc->first_seg = first;
++ rdesc->result_size = EIP197_RD64_RESULT_SIZE;
++ rdesc->rsvd1 = 0;
+ rdesc->data_lo = lower_32_bits(data);
+ rdesc->data_hi = upper_32_bits(data);
+
++ /* Clear length & error code in result token */
++ rtoken->packet_length = 0;
++ rtoken->error_code = 0;
++
+ return rdesc;
+ }
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0501-crypto-add-eip97-inside-secure-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0501-crypto-add-eip97-inside-secure-support.patch
new file mode 100644
index 0000000..3eda4f2
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0501-crypto-add-eip97-inside-secure-support.patch
@@ -0,0 +1,27 @@
+--- a/drivers/crypto/inside-secure/safexcel.c
++++ b/drivers/crypto/inside-secure/safexcel.c
+@@ -595,6 +595,14 @@ static int safexcel_hw_init(struct safex
+ val |= EIP197_MST_CTRL_TX_MAX_CMD(5);
+ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
+ }
++ /*
++ * Set maximum number of TX commands to 2^4 = 16 for EIP97 HW2.1/HW2.3
++ */
++ else {
++ val = 0;
++ val |= EIP97_MST_CTRL_TX_MAX_CMD(4);
++ writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
++ }
+
+ /* Configure wr/rd cache values */
+ writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
+--- a/drivers/crypto/inside-secure/safexcel.h
++++ b/drivers/crypto/inside-secure/safexcel.h
+@@ -306,6 +306,7 @@
+ #define EIP197_MST_CTRL_RD_CACHE(n) (((n) & 0xf) << 0)
+ #define EIP197_MST_CTRL_WD_CACHE(n) (((n) & 0xf) << 4)
+ #define EIP197_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 20)
++#define EIP97_MST_CTRL_TX_MAX_CMD(n) (((n) & 0xf) << 4)
+ #define EIP197_MST_CTRL_BYTE_SWAP BIT(24)
+ #define EIP197_MST_CTRL_NO_BYTE_SWAP BIT(25)
+ #define EIP197_MST_CTRL_BYTE_SWAP_BITS GENMASK(25, 24)
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0502-dts-mt7623-eip97-inside-secure-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0502-dts-mt7623-eip97-inside-secure-support.patch
new file mode 100644
index 0000000..06077fb
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0502-dts-mt7623-eip97-inside-secure-support.patch
@@ -0,0 +1,23 @@
+--- a/arch/arm/boot/dts/mt7623.dtsi
++++ b/arch/arm/boot/dts/mt7623.dtsi
+@@ -1047,17 +1047,14 @@
+ };
+
+ crypto: crypto@1b240000 {
+- compatible = "mediatek,eip97-crypto";
++ compatible = "inside-secure,safexcel-eip97";
+ reg = <0 0x1b240000 0 0x20000>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>,
+ <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_SPI 97 IRQ_TYPE_LEVEL_LOW>;
++ <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "ring0", "ring1", "ring2", "ring3";
+ clocks = <ðsys CLK_ETHSYS_CRYPTO>;
+- clock-names = "cryp";
+- power-domains = <&scpsys MT2701_POWER_DOMAIN_ETH>;
+- status = "disabled";
+ };
+
+ bdpsys: syscon@1c000000 {
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0503-crypto-fix-eip97-cache-incoherent.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0503-crypto-fix-eip97-cache-incoherent.patch
new file mode 100644
index 0000000..5bc0fd0
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0503-crypto-fix-eip97-cache-incoherent.patch
@@ -0,0 +1,26 @@
+--- a/drivers/crypto/inside-secure/safexcel.h
++++ b/drivers/crypto/inside-secure/safexcel.h
+@@ -722,6 +722,9 @@ enum safexcel_eip_version {
+ /* Priority we use for advertising our algorithms */
+ #define SAFEXCEL_CRA_PRIORITY 300
+
++/* System cache line size */
++#define SYSTEM_CACHELINE_SIZE 64
++
+ /* SM3 digest result for zero length message */
+ #define EIP197_SM3_ZEROM_HASH "\x1A\xB2\x1D\x83\x55\xCF\xA1\x7F" \
+ "\x8E\x61\x19\x48\x31\xE8\x1A\x8F" \
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -57,9 +57,9 @@ struct safexcel_ahash_req {
+ u8 block_sz; /* block size, only set once */
+ u8 digest_sz; /* output digest size, only set once */
+ __le32 state[SHA3_512_BLOCK_SIZE /
+- sizeof(__le32)] __aligned(sizeof(__le32));
++ sizeof(__le32)] __aligned(SYSTEM_CACHELINE_SIZE);
+
+- u64 len;
++ u64 len __aligned(SYSTEM_CACHELINE_SIZE);
+ u64 processed;
+
+ u8 cache[HASH_CACHE_SIZE] __aligned(sizeof(u32));
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0504-macsec-revert-async-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0504-macsec-revert-async-support.patch
new file mode 100644
index 0000000..d52db50
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0504-macsec-revert-async-support.patch
@@ -0,0 +1,12 @@
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -1309,8 +1309,7 @@
+ struct crypto_aead *tfm;
+ int ret;
+
+- /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
+- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
++ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
+
+ if (IS_ERR(tfm))
+ return tfm;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch
new file mode 100644
index 0000000..a49b921
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch
@@ -0,0 +1,230 @@
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Wed, 26 Feb 2020 10:23:41 +0000
+Subject: [PATCH] net: phylink: propagate resolved link config via
+ mac_link_up()
+
+Propagate the resolved link parameters via the mac_link_up() call for
+MACs that do not automatically track their PCS state. We propagate the
+link parameters via function arguments so that inappropriate members
+of struct phylink_link_state can't be accessed, and creating a new
+structure just for this adds needless complexity to the API.
+
+Tested-by: Andre Przywara <andre.przywara@arm.com>
+Tested-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/Documentation/networking/sfp-phylink.rst
++++ b/Documentation/networking/sfp-phylink.rst
+@@ -74,10 +74,13 @@ phylib to the sfp/phylink support. Plea
+ this documentation.
+
+ 1. Optionally split the network driver's phylib update function into
+- three parts dealing with link-down, link-up and reconfiguring the
+- MAC settings. This can be done as a separate preparation commit.
++ two parts dealing with link-down and link-up. This can be done as
++ a separate preparation commit.
+
+- An example of this preparation can be found in git commit fc548b991fb0.
++ An older example of this preparation can be found in git commit
++ fc548b991fb0, although this was splitting into three parts; the
++ link-up part now includes configuring the MAC for the link settings.
++ Please see :c:func:`mac_link_up` for more information on this.
+
+ 2. Replace::
+
+@@ -207,6 +210,14 @@ this documentation.
+ using. This is particularly important for in-band negotiation
+ methods such as 1000base-X and SGMII.
+
++ The :c:func:`mac_link_up` method is used to inform the MAC that the
++ link has come up. The call includes the negotiation mode and interface
++ for reference only. The finalised link parameters are also supplied
++ (speed, duplex and flow control/pause enablement settings) which
++ should be used to configure the MAC when the MAC and PCS are not
++ tightly integrated, or when the settings are not coming from in-band
++ negotiation.
++
+ The :c:func:`mac_config` method is used to update the MAC with the
+ requested state, and must avoid unnecessarily taking the link down
+ when making changes to the MAC configuration. This means the
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3655,9 +3655,11 @@ static void mvneta_mac_link_down(struct
+ mvneta_set_eee(pp, false);
+ }
+
+-static void mvneta_mac_link_up(struct phylink_config *config, unsigned int mode,
+- phy_interface_t interface,
+- struct phy_device *phy)
++static void mvneta_mac_link_up(struct phylink_config *config,
++ struct phy_device *phy,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct mvneta_port *pp = netdev_priv(ndev);
+--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+@@ -58,8 +58,11 @@ static struct {
+ */
+ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state);
+-static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
+- phy_interface_t interface, struct phy_device *phy);
++static void mvpp2_mac_link_up(struct phylink_config *config,
++ struct phy_device *phy,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause);
+
+ /* Queue modes */
+ #define MVPP2_QDIST_SINGLE_MODE 0
+@@ -3468,8 +3471,9 @@ static void mvpp2_start_dev(struct mvpp2
+ .interface = port->phy_interface,
+ };
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+- mvpp2_mac_link_up(&port->phylink_config, MLO_AN_INBAND,
+- port->phy_interface, NULL);
++ mvpp2_mac_link_up(&port->phylink_config, NULL,
++ MLO_AN_INBAND, port->phy_interface,
++ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+ }
+
+ netif_tx_start_all_queues(port->dev);
+@@ -5125,8 +5129,11 @@ static void mvpp2_mac_config(struct phyl
+ mvpp2_port_enable(port);
+ }
+
+-static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
+- phy_interface_t interface, struct phy_device *phy)
++static void mvpp2_mac_link_up(struct phylink_config *config,
++ struct phy_device *phy,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ u32 val;
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -925,8 +925,10 @@ static void stmmac_mac_link_down(struct
+ }
+
+ static void stmmac_mac_link_up(struct phylink_config *config,
++ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+- struct phy_device *phy)
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1501,9 +1501,10 @@ static void axienet_mac_link_down(struct
+ }
+
+ static void axienet_mac_link_up(struct phylink_config *config,
+- unsigned int mode,
+- phy_interface_t interface,
+- struct phy_device *phy)
++ struct phy_device *phy,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ /* nothing meaningful to do */
+ }
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -447,8 +447,11 @@ static void phylink_mac_link_up(struct p
+ struct net_device *ndev = pl->netdev;
+
+ pl->cur_interface = link_state.interface;
+- pl->ops->mac_link_up(pl->config, pl->cur_link_an_mode,
+- pl->cur_interface, pl->phydev);
++ pl->ops->mac_link_up(pl->config, pl->phydev,
++ pl->cur_link_an_mode, pl->cur_interface,
++ link_state.speed, link_state.duplex,
++ !!(link_state.pause & MLO_PAUSE_TX),
++ !!(link_state.pause & MLO_PAUSE_RX));
+
+ if (ndev)
+ netif_carrier_on(ndev);
+--- a/include/linux/phylink.h
++++ b/include/linux/phylink.h
+@@ -91,9 +91,10 @@ struct phylink_mac_ops {
+ void (*mac_an_restart)(struct phylink_config *config);
+ void (*mac_link_down)(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface);
+- void (*mac_link_up)(struct phylink_config *config, unsigned int mode,
+- phy_interface_t interface,
+- struct phy_device *phy);
++ void (*mac_link_up)(struct phylink_config *config,
++ struct phy_device *phy, unsigned int mode,
++ phy_interface_t interface, int speed, int duplex,
++ bool tx_pause, bool rx_pause);
+ };
+
+ #if 0 /* For kernel-doc purposes only. */
+@@ -217,19 +218,34 @@ void mac_link_down(struct phylink_config
+ /**
+ * mac_link_up() - allow the link to come up
+ * @config: a pointer to a &struct phylink_config.
++ * @phy: any attached phy
+ * @mode: link autonegotiation mode
+ * @interface: link &typedef phy_interface_t mode
+- * @phy: any attached phy
++ * @speed: link speed
++ * @duplex: link duplex
++ * @tx_pause: link transmit pause enablement status
++ * @rx_pause: link receive pause enablement status
++ *
++ * Configure the MAC for an established link.
++ *
++ * @speed, @duplex, @tx_pause and @rx_pause indicate the finalised link
++ * settings, and should be used to configure the MAC block appropriately
++ * where these settings are not automatically conveyed from the PCS block,
++ * or if in-band negotiation (as defined by phylink_autoneg_inband(@mode))
++ * is disabled.
++ *
++ * Note that when 802.3z in-band negotiation is in use, it is possible
++ * that the user wishes to override the pause settings, and this should
++ * be allowed when considering the implementation of this method.
+ *
+- * If @mode is not an in-band negotiation mode (as defined by
+- * phylink_autoneg_inband()), allow the link to come up. If @phy
+- * is non-%NULL, configure Energy Efficient Ethernet by calling
++ * If in-band negotiation mode is disabled, allow the link to come up. If
++ * @phy is non-%NULL, configure Energy Efficient Ethernet by calling
+ * phy_init_eee() and perform appropriate MAC configuration for EEE.
+ * Interface type selection must be done in mac_config().
+ */
+-void mac_link_up(struct phylink_config *config, unsigned int mode,
+- phy_interface_t interface,
+- struct phy_device *phy);
++void mac_link_up(struct phylink_config *config, struct phy_device *phy,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex, bool tx_pause, bool rx_pause);
+ #endif
+
+ struct phylink *phylink_create(struct phylink_config *, struct fwnode_handle *,
+--- a/net/dsa/port.c
++++ b/net/dsa/port.c
+@@ -529,9 +529,11 @@ void dsa_port_phylink_mac_link_down(stru
+ EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_link_down);
+
+ void dsa_port_phylink_mac_link_up(struct phylink_config *config,
++ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev)
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct dsa_port *dp = container_of(config, struct dsa_port, pl_config);
+ struct dsa_switch *ds = dp->ds;
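For reference, a minimal sketch of a MAC driver callback using the new mac_link_up() prototype carried by the patch above; every mydrv_* identifier and register bit is hypothetical and only illustrates how the resolved speed/duplex/pause arguments would be consumed.

/*
 * Illustrative only: the mydrv_* names and register bits are made up.
 * Only the prototype and the handling of the resolved link parameters
 * follow the phylink API shown in the diff above.
 */
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phylink.h>

#define MYDRV_MAC_CTRL		0x0	/* hypothetical register offset */
#define MYDRV_SPEED_1000	0x01	/* hypothetical control bits */
#define MYDRV_SPEED_100		0x02
#define MYDRV_FULL_DUPLEX	0x04
#define MYDRV_TX_FLOW_CTRL	0x08
#define MYDRV_RX_FLOW_CTRL	0x10

struct mydrv_priv {
	void __iomem *base;
};

static void mydrv_mac_link_up(struct phylink_config *config,
			      struct phy_device *phy,
			      unsigned int mode, phy_interface_t interface,
			      int speed, int duplex,
			      bool tx_pause, bool rx_pause)
{
	struct mydrv_priv *priv = netdev_priv(to_net_dev(config->dev));
	u32 ctrl = 0;

	/* The finalised link settings now arrive as arguments, so the MAC
	 * no longer has to read them back from mac_config() state.
	 */
	if (speed == SPEED_1000)
		ctrl |= MYDRV_SPEED_1000;
	else if (speed == SPEED_100)
		ctrl |= MYDRV_SPEED_100;

	if (duplex == DUPLEX_FULL)
		ctrl |= MYDRV_FULL_DUPLEX;
	if (tx_pause)
		ctrl |= MYDRV_TX_FLOW_CTRL;
	if (rx_pause)
		ctrl |= MYDRV_RX_FLOW_CTRL;

	writel(ctrl, priv->base + MYDRV_MAC_CTRL);
}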
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0601-net-dsa-propagate-resolved-link-config-via-mac_link_.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0601-net-dsa-propagate-resolved-link-config-via-mac_link_.patch
new file mode 100644
index 0000000..ee5a52e
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0601-net-dsa-propagate-resolved-link-config-via-mac_link_.patch
@@ -0,0 +1,143 @@
+From: Russell King <rmk+kernel@armlinux.org.uk>
+Date: Wed, 26 Feb 2020 10:23:46 +0000
+Subject: [PATCH] net: dsa: propagate resolved link config via mac_link_up()
+
+Propagate the resolved link configuration down via DSA's
+phylink_mac_link_up() operation to allow split PCS/MAC to work.
+
+Tested-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1284,7 +1284,9 @@ EXPORT_SYMBOL(b53_phylink_mac_link_down)
+ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev)
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct b53_device *dev = ds->priv;
+
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -337,7 +337,9 @@ void b53_phylink_mac_link_down(struct ds
+ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev);
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause);
+ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
+ int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan);
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -636,7 +636,9 @@ static void bcm_sf2_sw_mac_link_down(str
+ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev)
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_eee *p = &priv->dev->ports[port].eee;
+--- a/drivers/net/dsa/lantiq_gswip.c
++++ b/drivers/net/dsa/lantiq_gswip.c
+@@ -1664,7 +1664,9 @@ static void gswip_phylink_mac_link_down(
+ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev)
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct gswip_priv *priv = ds->priv;
+
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -1440,7 +1440,9 @@ static void mt7530_phylink_mac_link_down
+ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev)
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ struct mt7530_priv *priv = ds->priv;
+
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -652,7 +652,9 @@ static void mv88e6xxx_mac_link_down(stru
+
+ static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+- struct phy_device *phydev)
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ if (mode == MLO_AN_FIXED)
+ mv88e6xxx_mac_link_force(ds, port, LINK_FORCED_UP);
+--- a/drivers/net/dsa/sja1105/sja1105_main.c
++++ b/drivers/net/dsa/sja1105/sja1105_main.c
+@@ -831,7 +831,9 @@ static void sja1105_mac_link_down(struct
+ static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev)
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause)
+ {
+ sja1105_inhibit_tx(ds->priv, BIT(port), false);
+ }
+--- a/include/net/dsa.h
++++ b/include/net/dsa.h
+@@ -406,7 +406,9 @@ struct dsa_switch_ops {
+ void (*phylink_mac_link_up)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev);
++ struct phy_device *phydev,
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause);
+ void (*phylink_fixed_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ /*
+--- a/net/dsa/port.c
++++ b/net/dsa/port.c
+@@ -544,7 +544,8 @@ void dsa_port_phylink_mac_link_up(struct
+ return;
+ }
+
+- ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev);
++ ds->ops->phylink_mac_link_up(ds, dp->index, mode, interface, phydev,
++ speed, duplex, tx_pause, rx_pause);
+ }
+ EXPORT_SYMBOL_GPL(dsa_port_phylink_mac_link_up);
+
+--- a/net/dsa/dsa_priv.h
++++ b/net/dsa/dsa_priv.h
+@@ -192,9 +192,11 @@ void dsa_port_phylink_mac_link_down(stru
+ unsigned int mode,
+ phy_interface_t interface);
+ void dsa_port_phylink_mac_link_up(struct phylink_config *config,
++ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+- struct phy_device *phydev);
++ int speed, int duplex,
++ bool tx_pause, bool rx_pause);
+ extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
+
+ /* slave.c */
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0602-net-dsa-mt7530-use-resolved-link-config-in-mac_link_.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0602-net-dsa-mt7530-use-resolved-link-config-in-mac_link_.patch
new file mode 100644
index 0000000..23fba85
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0602-net-dsa-mt7530-use-resolved-link-config-in-mac_link_.patch
@@ -0,0 +1,145 @@
+From: =?UTF-8?q?Ren=C3=A9=20van=20Dorst?= <opensource@vdorst.com>
+Date: Fri, 27 Mar 2020 15:44:12 +0100
+Subject: [PATCH] net: dsa: mt7530: use resolved link config in mac_link_up()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Convert the mt7530 switch driver to use the finalised link
+parameters in mac_link_up() rather than the parameters in mac_config().
+
+Signed-off-by: René van Dorst <opensource@vdorst.com>
+Tested-by: Sean Wang <sean.wang@mediatek.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -489,17 +489,6 @@ mt7530_mib_reset(struct dsa_switch *ds)
+ mt7530_write(priv, MT7530_MIB_CCR, CCR_MIB_ACTIVATE);
+ }
+
+-static void
+-mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
+-{
+- u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
+-
+- if (enable)
+- mt7530_set(priv, MT7530_PMCR_P(port), mask);
+- else
+- mt7530_clear(priv, MT7530_PMCR_P(port), mask);
+-}
+-
+ static int mt7530_phy_read(struct dsa_switch *ds, int port, int regnum)
+ {
+ struct mt7530_priv *priv = ds->priv;
+@@ -673,7 +662,7 @@ mt7530_port_enable(struct dsa_switch *ds
+ priv->ports[port].enable = true;
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ priv->ports[port].pm);
+- mt7530_port_set_status(priv, port, 0);
++ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+
+ mutex_unlock(&priv->reg_mutex);
+
+@@ -696,7 +685,7 @@ mt7530_port_disable(struct dsa_switch *d
+ priv->ports[port].enable = false;
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+- mt7530_port_set_status(priv, port, 0);
++ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+
+ mutex_unlock(&priv->reg_mutex);
+ }
+@@ -1395,8 +1384,7 @@ static void mt7530_phylink_mac_config(st
+
+ mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
+ mcr_new = mcr_cur;
+- mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
+- PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
++ mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
+ mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
+ PMCR_BACKPR_EN | PMCR_FORCE_MODE;
+
+@@ -1404,26 +1392,6 @@ static void mt7530_phylink_mac_config(st
+ if (port == 5 && dsa_is_user_port(ds, 5))
+ mcr_new |= PMCR_EXT_PHY;
+
+- switch (state->speed) {
+- case SPEED_1000:
+- mcr_new |= PMCR_FORCE_SPEED_1000;
+- if (priv->eee_enable & BIT(port))
+- mcr_new |= PMCR_FORCE_EEE1G;
+- break;
+- case SPEED_100:
+- mcr_new |= PMCR_FORCE_SPEED_100;
+- if (priv->eee_enable & BIT(port))
+- mcr_new |= PMCR_FORCE_EEE100;
+- break;
+- }
+- if (state->duplex == DUPLEX_FULL) {
+- mcr_new |= PMCR_FORCE_FDX;
+- if (state->pause & MLO_PAUSE_TX)
+- mcr_new |= PMCR_TX_FC_EN;
+- if (state->pause & MLO_PAUSE_RX)
+- mcr_new |= PMCR_RX_FC_EN;
+- }
+-
+ if (mcr_new != mcr_cur)
+ mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+ }
+@@ -1434,7 +1402,7 @@ static void mt7530_phylink_mac_link_down
+ {
+ struct mt7530_priv *priv = ds->priv;
+
+- mt7530_port_set_status(priv, port, 0);
++ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+ }
+
+ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
+@@ -1445,8 +1413,31 @@ static void mt7530_phylink_mac_link_up(s
+ bool tx_pause, bool rx_pause)
+ {
+ struct mt7530_priv *priv = ds->priv;
++ u32 mcr;
++
++ mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
++
++ switch (speed) {
++ case SPEED_1000:
++ mcr |= PMCR_FORCE_SPEED_1000;
++ if (priv->eee_enable & BIT(port))
++ mcr |= PMCR_FORCE_EEE1G;
++ break;
++ case SPEED_100:
++ mcr |= PMCR_FORCE_SPEED_100;
++ if (priv->eee_enable & BIT(port))
++ mcr |= PMCR_FORCE_EEE100;
++ break;
++ }
++ if (duplex == DUPLEX_FULL) {
++ mcr |= PMCR_FORCE_FDX;
++ if (tx_pause)
++ mcr |= PMCR_TX_FC_EN;
++ if (rx_pause)
++ mcr |= PMCR_RX_FC_EN;
++ }
+
+- mt7530_port_set_status(priv, port, 1);
++ mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+ }
+
+ static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -222,6 +222,10 @@ enum mt7530_vlan_port_attr {
+ #define PMCR_FORCE_LNK BIT(0)
+ #define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
+ PMCR_FORCE_SPEED_1000)
++#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
++ PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
++ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
++ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+
+ #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
+ #define PMSR_EEE1G BIT(7)
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0603-net-dsa-mt7530-Extend-device-data-ready-for-adding-a.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0603-net-dsa-mt7530-Extend-device-data-ready-for-adding-a.patch
new file mode 100644
index 0000000..718ed8e
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0603-net-dsa-mt7530-Extend-device-data-ready-for-adding-a.patch
@@ -0,0 +1,458 @@
+From: Landen Chao <landen.chao@mediatek.com>
+Date: Fri, 4 Sep 2020 22:21:57 +0800
+Subject: [PATCH] net: dsa: mt7530: Extend device data ready for adding a
+ new hardware
+
+Add a structure holding required operations for each device such as device
+initialization, PHY port read or write, a checker whether PHY interface is
+supported on a certain port, MAC port setup for either bus pad or a
+specific PHY interface.
+
+This patch prepares for adding the new MT7531 hardware while keeping the
+same setup logic for the existing hardware.
+
+Signed-off-by: Landen Chao <landen.chao@mediatek.com>
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+---
+
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -373,8 +373,9 @@ mt7530_fdb_write(struct mt7530_priv *pri
+ mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
+ }
+
++/* Setup TX circuit including relevant PAD and driving */
+ static int
+-mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
++mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
+ {
+ struct mt7530_priv *priv = ds->priv;
+ u32 ncpo1, ssc_delta, trgint, i, xtal;
+@@ -388,7 +389,7 @@ mt7530_pad_clk_setup(struct dsa_switch *
+ return -EINVAL;
+ }
+
+- switch (mode) {
++ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ trgint = 0;
+ /* PLL frequency: 125MHz */
+@@ -410,7 +411,8 @@ mt7530_pad_clk_setup(struct dsa_switch *
+ }
+ break;
+ default:
+- dev_err(priv->dev, "xMII mode %d not supported\n", mode);
++ dev_err(priv->dev, "xMII interface %d not supported\n",
++ interface);
+ return -EINVAL;
+ }
+
+@@ -1332,12 +1334,11 @@ mt7530_setup(struct dsa_switch *ds)
+ return 0;
+ }
+
+-static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
+- unsigned int mode,
+- const struct phylink_link_state *state)
++static bool
++mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
++ const struct phylink_link_state *state)
+ {
+ struct mt7530_priv *priv = ds->priv;
+- u32 mcr_cur, mcr_new;
+
+ switch (port) {
+ case 0: /* Internal phy */
+@@ -1346,33 +1347,114 @@ static void mt7530_phylink_mac_config(st
+ case 3:
+ case 4:
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+- return;
++ goto unsupported;
+ break;
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+- if (priv->p5_interface == state->interface)
+- break;
+ if (!phy_interface_mode_is_rgmii(state->interface) &&
+ state->interface != PHY_INTERFACE_MODE_MII &&
+ state->interface != PHY_INTERFACE_MODE_GMII)
+- return;
++ goto unsupported;
++ break;
++ case 6: /* 1st cpu port */
++ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
++ state->interface != PHY_INTERFACE_MODE_TRGMII)
++ goto unsupported;
++ break;
++ default:
++ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
++ port);
++ goto unsupported;
++ }
++
++ return true;
++
++unsupported:
++ return false;
++}
++
++static bool
++mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
++ const struct phylink_link_state *state)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->phy_mode_supported(ds, port, state);
++}
++
++static int
++mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->pad_setup(ds, state->interface);
++}
++
++static int
++mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
++ phy_interface_t interface)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ /* Only need to setup port5. */
++ if (port != 5)
++ return 0;
++
++ mt7530_setup_port5(priv->ds, interface);
++
++ return 0;
++}
++
++static int
++mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
++ const struct phylink_link_state *state)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->mac_port_config(ds, port, mode, state->interface);
++}
++
++static void
++mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
++ const struct phylink_link_state *state)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 mcr_cur, mcr_new;
++
++ if (!mt753x_phy_mode_supported(ds, port, state))
++ goto unsupported;
++
++ switch (port) {
++ case 0: /* Internal phy */
++ case 1:
++ case 2:
++ case 3:
++ case 4:
++ if (state->interface != PHY_INTERFACE_MODE_GMII)
++ goto unsupported;
++ break;
++ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
++ if (priv->p5_interface == state->interface)
++ break;
++
++ if (mt753x_mac_config(ds, port, mode, state) < 0)
++ goto unsupported;
+
+- mt7530_setup_port5(ds, state->interface);
+ break;
+ case 6: /* 1st cpu port */
+ if (priv->p6_interface == state->interface)
+ break;
+
+- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+- state->interface != PHY_INTERFACE_MODE_TRGMII)
+- return;
++ mt753x_pad_setup(ds, state);
+
+- /* Setup TX circuit incluing relevant PAD and driving */
+- mt7530_pad_clk_setup(ds, state->interface);
++ if (mt753x_mac_config(ds, port, mode, state) < 0)
++ goto unsupported;
+
+ priv->p6_interface = state->interface;
+ break;
+ default:
+- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
++unsupported:
++ dev_err(ds->dev, "%s: unsupported %s port: %i\n",
++ __func__, phy_modes(state->interface), port);
+ return;
+ }
+
+@@ -1440,61 +1522,44 @@ static void mt7530_phylink_mac_link_up(s
+ mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+ }
+
+-static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
+- unsigned long *supported,
+- struct phylink_link_state *state)
++static void
++mt7530_mac_port_validate(struct dsa_switch *ds, int port,
++ unsigned long *supported)
+ {
++ if (port == 5)
++ phylink_set(supported, 1000baseX_Full);
++}
++
++static void
++mt753x_phylink_validate(struct dsa_switch *ds, int port,
++ unsigned long *supported,
++ struct phylink_link_state *state)
++{
++ struct mt7530_priv *priv = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+- switch (port) {
+- case 0: /* Internal phy */
+- case 1:
+- case 2:
+- case 3:
+- case 4:
+- if (state->interface != PHY_INTERFACE_MODE_NA &&
+- state->interface != PHY_INTERFACE_MODE_GMII)
+- goto unsupported;
+- break;
+- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+- if (state->interface != PHY_INTERFACE_MODE_NA &&
+- !phy_interface_mode_is_rgmii(state->interface) &&
+- state->interface != PHY_INTERFACE_MODE_MII &&
+- state->interface != PHY_INTERFACE_MODE_GMII)
+- goto unsupported;
+- break;
+- case 6: /* 1st cpu port */
+- if (state->interface != PHY_INTERFACE_MODE_NA &&
+- state->interface != PHY_INTERFACE_MODE_RGMII &&
+- state->interface != PHY_INTERFACE_MODE_TRGMII)
+- goto unsupported;
+- break;
+- default:
+- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+-unsupported:
++ if (state->interface != PHY_INTERFACE_MODE_NA &&
++ !mt753x_phy_mode_supported(ds, port, state)) {
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+- phylink_set(mask, Autoneg);
+
+- if (state->interface == PHY_INTERFACE_MODE_TRGMII) {
+- phylink_set(mask, 1000baseT_Full);
+- } else {
++ if (state->interface != PHY_INTERFACE_MODE_TRGMII) {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+-
+- if (state->interface != PHY_INTERFACE_MODE_MII) {
+- /* This switch only supports 1G full-duplex. */
+- phylink_set(mask, 1000baseT_Full);
+- if (port == 5)
+- phylink_set(mask, 1000baseX_Full);
+- }
++ phylink_set(mask, Autoneg);
+ }
+
++ /* This switch only supports 1G full-duplex. */
++ if (state->interface != PHY_INTERFACE_MODE_MII)
++ phylink_set(mask, 1000baseT_Full);
++
++ priv->info->mac_port_validate(ds, port, mask);
++
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+@@ -1590,12 +1655,45 @@ static int mt7530_set_mac_eee(struct dsa
+ return 0;
+ }
+
++static int
++mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
++ struct phylink_link_state *state)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->mac_port_get_state(ds, port, state);
++}
++
++static int
++mt753x_setup(struct dsa_switch *ds)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->sw_setup(ds);
++}
++
++static int
++mt753x_phy_read(struct dsa_switch *ds, int port, int regnum)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->phy_read(ds, port, regnum);
++}
++
++static int
++mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->phy_write(ds, port, regnum, val);
++}
++
+ static const struct dsa_switch_ops mt7530_switch_ops = {
+ .get_tag_protocol = mtk_get_tag_protocol,
+- .setup = mt7530_setup,
++ .setup = mt753x_setup,
+ .get_strings = mt7530_get_strings,
+- .phy_read = mt7530_phy_read,
+- .phy_write = mt7530_phy_write,
++ .phy_read = mt753x_phy_read,
++ .phy_write = mt753x_phy_write,
+ .get_ethtool_stats = mt7530_get_ethtool_stats,
+ .get_sset_count = mt7530_get_sset_count,
+ .port_enable = mt7530_port_enable,
+@@ -1612,18 +1710,43 @@ static const struct dsa_switch_ops mt753
+ .port_vlan_del = mt7530_port_vlan_del,
+ .port_mirror_add = mt7530_port_mirror_add,
+ .port_mirror_del = mt7530_port_mirror_del,
+- .phylink_validate = mt7530_phylink_validate,
+- .phylink_mac_link_state = mt7530_phylink_mac_link_state,
+- .phylink_mac_config = mt7530_phylink_mac_config,
++ .phylink_validate = mt753x_phylink_validate,
++ .phylink_mac_link_state = mt753x_phylink_mac_link_state,
++ .phylink_mac_config = mt753x_phylink_mac_config,
+ .phylink_mac_link_down = mt7530_phylink_mac_link_down,
+ .phylink_mac_link_up = mt7530_phylink_mac_link_up,
+ .get_mac_eee = mt7530_get_mac_eee,
+ .set_mac_eee = mt7530_set_mac_eee,
+ };
+
++static const struct mt753x_info mt753x_table[] = {
++ [ID_MT7621] = {
++ .id = ID_MT7621,
++ .sw_setup = mt7530_setup,
++ .phy_read = mt7530_phy_read,
++ .phy_write = mt7530_phy_write,
++ .pad_setup = mt7530_pad_clk_setup,
++ .phy_mode_supported = mt7530_phy_mode_supported,
++ .mac_port_validate = mt7530_mac_port_validate,
++ .mac_port_get_state = mt7530_phylink_mac_link_state,
++ .mac_port_config = mt7530_mac_config,
++ },
++ [ID_MT7530] = {
++ .id = ID_MT7530,
++ .sw_setup = mt7530_setup,
++ .phy_read = mt7530_phy_read,
++ .phy_write = mt7530_phy_write,
++ .pad_setup = mt7530_pad_clk_setup,
++ .phy_mode_supported = mt7530_phy_mode_supported,
++ .mac_port_validate = mt7530_mac_port_validate,
++ .mac_port_get_state = mt7530_phylink_mac_link_state,
++ .mac_port_config = mt7530_mac_config,
++ },
++};
++
+ static const struct of_device_id mt7530_of_match[] = {
+- { .compatible = "mediatek,mt7621", .data = (void *)ID_MT7621, },
+- { .compatible = "mediatek,mt7530", .data = (void *)ID_MT7530, },
++ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
++ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
+ { /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, mt7530_of_match);
+@@ -1661,8 +1784,21 @@ mt7530_probe(struct mdio_device *mdiodev
+ /* Get the hardware identifier from the devicetree node.
+ * We will need it for some of the clock and regulator setup.
+ */
+- priv->id = (unsigned int)(unsigned long)
+- of_device_get_match_data(&mdiodev->dev);
++ priv->info = of_device_get_match_data(&mdiodev->dev);
++ if (!priv->info)
++ return -EINVAL;
++
++ /* Sanity check if these required device operations are filled
++ * properly.
++ */
++ if (!priv->info->sw_setup || !priv->info->pad_setup ||
++ !priv->info->phy_read || !priv->info->phy_write ||
++ !priv->info->phy_mode_supported ||
++ !priv->info->mac_port_validate ||
++ !priv->info->mac_port_get_state || !priv->info->mac_port_config)
++ return -EINVAL;
++
++ priv->id = priv->info->id;
+
+ if (priv->id == ID_MT7530) {
+ priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -11,7 +11,7 @@
+ #define MT7530_NUM_FDB_RECORDS 2048
+ #define MT7530_ALL_MEMBERS 0xff
+
+-enum {
++enum mt753x_id {
+ ID_MT7530 = 0,
+ ID_MT7621 = 1,
+ };
+@@ -451,6 +451,40 @@ static const char *p5_intf_modes(unsigne
+ }
+ }
+
++/* struct mt753x_info - This is the main data structure for holding the specific
++ * part for each supported device
++ * @sw_setup: Holding the handler to a device initialization
++ * @phy_read: Holding the way reading PHY port
++ * @phy_write: Holding the way writing PHY port
++ * @pad_setup: Holding the way setting up the bus pad for a certain
++ * MAC port
++ * @phy_mode_supported: Check if the PHY type is being supported on a certain
++ * port
++ * @mac_port_validate: Holding the way to set addition validate type for a
++ * certain MAC port
++ * @mac_port_get_state: Holding the way getting the MAC/PCS state for a certain
++ * MAC port
++ * @mac_port_config: Holding the way setting up the PHY attribute to a
++ * certain MAC port
++ */
++struct mt753x_info {
++ enum mt753x_id id;
++
++ int (*sw_setup)(struct dsa_switch *ds);
++ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
++ int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val);
++ int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
++ bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
++ const struct phylink_link_state *state);
++ void (*mac_port_validate)(struct dsa_switch *ds, int port,
++ unsigned long *supported);
++ int (*mac_port_get_state)(struct dsa_switch *ds, int port,
++ struct phylink_link_state *state);
++ int (*mac_port_config)(struct dsa_switch *ds, int port,
++ unsigned int mode,
++ phy_interface_t interface);
++};
++
+ /* struct mt7530_priv - This is the main data structure for holding the state
+ * of the driver
+ * @dev: The device pointer
+@@ -476,6 +510,7 @@ struct mt7530_priv {
+ struct regulator *core_pwr;
+ struct regulator *io_pwr;
+ struct gpio_desc *reset;
++ const struct mt753x_info *info;
+ unsigned int id;
+ bool mcm;
+ phy_interface_t p6_interface;
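For reference, a condensed sketch of the per-device operations-table pattern the patch above introduces (the real table is struct mt753x_info in mt7530.h); the swx_* names are hypothetical and illustrative only.

/*
 * Hypothetical sketch of the ops-table dispatch used by the patch above:
 * the OF match data points at a table of handlers, thin wrappers dispatch
 * through it, and probe validates that the table is fully populated.
 */
#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <net/dsa.h>

struct swx_info {
	int (*sw_setup)(struct dsa_switch *ds);
	int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
	int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
};

struct swx_priv {
	const struct swx_info *info;
};

/* Common entry point: dispatch to the device-specific handler. */
static int swx_setup(struct dsa_switch *ds)
{
	struct swx_priv *priv = ds->priv;

	return priv->info->sw_setup(ds);
}

/* At probe time, take the table from the devicetree match data and
 * sanity-check that every required handler has been filled in.
 */
static int swx_probe_info(struct device *dev, struct swx_priv *priv)
{
	priv->info = of_device_get_match_data(dev);
	if (!priv->info || !priv->info->sw_setup || !priv->info->phy_read ||
	    !priv->info->pad_setup)
		return -EINVAL;

	return 0;
}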
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0604-net-dsa-mt7530-Add-the-support-of-MT7531-switch.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0604-net-dsa-mt7530-Add-the-support-of-MT7531-switch.patch
new file mode 100644
index 0000000..8ede862
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0604-net-dsa-mt7530-Add-the-support-of-MT7531-switch.patch
@@ -0,0 +1,1510 @@
+From: Landen Chao <landen.chao@mediatek.com>
+Date: Fri, 4 Sep 2020 22:21:59 +0800
+Subject: [PATCH] net: dsa: mt7530: Add the support of MT7531 switch
+
+Add new support for MT7531:
+
+MT7531 is the next generation of MT7530. It is also a 7-port switch with
+5 embedded gigabit PHYs, 2 CPU ports, and the same MAC logic as MT7530.
+CPU port 6 only supports the SGMII interface. CPU port 5 supports either
+RGMII or SGMII depending on the HW SKU, but cannot be muxed to the PHY of
+port 0/4 like on MT7530. Because of the SGMII support, the PLL and pad
+settings differ from MT7530. This patch adds the different initial
+settings and the SGMII phylink handlers for MT7531.
+
+MT7531 SGMII interface can be configured in following mode:
+- 'SGMII AN mode' with in-band negotiation capability
+ which is compatible with PHY_INTERFACE_MODE_SGMII.
+- 'SGMII force mode' without in-band negotiation
+ which is compatible with 10B/8B encoding of
+ PHY_INTERFACE_MODE_1000BASEX with fixed full-duplex and fixed pause.
+- 2.5 times faster clocked 'SGMII force mode' without in-band negotiation
+ which is compatible with 10B/8B encoding of
+ PHY_INTERFACE_MODE_2500BASEX with fixed full-duplex and fixed pause.
+
+Signed-off-by: Landen Chao <landen.chao@mediatek.com>
+Signed-off-by: Sean Wang <sean.wang@mediatek.com>
+---
+
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -235,6 +235,12 @@ mt7530_write(struct mt7530_priv *priv, u
+ }
+
+ static u32
++_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
++{
++ return mt7530_mii_read(p->priv, p->reg);
++}
++
++static u32
+ _mt7530_read(struct mt7530_dummy_poll *p)
+ {
+ struct mii_bus *bus = p->priv->bus;
+@@ -482,6 +488,108 @@ mt7530_pad_clk_setup(struct dsa_switch *
+ return 0;
+ }
+
++static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
++{
++ u32 val;
++
++ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
++
++ return (val & PAD_DUAL_SGMII_EN) != 0;
++}
++
++static int
++mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 val;
++ u32 top_sig;
++ u32 hwstrap;
++ u32 xtal;
++
++ if (mt7531_dual_sgmii_supported(priv))
++ return 0;
++
++ val = mt7530_read(priv, MT7531_CREV);
++ top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
++ hwstrap = mt7530_read(priv, MT7531_HWTRAP);
++ if ((val & CHIP_REV_M) > 0)
++ xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
++ HWTRAP_XTAL_FSEL_25MHZ;
++ else
++ xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
++
++ /* Step 1 : Disable MT7531 COREPLL */
++ val = mt7530_read(priv, MT7531_PLLGP_EN);
++ val &= ~EN_COREPLL;
++ mt7530_write(priv, MT7531_PLLGP_EN, val);
++
++ /* Step 2: switch to XTAL output */
++ val = mt7530_read(priv, MT7531_PLLGP_EN);
++ val |= SW_CLKSW;
++ mt7530_write(priv, MT7531_PLLGP_EN, val);
++
++ val = mt7530_read(priv, MT7531_PLLGP_CR0);
++ val &= ~RG_COREPLL_EN;
++ mt7530_write(priv, MT7531_PLLGP_CR0, val);
++
++ /* Step 3: disable PLLGP and enable program PLLGP */
++ val = mt7530_read(priv, MT7531_PLLGP_EN);
++ val |= SW_PLLGP;
++ mt7530_write(priv, MT7531_PLLGP_EN, val);
++
++ /* Step 4: program COREPLL output frequency to 500MHz */
++ val = mt7530_read(priv, MT7531_PLLGP_CR0);
++ val &= ~RG_COREPLL_POSDIV_M;
++ val |= 2 << RG_COREPLL_POSDIV_S;
++ mt7530_write(priv, MT7531_PLLGP_CR0, val);
++ usleep_range(25, 35);
++
++ switch (xtal) {
++ case HWTRAP_XTAL_FSEL_25MHZ:
++ val = mt7530_read(priv, MT7531_PLLGP_CR0);
++ val &= ~RG_COREPLL_SDM_PCW_M;
++ val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
++ mt7530_write(priv, MT7531_PLLGP_CR0, val);
++ break;
++ case HWTRAP_XTAL_FSEL_40MHZ:
++ val = mt7530_read(priv, MT7531_PLLGP_CR0);
++ val &= ~RG_COREPLL_SDM_PCW_M;
++ val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
++ mt7530_write(priv, MT7531_PLLGP_CR0, val);
++ break;
++ };
++
++ /* Set feedback divide ratio update signal to high */
++ val = mt7530_read(priv, MT7531_PLLGP_CR0);
++ val |= RG_COREPLL_SDM_PCW_CHG;
++ mt7530_write(priv, MT7531_PLLGP_CR0, val);
++ /* Wait for at least 16 XTAL clocks */
++ usleep_range(10, 20);
++
++ /* Step 5: set feedback divide ratio update signal to low */
++ val = mt7530_read(priv, MT7531_PLLGP_CR0);
++ val &= ~RG_COREPLL_SDM_PCW_CHG;
++ mt7530_write(priv, MT7531_PLLGP_CR0, val);
++
++ /* Enable 325M clock for SGMII */
++ mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
++
++ /* Enable 250SSC clock for RGMII */
++ mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
++
++ /* Step 6: Enable MT7531 PLL */
++ val = mt7530_read(priv, MT7531_PLLGP_CR0);
++ val |= RG_COREPLL_EN;
++ mt7530_write(priv, MT7531_PLLGP_CR0, val);
++
++ val = mt7530_read(priv, MT7531_PLLGP_EN);
++ val |= EN_COREPLL;
++ mt7530_write(priv, MT7531_PLLGP_EN, val);
++ usleep_range(25, 35);
++
++ return 0;
++}
++
+ static void
+ mt7530_mib_reset(struct dsa_switch *ds)
+ {
+@@ -506,6 +614,217 @@ static int mt7530_phy_write(struct dsa_s
+ return mdiobus_write_nested(priv->bus, port, regnum, val);
+ }
+
++static int
++mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
++ int regnum)
++{
++ struct mii_bus *bus = priv->bus;
++ struct mt7530_dummy_poll p;
++ u32 reg, val;
++ int ret;
++
++ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
++
++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
++ MT7531_MDIO_DEV_ADDR(devad) | regnum;
++ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
++ MT7531_MDIO_DEV_ADDR(devad);
++ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ ret = val & MT7531_MDIO_RW_DATA_MASK;
++out:
++ mutex_unlock(&bus->mdio_lock);
++
++ return ret;
++}
++
++static int
++mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
++ int regnum, u32 data)
++{
++ struct mii_bus *bus = priv->bus;
++ struct mt7530_dummy_poll p;
++ u32 val, reg;
++ int ret;
++
++ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
++
++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
++ MT7531_MDIO_DEV_ADDR(devad) | regnum;
++ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
++ MT7531_MDIO_DEV_ADDR(devad) | data;
++ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++out:
++ mutex_unlock(&bus->mdio_lock);
++
++ return ret;
++}
++
++static int
++mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
++{
++ struct mii_bus *bus = priv->bus;
++ struct mt7530_dummy_poll p;
++ int ret;
++ u32 val;
++
++ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
++
++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
++ MT7531_MDIO_REG_ADDR(regnum);
++
++ mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
++ !(val & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ ret = val & MT7531_MDIO_RW_DATA_MASK;
++out:
++ mutex_unlock(&bus->mdio_lock);
++
++ return ret;
++}
++
++static int
++mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
++ u16 data)
++{
++ struct mii_bus *bus = priv->bus;
++ struct mt7530_dummy_poll p;
++ int ret;
++ u32 reg;
++
++ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
++
++ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
++ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++ reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
++ MT7531_MDIO_REG_ADDR(regnum) | data;
++
++ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
++
++ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
++ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
++ if (ret < 0) {
++ dev_err(priv->dev, "poll timeout\n");
++ goto out;
++ }
++
++out:
++ mutex_unlock(&bus->mdio_lock);
++
++ return ret;
++}
++
++static int
++mt7531_ind_phy_read(struct dsa_switch *ds, int port, int regnum)
++{
++ struct mt7530_priv *priv = ds->priv;
++ int devad;
++ int ret;
++
++ if (regnum & MII_ADDR_C45) {
++ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
++ ret = mt7531_ind_c45_phy_read(priv, port, devad,
++ regnum & MII_REGADDR_C45_MASK);
++ } else {
++ ret = mt7531_ind_c22_phy_read(priv, port, regnum);
++ }
++
++ return ret;
++}
++
++static int
++mt7531_ind_phy_write(struct dsa_switch *ds, int port, int regnum,
++ u16 data)
++{
++ struct mt7530_priv *priv = ds->priv;
++ int devad;
++ int ret;
++
++ if (regnum & MII_ADDR_C45) {
++ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
++ ret = mt7531_ind_c45_phy_write(priv, port, devad,
++ regnum & MII_REGADDR_C45_MASK,
++ data);
++ } else {
++ ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
++ }
++
++ return ret;
++}
++
+ static void
+ mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
+ uint8_t *data)
+@@ -622,9 +941,14 @@ unlock_exit:
+ }
+
+ static int
+-mt7530_cpu_port_enable(struct mt7530_priv *priv,
+- int port)
++mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
+ {
++ struct mt7530_priv *priv = ds->priv;
++
++ /* Set up the maximum capability of the CPU port first */
++ if (priv->info->cpu_port_config)
++ priv->info->cpu_port_config(ds, port);
++
+ /* Enable Mediatek header mode on the cpu port */
+ mt7530_write(priv, MT7530_PVC_P(port),
+ PORT_SPEC_TAG);
+@@ -637,7 +961,7 @@ mt7530_cpu_port_enable(struct mt7530_pri
+ mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
+
+ /* CPU port gets connected to all user ports of
+- * the switch
++ * the switch.
+ */
+ mt7530_write(priv, MT7530_PCR_P(port),
+ PCR_MATRIX(dsa_user_ports(priv->ds)));
+@@ -1120,27 +1444,42 @@ mt7530_port_vlan_del(struct dsa_switch *
+ return 0;
+ }
+
+-static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
++static int mt753x_mirror_port_get(unsigned int id, u32 val)
++{
++ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
++ MIRROR_PORT(val);
++}
++
++static int mt753x_mirror_port_set(unsigned int id, u32 val)
++{
++ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
++ MIRROR_PORT(val);
++}
++
++static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress)
+ {
+ struct mt7530_priv *priv = ds->priv;
++ int monitor_port;
+ u32 val;
+
+ /* Check for existent entry */
+ if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
+ return -EEXIST;
+
+- val = mt7530_read(priv, MT7530_MFC);
++ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
+
+ /* MT7530 only supports one monitor port */
+- if (val & MIRROR_EN && MIRROR_PORT(val) != mirror->to_local_port)
++ monitor_port = mt753x_mirror_port_get(priv->id, val);
++ if (val & MT753X_MIRROR_EN(priv->id) &&
++ monitor_port != mirror->to_local_port)
+ return -EEXIST;
+
+- val |= MIRROR_EN;
+- val &= ~MIRROR_MASK;
+- val |= mirror->to_local_port;
+- mt7530_write(priv, MT7530_MFC, val);
++ val |= MT753X_MIRROR_EN(priv->id);
++ val &= ~MT753X_MIRROR_MASK(priv->id);
++ val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
++ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
+
+ val = mt7530_read(priv, MT7530_PCR_P(port));
+ if (ingress) {
+@@ -1155,7 +1494,7 @@ static int mt7530_port_mirror_add(struct
+ return 0;
+ }
+
+-static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
++static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+ {
+ struct mt7530_priv *priv = ds->priv;
+@@ -1172,9 +1511,9 @@ static void mt7530_port_mirror_del(struc
+ mt7530_write(priv, MT7530_PCR_P(port), val);
+
+ if (!priv->mirror_rx && !priv->mirror_tx) {
+- val = mt7530_read(priv, MT7530_MFC);
+- val &= ~MIRROR_EN;
+- mt7530_write(priv, MT7530_MFC, val);
++ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
++ val &= ~MT753X_MIRROR_EN(priv->id);
++ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
+ }
+ }
+
+@@ -1280,7 +1619,7 @@ mt7530_setup(struct dsa_switch *ds)
+ PCR_MATRIX_CLR);
+
+ if (dsa_is_cpu_port(ds, i))
+- mt7530_cpu_port_enable(priv, i);
++ mt753x_cpu_port_enable(ds, i);
+ else
+ mt7530_port_disable(ds, i);
+
+@@ -1334,6 +1673,118 @@ mt7530_setup(struct dsa_switch *ds)
+ return 0;
+ }
+
++static int
++mt7531_setup(struct dsa_switch *ds)
++{
++ struct mt7530_priv *priv = ds->priv;
++ struct mt7530_dummy_poll p;
++ u32 val, id;
++ int ret, i;
++
++ /* Reset the whole chip through the GPIO pin or the memory-mapped
++ * registers, depending on the type of hardware.
++ */
++ if (priv->mcm) {
++ reset_control_assert(priv->rstc);
++ usleep_range(1000, 1100);
++ reset_control_deassert(priv->rstc);
++ } else {
++ gpiod_set_value_cansleep(priv->reset, 0);
++ usleep_range(1000, 1100);
++ gpiod_set_value_cansleep(priv->reset, 1);
++ }
++
++ /* Wait for the switch to become stable */
++ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
++ ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
++ 20, 1000000);
++ if (ret < 0) {
++ dev_err(priv->dev, "reset timeout\n");
++ return ret;
++ }
++
++ id = mt7530_read(priv, MT7531_CREV);
++ id >>= CHIP_NAME_SHIFT;
++
++ if (id != MT7531_ID) {
++ dev_err(priv->dev, "chip %x can't be supported\n", id);
++ return -ENODEV;
++ }
++
++ /* Reset the switch through internal reset */
++ mt7530_write(priv, MT7530_SYS_CTRL,
++ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
++ SYS_CTRL_REG_RST);
++
++ if (mt7531_dual_sgmii_supported(priv)) {
++ priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
++
++ /* Let ds->slave_mii_bus be able to access external phy. */
++ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
++ MT7531_EXT_P_MDC_11);
++ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
++ MT7531_EXT_P_MDIO_12);
++ } else {
++ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
++ }
++ dev_dbg(ds->dev, "P5 support %s interface\n",
++ p5_intf_modes(priv->p5_intf_sel));
++
++ mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
++ MT7531_GPIO0_INTERRUPT);
++
++ /* Let phylink decide the interface later. */
++ priv->p5_interface = PHY_INTERFACE_MODE_NA;
++ priv->p6_interface = PHY_INTERFACE_MODE_NA;
++
++ /* Enable the PHY core PLL. Since no phy_device has been created
++ * yet for phy_[read,write]_mmd_indirect to work with, use our own
++ * mt7531_ind_c45_phy_[read,write] helpers directly to complete
++ * this step.
++ */
++ val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
++ MDIO_MMD_VEND2, CORE_PLL_GROUP4);
++ val |= MT7531_PHY_PLL_BYPASS_MODE;
++ val &= ~MT7531_PHY_PLL_OFF;
++ mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
++ CORE_PLL_GROUP4, val);
++
++ /* BPDU to CPU port */
++ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
++ BIT(MT7530_CPU_PORT));
++ mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
++ MT753X_BPDU_CPU_ONLY);
++
++ /* Enable and reset MIB counters */
++ mt7530_mib_reset(ds);
++
++ for (i = 0; i < MT7530_NUM_PORTS; i++) {
++ /* Disable forwarding by default on all ports */
++ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
++ PCR_MATRIX_CLR);
++
++ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
++
++ if (dsa_is_cpu_port(ds, i))
++ mt753x_cpu_port_enable(ds, i);
++ else
++ mt7530_port_disable(ds, i);
++
++ /* Enable consistent egress tag */
++ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
++ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
++ }
++
++ ds->configure_vlan_while_not_filtering = true;
++
++ /* Flush the FDB table */
++ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
++ if (ret < 0)
++ return ret;
++
++ return 0;
++}
++
+ static bool
+ mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+@@ -1372,6 +1823,47 @@ unsupported:
+ return false;
+ }
+
++static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
++{
++ return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
++}
++
++static bool
++mt7531_phy_supported(struct dsa_switch *ds, int port,
++ const struct phylink_link_state *state)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ switch (port) {
++ case 0: /* Internal phy */
++ case 1:
++ case 2:
++ case 3:
++ case 4:
++ if (state->interface != PHY_INTERFACE_MODE_GMII)
++ goto unsupported;
++ break;
++ case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
++ if (mt7531_is_rgmii_port(priv, port))
++ return phy_interface_mode_is_rgmii(state->interface);
++ fallthrough;
++ case 6: /* 1st cpu port supports sgmii/8023z only */
++ if (state->interface != PHY_INTERFACE_MODE_SGMII &&
++ !phy_interface_mode_is_8023z(state->interface))
++ goto unsupported;
++ break;
++ default:
++ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
++ port);
++ goto unsupported;
++ }
++
++ return true;
++
++unsupported:
++ return false;
++}
++
+ static bool
+ mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+@@ -1404,6 +1896,227 @@ mt7530_mac_config(struct dsa_switch *ds,
+ return 0;
+ }
+
++static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
++ phy_interface_t interface,
++ struct phy_device *phydev)
++{
++ u32 val;
++
++ if (!mt7531_is_rgmii_port(priv, port)) {
++ dev_err(priv->dev, "RGMII mode is not available for port %d\n",
++ port);
++ return -EINVAL;
++ }
++
++ val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
++ val |= GP_CLK_EN;
++ val &= ~GP_MODE_MASK;
++ val |= GP_MODE(MT7531_GP_MODE_RGMII);
++ val &= ~CLK_SKEW_IN_MASK;
++ val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
++ val &= ~CLK_SKEW_OUT_MASK;
++ val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
++ val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
++
++ /* Do not adjust the RGMII delay when a vendor PHY driver is present. */
++ if (!phydev || phy_driver_is_genphy(phydev)) {
++ val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
++ switch (interface) {
++ case PHY_INTERFACE_MODE_RGMII:
++ val |= TXCLK_NO_REVERSE;
++ val |= RXCLK_NO_DELAY;
++ break;
++ case PHY_INTERFACE_MODE_RGMII_RXID:
++ val |= TXCLK_NO_REVERSE;
++ break;
++ case PHY_INTERFACE_MODE_RGMII_TXID:
++ val |= RXCLK_NO_DELAY;
++ break;
++ case PHY_INTERFACE_MODE_RGMII_ID:
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
++
++ return 0;
++}
++
++static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
++ unsigned long *supported)
++{
++ /* Port5 supports either RGMII or SGMII.
++ * Port6 supports SGMII only.
++ */
++ switch (port) {
++ case 5:
++ if (mt7531_is_rgmii_port(priv, port))
++ break;
++ fallthrough;
++ case 6:
++ phylink_set(supported, 1000baseX_Full);
++ phylink_set(supported, 2500baseX_Full);
++ phylink_set(supported, 2500baseT_Full);
++ }
++}
++
++static void
++mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex)
++{
++ struct mt7530_priv *priv = ds->priv;
++ unsigned int val;
++
++ /* For adjusting speed and duplex of SGMII force mode. */
++ if (interface != PHY_INTERFACE_MODE_SGMII ||
++ phylink_autoneg_inband(mode))
++ return;
++
++ /* SGMII force mode setting */
++ val = mt7530_read(priv, MT7531_SGMII_MODE(port));
++ val &= ~MT7531_SGMII_IF_MODE_MASK;
++
++ switch (speed) {
++ case SPEED_10:
++ val |= MT7531_SGMII_FORCE_SPEED_10;
++ break;
++ case SPEED_100:
++ val |= MT7531_SGMII_FORCE_SPEED_100;
++ break;
++ case SPEED_1000:
++ val |= MT7531_SGMII_FORCE_SPEED_1000;
++ break;
++ }
++
++ /* MT7531 SGMII 1G force mode only works in full-duplex mode,
++ * regardless of whether MT7531_SGMII_FORCE_HALF_DUPLEX is set.
++ */
++ if ((speed == SPEED_10 || speed == SPEED_100) &&
++ duplex != DUPLEX_FULL)
++ val |= MT7531_SGMII_FORCE_HALF_DUPLEX;
++
++ mt7530_write(priv, MT7531_SGMII_MODE(port), val);
++}
++
++static bool mt753x_is_mac_port(u32 port)
++{
++ return (port == 5 || port == 6);
++}
++
++static int mt7531_sgmii_setup_mode_force(struct mt7530_priv *priv, u32 port,
++ phy_interface_t interface)
++{
++ u32 val;
++
++ if (!mt753x_is_mac_port(port))
++ return -EINVAL;
++
++ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
++ MT7531_SGMII_PHYA_PWD);
++
++ val = mt7530_read(priv, MT7531_PHYA_CTRL_SIGNAL3(port));
++ val &= ~MT7531_RG_TPHY_SPEED_MASK;
++ /* Set up a 2.5x faster SerDes clock for 2.5 Gbps data rates with
++ * 8b/10b encoding.
++ */
++ val |= (interface == PHY_INTERFACE_MODE_2500BASEX) ?
++ MT7531_RG_TPHY_SPEED_3_125G : MT7531_RG_TPHY_SPEED_1_25G;
++ mt7530_write(priv, MT7531_PHYA_CTRL_SIGNAL3(port), val);
++
++ mt7530_clear(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
++
++ /* MT7531 SGMII 1G and 2.5G force modes only work in full-duplex
++ * mode, regardless of whether MT7531_SGMII_FORCE_HALF_DUPLEX is set.
++ */
++ mt7530_rmw(priv, MT7531_SGMII_MODE(port),
++ MT7531_SGMII_IF_MODE_MASK | MT7531_SGMII_REMOTE_FAULT_DIS,
++ MT7531_SGMII_FORCE_SPEED_1000);
++
++ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
++
++ return 0;
++}
++
++static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
++ phy_interface_t interface)
++{
++ if (!mt753x_is_mac_port(port))
++ return -EINVAL;
++
++ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
++ MT7531_SGMII_PHYA_PWD);
++
++ mt7530_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
++ MT7531_RG_TPHY_SPEED_MASK, MT7531_RG_TPHY_SPEED_1_25G);
++
++ mt7530_set(priv, MT7531_SGMII_MODE(port),
++ MT7531_SGMII_REMOTE_FAULT_DIS |
++ MT7531_SGMII_SPEED_DUPLEX_AN);
++
++ mt7530_rmw(priv, MT7531_PCS_SPEED_ABILITY(port),
++ MT7531_SGMII_TX_CONFIG_MASK, 1);
++
++ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
++
++ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_RESTART);
++
++ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
++
++ return 0;
++}
++
++static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 val;
++
++ /* Only restart AN when AN is enabled */
++ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
++ if (val & MT7531_SGMII_AN_ENABLE) {
++ val |= MT7531_SGMII_AN_RESTART;
++ mt7530_write(priv, MT7531_PCS_CONTROL_1(port), val);
++ }
++}
++
++static int
++mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
++ phy_interface_t interface)
++{
++ struct mt7530_priv *priv = ds->priv;
++ struct phy_device *phydev;
++ const struct dsa_port *dp;
++
++ if (!mt753x_is_mac_port(port)) {
++ dev_err(priv->dev, "port %d is not a MAC port\n", port);
++ return -EINVAL;
++ }
++
++ switch (interface) {
++ case PHY_INTERFACE_MODE_RGMII:
++ case PHY_INTERFACE_MODE_RGMII_ID:
++ case PHY_INTERFACE_MODE_RGMII_RXID:
++ case PHY_INTERFACE_MODE_RGMII_TXID:
++ dp = dsa_to_port(ds, port);
++ phydev = dp->slave->phydev;
++ return mt7531_rgmii_setup(priv, port, interface, phydev);
++ case PHY_INTERFACE_MODE_SGMII:
++ return mt7531_sgmii_setup_mode_an(priv, port, interface);
++ case PHY_INTERFACE_MODE_NA:
++ case PHY_INTERFACE_MODE_1000BASEX:
++ case PHY_INTERFACE_MODE_2500BASEX:
++ if (phylink_autoneg_inband(mode))
++ return -EINVAL;
++
++ return mt7531_sgmii_setup_mode_force(priv, port, interface);
++ default:
++ return -EINVAL;
++ }
++
++ return -EINVAL;
++}
++
+ static int
+ mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+@@ -1439,6 +2152,8 @@ mt753x_phylink_mac_config(struct dsa_swi
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
+
++ if (priv->p5_intf_sel != P5_DISABLED)
++ priv->p5_interface = state->interface;
+ break;
+ case 6: /* 1st cpu port */
+ if (priv->p6_interface == state->interface)
+@@ -1458,7 +2173,8 @@ unsupported:
+ return;
+ }
+
+- if (phylink_autoneg_inband(mode)) {
++ if (phylink_autoneg_inband(mode) &&
++ state->interface != PHY_INTERFACE_MODE_SGMII) {
+ dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
+ __func__);
+ return;
+@@ -1468,7 +2184,7 @@ unsupported:
+ mcr_new = mcr_cur;
+ mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
+ mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
+- PMCR_BACKPR_EN | PMCR_FORCE_MODE;
++ PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
+
+ /* Are we connected to external phy */
+ if (port == 5 && dsa_is_user_port(ds, 5))
+@@ -1478,7 +2194,18 @@ unsupported:
+ mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
+ }
+
+-static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
++static void
++mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ if (!priv->info->mac_pcs_an_restart)
++ return;
++
++ priv->info->mac_pcs_an_restart(ds, port);
++}
++
++static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+ {
+@@ -1487,7 +2214,19 @@ static void mt7530_phylink_mac_link_down
+ mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+ }
+
+-static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
++static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ if (!priv->info->mac_pcs_link_up)
++ return;
++
++ priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
++}
++
++static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev,
+@@ -1497,18 +2236,29 @@ static void mt7530_phylink_mac_link_up(s
+ struct mt7530_priv *priv = ds->priv;
+ u32 mcr;
+
++ mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
++
+ mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+
++ /* MT753x MAC works in 1G full duplex mode for all up-clocked
++ * variants.
++ */
++ if (interface == PHY_INTERFACE_MODE_TRGMII ||
++ (phy_interface_mode_is_8023z(interface))) {
++ speed = SPEED_1000;
++ duplex = DUPLEX_FULL;
++ }
++
+ switch (speed) {
+ case SPEED_1000:
+ mcr |= PMCR_FORCE_SPEED_1000;
+ if (priv->eee_enable & BIT(port))
+- mcr_new |= PMCR_FORCE_EEE1G;
++ mcr |= PMCR_FORCE_EEE1G;
+ break;
+ case SPEED_100:
+ mcr |= PMCR_FORCE_SPEED_100;
+ if (priv->eee_enable & BIT(port))
+- mcr_new |= PMCR_FORCE_EEE100;
++ mcr |= PMCR_FORCE_EEE100;
+ break;
+ }
+ if (duplex == DUPLEX_FULL) {
+@@ -1522,6 +2272,45 @@ static void mt7530_phylink_mac_link_up(s
+ mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+ }
+
++static int
++mt7531_cpu_port_config(struct dsa_switch *ds, int port)
++{
++ struct mt7530_priv *priv = ds->priv;
++ phy_interface_t interface;
++ int speed;
++
++ switch (port) {
++ case 5:
++ if (mt7531_is_rgmii_port(priv, port))
++ interface = PHY_INTERFACE_MODE_RGMII;
++ else
++ interface = PHY_INTERFACE_MODE_2500BASEX;
++
++ priv->p5_interface = interface;
++ break;
++ case 6:
++ interface = PHY_INTERFACE_MODE_2500BASEX;
++
++ mt7531_pad_setup(ds, interface);
++
++ priv->p6_interface = interface;
++ break;
++ }
++
++ if (interface == PHY_INTERFACE_MODE_2500BASEX)
++ speed = SPEED_2500;
++ else
++ speed = SPEED_1000;
++
++ mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
++ mt7530_write(priv, MT7530_PMCR_P(port),
++ PMCR_CPU_PORT_SETTING(priv->id));
++ mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
++ speed, DUPLEX_FULL, true, true);
++
++ return 0;
++}
++
+ static void
+ mt7530_mac_port_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported)
+@@ -1530,6 +2319,14 @@ mt7530_mac_port_validate(struct dsa_swit
+ phylink_set(supported, 1000baseX_Full);
+ }
+
++static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
++ unsigned long *supported)
++{
++ struct mt7530_priv *priv = ds->priv;
++
++ mt7531_sgmii_validate(priv, port, supported);
++}
++
+ static void
+ mt753x_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+@@ -1546,7 +2343,8 @@ mt753x_phylink_validate(struct dsa_switc
+
+ phylink_set_port_modes(mask);
+
+- if (state->interface != PHY_INTERFACE_MODE_TRGMII) {
++ if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
++ !phy_interface_mode_is_8023z(state->interface)) {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+@@ -1565,6 +2363,11 @@ mt753x_phylink_validate(struct dsa_switc
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
++
++ /* We can only operate at 2500BaseX or 1000BaseX. If requested
++ * to advertise both, only report advertising at 2500BaseX.
++ */
++ phylink_helper_basex_speed(state);
+ }
+
+ static int
+@@ -1655,6 +2458,63 @@ static int mt7530_set_mac_eee(struct dsa
+ return 0;
+ }
+
++#ifdef notyet
++static int
++mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
++ struct phylink_link_state *state)
++{
++ u32 status, val;
++ u16 config_reg;
++
++ status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
++ state->link = !!(status & MT7531_SGMII_LINK_STATUS);
++ if (state->interface == PHY_INTERFACE_MODE_SGMII &&
++ (status & MT7531_SGMII_AN_ENABLE)) {
++ val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
++ config_reg = val >> 16;
++
++ switch (config_reg & LPA_SGMII_SPD_MASK) {
++ case LPA_SGMII_1000:
++ state->speed = SPEED_1000;
++ break;
++ case LPA_SGMII_100:
++ state->speed = SPEED_100;
++ break;
++ case LPA_SGMII_10:
++ state->speed = SPEED_10;
++ break;
++ default:
++ dev_err(priv->dev, "invalid sgmii PHY speed\n");
++ state->link = false;
++ return -EINVAL;
++ }
++
++ if (config_reg & LPA_SGMII_FULL_DUPLEX)
++ state->duplex = DUPLEX_FULL;
++ else
++ state->duplex = DUPLEX_HALF;
++ }
++
++ return 0;
++}
++#endif
++
++static int
++mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
++ struct phylink_link_state *state)
++{
++#ifdef notyet
++ struct mt7530_priv *priv = ds->priv;
++
++ if (state->interface == PHY_INTERFACE_MODE_SGMII)
++ return mt7531_sgmii_pcs_get_state_an(priv, port, state);
++#else
++ return mt7530_phylink_mac_link_state(ds, port, state);
++#endif
++
++ return -EOPNOTSUPP;
++}
++
+ static int
+ mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+@@ -1708,13 +2568,14 @@ static const struct dsa_switch_ops mt753
+ .port_vlan_prepare = mt7530_port_vlan_prepare,
+ .port_vlan_add = mt7530_port_vlan_add,
+ .port_vlan_del = mt7530_port_vlan_del,
+- .port_mirror_add = mt7530_port_mirror_add,
+- .port_mirror_del = mt7530_port_mirror_del,
++ .port_mirror_add = mt753x_port_mirror_add,
++ .port_mirror_del = mt753x_port_mirror_del,
+ .phylink_validate = mt753x_phylink_validate,
+ .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_mac_config = mt753x_phylink_mac_config,
+- .phylink_mac_link_down = mt7530_phylink_mac_link_down,
+- .phylink_mac_link_up = mt7530_phylink_mac_link_up,
++ .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
++ .phylink_mac_link_down = mt753x_phylink_mac_link_down,
++ .phylink_mac_link_up = mt753x_phylink_mac_link_up,
+ .get_mac_eee = mt7530_get_mac_eee,
+ .set_mac_eee = mt7530_set_mac_eee,
+ };
+@@ -1742,11 +2603,26 @@ static const struct mt753x_info mt753x_t
+ .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_config = mt7530_mac_config,
+ },
++ [ID_MT7531] = {
++ .id = ID_MT7531,
++ .sw_setup = mt7531_setup,
++ .phy_read = mt7531_ind_phy_read,
++ .phy_write = mt7531_ind_phy_write,
++ .pad_setup = mt7531_pad_setup,
++ .cpu_port_config = mt7531_cpu_port_config,
++ .phy_mode_supported = mt7531_phy_supported,
++ .mac_port_validate = mt7531_mac_port_validate,
++ .mac_port_get_state = mt7531_phylink_mac_link_state,
++ .mac_port_config = mt7531_mac_config,
++ .mac_pcs_an_restart = mt7531_sgmii_restart_an,
++ .mac_pcs_link_up = mt7531_sgmii_link_up_force,
++ },
+ };
+
+ static const struct of_device_id mt7530_of_match[] = {
+ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
+ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
++ { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
+ { /* sentinel */ },
+ };
+ MODULE_DEVICE_TABLE(of, mt7530_of_match);
+--- a/drivers/net/dsa/mt7530.h
++++ b/drivers/net/dsa/mt7530.h
+@@ -14,6 +14,7 @@
+ enum mt753x_id {
+ ID_MT7530 = 0,
+ ID_MT7621 = 1,
++ ID_MT7531 = 2,
+ };
+
+ #define NUM_TRGMII_CTRL 5
+@@ -41,6 +42,33 @@ enum mt753x_id {
+ #define MIRROR_PORT(x) ((x) & 0x7)
+ #define MIRROR_MASK 0x7
+
++/* Registers for CPU forward control */
++#define MT7531_CFC 0x4
++#define MT7531_MIRROR_EN BIT(19)
++#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
++#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
++#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
++#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
++
++#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
++ MT7531_CFC : MT7530_MFC)
++#define MT753X_MIRROR_EN(id) (((id) == ID_MT7531) ? \
++ MT7531_MIRROR_EN : MIRROR_EN)
++#define MT753X_MIRROR_MASK(id) (((id) == ID_MT7531) ? \
++ MT7531_MIRROR_MASK : MIRROR_MASK)
++
++/* Registers for BPDU and PAE frame control */
++#define MT753X_BPC 0x24
++#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
++
++enum mt753x_bpdu_port_fw {
++ MT753X_BPDU_FOLLOW_MFC,
++ MT753X_BPDU_CPU_EXCLUDE = 4,
++ MT753X_BPDU_CPU_INCLUDE = 5,
++ MT753X_BPDU_CPU_ONLY = 6,
++ MT753X_BPDU_DROP = 7,
++};
++
+ /* Registers for address table access */
+ #define MT7530_ATA1 0x74
+ #define STATIC_EMP 0
+@@ -222,10 +250,30 @@ enum mt7530_vlan_port_attr {
+ #define PMCR_FORCE_LNK BIT(0)
+ #define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
+ PMCR_FORCE_SPEED_1000)
++#define MT7531_FORCE_LNK BIT(31)
++#define MT7531_FORCE_SPD BIT(30)
++#define MT7531_FORCE_DPX BIT(29)
++#define MT7531_FORCE_RX_FC BIT(28)
++#define MT7531_FORCE_TX_FC BIT(27)
++#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
++ MT7531_FORCE_SPD | \
++ MT7531_FORCE_DPX | \
++ MT7531_FORCE_RX_FC | \
++ MT7531_FORCE_TX_FC)
++#define PMCR_FORCE_MODE_ID(id) (((id) == ID_MT7531) ? \
++ MT7531_FORCE_MODE : \
++ PMCR_FORCE_MODE)
+ #define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
+ PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
++#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
++ PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
++ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
++ PMCR_TX_EN | PMCR_RX_EN | \
++ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
++ PMCR_FORCE_SPEED_1000 | \
++ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+
+ #define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
+ #define PMSR_EEE1G BIT(7)
+@@ -245,6 +293,10 @@ enum mt7530_vlan_port_attr {
+ #define LPI_THRESH(x) ((x & 0xFFF) << 4)
+ #define LPI_MODE_EN BIT(0)
+
++/* Register for port debug count */
++#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
++#define MT7531_DIS_CLR BIT(31)
++
+ /* Register for MIB */
+ #define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
+ #define MT7530_MIB_CCR 0x4fe0
+@@ -262,12 +314,118 @@ enum mt7530_vlan_port_attr {
+ CCR_RX_OCT_CNT_BAD | \
+ CCR_TX_OCT_CNT_GOOD | \
+ CCR_TX_OCT_CNT_BAD)
++
++/* MT7531 SGMII register group */
++#define MT7531_SGMII_REG_BASE 0x5000
++#define MT7531_SGMII_REG(p, r) (MT7531_SGMII_REG_BASE + \
++ ((p) - 5) * 0x1000 + (r))
++
++/* Register for SGMII PCS_CONTROL_1 */
++#define MT7531_PCS_CONTROL_1(p) MT7531_SGMII_REG(p, 0x00)
++#define MT7531_SGMII_LINK_STATUS BIT(18)
++#define MT7531_SGMII_AN_ENABLE BIT(12)
++#define MT7531_SGMII_AN_RESTART BIT(9)
++
++/* Register for SGMII PCS_SPEED_ABILITY */
++#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
++#define MT7531_SGMII_TX_CONFIG_MASK GENMASK(15, 0)
++#define MT7531_SGMII_TX_CONFIG BIT(0)
++
++/* Register for SGMII_MODE */
++#define MT7531_SGMII_MODE(p) MT7531_SGMII_REG(p, 0x20)
++#define MT7531_SGMII_REMOTE_FAULT_DIS BIT(8)
++#define MT7531_SGMII_IF_MODE_MASK GENMASK(5, 1)
++#define MT7531_SGMII_FORCE_DUPLEX BIT(4)
++#define MT7531_SGMII_FORCE_SPEED_MASK GENMASK(3, 2)
++#define MT7531_SGMII_FORCE_SPEED_1000 BIT(3)
++#define MT7531_SGMII_FORCE_SPEED_100 BIT(2)
++#define MT7531_SGMII_FORCE_SPEED_10 0
++#define MT7531_SGMII_SPEED_DUPLEX_AN BIT(1)
++
++enum mt7531_sgmii_force_duplex {
++ MT7531_SGMII_FORCE_FULL_DUPLEX = 0,
++ MT7531_SGMII_FORCE_HALF_DUPLEX = 0x10,
++};
++
++/* Fields of QPHY_PWR_STATE_CTRL */
++#define MT7531_QPHY_PWR_STATE_CTRL(p) MT7531_SGMII_REG(p, 0xe8)
++#define MT7531_SGMII_PHYA_PWD BIT(4)
++
++/* Values of SGMII SPEED */
++#define MT7531_PHYA_CTRL_SIGNAL3(p) MT7531_SGMII_REG(p, 0x128)
++#define MT7531_RG_TPHY_SPEED_MASK (BIT(2) | BIT(3))
++#define MT7531_RG_TPHY_SPEED_1_25G 0x0
++#define MT7531_RG_TPHY_SPEED_3_125G BIT(2)
++
+ /* Register for system reset */
+ #define MT7530_SYS_CTRL 0x7000
+ #define SYS_CTRL_PHY_RST BIT(2)
+ #define SYS_CTRL_SW_RST BIT(1)
+ #define SYS_CTRL_REG_RST BIT(0)
+
++/* Register for PHY Indirect Access Control */
++#define MT7531_PHY_IAC 0x701C
++#define MT7531_PHY_ACS_ST BIT(31)
++#define MT7531_MDIO_REG_ADDR_MASK (0x1f << 25)
++#define MT7531_MDIO_PHY_ADDR_MASK (0x1f << 20)
++#define MT7531_MDIO_CMD_MASK (0x3 << 18)
++#define MT7531_MDIO_ST_MASK (0x3 << 16)
++#define MT7531_MDIO_RW_DATA_MASK (0xffff)
++#define MT7531_MDIO_REG_ADDR(x) (((x) & 0x1f) << 25)
++#define MT7531_MDIO_DEV_ADDR(x) (((x) & 0x1f) << 25)
++#define MT7531_MDIO_PHY_ADDR(x) (((x) & 0x1f) << 20)
++#define MT7531_MDIO_CMD(x) (((x) & 0x3) << 18)
++#define MT7531_MDIO_ST(x) (((x) & 0x3) << 16)
++
++enum mt7531_phy_iac_cmd {
++ MT7531_MDIO_ADDR = 0,
++ MT7531_MDIO_WRITE = 1,
++ MT7531_MDIO_READ = 2,
++ MT7531_MDIO_READ_CL45 = 3,
++};
++
++/* MDIO_ST: MDIO start field */
++enum mt7531_mdio_st {
++ MT7531_MDIO_ST_CL45 = 0,
++ MT7531_MDIO_ST_CL22 = 1,
++};
++
++#define MT7531_MDIO_CL22_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
++ MT7531_MDIO_CMD(MT7531_MDIO_READ))
++#define MT7531_MDIO_CL22_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
++ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
++#define MT7531_MDIO_CL45_ADDR (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
++ MT7531_MDIO_CMD(MT7531_MDIO_ADDR))
++#define MT7531_MDIO_CL45_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
++ MT7531_MDIO_CMD(MT7531_MDIO_READ))
++#define MT7531_MDIO_CL45_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
++ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
++
++/* Register for RGMII clock phase */
++#define MT7531_CLKGEN_CTRL 0x7500
++#define CLK_SKEW_OUT(x) (((x) & 0x3) << 8)
++#define CLK_SKEW_OUT_MASK GENMASK(9, 8)
++#define CLK_SKEW_IN(x) (((x) & 0x3) << 6)
++#define CLK_SKEW_IN_MASK GENMASK(7, 6)
++#define RXCLK_NO_DELAY BIT(5)
++#define TXCLK_NO_REVERSE BIT(4)
++#define GP_MODE(x) (((x) & 0x3) << 1)
++#define GP_MODE_MASK GENMASK(2, 1)
++#define GP_CLK_EN BIT(0)
++
++enum mt7531_gp_mode {
++ MT7531_GP_MODE_RGMII = 0,
++ MT7531_GP_MODE_MII = 1,
++ MT7531_GP_MODE_REV_MII = 2
++};
++
++enum mt7531_clk_skew {
++ MT7531_CLK_SKEW_NO_CHG = 0,
++ MT7531_CLK_SKEW_DLY_100PPS = 1,
++ MT7531_CLK_SKEW_DLY_200PPS = 2,
++ MT7531_CLK_SKEW_REVERSE = 3,
++};
++
+ /* Register for hw trap status */
+ #define MT7530_HWTRAP 0x7800
+ #define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
+@@ -275,6 +433,16 @@ enum mt7530_vlan_port_attr {
+ #define HWTRAP_XTAL_40MHZ (BIT(10))
+ #define HWTRAP_XTAL_20MHZ (BIT(9))
+
++#define MT7531_HWTRAP 0x7800
++#define HWTRAP_XTAL_FSEL_MASK BIT(7)
++#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
++#define HWTRAP_XTAL_FSEL_40MHZ 0
++/* Unique fields of (M)HWSTRAP for MT7531 */
++#define XTAL_FSEL_S 7
++#define XTAL_FSEL_M BIT(7)
++#define PHY_EN BIT(6)
++#define CHG_STRAP BIT(8)
++
+ /* Register for hw trap modification */
+ #define MT7530_MHWTRAP 0x7804
+ #define MHWTRAP_PHY0_SEL BIT(20)
+@@ -289,14 +457,37 @@ enum mt7530_vlan_port_attr {
+ #define MT7530_TOP_SIG_CTRL 0x7808
+ #define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
+
++#define MT7531_TOP_SIG_SR 0x780c
++#define PAD_DUAL_SGMII_EN BIT(1)
++#define PAD_MCM_SMI_EN BIT(0)
++
+ #define MT7530_IO_DRV_CR 0x7810
+ #define P5_IO_CLK_DRV(x) ((x) & 0x3)
+ #define P5_IO_DATA_DRV(x) (((x) & 0x3) << 4)
+
++#define MT7531_CHIP_REV 0x781C
++
++#define MT7531_PLLGP_EN 0x7820
++#define EN_COREPLL BIT(2)
++#define SW_CLKSW BIT(1)
++#define SW_PLLGP BIT(0)
++
+ #define MT7530_P6ECR 0x7830
+ #define P6_INTF_MODE_MASK 0x3
+ #define P6_INTF_MODE(x) ((x) & 0x3)
+
++#define MT7531_PLLGP_CR0 0x78a8
++#define RG_COREPLL_EN BIT(22)
++#define RG_COREPLL_POSDIV_S 23
++#define RG_COREPLL_POSDIV_M 0x3800000
++#define RG_COREPLL_SDM_PCW_S 1
++#define RG_COREPLL_SDM_PCW_M 0x3ffffe
++#define RG_COREPLL_SDM_PCW_CHG BIT(0)
++
++/* Registers for RGMII and SGMII PLL clock */
++#define MT7531_ANA_PLLGP_CR2 0x78b0
++#define MT7531_ANA_PLLGP_CR5 0x78bc
++
+ /* Registers for TRGMII on the both side */
+ #define MT7530_TRGMII_RCK_CTRL 0x7a00
+ #define RX_RST BIT(31)
+@@ -335,10 +526,25 @@ enum mt7530_vlan_port_attr {
+ #define MT7530_P5RGMIITXCR 0x7b04
+ #define CSR_RGMII_TXC_CFG(x) ((x) & 0x1f)
+
++/* Registers for GPIO mode */
++#define MT7531_GPIO_MODE0 0x7c0c
++#define MT7531_GPIO0_MASK GENMASK(3, 0)
++#define MT7531_GPIO0_INTERRUPT 1
++
++#define MT7531_GPIO_MODE1 0x7c10
++#define MT7531_GPIO11_RG_RXD2_MASK GENMASK(15, 12)
++#define MT7531_EXT_P_MDC_11 (2 << 12)
++#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
++#define MT7531_EXT_P_MDIO_12 (2 << 16)
++
+ #define MT7530_CREV 0x7ffc
+ #define CHIP_NAME_SHIFT 16
+ #define MT7530_ID 0x7530
+
++#define MT7531_CREV 0x781C
++#define CHIP_REV_M 0x0f
++#define MT7531_ID 0x7531
++
+ /* Registers for core PLL access through mmd indirect */
+ #define CORE_PLL_GROUP2 0x401
+ #define RG_SYSPLL_EN_NORMAL BIT(15)
+@@ -355,6 +561,10 @@ enum mt7530_vlan_port_attr {
+ #define RG_SYSPLL_DDSFBK_EN BIT(12)
+ #define RG_SYSPLL_BIAS_EN BIT(11)
+ #define RG_SYSPLL_BIAS_LPF_EN BIT(10)
++#define MT7531_PHY_PLL_OFF BIT(5)
++#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
++
++#define MT753X_CTRL_PHY_ADDR 0
+
+ #define CORE_PLL_GROUP5 0x404
+ #define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
+@@ -433,6 +643,7 @@ enum p5_interface_select {
+ P5_INTF_SEL_PHY_P0,
+ P5_INTF_SEL_PHY_P4,
+ P5_INTF_SEL_GMAC5,
++ P5_INTF_SEL_GMAC5_SGMII,
+ };
+
+ static const char *p5_intf_modes(unsigned int p5_interface)
+@@ -446,6 +657,8 @@ static const char *p5_intf_modes(unsigne
+ return "PHY P4";
+ case P5_INTF_SEL_GMAC5:
+ return "GMAC5";
++ case P5_INTF_SEL_GMAC5_SGMII:
++ return "GMAC5_SGMII";
+ default:
+ return "unknown";
+ }
+@@ -466,6 +679,10 @@ static const char *p5_intf_modes(unsigne
+ * MAC port
+ * @mac_port_config: Holding the way setting up the PHY attribute to a
+ * certain MAC port
++ * @mac_pcs_an_restart: Holding the way of restarting PCS autonegotiation
++ * for a certain MAC port
++ * @mac_pcs_link_up: Holding the way of setting up the PCS of a certain
++ * MAC port once the link is up
+ */
+ struct mt753x_info {
+ enum mt753x_id id;
+@@ -474,6 +691,7 @@ struct mt753x_info {
+ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+ int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val);
+ int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
++ int (*cpu_port_config)(struct dsa_switch *ds, int port);
+ bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state);
+ void (*mac_port_validate)(struct dsa_switch *ds, int port,
+@@ -483,6 +701,10 @@ struct mt753x_info {
+ int (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
++ void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
++ void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
++ unsigned int mode, phy_interface_t interface,
++ int speed, int duplex);
+ };
+
+ /* struct mt7530_priv - This is the main data structure for holding the state
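[Editor's note] The mt7530.c/mt7530.h changes above route every chip-specific detail (register offsets such as MT753X_MIRROR_REG(id), the PMCR force-mode bits, and the setup/PCS hooks) through the per-chip mt753x_info table, so MT7531 support slots in without disturbing the existing MT7530 paths. The standalone C sketch below models that dispatch pattern only; every name in it is a hypothetical stand-in, not a kernel symbol.

    /* Illustrative sketch (not part of the patch): per-chip dispatch through
     * an ID-indexed info table, the same shape as mt753x_table in the driver.
     * Hypothetical names throughout.
     */
    #include <stdio.h>

    enum demo_chip_id { DEMO_ID_MT7530 = 0, DEMO_ID_MT7531 = 2 };

    struct demo_chip_info {
    	enum demo_chip_id id;
    	unsigned int mirror_reg;	/* chip-specific register offset */
    	int (*sw_setup)(const struct demo_chip_info *info);
    };

    static int demo_mt7530_setup(const struct demo_chip_info *info)
    {
    	printf("mt7530-style setup, mirror reg 0x%x\n", info->mirror_reg);
    	return 0;
    }

    static int demo_mt7531_setup(const struct demo_chip_info *info)
    {
    	printf("mt7531-style setup, mirror reg 0x%x\n", info->mirror_reg);
    	return 0;
    }

    static const struct demo_chip_info demo_table[] = {
    	[DEMO_ID_MT7530] = { DEMO_ID_MT7530, 0x10, demo_mt7530_setup },
    	[DEMO_ID_MT7531] = { DEMO_ID_MT7531, 0x04, demo_mt7531_setup },
    };

    int main(void)
    {
    	/* Probe code picks the entry matched from the compatible string
    	 * and calls its hooks; shared code mostly goes through these
    	 * hooks instead of branching on the chip ID everywhere.
    	 */
    	const struct demo_chip_info *info = &demo_table[DEMO_ID_MT7531];

    	return info->sw_setup(info);
    }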
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0605-arm64-dts-mt7622-add-mt7531-dsa-to-bananapi-bpi-r64-board.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0605-arm64-dts-mt7622-add-mt7531-dsa-to-bananapi-bpi-r64-board.patch
new file mode 100644
index 0000000..8c3fe52
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0605-arm64-dts-mt7622-add-mt7531-dsa-to-bananapi-bpi-r64-board.patch
@@ -0,0 +1,71 @@
+From: Landen Chao <landen.chao@mediatek.com>
+Subject: [PATCH net-next 6/6] arm64: dts: mt7622: add mt7531 dsa to
+ bananapi-bpi-r64 board
+Date: Tue, 10 Dec 2019 16:14:42 +0800
+
+Add mt7531 dsa to the bananapi-bpi-r64 board to support five Gigabit Ethernet ports.
+
+Signed-off-by: Landen Chao <landen.chao@mediatek.com>
+---
+ .../dts/mediatek/mt7622-bananapi-bpi-r64.dts | 50 +++++++++++++++++++
+ 1 file changed, 50 insertions(+)
+
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -150,6 +150,56 @@
+ mdio: mdio-bus {
+ #address-cells = <1>;
+ #size-cells = <0>;
++
++ switch@0 {
++ compatible = "mediatek,mt7531";
++ reg = <0>;
++ reset-gpios = <&pio 54 0>;
++
++ ports {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ port@0 {
++ reg = <0>;
++ label = "wan";
++ };
++
++ port@1 {
++ reg = <1>;
++ label = "lan0";
++ };
++
++ port@2 {
++ reg = <2>;
++ label = "lan1";
++ };
++
++ port@3 {
++ reg = <3>;
++ label = "lan2";
++ };
++
++ port@4 {
++ reg = <4>;
++ label = "lan3";
++ };
++
++ port@6 {
++ reg = <6>;
++ label = "cpu";
++ ethernet = <&gmac0>;
++ phy-mode = "2500base-x";
++
++ fixed-link {
++ speed = <2500>;
++ full-duplex;
++ pause;
++ };
++ };
++ };
++ };
++
+ };
+ };
+
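[Editor's note] The CPU port above is declared as a 2500base-x fixed link at 2500 Mb/s; with 8b/10b line coding that corresponds to the 3.125 GBd SerDes rate the driver selects via MT7531_RG_TPHY_SPEED_3_125G (and 1.25 GBd for 1000base-x/SGMII). A small standalone arithmetic sketch of that relationship, for illustration only:

    #include <stdio.h>

    /* 8b/10b coding: every 8 data bits are sent as 10 line bits. */
    static unsigned long long line_rate_baud(unsigned long long data_rate_bps)
    {
    	return data_rate_bps * 10ULL / 8ULL;
    }

    int main(void)
    {
    	/* 1000base-x/SGMII -> 1.25 GBd, 2500base-x -> 3.125 GBd */
    	printf("1.0 Gb/s -> %llu Bd\n", line_rate_baud(1000000000ULL));
    	printf("2.5 Gb/s -> %llu Bd\n", line_rate_baud(2500000000ULL));
    	return 0;
    }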
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0666-add-spimem-support-to-mtk-spi.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0666-add-spimem-support-to-mtk-spi.patch
new file mode 100644
index 0000000..d50aa25
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0666-add-spimem-support-to-mtk-spi.patch
@@ -0,0 +1,636 @@
+From 675b477b2a50b2fb97f35944756f89644bf70092 Mon Sep 17 00:00:00 2001
+From: Qii Wang <qii.wang@mediatek.com>
+Date: Tue, 5 Jan 2021 16:48:39 +0800
+Subject: [PATCH] spi: mediatek: support IPM Design
+
+[Description]
+1. support single mode;
+2. support dual/quad mode with spi-mem framework.
+
+Signed-off-by: Leilk Liu <leilk.liu@mediatek.com>
+Reviewed-by: Qii Wang <qii.wang@mediatek.com>
+---
+ drivers/spi/spi-mt65xx.c | 395 +++++++++++++++++++++--
+ include/linux/platform_data/spi-mt65xx.h | 2 +-
+ 2 files changed, 370 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 8acf24f7c..9183c64e4 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -17,6 +17,7 @@
+ #include <linux/platform_data/spi-mt65xx.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/spi/spi.h>
++#include <linux/spi/spi-mem.h>
+ #include <linux/dma-mapping.h>
+
+ #define SPI_CFG0_REG 0x0000
+@@ -31,6 +32,7 @@
+ #define SPI_CFG2_REG 0x0028
+ #define SPI_TX_SRC_REG_64 0x002c
+ #define SPI_RX_DST_REG_64 0x0030
++#define SPI_CFG3_IPM_REG 0x0040
+
+ #define SPI_CFG0_SCK_HIGH_OFFSET 0
+ #define SPI_CFG0_SCK_LOW_OFFSET 8
+@@ -42,13 +44,15 @@
+ #define SPI_CFG1_CS_IDLE_OFFSET 0
+ #define SPI_CFG1_PACKET_LOOP_OFFSET 8
+ #define SPI_CFG1_PACKET_LENGTH_OFFSET 16
+-#define SPI_CFG1_GET_TICK_DLY_OFFSET 30
++#define SPI_CFG1_GET_TICKDLY_OFFSET 29
+
++#define SPI_CFG1_GET_TICKDLY_MASK GENMASK(31, 29)
+ #define SPI_CFG1_CS_IDLE_MASK 0xff
+ #define SPI_CFG1_PACKET_LOOP_MASK 0xff00
+ #define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
++#define SPI_CFG1_IPM_PACKET_LENGTH_MASK GENMASK(31, 16)
+ #define SPI_CFG2_SCK_HIGH_OFFSET 0
+-#define SPI_CFG2_SCK_LOW_OFFSET 16
++#define SPI_CFG2_SCK_LOW_OFFSET 16
+
+ #define SPI_CMD_ACT BIT(0)
+ #define SPI_CMD_RESUME BIT(1)
+@@ -67,6 +71,25 @@
+ #define SPI_CMD_TX_ENDIAN BIT(15)
+ #define SPI_CMD_FINISH_IE BIT(16)
+ #define SPI_CMD_PAUSE_IE BIT(17)
++#define SPI_CMD_IPM_NONIDLE_MODE BIT(19)
++#define SPI_CMD_IPM_SPIM_LOOP BIT(21)
++#define SPI_CMD_IPM_GET_TICKDLY_OFFSET 22
++
++#define SPI_CMD_IPM_GET_TICKDLY_MASK GENMASK(24, 22)
++
++#define PIN_MODE_CFG(x) ((x) / 2)
++
++#define SPI_CFG3_IPM_PIN_MODE_OFFSET 0
++#define SPI_CFG3_IPM_HALF_DUPLEX_DIR BIT(2)
++#define SPI_CFG3_IPM_HALF_DUPLEX_EN BIT(3)
++#define SPI_CFG3_IPM_XMODE_EN BIT(4)
++#define SPI_CFG3_IPM_NODATA_FLAG BIT(5)
++#define SPI_CFG3_IPM_CMD_BYTELEN_OFFSET 8
++#define SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET 12
++
++#define SPI_CFG3_IPM_CMD_PIN_MODE_MASK GENMASK(1, 0)
++#define SPI_CFG3_IPM_CMD_BYTELEN_MASK GENMASK(11, 8)
++#define SPI_CFG3_IPM_ADDR_BYTELEN_MASK GENMASK(15, 12)
+
+ #define MT8173_SPI_MAX_PAD_SEL 3
+
+@@ -77,6 +100,9 @@
+
+ #define MTK_SPI_MAX_FIFO_SIZE 32U
+ #define MTK_SPI_PACKET_SIZE 1024
++#define MTK_SPI_IPM_PACKET_SIZE SZ_64K
++#define MTK_SPI_IPM_PACKET_LOOP SZ_256
++
+ #define MTK_SPI_32BITS_MASK (0xffffffff)
+
+ #define DMA_ADDR_EXT_BITS (36)
+@@ -90,6 +116,9 @@ struct mtk_spi_compatible {
+ bool enhance_timing;
+ /* some IC support DMA addr extension */
+ bool dma_ext;
++ /* the IPM IP design improves some features and supports dual/quad mode */
++ bool ipm_design;
++ bool support_quad;
+ };
+
+ struct mtk_spi {
+@@ -104,6 +133,12 @@ struct mtk_spi {
+ struct scatterlist *tx_sgl, *rx_sgl;
+ u32 tx_sgl_len, rx_sgl_len;
+ const struct mtk_spi_compatible *dev_comp;
++
++ struct completion spimem_done;
++ bool use_spimem;
++ struct device *dev;
++ dma_addr_t tx_dma;
++ dma_addr_t rx_dma;
+ };
+
+ static const struct mtk_spi_compatible mtk_common_compat;
+@@ -112,6 +147,14 @@ static const struct mtk_spi_compatible mt2712_compat = {
+ .must_tx = true,
+ };
+
++static const struct mtk_spi_compatible ipm_compat = {
++ .must_tx = true,
++ .enhance_timing = true,
++ .dma_ext = true,
++ .ipm_design = true,
++ .support_quad = true,
++};
++
+ static const struct mtk_spi_compatible mt6765_compat = {
+ .need_pad_sel = true,
+ .must_tx = true,
+@@ -140,11 +183,14 @@ static const struct mtk_spi_compatible mt8183_compat = {
+ * supplies it.
+ */
+ static const struct mtk_chip_config mtk_default_chip_info = {
+- .cs_pol = 0,
+ .sample_sel = 0,
++ .get_tick_dly = 0,
+ };
+
+ static const struct of_device_id mtk_spi_of_match[] = {
++ { .compatible = "mediatek,ipm-spi",
++ .data = (void *)&ipm_compat,
++ },
+ { .compatible = "mediatek,mt2701-spi",
+ .data = (void *)&mtk_common_compat,
+ },
+@@ -190,19 +236,48 @@ static void mtk_spi_reset(struct mtk_spi *mdata)
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ }
+
+-static int mtk_spi_prepare_message(struct spi_master *master,
+- struct spi_message *msg)
++static int mtk_spi_hw_init(struct spi_master *master,
++ struct spi_device *spi)
+ {
+ u16 cpha, cpol;
+ u32 reg_val;
+- struct spi_device *spi = msg->spi;
+ struct mtk_chip_config *chip_config = spi->controller_data;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ cpol = spi->mode & SPI_CPOL ? 1 : 0;
+
++ if (mdata->dev_comp->enhance_timing) {
++ if (mdata->dev_comp->ipm_design) {
++ /* The CFG3 register is only used for spi-mem;
++ * write its default value here.
++ */
++ writel(0x0, mdata->base + SPI_CFG3_IPM_REG);
++
++ reg_val = readl(mdata->base + SPI_CMD_REG);
++ reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
++ reg_val |= chip_config->get_tick_dly
++ << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
++ writel(reg_val, mdata->base + SPI_CMD_REG);
++ } else {
++ reg_val = readl(mdata->base + SPI_CFG1_REG);
++ reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
++ reg_val |= chip_config->get_tick_dly
++ << SPI_CFG1_GET_TICKDLY_OFFSET;
++ writel(reg_val, mdata->base + SPI_CFG1_REG);
++ }
++ }
++
+ reg_val = readl(mdata->base + SPI_CMD_REG);
++ if (mdata->dev_comp->ipm_design) {
++ /* SPI transfer without idle time until packet length done */
++ reg_val |= SPI_CMD_IPM_NONIDLE_MODE;
++ if (spi->mode & SPI_LOOP)
++ reg_val |= SPI_CMD_IPM_SPIM_LOOP;
++ else
++ reg_val &= ~SPI_CMD_IPM_SPIM_LOOP;
++ }
++
+ if (cpha)
+ reg_val |= SPI_CMD_CPHA;
+ else
+@@ -231,10 +306,12 @@ static int mtk_spi_prepare_message(struct spi_master *master,
+ #endif
+
+ if (mdata->dev_comp->enhance_timing) {
+- if (chip_config->cs_pol)
++ /* set CS polarity */
++ if (spi->mode & SPI_CS_HIGH)
+ reg_val |= SPI_CMD_CS_POL;
+ else
+ reg_val &= ~SPI_CMD_CS_POL;
++
+ if (chip_config->sample_sel)
+ reg_val |= SPI_CMD_SAMPLE_SEL;
+ else
+@@ -260,11 +337,20 @@ static int mtk_spi_prepare_message(struct spi_master *master,
+ return 0;
+ }
+
++static int mtk_spi_prepare_message(struct spi_master *master,
++ struct spi_message *msg)
++{
++ return mtk_spi_hw_init(master, msg->spi);
++}
++
+ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
+ {
+ u32 reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+
++ if (spi->mode & SPI_CS_HIGH)
++ enable = !enable;
++
+ reg_val = readl(mdata->base + SPI_CMD_REG);
+ if (!enable) {
+ reg_val |= SPI_CMD_PAUSE_EN;
+@@ -278,14 +364,14 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
+ }
+
+ static void mtk_spi_prepare_transfer(struct spi_master *master,
+- struct spi_transfer *xfer)
++ u32 speed_hz)
+ {
+ u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ spi_clk_hz = clk_get_rate(mdata->spi_clk);
+- if (xfer->speed_hz < spi_clk_hz / 2)
+- div = DIV_ROUND_UP(spi_clk_hz, xfer->speed_hz);
++ if (speed_hz < spi_clk_hz / 2)
++ div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
+ else
+ div = 1;
+
+@@ -323,12 +409,24 @@ static void mtk_spi_setup_packet(struct spi_master *master)
+ u32 packet_size, packet_loop, reg_val;
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+- packet_size = min_t(u32, mdata->xfer_len, MTK_SPI_PACKET_SIZE);
++ if (mdata->dev_comp->ipm_design)
++ packet_size = min_t(u32,
++ mdata->xfer_len,
++ MTK_SPI_IPM_PACKET_SIZE);
++ else
++ packet_size = min_t(u32,
++ mdata->xfer_len,
++ MTK_SPI_PACKET_SIZE);
++
+ packet_loop = mdata->xfer_len / packet_size;
+
+ reg_val = readl(mdata->base + SPI_CFG1_REG);
+- reg_val &= ~(SPI_CFG1_PACKET_LENGTH_MASK | SPI_CFG1_PACKET_LOOP_MASK);
++ if (mdata->dev_comp->ipm_design)
++ reg_val &= ~SPI_CFG1_IPM_PACKET_LENGTH_MASK;
++ else
++ reg_val &= ~SPI_CFG1_PACKET_LENGTH_MASK;
+ reg_val |= (packet_size - 1) << SPI_CFG1_PACKET_LENGTH_OFFSET;
++ reg_val &= ~SPI_CFG1_PACKET_LOOP_MASK;
+ reg_val |= (packet_loop - 1) << SPI_CFG1_PACKET_LOOP_OFFSET;
+ writel(reg_val, mdata->base + SPI_CFG1_REG);
+ }
+@@ -423,7 +521,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
+ mdata->cur_transfer = xfer;
+ mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
+ mdata->num_xfered = 0;
+- mtk_spi_prepare_transfer(master, xfer);
++ mtk_spi_prepare_transfer(master, xfer->speed_hz);
+ mtk_spi_setup_packet(master);
+
+ cnt = xfer->len / 4;
+@@ -455,7 +553,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
+ mdata->cur_transfer = xfer;
+ mdata->num_xfered = 0;
+
+- mtk_spi_prepare_transfer(master, xfer);
++ mtk_spi_prepare_transfer(master, xfer->speed_hz);
+
+ cmd = readl(mdata->base + SPI_CMD_REG);
+ if (xfer->tx_buf)
+@@ -532,6 +630,13 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+ else
+ mdata->state = MTK_SPI_IDLE;
+
++ /* SPI-MEM ops */
++ if (mdata->use_spimem) {
++ complete(&mdata->spimem_done);
++
++ return IRQ_HANDLED;
++ }
++
+ if (!master->can_dma(master, NULL, trans)) {
+ if (trans->rx_buf) {
+ cnt = mdata->xfer_len / 4;
+@@ -615,12 +720,241 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
+ return IRQ_HANDLED;
+ }
+
++static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
++ op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
++ return false;
++
++ if (op->addr.nbytes && op->dummy.nbytes &&
++ op->addr.buswidth != op->dummy.buswidth)
++ return false;
++
++ if (op->addr.nbytes + op->dummy.nbytes > 16)
++ return false;
++
++ if (op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
++ if (op->data.nbytes / MTK_SPI_IPM_PACKET_SIZE >
++ MTK_SPI_IPM_PACKET_LOOP ||
++ op->data.nbytes % MTK_SPI_IPM_PACKET_SIZE != 0)
++ return false;
++ }
++
++ if (op->data.dir == SPI_MEM_DATA_IN &&
++ !IS_ALIGNED((size_t)op->data.buf.in, 4))
++ return false;
++
++ return true;
++}
++
++static void mtk_spi_mem_setup_dma_xfer(struct spi_master *master,
++ const struct spi_mem_op *op)
++{
++ struct mtk_spi *mdata = spi_master_get_devdata(master);
++
++ writel((u32)(mdata->tx_dma & MTK_SPI_32BITS_MASK),
++ mdata->base + SPI_TX_SRC_REG);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ if (mdata->dev_comp->dma_ext)
++ writel((u32)(mdata->tx_dma >> 32),
++ mdata->base + SPI_TX_SRC_REG_64);
++#endif
++
++ if (op->data.dir == SPI_MEM_DATA_IN) {
++ writel((u32)(mdata->rx_dma & MTK_SPI_32BITS_MASK),
++ mdata->base + SPI_RX_DST_REG);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ if (mdata->dev_comp->dma_ext)
++ writel((u32)(mdata->rx_dma >> 32),
++ mdata->base + SPI_RX_DST_REG_64);
++#endif
++ }
++}
++
++static int mtk_spi_transfer_wait(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
++ unsigned long long ms = 1;
++
++ if (op->data.dir == SPI_MEM_NO_DATA)
++ ms = 8LL * 1000LL * 32;
++ else
++ ms = 8LL * 1000LL * op->data.nbytes;
++ do_div(ms, mem->spi->max_speed_hz);
++ ms += ms + 1000; /* 1s tolerance */
++
++ if (ms > UINT_MAX)
++ ms = UINT_MAX;
++
++ if (!wait_for_completion_timeout(&mdata->spimem_done,
++ msecs_to_jiffies(ms))) {
++ dev_err(mdata->dev, "spi-mem transfer timeout\n");
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
++
++static int mtk_spi_mem_exec_op(struct spi_mem *mem,
++ const struct spi_mem_op *op)
++{
++ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
++ u32 reg_val, nio = 1, tx_size;
++ char *tx_tmp_buf;
++ int ret = 0;
++
++ mdata->use_spimem = true;
++ reinit_completion(&mdata->spimem_done);
++
++ mtk_spi_reset(mdata);
++ mtk_spi_hw_init(mem->spi->master, mem->spi);
++ mtk_spi_prepare_transfer(mem->spi->master, mem->spi->max_speed_hz);
++
++ reg_val = readl(mdata->base + SPI_CFG3_IPM_REG);
++ /* opcode byte len */
++ reg_val &= ~SPI_CFG3_IPM_CMD_BYTELEN_MASK;
++ reg_val |= 1 << SPI_CFG3_IPM_CMD_BYTELEN_OFFSET;
++
++ /* addr & dummy byte len */
++ reg_val &= ~SPI_CFG3_IPM_ADDR_BYTELEN_MASK;
++ if (op->addr.nbytes || op->dummy.nbytes)
++ reg_val |= (op->addr.nbytes + op->dummy.nbytes) <<
++ SPI_CFG3_IPM_ADDR_BYTELEN_OFFSET;
++
++ /* data byte len */
++ if (op->data.dir == SPI_MEM_NO_DATA) {
++ reg_val |= SPI_CFG3_IPM_NODATA_FLAG;
++ writel(0, mdata->base + SPI_CFG1_REG);
++ } else {
++ reg_val &= ~SPI_CFG3_IPM_NODATA_FLAG;
++ mdata->xfer_len = op->data.nbytes;
++ mtk_spi_setup_packet(mem->spi->master);
++ }
++
++ if (op->addr.nbytes || op->dummy.nbytes) {
++ if (op->addr.buswidth == 1 || op->dummy.buswidth == 1)
++ reg_val |= SPI_CFG3_IPM_XMODE_EN;
++ else
++ reg_val &= ~SPI_CFG3_IPM_XMODE_EN;
++ }
++
++ if (op->addr.buswidth == 2 ||
++ op->dummy.buswidth == 2 ||
++ op->data.buswidth == 2)
++ nio = 2;
++ else if (op->addr.buswidth == 4 ||
++ op->dummy.buswidth == 4 ||
++ op->data.buswidth == 4)
++ nio = 4;
++
++ reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
++ reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
++
++ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
++ if (op->data.dir == SPI_MEM_DATA_IN)
++ reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
++ else
++ reg_val &= ~SPI_CFG3_IPM_HALF_DUPLEX_DIR;
++ writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
++
++ tx_size = 1 + op->addr.nbytes + op->dummy.nbytes;
++ if (op->data.dir == SPI_MEM_DATA_OUT)
++ tx_size += op->data.nbytes;
++
++ tx_size = max(tx_size, (u32)32);
++
++ tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
++ if (!tx_tmp_buf)
++ return -ENOMEM;
++
++ tx_tmp_buf[0] = op->cmd.opcode;
++
++ if (op->addr.nbytes) {
++ int i;
++
++ for (i = 0; i < op->addr.nbytes; i++)
++ tx_tmp_buf[i + 1] = op->addr.val >>
++ (8 * (op->addr.nbytes - i - 1));
++ }
++
++ if (op->dummy.nbytes)
++ memset(tx_tmp_buf + op->addr.nbytes + 1,
++ 0xff,
++ op->dummy.nbytes);
++
++ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
++ memcpy(tx_tmp_buf + op->dummy.nbytes + op->addr.nbytes + 1,
++ op->data.buf.out,
++ op->data.nbytes);
++
++ mdata->tx_dma = dma_map_single(mdata->dev, tx_tmp_buf,
++ tx_size, DMA_TO_DEVICE);
++ if (dma_mapping_error(mdata->dev, mdata->tx_dma)) {
++ ret = -ENOMEM;
++ goto err_exit;
++ }
++
++ if (op->data.dir == SPI_MEM_DATA_IN) {
++ mdata->rx_dma = dma_map_single(mdata->dev,
++ op->data.buf.in,
++ op->data.nbytes,
++ DMA_FROM_DEVICE);
++ if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
++ ret = -ENOMEM;
++ goto unmap_tx_dma;
++ }
++ }
++
++ reg_val = readl(mdata->base + SPI_CMD_REG);
++ reg_val |= SPI_CMD_TX_DMA;
++ if (op->data.dir == SPI_MEM_DATA_IN)
++ reg_val |= SPI_CMD_RX_DMA;
++ writel(reg_val, mdata->base + SPI_CMD_REG);
++
++ mtk_spi_mem_setup_dma_xfer(mem->spi->master, op);
++
++ mtk_spi_enable_transfer(mem->spi->master);
++
++ /* Wait for the interrupt. */
++ ret = mtk_spi_transfer_wait(mem, op);
++ if (ret)
++ goto unmap_rx_dma;
++
++ /* disable SPI DMA after the transfer completes */
++ reg_val = readl(mdata->base + SPI_CMD_REG);
++ reg_val &= ~SPI_CMD_TX_DMA;
++ if (op->data.dir == SPI_MEM_DATA_IN)
++ reg_val &= ~SPI_CMD_RX_DMA;
++ writel(reg_val, mdata->base + SPI_CMD_REG);
++
++ if (op->data.dir == SPI_MEM_DATA_IN)
++ dma_unmap_single(mdata->dev, mdata->rx_dma,
++ op->data.nbytes, DMA_FROM_DEVICE);
++unmap_rx_dma:
++ dma_unmap_single(mdata->dev, mdata->rx_dma,
++ op->data.nbytes, DMA_FROM_DEVICE);
++unmap_tx_dma:
++ dma_unmap_single(mdata->dev, mdata->tx_dma,
++ tx_size, DMA_TO_DEVICE);
++err_exit:
++ kfree(tx_tmp_buf);
++ mdata->use_spimem = false;
++
++ return ret;
++}
++
++static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
++ .supports_op = mtk_spi_mem_supports_op,
++ .exec_op = mtk_spi_mem_exec_op,
++};
++
+ static int mtk_spi_probe(struct platform_device *pdev)
+ {
+ struct spi_master *master;
+ struct mtk_spi *mdata;
+ const struct of_device_id *of_id;
+- struct resource *res;
+ int i, irq, ret, addr_bits;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
+@@ -629,7 +963,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ }
+
+- master->auto_runtime_pm = true;
++// master->auto_runtime_pm = true;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+@@ -648,9 +982,25 @@ static int mtk_spi_probe(struct platform_device *pdev)
+
+ mdata = spi_master_get_devdata(master);
+ mdata->dev_comp = of_id->data;
++
++ if (mdata->dev_comp->enhance_timing)
++ master->mode_bits |= SPI_CS_HIGH;
++
+ if (mdata->dev_comp->must_tx)
+ master->flags = SPI_MASTER_MUST_TX;
+
++ if (mdata->dev_comp->ipm_design)
++ master->mode_bits |= SPI_LOOP;
++
++ if (mdata->dev_comp->support_quad) {
++ master->mem_ops = &mtk_spi_mem_ops;
++ master->mode_bits |= SPI_RX_DUAL | SPI_TX_DUAL |
++ SPI_RX_QUAD | SPI_TX_QUAD;
++
++ mdata->dev = &pdev->dev;
++ init_completion(&mdata->spimem_done);
++ }
++
+ if (mdata->dev_comp->need_pad_sel) {
+ mdata->pad_num = of_property_count_u32_elems(
+ pdev->dev.of_node,
+@@ -683,15 +1033,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ }
+
+ platform_set_drvdata(pdev, master);
+-
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- if (!res) {
+- ret = -ENODEV;
+- dev_err(&pdev->dev, "failed to determine base address\n");
+- goto err_put_master;
+- }
+-
+- mdata->base = devm_ioremap_resource(&pdev->dev, res);
++ mdata->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdata->base)) {
+ ret = PTR_ERR(mdata->base);
+ goto err_put_master;
+@@ -713,6 +1055,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ goto err_put_master;
+ }
+
++/*
+ mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
+ if (IS_ERR(mdata->parent_clk)) {
+ ret = PTR_ERR(mdata->parent_clk);
+@@ -750,7 +1093,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ clk_disable_unprepare(mdata->spi_clk);
+
+ pm_runtime_enable(&pdev->dev);
+-
++*/
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
+index f0e6d6483..fae9bc15c 100644
+--- a/include/linux/platform_data/spi-mt65xx.h
++++ b/include/linux/platform_data/spi-mt65xx.h
+@@ -11,7 +11,7 @@
+
+ /* Board specific platform_data */
+ struct mtk_chip_config {
+- u32 cs_pol;
+ u32 sample_sel;
++ u32 get_tick_dly;
+ };
+ #endif
+--
+2.17.1
+
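
The exec_op hunk above assembles one DMA-able TX buffer before starting the transfer: the opcode first, the address bytes most-significant byte first, then 0xff filler for the dummy cycles, with any write data appended afterwards. The following userspace sketch mirrors only that buffer-assembly step; struct demo_op, build_tx_buf and the sample opcode are illustrative names, not driver code.

/*
 * Illustrative userspace sketch of the TX buffer assembly in the exec_op
 * hunk above: opcode first, address bytes MSB-first, then 0xff filler for
 * the dummy cycles. Struct and function names here are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_op {
    uint8_t  opcode;
    uint64_t addr_val;
    int      addr_nbytes;
    int      dummy_nbytes;
};

static size_t build_tx_buf(const struct demo_op *op, uint8_t *buf)
{
    size_t len = 0;
    int i;

    buf[len++] = op->opcode;

    /* Address goes out most-significant byte first. */
    for (i = 0; i < op->addr_nbytes; i++)
        buf[len++] = op->addr_val >> (8 * (op->addr_nbytes - i - 1));

    /* Dummy cycles are clocked out as 0xff filler. */
    memset(buf + len, 0xff, op->dummy_nbytes);
    len += op->dummy_nbytes;

    return len;
}

int main(void)
{
    struct demo_op op = { .opcode = 0x6b, .addr_val = 0x123456,
                          .addr_nbytes = 3, .dummy_nbytes = 1 };
    uint8_t buf[8];
    size_t i, n = build_tx_buf(&op, buf);

    for (i = 0; i < n; i++)
        printf("%02x ", buf[i]);          /* prints: 6b 12 34 56 ff */
    printf("\n");
    return 0;
}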
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0666-spi-mtk-nor-fix-timeout-calculation-overflow.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0666-spi-mtk-nor-fix-timeout-calculation-overflow.patch
new file mode 100644
index 0000000..86b2089
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0666-spi-mtk-nor-fix-timeout-calculation-overflow.patch
@@ -0,0 +1,179 @@
+From patchwork Tue Sep 22 11:49:02 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Chuanhong Guo <gch981213@gmail.com>
+X-Patchwork-Id: 11792387
+Return-Path:
+ <SRS0=i66O=C7=lists.infradead.org=linux-arm-kernel-bounces+patchwork-linux-arm=patchwork.kernel.org@kernel.org>
+Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org
+ [172.30.200.123])
+ by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 21EB0618
+ for <patchwork-linux-arm@patchwork.kernel.org>;
+ Tue, 22 Sep 2020 11:51:33 +0000 (UTC)
+Received: from merlin.infradead.org (merlin.infradead.org [205.233.59.134])
+ (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
+ (No client certificate requested)
+ by mail.kernel.org (Postfix) with ESMTPS id E15FF221EB
+ for <patchwork-linux-arm@patchwork.kernel.org>;
+ Tue, 22 Sep 2020 11:51:32 +0000 (UTC)
+Authentication-Results: mail.kernel.org;
+ dkim=pass (2048-bit key) header.d=lists.infradead.org
+ header.i=@lists.infradead.org header.b="KBg/skkC";
+ dkim=fail reason="signature verification failed" (2048-bit key)
+ header.d=gmail.com header.i=@gmail.com header.b="Gtqp4rrT"
+DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org E15FF221EB
+Authentication-Results: mail.kernel.org;
+ dmarc=fail (p=none dis=none) header.from=gmail.com
+Authentication-Results: mail.kernel.org;
+ spf=none
+ smtp.mailfrom=linux-arm-kernel-bounces+patchwork-linux-arm=patchwork.kernel.org@lists.infradead.org
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=lists.infradead.org; s=merlin.20170209; h=Sender:Content-Transfer-Encoding:
+ Content-Type:Cc:List-Subscribe:List-Help:List-Post:List-Archive:
+ List-Unsubscribe:List-Id:MIME-Version:Message-Id:Date:Subject:To:From:
+ Reply-To:Content-ID:Content-Description:Resent-Date:Resent-From:Resent-Sender
+ :Resent-To:Resent-Cc:Resent-Message-ID:In-Reply-To:References:List-Owner;
+ bh=Xg61WV47qNPjINdHDPnF6T3q8GN8f9evwhTMdYR0Zqs=; b=KBg/skkCvnF7/8AlleTay0p/H2
+ hC4Lzo+slWhX5/eepUEXzhTr5ORf4Dx9gD65UEuordKQKFpg6Y9ApoGaYtmBJ0vABdAZt+oVG4sFf
+ K3z3CYV6EZ5qvwsZt53Xm3YsHojgu+Lnc/MGgGWBRjCtTP7gshm480pZ0w6ADgHvrym5hNajUF6+5
+ zMm5Wwq34jxUApGU7k5FAPsvO5ctYCuhECq/mLB6tplCVh3/+XLdSiHMUlY17fh+xs732kgaDotuQ
+ QYgXtDmMB1pVKCq5cf3Bcuz7Ww47vLSx4rBxtdB/vpp2w9SdrU6K8Q7DuJ3+XrGfbMhKtBU5ektA8
+ GxEUUaKw==;
+Received: from localhost ([::1] helo=merlin.infradead.org)
+ by merlin.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1kKgo2-0000Ze-Fb; Tue, 22 Sep 2020 11:50:00 +0000
+Received: from mail-pg1-x543.google.com ([2607:f8b0:4864:20::543])
+ by merlin.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1kKgnr-0000Vv-6z; Tue, 22 Sep 2020 11:49:49 +0000
+Received: by mail-pg1-x543.google.com with SMTP id o25so6798387pgm.0;
+ Tue, 22 Sep 2020 04:49:46 -0700 (PDT)
+DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20161025;
+ h=from:to:cc:subject:date:message-id:mime-version
+ :content-transfer-encoding;
+ bh=EJwpKrbgqo/Jc/SWHvyAGB9CrpkZ5L1Hzq9tInFHTYk=;
+ b=Gtqp4rrTgM1+bYxfUQXe+lfPcgHRW6GccdN42Iszl6ozMbezvftl1BUcKE22S6eFW3
+ Vs+lcKZN9Eh9C53YAAd0cuZYhJ2GqlfGNLA/9SyB7s/gIwHqO9Cuu17YpB9dAFfEUxoS
+ 825uUcTeRe6BTagZAh2/MBluiMY3TszRi94MbOftxUg+wSqp0wMAPe9RN0gAEc/l2xgK
+ 8PhXbZv3uItI4QqoKYiz93vrF/zYhj+oGTI44g2li2fpAgCNL7lXCpSE2C9NsEe+YqTw
+ aO5A3W8t4jvp8oCJEvr/MWY1ZZLd1fVJ17W3aGXoDi/7EUcAvX9G5Ee7U68UXGMtty/d
+ z5Nw==
+X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed;
+ d=1e100.net; s=20161025;
+ h=x-gm-message-state:from:to:cc:subject:date:message-id:mime-version
+ :content-transfer-encoding;
+ bh=EJwpKrbgqo/Jc/SWHvyAGB9CrpkZ5L1Hzq9tInFHTYk=;
+ b=XhcpP16zYyJr/qCT9JbO3fn8RyfI44xJL3hvgNrlcr4ljkEZ4TF6OfyhjdEZYeeA3C
+ kLlWuAqrSn6mweuhS2LZ0BV5QL/YYaVO4wP4B/y3j+tNbnW3JNM0NtEY19pOtaM4vYK/
+ tPuNxld5RvJWxQ9BLs8hH6y7j/ob6oDug170P5YkwK6Wa/FLCi2bw92/vldhdnFP/Nny
+ 1bbiWRVls1Ra/Q3z90tGViMkBdlcff6MI9DR5M6a1HTQN7kN9rLDCMGs3r9XVComY07N
+ ECbrZbL+iJwuRuT43RAUxE72X/Pn0WYD20unzITf8bta92usNDRgEuxc1bLyL+uHxgUk
+ YQKA==
+X-Gm-Message-State: AOAM531Xr1Bg4uwupCAPpH4eBWVrXGALjIWa+5AVNZ8w6ltS4BGgWv6b
+ e4g6ycKnUp/KalpJhOMi90o=
+X-Google-Smtp-Source:
+ ABdhPJx36OliaaLkiX3ZeZNNWgd/qSKiRor2X0eeHScDrjMSi5bTiEzAfX5j7hkQgqz8ZUT0qqLRNA==
+X-Received: by 2002:a63:1863:: with SMTP id 35mr3131307pgy.413.1600775385014;
+ Tue, 22 Sep 2020 04:49:45 -0700 (PDT)
+Received: from guoguo-omen.lan ([156.96.148.94])
+ by smtp.gmail.com with ESMTPSA id r4sm2223750pjf.4.2020.09.22.04.49.42
+ (version=TLS1_3 cipher=TLS_AES_256_GCM_SHA384 bits=256/256);
+ Tue, 22 Sep 2020 04:49:44 -0700 (PDT)
+From: Chuanhong Guo <gch981213@gmail.com>
+To: linux-spi@vger.kernel.org
+Subject: [PATCH v2] spi: spi-mtk-nor: fix timeout calculation overflow
+Date: Tue, 22 Sep 2020 19:49:02 +0800
+Message-Id: <20200922114905.2942859-1-gch981213@gmail.com>
+X-Mailer: git-send-email 2.26.2
+MIME-Version: 1.0
+X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3
+X-CRM114-CacheID: sfid-20200922_074948_345420_69207EBE
+X-CRM114-Status: GOOD ( 12.60 )
+X-Spam-Score: 2.6 (++)
+X-Spam-Report: SpamAssassin version 3.4.4 on merlin.infradead.org summary:
+ Content analysis details: (2.6 points)
+ pts rule name description
+ ---- ----------------------
+ --------------------------------------------------
+ 2.6 RCVD_IN_SBL RBL: Received via a relay in Spamhaus SBL
+ [156.96.148.94 listed in zen.spamhaus.org]
+ -0.0 RCVD_IN_DNSWL_NONE RBL: Sender listed at https://www.dnswl.org/,
+ no trust [2607:f8b0:4864:20:0:0:0:543 listed in]
+ [list.dnswl.org]
+ 0.0 FREEMAIL_FROM Sender email is commonly abused enduser mail
+ provider [gch981213[at]gmail.com]
+ 0.2 FREEMAIL_ENVFROM_END_DIGIT Envelope-from freemail username ends
+ in digit [gch981213[at]gmail.com]
+ -0.0 SPF_PASS SPF: sender matches SPF record
+ 0.0 SPF_HELO_NONE SPF: HELO does not publish an SPF Record
+ -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature
+ -0.1 DKIM_VALID_AU Message has a valid DKIM or DK signature from
+ author's domain
+ -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from
+ envelope-from domain
+ 0.1 DKIM_SIGNED Message has a DKIM or DK signature,
+ not necessarily
+ valid
+X-BeenThere: linux-arm-kernel@lists.infradead.org
+X-Mailman-Version: 2.1.29
+Precedence: list
+List-Id: <linux-arm-kernel.lists.infradead.org>
+List-Unsubscribe:
+ <http://lists.infradead.org/mailman/options/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.infradead.org?subject=unsubscribe>
+List-Archive: <http://lists.infradead.org/pipermail/linux-arm-kernel/>
+List-Post: <mailto:linux-arm-kernel@lists.infradead.org>
+List-Help: <mailto:linux-arm-kernel-request@lists.infradead.org?subject=help>
+List-Subscribe:
+ <http://lists.infradead.org/mailman/listinfo/linux-arm-kernel>,
+ <mailto:linux-arm-kernel-request@lists.infradead.org?subject=subscribe>
+Cc: linux-kernel@vger.kernel.org, stable@vger.kernel.org,
+ Mark Brown <broonie@kernel.org>, linux-mediatek@lists.infradead.org,
+ bayi.cheng@mediatek.com, Matthias Brugger <matthias.bgg@gmail.com>,
+ Chuanhong Guo <gch981213@gmail.com>, linux-arm-kernel@lists.infradead.org
+Sender: "linux-arm-kernel" <linux-arm-kernel-bounces@lists.infradead.org>
+Errors-To:
+ linux-arm-kernel-bounces+patchwork-linux-arm=patchwork.kernel.org@lists.infradead.org
+
+CLK_TO_US macro is used to calculate potential transfer time for various
+timeout handling. However it overflows on transfer bigger than 512 bytes
+because it first did (len * 8 * 1000000).
+This controller typically operates at 45MHz. This patch did 2 things:
+1. calculate clock / 1000000 first
+2. add a 4M transfer size cap so that the final timeout in DMA reading
+ doesn't overflow
+
+Fixes: 881d1ee9fe81f ("spi: add support for mediatek spi-nor controller")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Chuanhong Guo <gch981213@gmail.com>
+---
+
+Change since v1: fix transfer size cap to 4M
+
+ drivers/spi/spi-mtk-nor.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
+index 6e6ca2b8e6c82..62f5ff2779884 100644
+--- a/drivers/spi/spi-mtk-nor.c
++++ b/drivers/spi/spi-mtk-nor.c
+@@ -89,7 +89,7 @@
+ // Buffered page program can do one 128-byte transfer
+ #define MTK_NOR_PP_SIZE 128
+
+-#define CLK_TO_US(sp, clkcnt) ((clkcnt) * 1000000 / sp->spi_freq)
++#define CLK_TO_US(sp, clkcnt) DIV_ROUND_UP(clkcnt, sp->spi_freq / 1000000)
+
+ struct mtk_nor {
+ struct spi_controller *ctlr;
+@@ -177,6 +177,10 @@ static int mtk_nor_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+ if ((op->addr.nbytes == 3) || (op->addr.nbytes == 4)) {
+ if ((op->data.dir == SPI_MEM_DATA_IN) &&
+ mtk_nor_match_read(op)) {
++ // limit size to prevent timeout calculation overflow
++ if (op->data.nbytes > 0x400000)
++ op->data.nbytes = 0x400000;
++
+ if ((op->addr.val & MTK_NOR_DMA_ALIGN_MASK) ||
+ (op->data.nbytes < MTK_NOR_DMA_ALIGN))
+ op->data.nbytes = 1;
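
The fix is easiest to see with concrete numbers: at the 45 MHz clock mentioned in the commit log, a read well past 512 bytes makes clkcnt * 1000000 wrap in 32-bit arithmetic, while dividing the frequency first keeps the intermediate value small. A standalone sketch follows; the 1 KiB transfer size is only an example.

/* Demonstration of the 32-bit wrap fixed above (example values:
 * 45 MHz clock, 1 KiB DMA read => clkcnt = 8192 bit times). */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    uint32_t spi_freq = 45000000;    /* typical controller clock (from log) */
    uint32_t clkcnt   = 1024 * 8;    /* bits in a 1 KiB transfer */

    /* old macro: clkcnt * 1000000 wraps past UINT32_MAX */
    uint32_t old_us = clkcnt * 1000000u / spi_freq;
    /* fixed macro: divide the frequency first, round up */
    uint32_t new_us = DIV_ROUND_UP(clkcnt, spi_freq / 1000000u);

    printf("old: %u us (wrapped), fixed: %u us\n", old_us, new_us);
    return 0;
}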
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0667-spi-mediatek-fix-timeout-for-large-data.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0667-spi-mediatek-fix-timeout-for-large-data.patch
new file mode 100644
index 0000000..a04f5d6
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0667-spi-mediatek-fix-timeout-for-large-data.patch
@@ -0,0 +1,34 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -720,6 +720,23 @@ static irqreturn_t mtk_spi_interrupt(int
+ return IRQ_HANDLED;
+ }
+
++static int mtk_spi_mem_adjust_op_size(struct spi_mem *mem,
++ struct spi_mem_op *op)
++{
++ int opcode_len;
++
++ if(!op->data.nbytes)
++ return 0;
++
++ if (op->data.dir != SPI_MEM_NO_DATA) {
++ opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
++ if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE)
++ op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE -opcode_len;
++ }
++
++ return 0;
++}
++
+ static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+ {
+@@ -946,6 +963,7 @@ err_exit:
+ }
+
+ static const struct spi_controller_mem_ops mtk_spi_mem_ops = {
++ .adjust_op_size = mtk_spi_mem_adjust_op_size,
+ .supports_op = mtk_spi_mem_supports_op,
+ .exec_op = mtk_spi_mem_exec_op,
+ };
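
The adjust_op_size hook above reserves room in the IPM packet for the opcode, address and dummy bytes before capping the data length. Below is a minimal sketch of that arithmetic; the packet-size constant is assumed here for illustration and is not taken from the driver.

/* Sketch of the length cap in mtk_spi_mem_adjust_op_size() above; the
 * packet-size value below is assumed for illustration only. */
#include <stdio.h>

#define DEMO_IPM_PACKET_SIZE 0x10000u   /* assumed IPM packet limit */

static unsigned int cap_data_len(unsigned int addr_nbytes,
                                 unsigned int dummy_nbytes,
                                 unsigned int data_nbytes)
{
    unsigned int opcode_len = 1 + addr_nbytes + dummy_nbytes;

    if (opcode_len + data_nbytes > DEMO_IPM_PACKET_SIZE)
        data_nbytes = DEMO_IPM_PACKET_SIZE - opcode_len;

    return data_nbytes;
}

int main(void)
{
    /* 3 address bytes + 1 dummy byte leave 0x10000 - 5 bytes for data. */
    printf("capped to %u bytes\n", cap_data_len(3, 1, 0x20000));
    return 0;
}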
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0668-spi-mediatek-fix-dma-unmap-twice.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0668-spi-mediatek-fix-dma-unmap-twice.patch
new file mode 100644
index 0000000..31562bf
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0668-spi-mediatek-fix-dma-unmap-twice.patch
@@ -0,0 +1,16 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -946,12 +946,10 @@ static int mtk_spi_mem_exec_op(struct sp
+ reg_val &= ~SPI_CMD_RX_DMA;
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
++unmap_rx_dma:
+ if (op->data.dir == SPI_MEM_DATA_IN)
+ dma_unmap_single(mdata->dev, mdata->rx_dma,
+ op->data.nbytes, DMA_FROM_DEVICE);
+-unmap_rx_dma:
+- dma_unmap_single(mdata->dev, mdata->rx_dma,
+- op->data.nbytes, DMA_FROM_DEVICE);
+ unmap_tx_dma:
+ dma_unmap_single(mdata->dev, mdata->tx_dma,
+ tx_size, DMA_TO_DEVICE);
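
The fix above works because moving the unmap_rx_dma label ahead of the conditional lets the success path fall through into the same single unmap that the error path jumps to, instead of unmapping once explicitly and then again at the label. A schematic, non-kernel sketch of that goto/fall-through pattern:

/* Non-kernel sketch of the label placement the fix relies on: both the
 * failure path (goto) and the success path (fall-through) reach one unmap. */
#include <stdbool.h>
#include <stdio.h>

static int demo_transfer(bool fail_wait)
{
    int ret = 0;
    bool rx_mapped = true;      /* stands in for dma_map_single() */

    if (fail_wait) {
        ret = -1;
        goto unmap_rx;          /* error path jumps to the cleanup label */
    }
    /* success path simply falls through into the same label ... */

unmap_rx:
    if (rx_mapped) {
        printf("rx unmapped exactly once (ret=%d)\n", ret);
        rx_mapped = false;      /* ... so the buffer is unmapped only once */
    }
    return ret;
}

int main(void)
{
    demo_transfer(false);
    demo_transfer(true);
    return 0;
}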
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0669-fix-SPIM-NAND-and-NOR-probing.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0669-fix-SPIM-NAND-and-NOR-probing.patch
new file mode 100644
index 0000000..582771b
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0669-fix-SPIM-NAND-and-NOR-probing.patch
@@ -0,0 +1,33 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -1073,7 +1073,7 @@ static int mtk_spi_probe(struct platform
+ goto err_put_master;
+ }
+
+-/*
++
+ mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
+ if (IS_ERR(mdata->parent_clk)) {
+ ret = PTR_ERR(mdata->parent_clk);
+@@ -1101,17 +1101,17 @@ static int mtk_spi_probe(struct platform
+ goto err_put_master;
+ }
+
+- ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
++ /*ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ goto err_put_master;
+ }
+
+- clk_disable_unprepare(mdata->spi_clk);
++ clk_disable_unprepare(mdata->sel_clk);*/
++
++ //pm_runtime_enable(&pdev->dev);
+
+- pm_runtime_enable(&pdev->dev);
+-*/
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0670-fix-SPIM-dma-buffer-not-aligned.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0670-fix-SPIM-dma-buffer-not-aligned.patch
new file mode 100644
index 0000000..d4534e7
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0670-fix-SPIM-dma-buffer-not-aligned.patch
@@ -0,0 +1,81 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -184,7 +184,7 @@ static const struct mtk_spi_compatible m
+ */
+ static const struct mtk_chip_config mtk_default_chip_info = {
+ .sample_sel = 0,
+- .get_tick_dly = 0,
++ .get_tick_dly = 1,
+ };
+
+ static const struct of_device_id mtk_spi_of_match[] = {
+@@ -730,8 +730,11 @@ static int mtk_spi_mem_adjust_op_size(st
+
+ if (op->data.dir != SPI_MEM_NO_DATA) {
+ opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+- if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE)
++ if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+ op->data.nbytes = MTK_SPI_IPM_PACKET_SIZE -opcode_len;
++ /* force data buffer dma-aligned. */
++ op->data.nbytes -= op->data.nbytes % 4;
++ }
+ }
+
+ return 0;
+@@ -758,10 +761,6 @@ static bool mtk_spi_mem_supports_op(stru
+ return false;
+ }
+
+- if (op->data.dir == SPI_MEM_DATA_IN &&
+- !IS_ALIGNED((size_t)op->data.buf.in, 4))
+- return false;
+-
+ return true;
+ }
+
+@@ -820,6 +819,7 @@ static int mtk_spi_mem_exec_op(struct sp
+ struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+ u32 reg_val, nio = 1, tx_size;
+ char *tx_tmp_buf;
++ char *rx_tmp_buf;
+ int ret = 0;
+
+ mdata->use_spimem = true;
+@@ -914,10 +914,18 @@ static int mtk_spi_mem_exec_op(struct sp
+ }
+
+ if (op->data.dir == SPI_MEM_DATA_IN) {
++ if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
++ rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL | GFP_DMA);
++ if (!rx_tmp_buf)
++ return -ENOMEM;
++ }
++ else
++ rx_tmp_buf = op->data.buf.in;
++
+ mdata->rx_dma = dma_map_single(mdata->dev,
+- op->data.buf.in,
+- op->data.nbytes,
+- DMA_FROM_DEVICE);
++ rx_tmp_buf,
++ op->data.nbytes,
++ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
+ ret = -ENOMEM;
+ goto unmap_tx_dma;
+@@ -947,9 +955,14 @@ static int mtk_spi_mem_exec_op(struct sp
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+
+ unmap_rx_dma:
+- if (op->data.dir == SPI_MEM_DATA_IN)
++ if (op->data.dir == SPI_MEM_DATA_IN) {
++ if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
++ memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
++ kfree(rx_tmp_buf);
++ }
+ dma_unmap_single(mdata->dev, mdata->rx_dma,
+ op->data.nbytes, DMA_FROM_DEVICE);
++ }
+ unmap_tx_dma:
+ dma_unmap_single(mdata->dev, mdata->tx_dma,
+ tx_size, DMA_TO_DEVICE);
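
The alignment fix above replaces the earlier "reject unaligned RX buffers" check with a bounce buffer: DMA lands in a temporary aligned allocation and is copied back to the caller's buffer afterwards. A userspace sketch of that pattern, with malloc standing in for kzalloc(GFP_KERNEL | GFP_DMA) and memset for the DMA read:

/* Userspace sketch of the bounce-buffer pattern introduced above. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_IS_ALIGNED(p, a)  (((uintptr_t)(p) & ((a) - 1)) == 0)

static int demo_dma_read(uint8_t *dst, size_t len)
{
    uint8_t *bounce = dst;

    if (!DEMO_IS_ALIGNED(dst, 4)) {
        bounce = malloc(len);           /* temporary DMA-friendly buffer */
        if (!bounce)
            return -1;
    }

    memset(bounce, 0xa5, len);          /* pretend DMA filled the buffer */

    if (bounce != dst) {
        memcpy(dst, bounce, len);       /* copy back to the caller's buffer */
        free(bounce);
    }
    return 0;
}

int main(void)
{
    _Alignas(4) uint8_t raw[16];
    uint8_t *unaligned = raw + 1;       /* deliberately misaligned pointer */

    if (demo_dma_read(unaligned, 8) == 0)
        printf("first byte: 0x%02x\n", unaligned[0]);
    return 0;
}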
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0671-add-micron-MT29F4G01ABAFD-spi-nand-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0671-add-micron-MT29F4G01ABAFD-spi-nand-support.patch
new file mode 100644
index 0000000..40833c0
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0671-add-micron-MT29F4G01ABAFD-spi-nand-support.patch
@@ -0,0 +1,111 @@
+--- a/drivers/mtd/nand/spi/micron.c
++++ b/drivers/mtd/nand/spi/micron.c
+@@ -18,7 +18,9 @@
+ #define MICRON_STATUS_ECC_4TO6_BITFLIPS (3 << 4)
+ #define MICRON_STATUS_ECC_7TO8_BITFLIPS (5 << 4)
+
+-static SPINAND_OP_VARIANTS(read_cache_variants,
++#define MICRON_CFG_CR BIT(0)
++
++static SPINAND_OP_VARIANTS(quadio_read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+@@ -26,46 +28,46 @@ static SPINAND_OP_VARIANTS(read_cache_va
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+-static SPINAND_OP_VARIANTS(write_cache_variants,
++static SPINAND_OP_VARIANTS(x4_write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+-static SPINAND_OP_VARIANTS(update_cache_variants,
++static SPINAND_OP_VARIANTS(x4_update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+-static int mt29f2g01abagd_ooblayout_ecc(struct mtd_info *mtd, int section,
+- struct mtd_oob_region *region)
++static int micron_8_ooblayout_ecc(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *region)
+ {
+ if (section)
+ return -ERANGE;
+
+- region->offset = 64;
+- region->length = 64;
++ region->offset = mtd->oobsize / 2;
++ region->length = mtd->oobsize / 2;
+
+ return 0;
+ }
+
+-static int mt29f2g01abagd_ooblayout_free(struct mtd_info *mtd, int section,
+- struct mtd_oob_region *region)
++static int micron_8_ooblayout_free(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *region)
+ {
+ if (section)
+ return -ERANGE;
+
+ /* Reserve 2 bytes for the BBM. */
+ region->offset = 2;
+- region->length = 62;
++ region->length = (mtd->oobsize / 2) - 2;
+
+ return 0;
+ }
+
+-static const struct mtd_ooblayout_ops mt29f2g01abagd_ooblayout = {
+- .ecc = mt29f2g01abagd_ooblayout_ecc,
+- .free = mt29f2g01abagd_ooblayout_free,
++static const struct mtd_ooblayout_ops micron_8_ooblayout = {
++ .ecc = micron_8_ooblayout_ecc,
++ .free = micron_8_ooblayout_free,
+ };
+
+-static int mt29f2g01abagd_ecc_get_status(struct spinand_device *spinand,
+- u8 status)
++static int micron_8_ecc_get_status(struct spinand_device *spinand,
++ u8 status)
+ {
+ switch (status & MICRON_STATUS_ECC_MASK) {
+ case STATUS_ECC_NO_BITFLIPS:
+@@ -94,12 +96,21 @@ static const struct spinand_info micron_
+ SPINAND_INFO("MT29F2G01ABAGD", 0x24,
+ NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 2, 1, 1),
+ NAND_ECCREQ(8, 512),
+- SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+- &write_cache_variants,
+- &update_cache_variants),
++ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
++ &x4_write_cache_variants,
++ &x4_update_cache_variants),
+ 0,
+- SPINAND_ECCINFO(&mt29f2g01abagd_ooblayout,
+- mt29f2g01abagd_ecc_get_status)),
++ SPINAND_ECCINFO(&micron_8_ooblayout,
++ micron_8_ecc_get_status)),
++ SPINAND_INFO("MT29F4G01ABAFD", 0x34,
++ NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
++ NAND_ECCREQ(8, 512),
++ SPINAND_INFO_OP_VARIANTS(&quadio_read_cache_variants,
++ &x4_write_cache_variants,
++ &x4_update_cache_variants),
++ SPINAND_HAS_CR_FEAT_BIT,
++ SPINAND_ECCINFO(&micron_8_ooblayout,
++ micron_8_ecc_get_status)),
+ };
+
+ static int micron_spinand_detect(struct spinand_device *spinand)
+--- a/include/linux/mtd/spinand.h
++++ b/include/linux/mtd/spinand.h
+@@ -270,6 +270,7 @@ struct spinand_ecc_info {
+ };
+
+ #define SPINAND_HAS_QE_BIT BIT(0)
++#define SPINAND_HAS_CR_FEAT_BIT BIT(1)
+
+ /**
+ * struct spinand_info - Structure used to describe SPI NAND chips
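
The renamed micron_8_* OOB callbacks above scale with the OOB size: ECC bytes take the upper half, and the free area is the lower half minus the two bad-block-marker bytes. A small sketch that prints the resulting regions for the 128-byte and 256-byte parts covered by this patch:

/* Sketch of the shared micron_8_* OOB layout above: ECC in the upper half,
 * free area in the lower half minus the two bad-block-marker bytes. */
#include <stdio.h>

static void micron_8_layout(unsigned int oobsize)
{
    unsigned int ecc_off  = oobsize / 2, ecc_len  = oobsize / 2;
    unsigned int free_off = 2,           free_len = oobsize / 2 - 2;

    printf("oob %3u: ecc %3u+%-3u free %u+%u\n",
           oobsize, ecc_off, ecc_len, free_off, free_len);
}

int main(void)
{
    micron_8_layout(128);   /* MT29F2G01ABAGD: ecc 64+64,   free 2+62  */
    micron_8_layout(256);   /* MT29F4G01ABAFD: ecc 128+128, free 2+126 */
    return 0;
}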
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0672-add-F50L1G41LB-and-GD5F1GQ5UExxG-snand-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0672-add-F50L1G41LB-and-GD5F1GQ5UExxG-snand-support.patch
new file mode 100644
index 0000000..63eee5d
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0672-add-F50L1G41LB-and-GD5F1GQ5UExxG-snand-support.patch
@@ -0,0 +1,75 @@
+Index: linux-5.4.158/drivers/mtd/nand/spi/gigadevice.c
+===================================================================
+--- linux-5.4.158.orig/drivers/mtd/nand/spi/gigadevice.c
++++ linux-5.4.158/drivers/mtd/nand/spi/gigadevice.c
+@@ -36,6 +36,15 @@ static SPINAND_OP_VARIANTS(read_cache_va
+ SPINAND_PAGE_READ_FROM_CACHE_OP_3A(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP_3A(false, 0, 0, NULL, 0));
+
++/* Q5 devices, QUADIO: Dummy bytes only valid for 1 GBit variants */
++static SPINAND_OP_VARIANTS(gd5f1gq5_read_cache_variants,
++ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
++ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
++
+ static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+@@ -223,7 +232,54 @@ static int gd5fxgq4ufxxg_ecc_get_status(
+ return -EINVAL;
+ }
+
++static int esmt_1_ooblayout_ecc(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *region)
++{
++ if (section > 3)
++ return -ERANGE;
++
++ region->offset = (16 * section) + 8;
++ region->length = 8;
++
++ return 0;
++}
++
++static int esmt_1_ooblayout_free(struct mtd_info *mtd, int section,
++ struct mtd_oob_region *region)
++{
++ if (section > 3)
++ return -ERANGE;
++
++ region->offset = (16 * section) + 2;
++ region->length = 6;
++
++ return 0;
++}
++
++static const struct mtd_ooblayout_ops esmt_1_ooblayout = {
++ .ecc = esmt_1_ooblayout_ecc,
++ .free = esmt_1_ooblayout_free,
++};
++
+ static const struct spinand_info gigadevice_spinand_table[] = {
++ SPINAND_INFO("F50L1G41LB", 0x01,
++ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
++ NAND_ECCREQ(8, 512),
++ SPINAND_INFO_OP_VARIANTS(&gd5f1gq5_read_cache_variants,
++ &write_cache_variants,
++ &update_cache_variants),
++ 0,
++ SPINAND_ECCINFO(&esmt_1_ooblayout,
++ NULL)),
++ SPINAND_INFO("GD5F1GQ5UExxG", 0x51,
++ NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
++ NAND_ECCREQ(4, 512),
++ SPINAND_INFO_OP_VARIANTS(&gd5f1gq5_read_cache_variants,
++ &write_cache_variants,
++ &update_cache_variants),
++ SPINAND_HAS_QE_BIT,
++ SPINAND_ECCINFO(&gd5fxgq4xa_ooblayout,
++ gd5fxgq4xa_ecc_get_status)),
+ SPINAND_INFO("GD5F1GQ4xA", 0xF1,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
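
The esmt_1 layout added above slices the F50L1G41LB's 64-byte OOB area into four 16-byte sections, using bytes 8-15 of each section for ECC and bytes 2-7 as free space. A short sketch that prints those regions:

/* Sketch of the esmt_1 OOB layout above: four 16-byte sections, ECC in
 * bytes 8-15 and free space in bytes 2-7 of each section. */
#include <stdio.h>

int main(void)
{
    int section;

    for (section = 0; section < 4; section++) {
        int ecc_off  = 16 * section + 8;
        int free_off = 16 * section + 2;

        printf("section %d: ecc %2d+8, free %2d+6\n",
               section, ecc_off, free_off);
    }
    return 0;
}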
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0701-fix-mtk-nfi-driver-dependency.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0701-fix-mtk-nfi-driver-dependency.patch
new file mode 100644
index 0000000..3023076
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0701-fix-mtk-nfi-driver-dependency.patch
@@ -0,0 +1,10 @@
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -429,6 +429,7 @@ config SPI_MT65XX
+
+ config SPI_MTK_SNFI
+ tristate "MediaTek SPI NAND interface"
++ depends on MTD
+ select MTD_SPI_NAND
+ help
+ This selects the SPI NAND FLASH interface(SNFI),
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0801-mtk-sd-add-mt7986-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0801-mtk-sd-add-mt7986-support.patch
new file mode 100644
index 0000000..6b76993
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0801-mtk-sd-add-mt7986-support.patch
@@ -0,0 +1,29 @@
+--- a/drivers/mmc/host/mtk-sd.c
++++ b/drivers/mmc/host/mtk-sd.c
+@@ -508,6 +508,18 @@ static const struct mtk_mmc_compatible m
+ .support_64g = false,
+ };
+
++static const struct mtk_mmc_compatible mt7986_compat = {
++ .clk_div_bits = 12,
++ .hs400_tune = false,
++ .pad_tune_reg = MSDC_PAD_TUNE0,
++ .async_fifo = true,
++ .data_tune = true,
++ .busy_check = true,
++ .stop_clk_fix = true,
++ .enhance_rx = true,
++ .support_64g = true,
++};
++
+ static const struct mtk_mmc_compatible mt8516_compat = {
+ .clk_div_bits = 12,
+ .hs400_tune = false,
+@@ -537,6 +549,7 @@ static const struct of_device_id msdc_of
+ { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
+ { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
+ { .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
++ { .compatible = "mediatek,mt7986-mmc", .data = &mt7986_compat},
+ { .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
+ { .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
+ {}
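
The MMC patch above only adds a compatible entry and a capability struct; at probe time the driver looks that struct up by compatible string. A simplified sketch of such a lookup with just two of the fields; the demo_* names and the field subset are illustrative, not the driver's structures.

/* Simplified sketch of the compatible-string lookup extended above. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct demo_compat {
    const char *compatible;
    int clk_div_bits;
    bool support_64g;
};

static const struct demo_compat demo_ids[] = {
    { "mediatek,mt7622-mmc", 12, false },
    { "mediatek,mt7986-mmc", 12, true  },
};

static const struct demo_compat *demo_match(const char *compat)
{
    size_t i;

    for (i = 0; i < sizeof(demo_ids) / sizeof(demo_ids[0]); i++)
        if (strcmp(demo_ids[i].compatible, compat) == 0)
            return &demo_ids[i];
    return NULL;
}

int main(void)
{
    const struct demo_compat *c = demo_match("mediatek,mt7986-mmc");

    if (c)
        printf("%s: clk_div_bits=%d support_64g=%d\n",
               c->compatible, c->clk_div_bits, c->support_64g);
    return 0;
}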
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0900-bt-mtk-serial-fix.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0900-bt-mtk-serial-fix.patch
new file mode 100644
index 0000000..2b3a4ae
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0900-bt-mtk-serial-fix.patch
@@ -0,0 +1,33 @@
+--- a/drivers/tty/serial/8250/8250.h
++++ b/drivers/tty/serial/8250/8250.h
+@@ -82,6 +82,7 @@ struct serial8250_config {
+ #define UART_CAP_MINI (1 << 17) /* Mini UART on BCM283X family lacks:
+ * STOP PARITY EPAR SPAR WLEN5 WLEN6
+ */
++#define UART_CAP_NMOD (1 << 18) /* UART doesn't do termios */
+
+ #define UART_BUG_QUOT (1 << 0) /* UART has buggy quot LSB */
+ #define UART_BUG_TXEN (1 << 1) /* UART has buggy TX IIR status */
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -291,7 +291,7 @@ static const struct serial8250_config ua
+ .tx_loadsz = 16,
+ .fcr = UART_FCR_ENABLE_FIFO |
+ UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+- .flags = UART_CAP_FIFO,
++ .flags = UART_CAP_FIFO | UART_CAP_NMOD,
+ },
+ [PORT_NPCM] = {
+ .name = "Nuvoton 16550",
+@@ -2598,6 +2598,11 @@ serial8250_do_set_termios(struct uart_po
+ unsigned long flags;
+ unsigned int baud, quot, frac = 0;
+
++ if (up->capabilities & UART_CAP_NMOD) {
++ termios->c_cflag = 0;
++ return;
++ }
++
+ if (up->capabilities & UART_CAP_MINI) {
+ termios->c_cflag &= ~(CSTOPB | PARENB | PARODD | CMSPAR);
+ if ((termios->c_cflag & CSIZE) == CS5 ||
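
The serial patch above defines a new capability bit, UART_CAP_NMOD, and bails out of set_termios early when it is set so the line settings are never reprogrammed. A tiny sketch of that flag test; register programming is reduced to a printf and the DEMO_* constants are illustrative.

/* Sketch of the capability bit added above: NMOD ports skip termios. */
#include <stdio.h>

#define DEMO_CAP_FIFO  (1u << 8)
#define DEMO_CAP_NMOD  (1u << 18)   /* "UART doesn't do termios" */

static void demo_set_termios(unsigned int capabilities)
{
    if (capabilities & DEMO_CAP_NMOD) {
        printf("NMOD port: leaving line settings untouched\n");
        return;
    }
    printf("programming baud rate, parity and word length\n");
}

int main(void)
{
    demo_set_termios(DEMO_CAP_FIFO);
    demo_set_termios(DEMO_CAP_FIFO | DEMO_CAP_NMOD);
    return 0;
}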
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0900-i2c-busses-add-mt7986-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0900-i2c-busses-add-mt7986-support.patch
new file mode 100644
index 0000000..a375842
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0900-i2c-busses-add-mt7986-support.patch
@@ -0,0 +1,32 @@
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index e1ef012..4fd4721 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -289,6 +289,19 @@ static const struct mtk_i2c_compatible mt7622_compat = {
+ .ltiming_adjust = 0,
+ };
+
++static const struct mtk_i2c_compatible mt7986_compat = {
++ .quirks = &mt7622_i2c_quirks,
++ .regs = mt_i2c_regs_v1,
++ .pmic_i2c = 0,
++ .dcm = 1,
++ .auto_restart = 1,
++ .aux_len_reg = 1,
++ .support_33bits = 0,
++ .timing_adjust = 0,
++ .dma_sync = 1,
++ .ltiming_adjust = 0,
++};
++
+ static const struct mtk_i2c_compatible mt8173_compat = {
+ .regs = mt_i2c_regs_v1,
+ .pmic_i2c = 0,
+@@ -319,6 +332,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
+ { .compatible = "mediatek,mt6577-i2c", .data = &mt6577_compat },
+ { .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat },
+ { .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat },
++ { .compatible = "mediatek,mt7986-i2c", .data = &mt7986_compat },
+ { .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
+ { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
+ {}
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0901-i2c-busses-add-mt7981-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0901-i2c-busses-add-mt7981-support.patch
new file mode 100644
index 0000000..f79d2f8
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0901-i2c-busses-add-mt7981-support.patch
@@ -0,0 +1,43 @@
+diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
+index e1ef012..4fd4721 100644
+--- a/drivers/i2c/busses/i2c-mt65xx.c
++++ b/drivers/i2c/busses/i2c-mt65xx.c
+@@ -157,7 +157,7 @@ static const u16 mt_i2c_regs_v1[] = {
+
+ static const u16 mt_i2c_regs_v2[] = {
+ [OFFSET_DATA_PORT] = 0x0,
+- [OFFSET_SLAVE_ADDR] = 0x4,
++ [OFFSET_SLAVE_ADDR] = 0x94,
+ [OFFSET_INTR_MASK] = 0x8,
+ [OFFSET_INTR_STAT] = 0xc,
+ [OFFSET_CONTROL] = 0x10,
+@@ -289,6 +289,18 @@ static const struct mtk_i2c_compatible mt7622_compat = {
+ .ltiming_adjust = 0,
+ };
+
++static const struct mtk_i2c_compatible mt7981_compat = {
++ .regs = mt_i2c_regs_v2,
++ .pmic_i2c = 0,
++ .dcm = 0,
++ .auto_restart = 1,
++ .aux_len_reg = 1,
++ .support_33bits = 1,
++ .timing_adjust = 1,
++ .dma_sync = 1,
++ .ltiming_adjust = 1,
++};
++
+ static const struct mtk_i2c_compatible mt7986_compat = {
+ .quirks = &mt7622_i2c_quirks,
+ .regs = mt_i2c_regs_v1,
+@@ -332,6 +344,7 @@ static const struct of_device_id mtk_i2c_of_match[] = {
+ { .compatible = "mediatek,mt6577-i2c", .data = &mt6577_compat },
+ { .compatible = "mediatek,mt6589-i2c", .data = &mt6589_compat },
+ { .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat },
++ { .compatible = "mediatek,mt7981-i2c", .data = &mt7981_compat },
+ { .compatible = "mediatek,mt7986-i2c", .data = &mt7986_compat },
+ { .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat },
+ { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat },
+--
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0930-pwm-add-mt7986-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0930-pwm-add-mt7986-support.patch
new file mode 100644
index 0000000..a791d3a
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0930-pwm-add-mt7986-support.patch
@@ -0,0 +1,24 @@
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index b94e0d0..35a0db2 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -302,6 +302,11 @@ static const struct pwm_mediatek_of_data mt7629_pwm_data = {
+ .pwm45_fixup = false,
+ };
+
++static const struct pwm_mediatek_of_data mt7986_pwm_data = {
++ .num_pwms = 2,
++ .pwm45_fixup = false,
++};
++
+ static const struct pwm_mediatek_of_data mt8516_pwm_data = {
+ .num_pwms = 5,
+ .pwm45_fixup = false,
+@@ -313,6 +318,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
+ { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+ { .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
+ { .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
++ { .compatible = "mediatek,mt7986-pwm", .data = &mt7986_pwm_data },
+ { .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
+ { },
+ };
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0931-pwm-add-mt7981-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0931-pwm-add-mt7981-support.patch
new file mode 100644
index 0000000..b926383
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0931-pwm-add-mt7981-support.patch
@@ -0,0 +1,133 @@
+diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
+index 7c56ee2..3a5a456 100644
+--- a/drivers/pwm/pwm-mediatek.c
++++ b/drivers/pwm/pwm-mediatek.c
+@@ -33,10 +32,13 @@
+ #define PWM45THRES_FIXUP 0x34
+
+ #define PWM_CLK_DIV_MAX 7
++#define REG_V1 1
++#define REG_V2 2
+
+ struct pwm_mediatek_of_data {
+ unsigned int num_pwms;
+ bool pwm45_fixup;
++ int reg_ver;
+ };
+
+ /**
+@@ -57,10 +59,14 @@ struct pwm_mediatek_chip {
+ const struct pwm_mediatek_of_data *soc;
+ };
+
+-static const unsigned int pwm_mediatek_reg_offset[] = {
++static const unsigned int mtk_pwm_reg_offset_v1[] = {
+ 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
+ };
+
++static const unsigned int mtk_pwm_reg_offset_v2[] = {
++ 0x0080, 0x00c0, 0x0100, 0x0140, 0x0180, 0x1c0, 0x200, 0x0240
++};
++
+ static inline struct pwm_mediatek_chip *
+ to_pwm_mediatek_chip(struct pwm_chip *chip)
+ {
+@@ -108,14 +114,38 @@ static void pwm_mediatek_clk_disable(struct pwm_chip *chip,
+ static inline u32 pwm_mediatek_readl(struct pwm_mediatek_chip *chip,
+ unsigned int num, unsigned int offset)
+ {
+- return readl(chip->regs + pwm_mediatek_reg_offset[num] + offset);
++ u32 pwm_offset;
++
++ switch (chip->soc->reg_ver) {
++ case REG_V2:
++ pwm_offset = mtk_pwm_reg_offset_v2[num];
++ break;
++
++ case REG_V1:
++ default:
++ pwm_offset = mtk_pwm_reg_offset_v1[num];
++ }
++
++ return readl(chip->regs + pwm_offset + offset);
+ }
+
+ static inline void pwm_mediatek_writel(struct pwm_mediatek_chip *chip,
+ unsigned int num, unsigned int offset,
+ u32 value)
+ {
+- writel(value, chip->regs + pwm_mediatek_reg_offset[num] + offset);
++ u32 pwm_offset;
++
++ switch (chip->soc->reg_ver) {
++ case REG_V2:
++ pwm_offset = mtk_pwm_reg_offset_v2[num];
++ break;
++
++ case REG_V1:
++ default:
++ pwm_offset = mtk_pwm_reg_offset_v1[num];
++ }
++
++ writel(value, chip->regs + pwm_offset + offset);
+ }
+
+ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
+@@ -281,36 +311,49 @@ static int pwm_mediatek_remove(struct platform_device *pdev)
+ static const struct pwm_mediatek_of_data mt2712_pwm_data = {
+ .num_pwms = 8,
+ .pwm45_fixup = false,
++ .reg_ver = REG_V1,
+ };
+
+ static const struct pwm_mediatek_of_data mt7622_pwm_data = {
+ .num_pwms = 6,
+ .pwm45_fixup = false,
++ .reg_ver = REG_V1,
+ };
+
+ static const struct pwm_mediatek_of_data mt7623_pwm_data = {
+ .num_pwms = 5,
+ .pwm45_fixup = true,
++ .reg_ver = REG_V1,
+ };
+
+ static const struct pwm_mediatek_of_data mt7628_pwm_data = {
+ .num_pwms = 4,
+ .pwm45_fixup = true,
++ .reg_ver = REG_V1,
+ };
+
+ static const struct pwm_mediatek_of_data mt7629_pwm_data = {
+ .num_pwms = 1,
+ .pwm45_fixup = false,
++ .reg_ver = REG_V1,
++};
++
++static const struct pwm_mediatek_of_data mt7981_pwm_data = {
++ .num_pwms = 3,
++ .pwm45_fixup = false,
++ .reg_ver = REG_V2,
+ };
+
+ static const struct pwm_mediatek_of_data mt7986_pwm_data = {
+ .num_pwms = 2,
+ .pwm45_fixup = false,
++ .reg_ver = REG_V2,
+ };
+
+ static const struct pwm_mediatek_of_data mt8516_pwm_data = {
+ .num_pwms = 5,
+ .pwm45_fixup = false,
++ .reg_ver = REG_V1,
+ };
+
+ static const struct of_device_id pwm_mediatek_of_match[] = {
+@@ -319,6 +362,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
+ { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
+ { .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
+ { .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
++ { .compatible = "mediatek,mt7981-pwm", .data = &mt7981_pwm_data },
+ { .compatible = "mediatek,mt7986-pwm", .data = &mt7986_pwm_data },
+ { .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
+ { },
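
The PWM patch above introduces a reg_ver field so one driver can serve both register layouts; the read/write helpers select the v1 or v2 offset table from it. A standalone sketch using the offsets from the patch (v2 entries zero-padded here for readability):

/* Sketch of the reg_ver-based offset selection added above. */
#include <stdio.h>

enum { REG_V1 = 1, REG_V2 = 2 };

static const unsigned int offset_v1[] = {
    0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220
};
static const unsigned int offset_v2[] = {
    0x0080, 0x00c0, 0x0100, 0x0140, 0x0180, 0x01c0, 0x0200, 0x0240
};

static unsigned int pwm_base(int reg_ver, unsigned int num)
{
    switch (reg_ver) {
    case REG_V2:
        return offset_v2[num];
    case REG_V1:
    default:
        return offset_v1[num];
    }
}

int main(void)
{
    /* MT7622/MT7629 keep the v1 map; MT7981/MT7986 use the v2 map. */
    printf("pwm0 base: v1=0x%04x v2=0x%04x\n",
           pwm_base(REG_V1, 0), pwm_base(REG_V2, 0));
    return 0;
}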
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0960-watchdog-add-mt7986-assert.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0960-watchdog-add-mt7986-assert.patch
new file mode 100644
index 0000000..619fc10
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0960-watchdog-add-mt7986-assert.patch
@@ -0,0 +1,328 @@
+diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
+index 9c3d003..30127d1 100644
+--- a/drivers/watchdog/mtk_wdt.c
++++ b/drivers/watchdog/mtk_wdt.c
+@@ -9,6 +9,8 @@
+ * Based on sunxi_wdt.c
+ */
+
++#include <dt-bindings/reset/mt7986-resets.h>
++#include <linux/delay.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+ #include <linux/io.h>
+@@ -16,13 +18,15 @@
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
++#include <linux/reset-controller.h>
+ #include <linux/types.h>
+ #include <linux/watchdog.h>
+-#include <linux/delay.h>
++#include <linux/interrupt.h>
+
+ #define WDT_MAX_TIMEOUT 31
+-#define WDT_MIN_TIMEOUT 1
++#define WDT_MIN_TIMEOUT 2
+ #define WDT_LENGTH_TIMEOUT(n) ((n) << 5)
+
+ #define WDT_LENGTH 0x04
+@@ -44,6 +48,9 @@
+ #define WDT_SWRST 0x14
+ #define WDT_SWRST_KEY 0x1209
+
++#define WDT_SWSYSRST 0x18U
++#define WDT_SWSYS_RST_KEY 0x88000000
++
+ #define DRV_NAME "mtk-wdt"
+ #define DRV_VERSION "1.0"
+
+@@ -53,8 +60,91 @@ static unsigned int timeout;
+ struct mtk_wdt_dev {
+ struct watchdog_device wdt_dev;
+ void __iomem *wdt_base;
++ spinlock_t lock; /* protects WDT_SWSYSRST reg */
++ struct reset_controller_dev rcdev;
++ bool disable_wdt_extrst;
++};
++
++struct mtk_wdt_data {
++ int toprgu_sw_rst_num;
++};
++
++static const struct mtk_wdt_data mt7986_data = {
++ .toprgu_sw_rst_num = MT7986_TOPRGU_SW_RST_NUM,
++};
++
++static int toprgu_reset_update(struct reset_controller_dev *rcdev,
++ unsigned long id, bool assert)
++{
++ unsigned int tmp;
++ unsigned long flags;
++ struct mtk_wdt_dev *data =
++ container_of(rcdev, struct mtk_wdt_dev, rcdev);
++
++ spin_lock_irqsave(&data->lock, flags);
++
++ tmp = readl(data->wdt_base + WDT_SWSYSRST);
++ if (assert)
++ tmp |= BIT(id);
++ else
++ tmp &= ~BIT(id);
++ tmp |= WDT_SWSYS_RST_KEY;
++ writel(tmp, data->wdt_base + WDT_SWSYSRST);
++
++ spin_unlock_irqrestore(&data->lock, flags);
++
++ return 0;
++}
++
++static int toprgu_reset_assert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ return toprgu_reset_update(rcdev, id, true);
++}
++
++static int toprgu_reset_deassert(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ return toprgu_reset_update(rcdev, id, false);
++}
++
++static int toprgu_reset(struct reset_controller_dev *rcdev,
++ unsigned long id)
++{
++ int ret;
++
++ ret = toprgu_reset_assert(rcdev, id);
++ if (ret)
++ return ret;
++
++ return toprgu_reset_deassert(rcdev, id);
++}
++
++static const struct reset_control_ops toprgu_reset_ops = {
++ .assert = toprgu_reset_assert,
++ .deassert = toprgu_reset_deassert,
++ .reset = toprgu_reset,
+ };
+
++static int toprgu_register_reset_controller(struct platform_device *pdev,
++ int rst_num)
++{
++ int ret;
++ struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev);
++
++ spin_lock_init(&mtk_wdt->lock);
++
++ mtk_wdt->rcdev.owner = THIS_MODULE;
++ mtk_wdt->rcdev.nr_resets = rst_num;
++ mtk_wdt->rcdev.ops = &toprgu_reset_ops;
++ mtk_wdt->rcdev.of_node = pdev->dev.of_node;
++ ret = devm_reset_controller_register(&pdev->dev, &mtk_wdt->rcdev);
++ if (ret != 0)
++ dev_err(&pdev->dev,
++ "couldn't register wdt reset controller: %d\n", ret);
++ return ret;
++}
++
+ static int mtk_wdt_restart(struct watchdog_device *wdt_dev,
+ unsigned long action, void *data)
+ {
+@@ -89,12 +179,19 @@ static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ u32 reg;
+
+ wdt_dev->timeout = timeout;
++ /*
++ * In dual mode, irq will be triggered at timeout / 2
++ * the real timeout occurs at timeout
++ */
++ if (wdt_dev->pretimeout)
++ wdt_dev->pretimeout = timeout / 2;
+
+ /*
+ * One bit is the value of 512 ticks
+ * The clock has 32 KHz
+ */
+- reg = WDT_LENGTH_TIMEOUT(timeout << 6) | WDT_LENGTH_KEY;
++ reg = WDT_LENGTH_TIMEOUT((timeout - wdt_dev->pretimeout) << 6)
++ | WDT_LENGTH_KEY;
+ iowrite32(reg, wdt_base + WDT_LENGTH);
+
+ mtk_wdt_ping(wdt_dev);
+@@ -102,6 +199,19 @@ static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev,
+ return 0;
+ }
+
++static void mtk_wdt_init(struct watchdog_device *wdt_dev)
++{
++ struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
++ void __iomem *wdt_base;
++
++ wdt_base = mtk_wdt->wdt_base;
++
++ if (readl(wdt_base + WDT_MODE) & WDT_MODE_EN) {
++ set_bit(WDOG_HW_RUNNING, &wdt_dev->status);
++ mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
++ }
++}
++
+ static int mtk_wdt_stop(struct watchdog_device *wdt_dev)
+ {
+ struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
+@@ -128,13 +238,50 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
+ return ret;
+
+ reg = ioread32(wdt_base + WDT_MODE);
+- reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
++ if (wdt_dev->pretimeout)
++ reg |= (WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
++ else
++ reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
++ if (mtk_wdt->disable_wdt_extrst)
++ reg &= ~WDT_MODE_EXRST_EN;
+ reg |= (WDT_MODE_EN | WDT_MODE_KEY);
+ iowrite32(reg, wdt_base + WDT_MODE);
+
+ return 0;
+ }
+
++static int mtk_wdt_set_pretimeout(struct watchdog_device *wdd,
++ unsigned int timeout)
++{
++ struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdd);
++ void __iomem *wdt_base = mtk_wdt->wdt_base;
++ u32 reg = ioread32(wdt_base + WDT_MODE);
++
++ if (timeout && !wdd->pretimeout) {
++ wdd->pretimeout = wdd->timeout / 2;
++ reg |= (WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
++ } else if (!timeout && wdd->pretimeout) {
++ wdd->pretimeout = 0;
++ reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN);
++ } else {
++ return 0;
++ }
++
++ reg |= WDT_MODE_KEY;
++ iowrite32(reg, wdt_base + WDT_MODE);
++
++ return mtk_wdt_set_timeout(wdd, wdd->timeout);
++}
++
++static irqreturn_t mtk_wdt_isr(int irq, void *arg)
++{
++ struct watchdog_device *wdd = arg;
++
++ watchdog_notify_pretimeout(wdd);
++
++ return IRQ_HANDLED;
++}
++
+ static const struct watchdog_info mtk_wdt_info = {
+ .identity = DRV_NAME,
+ .options = WDIOF_SETTIMEOUT |
+@@ -142,12 +289,21 @@ static const struct watchdog_info mtk_wdt_info = {
+ WDIOF_MAGICCLOSE,
+ };
+
++static const struct watchdog_info mtk_wdt_pt_info = {
++ .identity = DRV_NAME,
++ .options = WDIOF_SETTIMEOUT |
++ WDIOF_PRETIMEOUT |
++ WDIOF_KEEPALIVEPING |
++ WDIOF_MAGICCLOSE,
++};
++
+ static const struct watchdog_ops mtk_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = mtk_wdt_start,
+ .stop = mtk_wdt_stop,
+ .ping = mtk_wdt_ping,
+ .set_timeout = mtk_wdt_set_timeout,
++ .set_pretimeout = mtk_wdt_set_pretimeout,
+ .restart = mtk_wdt_restart,
+ };
+
+@@ -155,7 +311,8 @@ static int mtk_wdt_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct mtk_wdt_dev *mtk_wdt;
+- int err;
++ const struct mtk_wdt_data *wdt_data;
++ int err, irq;
+
+ mtk_wdt = devm_kzalloc(dev, sizeof(*mtk_wdt), GFP_KERNEL);
+ if (!mtk_wdt)
+@@ -167,10 +324,25 @@ static int mtk_wdt_probe(struct platform_device *pdev)
+ if (IS_ERR(mtk_wdt->wdt_base))
+ return PTR_ERR(mtk_wdt->wdt_base);
+
+- mtk_wdt->wdt_dev.info = &mtk_wdt_info;
++ irq = platform_get_irq(pdev, 0);
++ if (irq > 0) {
++ err = devm_request_irq(&pdev->dev, irq, mtk_wdt_isr, 0, "wdt_bark",
++ &mtk_wdt->wdt_dev);
++ if (err)
++ return err;
++
++ mtk_wdt->wdt_dev.info = &mtk_wdt_pt_info;
++ mtk_wdt->wdt_dev.pretimeout = WDT_MAX_TIMEOUT / 2;
++ } else {
++ if (irq == -EPROBE_DEFER)
++ return -EPROBE_DEFER;
++
++ mtk_wdt->wdt_dev.info = &mtk_wdt_info;
++ }
++
+ mtk_wdt->wdt_dev.ops = &mtk_wdt_ops;
+ mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT;
+- mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT;
++ mtk_wdt->wdt_dev.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT * 1000;
+ mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT;
+ mtk_wdt->wdt_dev.parent = dev;
+
+@@ -180,7 +352,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
+
+ watchdog_set_drvdata(&mtk_wdt->wdt_dev, mtk_wdt);
+
+- mtk_wdt_stop(&mtk_wdt->wdt_dev);
++ mtk_wdt_init(&mtk_wdt->wdt_dev);
+
+ watchdog_stop_on_reboot(&mtk_wdt->wdt_dev);
+ err = devm_watchdog_register_device(dev, &mtk_wdt->wdt_dev);
+@@ -190,6 +362,17 @@ static int mtk_wdt_probe(struct platform_device *pdev)
+ dev_info(dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n",
+ mtk_wdt->wdt_dev.timeout, nowayout);
+
++ wdt_data = of_device_get_match_data(dev);
++ if (wdt_data) {
++ err = toprgu_register_reset_controller(pdev,
++ wdt_data->toprgu_sw_rst_num);
++ if (err)
++ return err;
++ }
++
++ mtk_wdt->disable_wdt_extrst =
++ of_property_read_bool(dev->of_node, "mediatek,disable-extrst");
++
+ return 0;
+ }
+
+@@ -219,6 +402,7 @@ static int mtk_wdt_resume(struct device *dev)
+
+ static const struct of_device_id mtk_wdt_dt_ids[] = {
+ { .compatible = "mediatek,mt6589-wdt" },
++ { .compatible = "mediatek,mt7986-wdt", .data = &mt7986_data },
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids);
+@@ -249,4 +433,4 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Matthias Brugger <matthias.bgg@gmail.com>");
+ MODULE_DESCRIPTION("Mediatek WatchDog Timer Driver");
+-MODULE_VERSION(DRV_VERSION);
++MODULE_VERSION(DRV_VERSION);
+\ No newline at end of file
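
The watchdog patch above wires pretimeout support into dual mode: the interrupt ("bark") fires at timeout / 2 and WDT_LENGTH is programmed with the remaining interval, in units of 512 ticks of the 32 kHz clock. A sketch of that arithmetic; the key value below is assumed for illustration, while the shift factors come from the patch.

/* Sketch of the dual-mode timeout arithmetic in the watchdog patch above. */
#include <stdio.h>

#define WDT_LENGTH_TIMEOUT(n)  ((n) << 5)
#define DEMO_LENGTH_KEY        0x8      /* assumed magic key value */

static unsigned int wdt_length_reg(unsigned int timeout,
                                   unsigned int pretimeout)
{
    /* One register bit is 512 ticks of the 32 kHz clock (~16 ms). */
    return WDT_LENGTH_TIMEOUT((timeout - pretimeout) << 6) | DEMO_LENGTH_KEY;
}

int main(void)
{
    unsigned int timeout = 31, pretimeout = timeout / 2;

    printf("bark (irq) at %us, reset at %us, WDT_LENGTH=0x%08x\n",
           pretimeout, timeout, wdt_length_reg(timeout, pretimeout));
    return 0;
}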
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0990-gsw-rtl8367s-mt7622-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0990-gsw-rtl8367s-mt7622-support.patch
new file mode 100644
index 0000000..a3d49e9
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0990-gsw-rtl8367s-mt7622-support.patch
@@ -0,0 +1,23 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -42,6 +42,12 @@ config MDIO_BCM_IPROC
+ This module provides a driver for the MDIO busses found in the
+ Broadcom iProc SoC's.
+
++config RTL8367S_GSW
++ tristate "rtl8367 Gigabit Switch support for mt7622"
++ depends on NET_VENDOR_MEDIATEK
++ help
++ This driver supports rtl8367s in mt7622
++
+ config MDIO_BCM_UNIMAC
+ tristate "Broadcom UniMAC MDIO bus controller"
+ depends on HAS_IOMEM
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -110,4 +110,5 @@ obj-$(CONFIG_TERANETICS_PHY) += teraneti
+ obj-$(CONFIG_VITESSE_PHY) += vitesse.o
+ obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o
+ obj-$(CONFIG_MT753X_GSW) += mtk/mt753x/
++obj-$(CONFIG_RTL8367S_GSW) += rtk/
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0991-dt-bindings-PCI-Mediatek-Update-PCIe-binding.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0991-dt-bindings-PCI-Mediatek-Update-PCIe-binding.patch
new file mode 100644
index 0000000..02e4c13
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0991-dt-bindings-PCI-Mediatek-Update-PCIe-binding.patch
@@ -0,0 +1,415 @@
+From patchwork Thu May 28 06:16:45 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Chuanjia Liu <chuanjia.liu@mediatek.com>
+X-Patchwork-Id: 11574793
+Return-Path:
+ <SRS0=ftSA=7K=lists.infradead.org=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@kernel.org>
+Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org
+ [172.30.200.123])
+ by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 391201392
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:20:27 +0000 (UTC)
+Received: from bombadil.infradead.org (bombadil.infradead.org
+ [198.137.202.133])
+ (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
+ (No client certificate requested)
+ by mail.kernel.org (Postfix) with ESMTPS id 104F620657
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:20:27 +0000 (UTC)
+Authentication-Results: mail.kernel.org;
+ dkim=pass (2048-bit key) header.d=lists.infradead.org
+ header.i=@lists.infradead.org header.b="raZHaWxs";
+ dkim=fail reason="signature verification failed" (1024-bit key)
+ header.d=mediatek.com header.i=@mediatek.com header.b="YztrByG/"
+DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 104F620657
+Authentication-Results: mail.kernel.org;
+ dmarc=fail (p=none dis=none) header.from=mediatek.com
+Authentication-Results: mail.kernel.org;
+ spf=none
+ smtp.mailfrom=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=lists.infradead.org; s=bombadil.20170209; h=Sender:
+ Content-Transfer-Encoding:Content-Type:Cc:List-Subscribe:List-Help:List-Post:
+ List-Archive:List-Unsubscribe:List-Id:MIME-Version:References:In-Reply-To:
+ Message-ID:Date:Subject:To:From:Reply-To:Content-ID:Content-Description:
+ Resent-Date:Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:
+ List-Owner; bh=aVtKU+Ey8KEM97+S66fz9ZMo+H8BP570jhAAvaRsNWc=; b=raZHaWxsfCxsrd
+ Byn/w1oLN/J82ApnNdBBXixq9Qj0uXIU2tBVqkiQ9lG6QDk7uguxQSJLeTqrsI/uxQmCI/PGQtZdP
+ sH0oboi2sbQSqJ/1ud4uL2pPaiLRJCxINF5oWjoZMsjn/b2fWvn52P6vTr/dxDTaabiVhY0HL0J+X
+ 7YGc1aYtO76HZHE2ke3puR42QkI8hE9E2cEhiLWeuUiLdUBegNM5MdYftu4nJTcCXnAeJjp/wIpYG
+ 7X737N9cmanDf6Bxr2bNPgaYzH+m7JK6eGxuAvWo0+PE9OX7MLrXY3KjixcjD/b0he0mfEM++gBAq
+ KBYKl5wh1mnlR2WIWXew==;
+Received: from localhost ([127.0.0.1] helo=bombadil.infradead.org)
+ by bombadil.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeBtx-0005JC-DJ; Thu, 28 May 2020 06:20:25 +0000
+Received: from mailgw01.mediatek.com ([216.200.240.184])
+ by bombadil.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeBtW-0002f2-75; Thu, 28 May 2020 06:20:01 +0000
+X-UUID: d5cb6d96c2a5421796c2f8a284ff3670-20200527
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=mediatek.com;
+ s=dk;
+ h=Content-Transfer-Encoding:Content-Type:MIME-Version:References:In-Reply-To:Message-ID:Date:Subject:CC:To:From;
+ bh=EqjC+5cHgv6eykN7FPf2mtwK9UivJ3XSCE0jEvb8h+8=;
+ b=YztrByG/Ia304l9KDPBwoHFYkFCN6qBXPqwZgg56CA9VitadAg2+K1VgfEU+oHqsqcsGAMdZTRMQh17tpm4bJParw6MMzAQ28te2TcxvQMV8PZMkerJdZyyYblI7ybauPWuofAQgQMtuwSKVii8eTRJbf99OZ9vDGJP3zo2j1wU=;
+X-UUID: d5cb6d96c2a5421796c2f8a284ff3670-20200527
+Received: from mtkcas66.mediatek.inc [(172.29.193.44)] by
+ mailgw01.mediatek.com
+ (envelope-from <chuanjia.liu@mediatek.com>)
+ (musrelay.mediatek.com ESMTP with TLS)
+ with ESMTP id 681958707; Wed, 27 May 2020 22:20:16 -0800
+Received: from MTKMBS07N2.mediatek.inc (172.21.101.141) by
+ MTKMBS62N1.mediatek.inc (172.29.193.41) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Wed, 27 May 2020 23:18:52 -0700
+Received: from mtkcas07.mediatek.inc (172.21.101.84) by
+ mtkmbs07n2.mediatek.inc (172.21.101.141) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Thu, 28 May 2020 14:18:49 +0800
+Received: from localhost.localdomain (10.17.3.153) by mtkcas07.mediatek.inc
+ (172.21.101.73) with Microsoft SMTP Server id 15.0.1497.2 via Frontend
+ Transport; Thu, 28 May 2020 14:18:47 +0800
+From: <chuanjia.liu@mediatek.com>
+To: <robh+dt@kernel.org>, <ryder.lee@mediatek.com>, <matthias.bgg@gmail.com>
+Subject: [PATCH v2 1/4] dt-bindings: PCI: Mediatek: Update PCIe binding
+Date: Thu, 28 May 2020 14:16:45 +0800
+Message-ID: <20200528061648.32078-2-chuanjia.liu@mediatek.com>
+X-Mailer: git-send-email 2.18.0
+In-Reply-To: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+References: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+MIME-Version: 1.0
+X-MTK: N
+X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3
+X-CRM114-CacheID: sfid-20200527_231958_261064_608CC03E
+X-CRM114-Status: GOOD ( 13.95 )
+X-Spam-Score: -0.2 (/)
+X-Spam-Report: SpamAssassin version 3.4.4 on bombadil.infradead.org summary:
+ Content analysis details: (-0.2 points)
+ pts rule name description
+ ---- ----------------------
+ --------------------------------------------------
+ -0.0 SPF_PASS SPF: sender matches SPF record
+ 0.0 SPF_HELO_NONE SPF: HELO does not publish an SPF Record
+ 0.0 MIME_BASE64_TEXT RAW: Message text disguised using base64
+ encoding
+ -0.1 DKIM_VALID_AU Message has a valid DKIM or DK signature from
+ author's domain
+ 0.1 DKIM_SIGNED Message has a DKIM or DK signature,
+ not necessarily
+ valid
+ -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature
+ -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from
+ envelope-from domain
+ 0.0 UNPARSEABLE_RELAY Informational: message has unparseable relay
+ lines
+X-BeenThere: linux-mediatek@lists.infradead.org
+X-Mailman-Version: 2.1.29
+Precedence: list
+List-Id: <linux-mediatek.lists.infradead.org>
+List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=unsubscribe>
+List-Archive: <http://lists.infradead.org/pipermail/linux-mediatek/>
+List-Post: <mailto:linux-mediatek@lists.infradead.org>
+List-Help: <mailto:linux-mediatek-request@lists.infradead.org?subject=help>
+List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=subscribe>
+Cc: devicetree@vger.kernel.org, lorenzo.pieralisi@arm.com,
+ srv_heupstream@mediatek.com, "chuanjia.liu" <Chuanjia.Liu@mediatek.com>,
+ linux-pci@vger.kernel.org, linux-kernel@vger.kernel.org,
+ jianjun.wang@mediatek.com, linux-mediatek@lists.infradead.org,
+ yong.wu@mediatek.com, bhelgaas@google.com,
+ linux-arm-kernel@lists.infradead.org, amurray@thegoodpenguin.co.uk
+Sender: "Linux-mediatek" <linux-mediatek-bounces@lists.infradead.org>
+Errors-To:
+ linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+
+From: "chuanjia.liu" <Chuanjia.Liu@mediatek.com>
+
+There are two independent PCIe controllers in MT2712/MT7622 platform,
+and each of them should contain an independent MSI domain.
+
+In current architecture, MSI domain will be inherited from the root
+bridge, and all of the devices will share the same MSI domain.
+Hence that, the PCIe devices will not work properly if the irq number
+which required is more than 32.
+
+Split the PCIe node for MT2712/MT7622 platform to fix MSI issue and
+comply with the hardware design.
+
+Signed-off-by: chuanjia.liu <Chuanjia.Liu@mediatek.com>
+---
+ .../bindings/pci/mediatek-pcie-cfg.yaml | 38 +++++
+ .../devicetree/bindings/pci/mediatek-pcie.txt | 144 +++++++++++-------
+ 2 files changed, 129 insertions(+), 53 deletions(-)
+ create mode 100644 Documentation/devicetree/bindings/pci/mediatek-pcie-cfg.yaml
+
+--- /dev/null
++++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-cfg.yaml
+@@ -0,0 +1,38 @@
++# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
++%YAML 1.2
++---
++$id: http://devicetree.org/schemas/pci/mediatek-pcie-cfg.yaml#
++$schema: http://devicetree.org/meta-schemas/core.yaml#
++
++title: Mediatek PCIECFG controller
++
++maintainers:
++ - Chuanjia Liu <chuanjia.liu@mediatek.com>
++ - Jianjun Wang <jianjun.wang@mediatek.com>
++
++description: |
++ The MediaTek PCIECFG controller controls PCIe subsystem features
++ such as LTSSM and ASPM.
++
++properties:
++ compatible:
++ items:
++ - enum:
++ - mediatek,mt7622-pciecfg
++ - mediatek,mt7629-pciecfg
++ - const: syscon
++
++ reg:
++ maxItems: 1
++
++required:
++ - compatible
++ - reg
++
++examples:
++ - |
++ pciecfg: pciecfg@1a140000 {
++ compatible = "mediatek,mt7622-pciecfg", "syscon";
++ reg = <0 0x1a140000 0 0x1000>;
++ };
++...
+--- a/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
++++ b/Documentation/devicetree/bindings/pci/mediatek-pcie.txt
+@@ -8,7 +8,7 @@ Required properties:
+ "mediatek,mt7623-pcie"
+ "mediatek,mt7629-pcie"
+ - device_type: Must be "pci"
+-- reg: Base addresses and lengths of the PCIe subsys and root ports.
++- reg: Base addresses and lengths of the root ports.
+ - reg-names: Names of the above areas to use during resource lookup.
+ - #address-cells: Address representation for root ports (must be 3)
+ - #size-cells: Size representation for root ports (must be 2)
+@@ -19,10 +19,10 @@ Required properties:
+ - sys_ckN :transaction layer and data link layer clock
+ Required entries for MT2701/MT7623:
+ - free_ck :for reference clock of PCIe subsys
+- Required entries for MT2712/MT7622:
++ Required entries for MT2712/MT7622/MT7629:
+ - ahb_ckN :AHB slave interface operating clock for CSR access and RC
+ initiated MMIO access
+- Required entries for MT7622:
++ Required entries for MT7622/MT7629:
+ - axi_ckN :application layer MMIO channel operating clock
+ - aux_ckN :pe2_mac_bridge and pe2_mac_core operating clock when
+ pcie_mac_ck/pcie_pipe_ck is turned off
+@@ -47,10 +47,13 @@ Required properties for MT7623/MT2701:
+ - reset-names: Must be "pcie-rst0", "pcie-rst1", "pcie-rstN".. based on the
+ number of root ports.
+
+-Required properties for MT2712/MT7622:
++Required properties for MT2712/MT7622/MT7629:
+ -interrupts: A list of interrupt outputs of the controller, must have one
+ entry for each PCIe port
+
++Required properties for MT7622/MT7629:
++- mediatek,pcie-cfg: Should be a phandle of the pciecfg node.
++
+ In addition, the device tree node must have sub-nodes describing each
+ PCIe port interface, having the following mandatory properties:
+
+@@ -143,56 +146,73 @@ Examples for MT7623:
+
+ Examples for MT2712:
+
+- pcie: pcie@11700000 {
++ pcie1: pcie@112ff000 {
+ compatible = "mediatek,mt2712-pcie";
+ device_type = "pci";
+- reg = <0 0x11700000 0 0x1000>,
+- <0 0x112ff000 0 0x1000>;
+- reg-names = "port0", "port1";
++ reg = <0 0x112ff000 0 0x1000>;
++ reg-names = "port1";
+ #address-cells = <3>;
+ #size-cells = <2>;
+- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>,
+- <&topckgen CLK_TOP_PE2_MAC_P1_SEL>,
+- <&pericfg CLK_PERI_PCIE0>,
++ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pcie_irq";
++ clocks = <&topckgen CLK_TOP_PE2_MAC_P1_SEL>,
+ <&pericfg CLK_PERI_PCIE1>;
+- clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1";
+- phys = <&pcie0_phy PHY_TYPE_PCIE>, <&pcie1_phy PHY_TYPE_PCIE>;
+- phy-names = "pcie-phy0", "pcie-phy1";
++ clock-names = "sys_ck1", "ahb_ck1";
++ phys = <&u3port1 PHY_TYPE_PCIE>;
++ phy-names = "pcie-phy1";
+ bus-range = <0x00 0xff>;
+- ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
++ ranges = <0x82000000 0 0x11400000 0x0 0x11400000 0 0x300000>;
++ status = "disabled";
+
+- pcie0: pcie@0,0 {
+- reg = <0x0000 0 0 0 0>;
++ slot1: pcie@1,0 {
++ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+- <0 0 0 2 &pcie_intc0 1>,
+- <0 0 0 3 &pcie_intc0 2>,
+- <0 0 0 4 &pcie_intc0 3>;
+- pcie_intc0: interrupt-controller {
++ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
++ <0 0 0 2 &pcie_intc1 1>,
++ <0 0 0 3 &pcie_intc1 2>,
++ <0 0 0 4 &pcie_intc1 3>;
++ pcie_intc1: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
++ };
+
+- pcie1: pcie@1,0 {
+- reg = <0x0800 0 0 0 0>;
++ pcie0: pcie@11700000 {
++ compatible = "mediatek,mt2712-pcie";
++ device_type = "pci";
++ reg = <0 0x11700000 0 0x1000>;
++ reg-names = "port0";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pcie_irq";
++ clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>,
++ <&pericfg CLK_PERI_PCIE0>;
++ clock-names = "sys_ck0", "ahb_ck0";
++ phys = <&u3port0 PHY_TYPE_PCIE>;
++ phy-names = "pcie-phy0";
++ bus-range = <0x00 0xff>;
++ ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
++ status = "disabled";
++
++ slot0: pcie@0,0 {
++ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+- <0 0 0 2 &pcie_intc1 1>,
+- <0 0 0 3 &pcie_intc1 2>,
+- <0 0 0 4 &pcie_intc1 3>;
+- pcie_intc1: interrupt-controller {
++ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
++ <0 0 0 2 &pcie_intc0 1>,
++ <0 0 0 3 &pcie_intc0 2>,
++ <0 0 0 4 &pcie_intc0 3>;
++ pcie_intc0: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+@@ -202,39 +222,31 @@ Examples for MT2712:
+
+ Examples for MT7622:
+
+- pcie: pcie@1a140000 {
++ pcie0: pcie@1a143000 {
+ compatible = "mediatek,mt7622-pcie";
+ device_type = "pci";
+- reg = <0 0x1a140000 0 0x1000>,
+- <0 0x1a143000 0 0x1000>,
+- <0 0x1a145000 0 0x1000>;
+- reg-names = "subsys", "port0", "port1";
++ reg = <0 0x1a143000 0 0x1000>;
++ reg-names = "port0";
++ mediatek,pcie-cfg = <&pciecfg>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+- interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "pcie_irq";
+ clocks = <&pciesys CLK_PCIE_P0_MAC_EN>,
+- <&pciesys CLK_PCIE_P1_MAC_EN>,
+ <&pciesys CLK_PCIE_P0_AHB_EN>,
+- <&pciesys CLK_PCIE_P1_AHB_EN>,
+ <&pciesys CLK_PCIE_P0_AUX_EN>,
+- <&pciesys CLK_PCIE_P1_AUX_EN>,
+ <&pciesys CLK_PCIE_P0_AXI_EN>,
+- <&pciesys CLK_PCIE_P1_AXI_EN>,
+ <&pciesys CLK_PCIE_P0_OBFF_EN>,
+- <&pciesys CLK_PCIE_P1_OBFF_EN>,
+- <&pciesys CLK_PCIE_P0_PIPE_EN>,
+- <&pciesys CLK_PCIE_P1_PIPE_EN>;
+- clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1",
+- "aux_ck0", "aux_ck1", "axi_ck0", "axi_ck1",
+- "obff_ck0", "obff_ck1", "pipe_ck0", "pipe_ck1";
+- phys = <&pcie0_phy PHY_TYPE_PCIE>, <&pcie1_phy PHY_TYPE_PCIE>;
+- phy-names = "pcie-phy0", "pcie-phy1";
++ <&pciesys CLK_PCIE_P0_PIPE_EN>;
++ clock-names = "sys_ck0", "ahb_ck0", "aux_ck0",
++ "axi_ck0", "obff_ck0", "pipe_ck0";
++
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
+ bus-range = <0x00 0xff>;
+- ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
++ ranges = <0x82000000 0 0x20000000 0 0x20000000 0 0x8000000>;
++ status = "disabled";
+
+- pcie0: pcie@0,0 {
++ slot0: pcie@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+@@ -251,8 +263,34 @@ Examples for MT7622:
+ #interrupt-cells = <1>;
+ };
+ };
++ };
++
++ pcie1: pcie@1a145000 {
++ compatible = "mediatek,mt7622-pcie";
++ device_type = "pci";
++ reg = <0 0x1a145000 0 0x1000>;
++ reg-names = "port1";
++ mediatek,pcie-cfg = <&pciecfg>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "pcie_irq";
++ clocks = <&pciesys CLK_PCIE_P1_MAC_EN>,
++ /* designer has connect RC1 with p0_ahb clock */
++ <&pciesys CLK_PCIE_P0_AHB_EN>,
++ <&pciesys CLK_PCIE_P1_AUX_EN>,
++ <&pciesys CLK_PCIE_P1_AXI_EN>,
++ <&pciesys CLK_PCIE_P1_OBFF_EN>,
++ <&pciesys CLK_PCIE_P1_PIPE_EN>;
++ clock-names = "sys_ck1", "ahb_ck1", "aux_ck1",
++ "axi_ck1", "obff_ck1", "pipe_ck1";
++
++ power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
++ bus-range = <0x00 0xff>;
++ ranges = <0x82000000 0 0x28000000 0 0x28000000 0 0x8000000>;
++ status = "disabled";
+
+- pcie1: pcie@1,0 {
++ slot1: pcie@1,0 {
+ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
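The 32-interrupt limit called out in the commit message above comes from the way pcie-mediatek hands out MSIs: each root port owns a fixed bitmap of MTK_MSI_IRQS_NUM (32) vectors, so when both ports inherit a single MSI domain from the root bridge they draw from one shared 32-slot pool. A rough, illustrative sketch of that allocation scheme (the struct and function names here are hypothetical, not the driver's):

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define MTK_MSI_IRQS_NUM	32	/* MSI vectors available per PCIe port */

/* Hypothetical pool; the real driver keeps a similar per-port bitmap. */
struct msi_pool {
	DECLARE_BITMAP(used, MTK_MSI_IRQS_NUM);
	spinlock_t lock;
};

/* Hand out the next free hwirq, or -ENOSPC once all 32 slots are taken. */
static int msi_pool_alloc(struct msi_pool *pool)
{
	int hwirq;

	spin_lock(&pool->lock);
	hwirq = find_first_zero_bit(pool->used, MTK_MSI_IRQS_NUM);
	if (hwirq < MTK_MSI_IRQS_NUM)
		__set_bit(hwirq, pool->used);
	else
		hwirq = -ENOSPC;
	spin_unlock(&pool->lock);

	return hwirq;
}

Splitting the node gives each controller its own platform device, probe and MSI domain, so each port ends up with its own pool instead of competing for one.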
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0992-PCI-mediatek-Use-regmap-to-get-shared-pcie-cfg-base.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0992-PCI-mediatek-Use-regmap-to-get-shared-pcie-cfg-base.patch
new file mode 100644
index 0000000..3e4d44f
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0992-PCI-mediatek-Use-regmap-to-get-shared-pcie-cfg-base.patch
@@ -0,0 +1,217 @@
+From patchwork Thu May 28 06:16:46 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Chuanjia Liu <chuanjia.liu@mediatek.com>
+X-Patchwork-Id: 11574781
+Return-Path:
+ <SRS0=ftSA=7K=lists.infradead.org=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@kernel.org>
+Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org
+ [172.30.200.123])
+ by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 0A99B60D
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:19:04 +0000 (UTC)
+Received: from bombadil.infradead.org (bombadil.infradead.org
+ [198.137.202.133])
+ (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
+ (No client certificate requested)
+ by mail.kernel.org (Postfix) with ESMTPS id DCC99208FE
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:19:03 +0000 (UTC)
+Authentication-Results: mail.kernel.org;
+ dkim=pass (2048-bit key) header.d=lists.infradead.org
+ header.i=@lists.infradead.org header.b="SpOi0ueF";
+ dkim=fail reason="signature verification failed" (1024-bit key)
+ header.d=mediatek.com header.i=@mediatek.com header.b="UGIBoIEG"
+DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org DCC99208FE
+Authentication-Results: mail.kernel.org;
+ dmarc=fail (p=none dis=none) header.from=mediatek.com
+Authentication-Results: mail.kernel.org;
+ spf=none
+ smtp.mailfrom=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=lists.infradead.org; s=bombadil.20170209; h=Sender:
+ Content-Transfer-Encoding:Content-Type:Cc:List-Subscribe:List-Help:List-Post:
+ List-Archive:List-Unsubscribe:List-Id:MIME-Version:References:In-Reply-To:
+ Message-ID:Date:Subject:To:From:Reply-To:Content-ID:Content-Description:
+ Resent-Date:Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:
+ List-Owner; bh=LIr5poLUT/UdH6/akh/pnICGGa3rUBkN+4FhE1DyOrU=; b=SpOi0ueFcoJ/ka
+ 4esa6cDd5oU4fp0z684ZVPaVvvhm/azSZBBMYinHaAW6EvzKcMNYIX9grP8eg/728lEPNTKVq0I8H
+ PQZ9KvD4uTu8Opo1hD8LsRSLr+YLpNKt3KPOY/4gpwQ97uU9rI5PwkuAxPBgR949Vh5EiG0Vaww1H
+ Ep+I5BFRn2LVVQZP1Z7U0A0VUcOTLJ4znoWRLEXxtM9/Wd4hwQsrEPQszeDFti/RbwGfJ5efOb5UL
+ fhwBzSxELEzAAgH7env/XD2sSSpVf2Qsn6WO8D3ZepMtWrRtARiaRKSNxSBQTg2SSHcjmBSJSzcX+
+ w8wqWaUMs0crlBuZWS1g==;
+Received: from localhost ([127.0.0.1] helo=bombadil.infradead.org)
+ by bombadil.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeBsc-0001tI-88; Thu, 28 May 2020 06:19:02 +0000
+Received: from mailgw01.mediatek.com ([216.200.240.184])
+ by bombadil.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeBsZ-0001rp-6g; Thu, 28 May 2020 06:19:01 +0000
+X-UUID: beeaf5765357439c91eab1f67ca7ef43-20200527
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=mediatek.com;
+ s=dk;
+ h=Content-Transfer-Encoding:Content-Type:MIME-Version:References:In-Reply-To:Message-ID:Date:Subject:CC:To:From;
+ bh=+IjWjsF/DhknqZB+lLSZ50cyvxDap+8w4tvqhp8Dv68=;
+ b=UGIBoIEGJUuq5pEvYEad1HVGpiv6yma+94hva83D2gD8lYmihRWkpJxB2yn+dVtNm7ZXXoQBf+jvvULOmslJgs1HZTLJTnjpdvLmQqo42OXRXSVpTE49HdRkJZDAIWIAReBfOEkFgNxcIX3uedrtnww/NLJ2lagrYPG5ET4lI2E=;
+X-UUID: beeaf5765357439c91eab1f67ca7ef43-20200527
+Received: from mtkcas68.mediatek.inc [(172.29.94.19)] by mailgw01.mediatek.com
+ (envelope-from <chuanjia.liu@mediatek.com>)
+ (musrelay.mediatek.com ESMTP with TLS)
+ with ESMTP id 603406343; Wed, 27 May 2020 22:19:17 -0800
+Received: from mtkmbs07n1.mediatek.inc (172.21.101.16) by
+ MTKMBS62DR.mediatek.inc (172.29.94.18) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Wed, 27 May 2020 23:18:47 -0700
+Received: from mtkcas07.mediatek.inc (172.21.101.84) by
+ mtkmbs07n1.mediatek.inc (172.21.101.16) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Thu, 28 May 2020 14:18:51 +0800
+Received: from localhost.localdomain (10.17.3.153) by mtkcas07.mediatek.inc
+ (172.21.101.73) with Microsoft SMTP Server id 15.0.1497.2 via Frontend
+ Transport; Thu, 28 May 2020 14:18:49 +0800
+From: <chuanjia.liu@mediatek.com>
+To: <robh+dt@kernel.org>, <ryder.lee@mediatek.com>, <matthias.bgg@gmail.com>
+Subject: [PATCH v2 2/4] PCI: mediatek: Use regmap to get shared pcie-cfg base
+Date: Thu, 28 May 2020 14:16:46 +0800
+Message-ID: <20200528061648.32078-3-chuanjia.liu@mediatek.com>
+X-Mailer: git-send-email 2.18.0
+In-Reply-To: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+References: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+MIME-Version: 1.0
+X-MTK: N
+X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3
+X-CRM114-CacheID: sfid-20200527_231859_251275_BED2B1E2
+X-CRM114-Status: GOOD ( 11.62 )
+X-Spam-Score: -0.2 (/)
+X-Spam-Report: SpamAssassin version 3.4.4 on bombadil.infradead.org summary:
+ Content analysis details: (-0.2 points)
+ pts rule name description
+ ---- ----------------------
+ --------------------------------------------------
+ -0.0 SPF_PASS SPF: sender matches SPF record
+ 0.0 SPF_HELO_NONE SPF: HELO does not publish an SPF Record
+ 0.0 MIME_BASE64_TEXT RAW: Message text disguised using base64
+ encoding
+ -0.1 DKIM_VALID_AU Message has a valid DKIM or DK signature from
+ author's domain
+ 0.1 DKIM_SIGNED Message has a DKIM or DK signature,
+ not necessarily
+ valid
+ -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature
+ -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from
+ envelope-from domain
+ 0.0 UNPARSEABLE_RELAY Informational: message has unparseable relay
+ lines
+X-BeenThere: linux-mediatek@lists.infradead.org
+X-Mailman-Version: 2.1.29
+Precedence: list
+List-Id: <linux-mediatek.lists.infradead.org>
+List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=unsubscribe>
+List-Archive: <http://lists.infradead.org/pipermail/linux-mediatek/>
+List-Post: <mailto:linux-mediatek@lists.infradead.org>
+List-Help: <mailto:linux-mediatek-request@lists.infradead.org?subject=help>
+List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=subscribe>
+Cc: devicetree@vger.kernel.org, lorenzo.pieralisi@arm.com,
+ srv_heupstream@mediatek.com, "chuanjia.liu" <Chuanjia.Liu@mediatek.com>,
+ linux-pci@vger.kernel.org, linux-kernel@vger.kernel.org,
+ jianjun.wang@mediatek.com, linux-mediatek@lists.infradead.org,
+ yong.wu@mediatek.com, bhelgaas@google.com,
+ linux-arm-kernel@lists.infradead.org, amurray@thegoodpenguin.co.uk
+Sender: "Linux-mediatek" <linux-mediatek-bounces@lists.infradead.org>
+Errors-To:
+ linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+
+From: "chuanjia.liu" <Chuanjia.Liu@mediatek.com>
+
+Use a regmap to access the shared pcie-cfg base registers, and change
+the method used to get the PCIe IRQ.
+
+Signed-off-by: chuanjia.liu <Chuanjia.Liu@mediatek.com>
+---
+ drivers/pci/controller/pcie-mediatek.c | 25 ++++++++++++++++++-------
+ 1 file changed, 18 insertions(+), 7 deletions(-)
+
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -14,6 +14,7 @@
+ #include <linux/irqchip/chained_irq.h>
+ #include <linux/irqdomain.h>
+ #include <linux/kernel.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/msi.h>
+ #include <linux/module.h>
+ #include <linux/of_address.h>
+@@ -23,6 +24,7 @@
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
++#include <linux/regmap.h>
+ #include <linux/reset.h>
+
+ #include "../pci.h"
+@@ -205,6 +207,7 @@ struct mtk_pcie_port {
+ * struct mtk_pcie - PCIe host information
+ * @dev: pointer to PCIe device
+ * @base: IO mapped register base
++ * @cfg: IO mapped register map for PCIe config
+ * @free_ck: free-run reference clock
+ * @mem: non-prefetchable memory resource
+ * @ports: pointer to PCIe port information
+@@ -214,6 +217,7 @@ struct mtk_pcie_port {
+ struct mtk_pcie {
+ struct device *dev;
+ void __iomem *base;
++ struct regmap *cfg;
+ struct clk *free_ck;
+
+ struct resource mem;
+@@ -651,7 +655,7 @@ static int mtk_pcie_setup_irq(struct mtk
+ return err;
+ }
+
+- port->irq = platform_get_irq(pdev, port->slot);
++ port->irq = platform_get_irq_byname(pdev, "pcie_irq");
+ irq_set_chained_handler_and_data(port->irq,
+ mtk_pcie_intr_handler, port);
+
+@@ -666,12 +670,11 @@ static int mtk_pcie_startup_port_v2(stru
+ u32 val;
+ int err;
+
+- /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+- if (pcie->base) {
+- val = readl(pcie->base + PCIE_SYS_CFG_V2);
+- val |= PCIE_CSR_LTSSM_EN(port->slot) |
+- PCIE_CSR_ASPM_L1_EN(port->slot);
+- writel(val, pcie->base + PCIE_SYS_CFG_V2);
++ /* MT7622/MT7629 platforms need to enable LTSSM and ASPM. */
++ if (pcie->cfg) {
++ val = PCIE_CSR_LTSSM_EN(port->slot) |
++ PCIE_CSR_ASPM_L1_EN(port->slot);
++ regmap_update_bits(pcie->cfg, PCIE_SYS_CFG_V2, val, val);
+ }
+
+ /* Assert all reset signals */
+@@ -977,6 +980,7 @@ static int mtk_pcie_subsys_powerup(struc
+ struct device *dev = pcie->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *regs;
++ struct device_node *cfg_node;
+ int err;
+
+ /* get shared registers, which are optional */
+@@ -989,6 +993,13 @@ static int mtk_pcie_subsys_powerup(struc
+ }
+ }
+
++ cfg_node = of_parse_phandle(dev->of_node, "mediatek,pcie-cfg", 0);
++ if (cfg_node) {
++ pcie->cfg = syscon_node_to_regmap(cfg_node);
++ if (IS_ERR(pcie->cfg))
++ return PTR_ERR(pcie->cfg);
++ }
++
+ pcie->free_ck = devm_clk_get(dev, "free_ck");
+ if (IS_ERR(pcie->free_ck)) {
+ if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER)
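Net effect of the hunks above: instead of ioremapping a "subsys" register window inside the PCIe driver, the shared block is resolved through the "mediatek,pcie-cfg" phandle as a syscon regmap and the LTSSM/ASPM enable bits are set with regmap_update_bits(); the per-port interrupt is likewise fetched by name instead of by slot index. A minimal sketch of the same lookup pattern (my_pcie_get_shared_cfg and its arguments are illustrative, not the driver's real structures; error handling is trimmed):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int my_pcie_get_shared_cfg(struct platform_device *pdev,
				  struct regmap **cfg, int *irq)
{
	struct device_node *np;

	/* The shared "pciecfg" syscon is optional and referenced by phandle. */
	np = of_parse_phandle(pdev->dev.of_node, "mediatek,pcie-cfg", 0);
	if (np) {
		*cfg = syscon_node_to_regmap(np);
		of_node_put(np);
		if (IS_ERR(*cfg))
			return PTR_ERR(*cfg);
	}

	/* Each split port node carries exactly one "pcie_irq" interrupt. */
	*irq = platform_get_irq_byname(pdev, "pcie_irq");
	return *irq < 0 ? *irq : 0;
}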
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch
new file mode 100644
index 0000000..3c5558b
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch
@@ -0,0 +1,417 @@
+From patchwork Thu May 28 06:16:47 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Chuanjia Liu <chuanjia.liu@mediatek.com>
+X-Patchwork-Id: 11574785
+Return-Path:
+ <SRS0=ftSA=7K=lists.infradead.org=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@kernel.org>
+Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org
+ [172.30.200.123])
+ by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 933301391
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:19:16 +0000 (UTC)
+Received: from bombadil.infradead.org (bombadil.infradead.org
+ [198.137.202.133])
+ (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
+ (No client certificate requested)
+ by mail.kernel.org (Postfix) with ESMTPS id D19F02078C
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:19:15 +0000 (UTC)
+Authentication-Results: mail.kernel.org;
+ dkim=pass (2048-bit key) header.d=lists.infradead.org
+ header.i=@lists.infradead.org header.b="s8K7t7DF";
+ dkim=fail reason="signature verification failed" (1024-bit key)
+ header.d=mediatek.com header.i=@mediatek.com header.b="RhX81Iqp"
+DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org D19F02078C
+Authentication-Results: mail.kernel.org;
+ dmarc=fail (p=none dis=none) header.from=mediatek.com
+Authentication-Results: mail.kernel.org;
+ spf=none
+ smtp.mailfrom=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=lists.infradead.org; s=bombadil.20170209; h=Sender:
+ Content-Transfer-Encoding:Content-Type:Cc:List-Subscribe:List-Help:List-Post:
+ List-Archive:List-Unsubscribe:List-Id:MIME-Version:References:In-Reply-To:
+ Message-ID:Date:Subject:To:From:Reply-To:Content-ID:Content-Description:
+ Resent-Date:Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:
+ List-Owner; bh=NHyHqNMcE7LW10MwduOJoKiWe8cv+XayY+L6WDZeSu0=; b=s8K7t7DFh1iQ5w
+ eGvuMRgXEQv/YWRuSZRyX8lx8R2H9IuawEIgkhO6lEo6xv0VdsRuj8SptfoWg5afCItMhih373M21
+ 6sUy3tEiuKGgklfxLU0reLEkaATkKRGLJDY3eSSs1mvZDrydKuZLDTka+YDGaiESlOhqMr95Nm6YM
+ yK8O00qTwSRPJUILRsBv1e/Kz8NRCmYhs56snABJkKeJ51NRAkb20R6qGTEd6UyBlz3jTVYwluLgF
+ bdqzywDT6+BNg/Agh6Zd+v2PpO4cmwCpGm62+3UUyZkfi/aQ4qZ/AFAfSQI+3ZBAgsKMC1PGifOi/
+ FgGxIvAUk6atBy7DAHuw==;
+Received: from localhost ([127.0.0.1] helo=bombadil.infradead.org)
+ by bombadil.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeBsn-00025C-EF; Thu, 28 May 2020 06:19:13 +0000
+Received: from mailgw01.mediatek.com ([216.200.240.184])
+ by bombadil.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeBsZ-0001s4-6j; Thu, 28 May 2020 06:19:01 +0000
+X-UUID: c6210e6371fa445db0ae40a8b8a7a0a1-20200527
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=mediatek.com;
+ s=dk;
+ h=Content-Transfer-Encoding:Content-Type:MIME-Version:References:In-Reply-To:Message-ID:Date:Subject:CC:To:From;
+ bh=X9AwTdbhpWmlWY4LjTm8KLq4Cca3YI9UnyCX3O0BAak=;
+ b=RhX81Iqp0mWhBDyMQMFSEtt23+DGAWoin1SrFGP1bzp6GEtu38b2pK5RJVBshJtuxi/a1uMXZjeDsHJn02VGdNA07FrzZ7jq6YYEL+8cJs2DnhySmNElZazXPv2vKu9TWygfilTT24h/u8V/eszuRuhkdoUKWol8LwDlPl9gskg=;
+X-UUID: c6210e6371fa445db0ae40a8b8a7a0a1-20200527
+Received: from mtkcas68.mediatek.inc [(172.29.94.19)] by mailgw01.mediatek.com
+ (envelope-from <chuanjia.liu@mediatek.com>)
+ (musrelay.mediatek.com ESMTP with TLS)
+ with ESMTP id 7561992; Wed, 27 May 2020 22:19:17 -0800
+Received: from mtkmbs07n1.mediatek.inc (172.21.101.16) by
+ MTKMBS62DR.mediatek.inc (172.29.94.18) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Wed, 27 May 2020 23:18:47 -0700
+Received: from mtkcas07.mediatek.inc (172.21.101.84) by
+ mtkmbs07n1.mediatek.inc (172.21.101.16) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Thu, 28 May 2020 14:18:52 +0800
+Received: from localhost.localdomain (10.17.3.153) by mtkcas07.mediatek.inc
+ (172.21.101.73) with Microsoft SMTP Server id 15.0.1497.2 via Frontend
+ Transport; Thu, 28 May 2020 14:18:51 +0800
+From: <chuanjia.liu@mediatek.com>
+To: <robh+dt@kernel.org>, <ryder.lee@mediatek.com>, <matthias.bgg@gmail.com>
+Subject: [PATCH v2 3/4] arm64: dts: mediatek: Split PCIe node for
+ MT2712/MT7622
+Date: Thu, 28 May 2020 14:16:47 +0800
+Message-ID: <20200528061648.32078-4-chuanjia.liu@mediatek.com>
+X-Mailer: git-send-email 2.18.0
+In-Reply-To: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+References: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+MIME-Version: 1.0
+X-MTK: N
+X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3
+X-CRM114-CacheID: sfid-20200527_231859_253529_B6751C5A
+X-CRM114-Status: GOOD ( 12.20 )
+X-Spam-Score: -0.2 (/)
+X-Spam-Report: SpamAssassin version 3.4.4 on bombadil.infradead.org summary:
+ Content analysis details: (-0.2 points)
+ pts rule name description
+ ---- ----------------------
+ --------------------------------------------------
+ -0.0 SPF_PASS SPF: sender matches SPF record
+ 0.0 SPF_HELO_NONE SPF: HELO does not publish an SPF Record
+ 0.0 MIME_BASE64_TEXT RAW: Message text disguised using base64
+ encoding
+ -0.1 DKIM_VALID_AU Message has a valid DKIM or DK signature from
+ author's domain
+ 0.1 DKIM_SIGNED Message has a DKIM or DK signature,
+ not necessarily
+ valid
+ -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature
+ -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from
+ envelope-from domain
+ 0.0 UNPARSEABLE_RELAY Informational: message has unparseable relay
+ lines
+X-BeenThere: linux-mediatek@lists.infradead.org
+X-Mailman-Version: 2.1.29
+Precedence: list
+List-Id: <linux-mediatek.lists.infradead.org>
+List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=unsubscribe>
+List-Archive: <http://lists.infradead.org/pipermail/linux-mediatek/>
+List-Post: <mailto:linux-mediatek@lists.infradead.org>
+List-Help: <mailto:linux-mediatek-request@lists.infradead.org?subject=help>
+List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=subscribe>
+Cc: devicetree@vger.kernel.org, lorenzo.pieralisi@arm.com,
+ srv_heupstream@mediatek.com, "chuanjia.liu" <Chuanjia.Liu@mediatek.com>,
+ linux-pci@vger.kernel.org, linux-kernel@vger.kernel.org,
+ jianjun.wang@mediatek.com, linux-mediatek@lists.infradead.org,
+ yong.wu@mediatek.com, bhelgaas@google.com,
+ linux-arm-kernel@lists.infradead.org, amurray@thegoodpenguin.co.uk
+Sender: "Linux-mediatek" <linux-mediatek-bounces@lists.infradead.org>
+Errors-To:
+ linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+
+From: "chuanjia.liu" <Chuanjia.Liu@mediatek.com>
+
+There are two independent PCIe controllers in MT2712/MT7622 platform,
+and each of them should contain an independent MSI domain.
+
+In the current architecture, the MSI domain is inherited from the root
+bridge, so all of the devices share the same MSI domain. Hence, the
+PCIe devices will not work properly if more than 32 interrupt numbers
+are required.
+
+Split the PCIe node for MT2712/MT7622 platform to fix MSI issue and
+comply with the hardware design.
+
+Signed-off-by: chuanjia.liu <Chuanjia.Liu@mediatek.com>
+---
+ arch/arm64/boot/dts/mediatek/mt2712e.dtsi | 75 +++++++++++--------
+ .../dts/mediatek/mt7622-bananapi-bpi-r64.dts | 16 ++--
+ arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | 6 +-
+ arch/arm64/boot/dts/mediatek/mt7622.dtsi | 68 +++++++++++------
+ 4 files changed, 96 insertions(+), 69 deletions(-)
+
+--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+@@ -791,60 +791,73 @@
+ };
+ };
+
+- pcie: pcie@11700000 {
++ pcie1: pcie@112ff000 {
+ compatible = "mediatek,mt2712-pcie";
+ device_type = "pci";
+- reg = <0 0x11700000 0 0x1000>,
+- <0 0x112ff000 0 0x1000>;
+- reg-names = "port0", "port1";
++ reg = <0 0x112ff000 0 0x1000>;
++ reg-names = "port1";
+ #address-cells = <3>;
+ #size-cells = <2>;
+- interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+- <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>,
+- <&topckgen CLK_TOP_PE2_MAC_P1_SEL>,
+- <&pericfg CLK_PERI_PCIE0>,
++ interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pcie_irq";
++ clocks = <&topckgen CLK_TOP_PE2_MAC_P1_SEL>,
+ <&pericfg CLK_PERI_PCIE1>;
+- clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1";
+- phys = <&u3port0 PHY_TYPE_PCIE>, <&u3port1 PHY_TYPE_PCIE>;
+- phy-names = "pcie-phy0", "pcie-phy1";
++ clock-names = "sys_ck1", "ahb_ck1";
++ phys = <&u3port1 PHY_TYPE_PCIE>;
++ phy-names = "pcie-phy1";
+ bus-range = <0x00 0xff>;
+- ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
++ ranges = <0x82000000 0 0x11400000 0x0 0x11400000 0 0x300000>;
++ status = "disabled";
+
+- pcie0: pcie@0,0 {
+- device_type = "pci";
+- status = "disabled";
+- reg = <0x0000 0 0 0 0>;
++ slot1: pcie@1,0 {
++ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+- <0 0 0 2 &pcie_intc0 1>,
+- <0 0 0 3 &pcie_intc0 2>,
+- <0 0 0 4 &pcie_intc0 3>;
+- pcie_intc0: interrupt-controller {
++ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
++ <0 0 0 2 &pcie_intc1 1>,
++ <0 0 0 3 &pcie_intc1 2>,
++ <0 0 0 4 &pcie_intc1 3>;
++ pcie_intc1: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ };
+ };
++ };
+
+- pcie1: pcie@1,0 {
+- device_type = "pci";
+- status = "disabled";
+- reg = <0x0800 0 0 0 0>;
++ pcie0: pcie@11700000 {
++ compatible = "mediatek,mt2712-pcie";
++ device_type = "pci";
++ reg = <0 0x11700000 0 0x1000>;
++ reg-names = "port0";
++ #address-cells = <3>;
++ #size-cells = <2>;
++ interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "pcie_irq";
++ clocks = <&topckgen CLK_TOP_PE2_MAC_P0_SEL>,
++ <&pericfg CLK_PERI_PCIE0>;
++ clock-names = "sys_ck0", "ahb_ck0";
++ phys = <&u3port0 PHY_TYPE_PCIE>;
++ phy-names = "pcie-phy0";
++ bus-range = <0x00 0xff>;
++ ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
++ status = "disabled";
++
++ slot0: pcie@0,0 {
++ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+ interrupt-map-mask = <0 0 0 7>;
+- interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+- <0 0 0 2 &pcie_intc1 1>,
+- <0 0 0 3 &pcie_intc1 2>,
+- <0 0 0 4 &pcie_intc1 3>;
+- pcie_intc1: interrupt-controller {
++ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
++ <0 0 0 2 &pcie_intc0 1>,
++ <0 0 0 3 &pcie_intc0 2>,
++ <0 0 0 4 &pcie_intc0 3>;
++ pcie_intc0: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+@@ -294,18 +294,16 @@
+ };
+ };
+
+-&pcie {
++&pcie0 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pcie0_pins>, <&pcie1_pins>;
++ pinctrl-0 = <&pcie0_pins>;
+ status = "okay";
++};
+
+- pcie@0,0 {
+- status = "okay";
+- };
+-
+- pcie@1,0 {
+- status = "okay";
+- };
++&pcie1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pcie1_pins>;
++ status = "okay";
+ };
+
+ &pio {
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -794,45 +794,41 @@
+ #reset-cells = <1>;
+ };
+
+- pcie: pcie@1a140000 {
++ pciecfg: pciecfg@1a140000 {
++ compatible = "mediatek,mt7622-pciecfg", "syscon";
++ reg = <0 0x1a140000 0 0x1000>;
++ };
++
++ pcie0: pcie@1a143000 {
+ compatible = "mediatek,mt7622-pcie";
+ device_type = "pci";
+- reg = <0 0x1a140000 0 0x1000>,
+- <0 0x1a143000 0 0x1000>,
+- <0 0x1a145000 0 0x1000>;
+- reg-names = "subsys", "port0", "port1";
++ reg = <0 0x1a143000 0 0x1000>;
++ reg-names = "port0";
++ mediatek,pcie-cfg = <&pciecfg>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+- interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "pcie_irq";
+ clocks = <&pciesys CLK_PCIE_P0_MAC_EN>,
+- <&pciesys CLK_PCIE_P1_MAC_EN>,
+- <&pciesys CLK_PCIE_P0_AHB_EN>,
+ <&pciesys CLK_PCIE_P0_AHB_EN>,
+ <&pciesys CLK_PCIE_P0_AUX_EN>,
+- <&pciesys CLK_PCIE_P1_AUX_EN>,
+ <&pciesys CLK_PCIE_P0_AXI_EN>,
+- <&pciesys CLK_PCIE_P1_AXI_EN>,
+ <&pciesys CLK_PCIE_P0_OBFF_EN>,
+- <&pciesys CLK_PCIE_P1_OBFF_EN>,
+- <&pciesys CLK_PCIE_P0_PIPE_EN>,
+- <&pciesys CLK_PCIE_P1_PIPE_EN>;
+- clock-names = "sys_ck0", "sys_ck1", "ahb_ck0", "ahb_ck1",
+- "aux_ck0", "aux_ck1", "axi_ck0", "axi_ck1",
+- "obff_ck0", "obff_ck1", "pipe_ck0", "pipe_ck1";
++ <&pciesys CLK_PCIE_P0_PIPE_EN>;
++ clock-names = "sys_ck0", "ahb_ck0", "aux_ck0",
++ "axi_ck0", "obff_ck0", "pipe_ck0";
++
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
+ bus-range = <0x00 0xff>;
+- ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x10000000>;
++ ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x8000000>;
+ status = "disabled";
+
+- pcie0: pcie@0,0 {
++ slot0: pcie@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+- status = "disabled";
+-
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
+ <0 0 0 2 &pcie_intc0 1>,
+@@ -844,15 +840,39 @@
+ #interrupt-cells = <1>;
+ };
+ };
++ };
+
+- pcie1: pcie@1,0 {
++ pcie1: pcie@1a145000 {
++ compatible = "mediatek,mt7622-pcie";
++ device_type = "pci";
++ reg = <0 0x1a145000 0 0x1000>;
++ reg-names = "port1";
++ mediatek,pcie-cfg = <&pciecfg>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "pcie_irq";
++ clocks = <&pciesys CLK_PCIE_P1_MAC_EN>,
++ /* designer has connect RC1 with p0_ahb clock */
++ <&pciesys CLK_PCIE_P0_AHB_EN>,
++ <&pciesys CLK_PCIE_P1_AUX_EN>,
++ <&pciesys CLK_PCIE_P1_AXI_EN>,
++ <&pciesys CLK_PCIE_P1_OBFF_EN>,
++ <&pciesys CLK_PCIE_P1_PIPE_EN>;
++ clock-names = "sys_ck1", "ahb_ck1", "aux_ck1",
++ "axi_ck1", "obff_ck1", "pipe_ck1";
++
++ power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
++ bus-range = <0x00 0xff>;
++ ranges = <0x82000000 0 0x28000000 0x0 0x28000000 0 0x8000000>;
++ status = "disabled";
++
++ slot1: pcie@1,0 {
+ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+- status = "disabled";
+-
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+ <0 0 0 2 &pcie_intc1 1>,
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -254,18 +254,16 @@
+ };
+ };
+
+-&pcie {
++&pcie0 {
+ pinctrl-names = "default";
+- pinctrl-0 = <&pcie0_pins>, <&pcie1_pins>;
++ pinctrl-0 = <&pcie0_pins>;
+ status = "okay";
++};
+
+- pcie@0,0 {
+- status = "okay";
+- };
+-
+- pcie@1,0 {
+- status = "okay";
+- };
++&pcie1 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pcie1_pins>;
++ status = "okay";
+ };
+
+ &pio {
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0994-ARM-dts-mediatek-Update-mt7629-PCIe-node.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0994-ARM-dts-mediatek-Update-mt7629-PCIe-node.patch
new file mode 100644
index 0000000..b20e1fc
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/0994-ARM-dts-mediatek-Update-mt7629-PCIe-node.patch
@@ -0,0 +1,203 @@
+From patchwork Thu May 28 06:16:48 2020
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+X-Patchwork-Submitter: Chuanjia Liu <chuanjia.liu@mediatek.com>
+X-Patchwork-Id: 11574797
+Return-Path:
+ <SRS0=ftSA=7K=lists.infradead.org=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@kernel.org>
+Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org
+ [172.30.200.123])
+ by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 30A5E1392
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:29:05 +0000 (UTC)
+Received: from bombadil.infradead.org (bombadil.infradead.org
+ [198.137.202.133])
+ (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits))
+ (No client certificate requested)
+ by mail.kernel.org (Postfix) with ESMTPS id 08B6320721
+ for <patchwork-linux-mediatek@patchwork.kernel.org>;
+ Thu, 28 May 2020 06:29:05 +0000 (UTC)
+Authentication-Results: mail.kernel.org;
+ dkim=pass (2048-bit key) header.d=lists.infradead.org
+ header.i=@lists.infradead.org header.b="auhxDafY";
+ dkim=fail reason="signature verification failed" (1024-bit key)
+ header.d=mediatek.com header.i=@mediatek.com header.b="Kj09Arxb"
+DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 08B6320721
+Authentication-Results: mail.kernel.org;
+ dmarc=fail (p=none dis=none) header.from=mediatek.com
+Authentication-Results: mail.kernel.org;
+ spf=none
+ smtp.mailfrom=linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=lists.infradead.org; s=bombadil.20170209; h=Sender:
+ Content-Transfer-Encoding:Content-Type:Cc:List-Subscribe:List-Help:List-Post:
+ List-Archive:List-Unsubscribe:List-Id:MIME-Version:References:In-Reply-To:
+ Message-ID:Date:Subject:To:From:Reply-To:Content-ID:Content-Description:
+ Resent-Date:Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:
+ List-Owner; bh=+QPxF1vlOH7StIZYuXJa3V40x8QVDxCLF9AFXHblB9M=; b=auhxDafYBeaUZO
+ aYp2KVO8Aie0v4tYtRwBon7hF+x55JwD78SAxQR2RsSvrlOo9cMYYby+ToUWflVUWQ60FapAl+w+l
+ nkEjIOrLBErHwxNOcsD8T5kjyCBMqlz4OMAQYUDNJ3fSugRlGhOtxkjCGd9ebB8N2Rvu6/U8P1A9n
+ P15mEQoc+RLonR1+9mBgwTEXErjsraxkimTD4Txsp4IvMs3UdsMkP+r3OT5S/p+Uj6O9ES0h7xIon
+ aL79KaVqRLHrfZxnrVwuGiecAiTp8qLy9clHuJU32NA6ZcXH1OnWipKApgp8Ck7ys80WPKaMrat9B
+ XuskJ63w13DZAbCVvuGQ==;
+Received: from localhost ([127.0.0.1] helo=bombadil.infradead.org)
+ by bombadil.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeC2J-00014n-M9; Thu, 28 May 2020 06:29:03 +0000
+Received: from mailgw02.mediatek.com ([216.200.240.185])
+ by bombadil.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux))
+ id 1jeC2H-00013t-Li; Thu, 28 May 2020 06:29:03 +0000
+X-UUID: a4877c1586e64afeb2d6172e10605d2b-20200527
+DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed;
+ d=mediatek.com;
+ s=dk;
+ h=Content-Transfer-Encoding:Content-Type:MIME-Version:References:In-Reply-To:Message-ID:Date:Subject:CC:To:From;
+ bh=CIwcBFK1x0LbOjDt1BG6/knHFxDHRiqj8ov/jWEZDBY=;
+ b=Kj09ArxbnLVTc9bpaVPT3jQrIVjhL87sSYyVF9dFypS976k78Ce9gZd0f4K3zAZbYZHYoQtuyOQ9TOeufQfgD+Cr+j5VR7pTdO2E1iXHFs/eQAz5gAjvjlK01z1JiunrLnn9dvIr6c1gEkjQHny0VpuZ1duxx79jwYusg/Nw6Wc=;
+X-UUID: a4877c1586e64afeb2d6172e10605d2b-20200527
+Received: from mtkcas66.mediatek.inc [(172.29.193.44)] by
+ mailgw02.mediatek.com
+ (envelope-from <chuanjia.liu@mediatek.com>)
+ (musrelay.mediatek.com ESMTP with TLS)
+ with ESMTP id 899663677; Wed, 27 May 2020 22:29:21 -0800
+Received: from MTKMBS07N2.mediatek.inc (172.21.101.141) by
+ MTKMBS62DR.mediatek.inc (172.29.94.18) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Wed, 27 May 2020 23:18:50 -0700
+Received: from mtkcas07.mediatek.inc (172.21.101.84) by
+ mtkmbs07n2.mediatek.inc (172.21.101.141) with Microsoft SMTP Server (TLS) id
+ 15.0.1497.2; Thu, 28 May 2020 14:18:54 +0800
+Received: from localhost.localdomain (10.17.3.153) by mtkcas07.mediatek.inc
+ (172.21.101.73) with Microsoft SMTP Server id 15.0.1497.2 via Frontend
+ Transport; Thu, 28 May 2020 14:18:52 +0800
+From: <chuanjia.liu@mediatek.com>
+To: <robh+dt@kernel.org>, <ryder.lee@mediatek.com>, <matthias.bgg@gmail.com>
+Subject: [PATCH v2 4/4] ARM: dts: mediatek: Update mt7629 PCIe node
+Date: Thu, 28 May 2020 14:16:48 +0800
+Message-ID: <20200528061648.32078-5-chuanjia.liu@mediatek.com>
+X-Mailer: git-send-email 2.18.0
+In-Reply-To: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+References: <20200528061648.32078-1-chuanjia.liu@mediatek.com>
+MIME-Version: 1.0
+X-MTK: N
+X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3
+X-CRM114-CacheID: sfid-20200527_232901_719172_E5A99C62
+X-CRM114-Status: GOOD ( 11.61 )
+X-Spam-Score: -0.2 (/)
+X-Spam-Report: SpamAssassin version 3.4.4 on bombadil.infradead.org summary:
+ Content analysis details: (-0.2 points)
+ pts rule name description
+ ---- ----------------------
+ --------------------------------------------------
+ -0.0 SPF_PASS SPF: sender matches SPF record
+ 0.0 SPF_HELO_NONE SPF: HELO does not publish an SPF Record
+ 0.0 MIME_BASE64_TEXT RAW: Message text disguised using base64
+ encoding
+ -0.1 DKIM_VALID_AU Message has a valid DKIM or DK signature from
+ author's domain
+ 0.1 DKIM_SIGNED Message has a DKIM or DK signature,
+ not necessarily
+ valid
+ -0.1 DKIM_VALID Message has at least one valid DKIM or DK signature
+ -0.1 DKIM_VALID_EF Message has a valid DKIM or DK signature from
+ envelope-from domain
+ 0.0 UNPARSEABLE_RELAY Informational: message has unparseable relay
+ lines
+X-BeenThere: linux-mediatek@lists.infradead.org
+X-Mailman-Version: 2.1.29
+Precedence: list
+List-Id: <linux-mediatek.lists.infradead.org>
+List-Unsubscribe: <http://lists.infradead.org/mailman/options/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=unsubscribe>
+List-Archive: <http://lists.infradead.org/pipermail/linux-mediatek/>
+List-Post: <mailto:linux-mediatek@lists.infradead.org>
+List-Help: <mailto:linux-mediatek-request@lists.infradead.org?subject=help>
+List-Subscribe: <http://lists.infradead.org/mailman/listinfo/linux-mediatek>,
+ <mailto:linux-mediatek-request@lists.infradead.org?subject=subscribe>
+Cc: devicetree@vger.kernel.org, lorenzo.pieralisi@arm.com,
+ srv_heupstream@mediatek.com, "chuanjia.liu" <Chuanjia.Liu@mediatek.com>,
+ linux-pci@vger.kernel.org, linux-kernel@vger.kernel.org,
+ jianjun.wang@mediatek.com, linux-mediatek@lists.infradead.org,
+ yong.wu@mediatek.com, bhelgaas@google.com,
+ linux-arm-kernel@lists.infradead.org, amurray@thegoodpenguin.co.uk
+Sender: "Linux-mediatek" <linux-mediatek-bounces@lists.infradead.org>
+Errors-To:
+ linux-mediatek-bounces+patchwork-linux-mediatek=patchwork.kernel.org@lists.infradead.org
+
+From: "chuanjia.liu" <Chuanjia.Liu@mediatek.com>
+
+Remove unused property and add pciecfg node.
+
+Signed-off-by: chuanjia.liu <Chuanjia.Liu@mediatek.com>
+---
+ arch/arm/boot/dts/mt7629-rfb.dts | 3 ++-
+ arch/arm/boot/dts/mt7629.dtsi | 23 +++++++++++++----------
+ 2 files changed, 15 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/boot/dts/mt7629-rfb.dts
++++ b/arch/arm/boot/dts/mt7629-rfb.dts
+@@ -171,9 +171,10 @@
+ };
+ };
+
+-&pcie {
++&pcie1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pcie_pins>;
++ status = "okay";
+ };
+
+ &pciephy1 {
+--- a/arch/arm/boot/dts/mt7629.dtsi
++++ b/arch/arm/boot/dts/mt7629.dtsi
+@@ -368,16 +368,21 @@
+ #reset-cells = <1>;
+ };
+
+- pcie: pcie@1a140000 {
++ pciecfg: pciecfg@1a140000 {
++ compatible = "mediatek,mt7629-pciecfg", "syscon";
++ reg = <0x1a140000 0x1000>;
++ };
++
++ pcie1: pcie@1a145000 {
+ compatible = "mediatek,mt7629-pcie";
+ device_type = "pci";
+- reg = <0x1a140000 0x1000>,
+- <0x1a145000 0x1000>;
+- reg-names = "subsys","port1";
++ reg = <0x1a145000 0x1000>;
++ reg-names = "port1";
++ mediatek,pcie-cfg = <&pciecfg>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+- interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_LOW>,
+- <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-names = "pcie_irq";
+ clocks = <&pciesys CLK_PCIE_P1_MAC_EN>,
+ <&pciesys CLK_PCIE_P0_AHB_EN>,
+ <&pciesys CLK_PCIE_P1_AUX_EN>,
+@@ -398,21 +403,19 @@
+ power-domains = <&scpsys MT7622_POWER_DOMAIN_HIF0>;
+ bus-range = <0x00 0xff>;
+ ranges = <0x82000000 0 0x20000000 0x20000000 0 0x10000000>;
++ status = "disabled";
+
+- pcie1: pcie@1,0 {
+- device_type = "pci";
++ slot1: pcie@1,0 {
+ reg = <0x0800 0 0 0 0>;
+ #address-cells = <3>;
+ #size-cells = <2>;
+ #interrupt-cells = <1>;
+ ranges;
+- num-lanes = <1>;
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
+ <0 0 0 2 &pcie_intc1 1>,
+ <0 0 0 3 &pcie_intc1 2>,
+ <0 0 0 4 &pcie_intc1 3>;
+-
+ pcie_intc1: interrupt-controller {
+ interrupt-controller;
+ #address-cells = <0>;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1001-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1001-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch
new file mode 100644
index 0000000..72719c8
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1001-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch
@@ -0,0 +1,22 @@
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 5ba1c72f..f4239459 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -69,6 +69,7 @@
+ #include <net/ip6_checksum.h>
+ #include <net/xfrm.h>
+ #include <net/mpls.h>
++#include <net/ra_nat.h>
+
+ #include <linux/uaccess.h>
+ #include <trace/events/skb.h>
+@@ -1666,6 +1667,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
+ skb_shinfo(skb),
+ offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
+
++ /* preserve the hnat (FOE) info stored in the skb headroom across the realloc */
++ memcpy(data, skb->head, FOE_INFO_LEN);
++
+ /*
+ * if shinfo is shared we must drop the old head gracefully, but if it
+ * is not we can just drop the old head and let the existing refcount
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1002-mtkhnat-add-support-for-virtual-interface-acceleration.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1002-mtkhnat-add-support-for-virtual-interface-acceleration.patch
new file mode 100644
index 0000000..150087a
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1002-mtkhnat-add-support-for-virtual-interface-acceleration.patch
@@ -0,0 +1,127 @@
+diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
+index 3d73c0c..960ade1 100644
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -92,9 +92,12 @@ struct flow_offload {
+ #define FLOW_OFFLOAD_PATH_VLAN BIT(1)
+ #define FLOW_OFFLOAD_PATH_PPPOE BIT(2)
+ #define FLOW_OFFLOAD_PATH_DSA BIT(3)
++#define FLOW_OFFLOAD_PATH_DSLITE BIT(4)
++#define FLOW_OFFLOAD_PATH_6RD BIT(5)
+
+ struct flow_offload_hw_path {
+ struct net_device *dev;
++ struct net_device *virt_dev;
+ u32 flags;
+
+ u8 eth_src[ETH_ALEN];
+diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
+index be6801524..c51af70f6 100644
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -761,6 +761,7 @@ static int vlan_dev_flow_offload_check(struct flow_offload_hw_path *path)
+ path->flags |= FLOW_OFFLOAD_PATH_VLAN;
+ path->vlan_proto = vlan->vlan_proto;
+ path->vlan_id = vlan->vlan_id;
++ path->virt_dev = dev;
+ path->dev = vlan->real_dev;
+
+ if (vlan->real_dev->netdev_ops->ndo_flow_offload_check)
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index 1b7e3141c..da4e34f74 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -57,6 +57,11 @@
+ #include <net/netns/generic.h>
+ #include <net/dst_metadata.h>
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
++
+ MODULE_AUTHOR("Ville Nuorvala");
+ MODULE_DESCRIPTION("IPv6 tunneling device");
+ MODULE_LICENSE("GPL");
+@@ -1880,6 +1885,22 @@ int ip6_tnl_get_iflink(const struct net_device *dev)
+ }
+ EXPORT_SYMBOL(ip6_tnl_get_iflink);
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path)
++{
++ struct net_device *dev = path->dev;
++ struct ip6_tnl *tnl = netdev_priv(dev);
++
++ if (path->flags & FLOW_OFFLOAD_PATH_DSLITE)
++ return -EEXIST;
++
++ path->flags |= FLOW_OFFLOAD_PATH_DSLITE;
++ path->dev = tnl->dev;
++
++ return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
+ unsigned int num)
+ {
+@@ -1941,6 +1962,9 @@ static const struct net_device_ops ip6_tnl_netdev_ops = {
+ .ndo_change_mtu = ip6_tnl_change_mtu,
+ .ndo_get_stats = ip6_get_stats,
+ .ndo_get_iflink = ip6_tnl_get_iflink,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ .ndo_flow_offload_check = ipip6_dev_flow_offload_check,
++#endif
+ };
+
+ #define IPXIPX_FEATURES (NETIF_F_SG | \
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index 98954830c..42b6e8c4c 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -52,6 +52,11 @@
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++#include <linux/netfilter.h>
++#include <net/netfilter/nf_flow_table.h>
++#endif
++
+ /*
+ This version of net/ipv6/sit.c is cloned of net/ipv4/ip_gre.c
+
+@@ -1345,6 +1350,22 @@ ipip6_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+ return err;
+ }
+
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++static int ipip6_dev_flow_offload_check(struct flow_offload_hw_path *path)
++{
++ struct net_device *dev = path->dev;
++ struct ip_tunnel *tnl = netdev_priv(dev);
++
++ if (path->flags & FLOW_OFFLOAD_PATH_6RD)
++ return -EEXIST;
++
++ path->flags |= FLOW_OFFLOAD_PATH_6RD;
++ path->dev = tnl->dev;
++
++ return 0;
++}
++#endif /* CONFIG_NF_FLOW_TABLE */
++
+ static const struct net_device_ops ipip6_netdev_ops = {
+ .ndo_init = ipip6_tunnel_init,
+ .ndo_uninit = ipip6_tunnel_uninit,
+@@ -1352,6 +1373,9 @@ static const struct net_device_ops ipip6_netdev_ops = {
+ .ndo_do_ioctl = ipip6_tunnel_ioctl,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_get_iflink = ip_tunnel_get_iflink,
++#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
++ .ndo_flow_offload_check = ipip6_dev_flow_offload_check,
++#endif
+ };
+
+ static void ipip6_dev_free(struct net_device *dev)
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1003-dts-mt7622-rfb-change-to-ax-mtd-layout.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1003-dts-mt7622-rfb-change-to-ax-mtd-layout.patch
new file mode 100644
index 0000000..74a294f
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1003-dts-mt7622-rfb-change-to-ax-mtd-layout.patch
@@ -0,0 +1,23 @@
+--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
++++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+@@ -589,17 +589,17 @@
+
+ factory: partition@1c0000 {
+ label = "Factory";
+- reg = <0x1c0000 0x0040000>;
++ reg = <0x1c0000 0x0100000>;
+ };
+
+ partition@200000 {
+ label = "firmware";
+- reg = <0x200000 0x2000000>;
++ reg = <0x2c0000 0x2000000>;
+ };
+
+ partition@2200000 {
+ label = "User_data";
+- reg = <0x2200000 0x4000000>;
++ reg = <0x22c0000 0x4000000>;
+ };
+ };
+ };
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1004_remove_eth_transmit_timeout_hw_reset.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1004_remove_eth_transmit_timeout_hw_reset.patch
new file mode 100755
index 0000000..69a0acb
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1004_remove_eth_transmit_timeout_hw_reset.patch
@@ -0,0 +1,14 @@
+Index: linux-5.4.143/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+===================================================================
+--- linux-5.4.143.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ linux-5.4.143/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -2483,9 +2483,7 @@ static void mtk_tx_timeout(struct net_de
+ eth->netdev[mac->id]->stats.tx_errors++;
+ netif_err(eth, tx_err, dev,
+ "transmit timed out\n");
+- schedule_work(&eth->pending_work);
+ }
+-
+ static irqreturn_t mtk_handle_irq_rx(int irq, void *priv)
+ {
+ struct mtk_napi *rx_napi = priv;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1005-mtkhnat-fix-pse-hang-for-multi-stations.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1005-mtkhnat-fix-pse-hang-for-multi-stations.patch
new file mode 100644
index 0000000..aaf1794
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1005-mtkhnat-fix-pse-hang-for-multi-stations.patch
@@ -0,0 +1,12 @@
+diff --git a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+index c0794e37..2968eb68 100644
+--- a/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
++++ b/drivers/net/ethernet/mediatek/mtk_hnat/hnat.c
+@@ -250,6 +250,7 @@ static int hnat_start(int ppe_id)
+ writel(0, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); /* pdma */
+ /* writel(0x55555555, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT); */ /* qdma */
+ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, TTL0_DRP, 0);
++ cr_set_field(hnat_priv->ppe_base[ppe_id] + PPE_GLO_CFG, MCAST_TB_EN, 1);
+
+ if (hnat_priv->data->version == MTK_HNAT_V4) {
+ writel(0xcb777, hnat_priv->ppe_base[ppe_id] + PPE_DFT_CPORT1);
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1010-pcie-mediatek-fix-clearing-interrupt-status.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1010-pcie-mediatek-fix-clearing-interrupt-status.patch
new file mode 100644
index 0000000..d3ef78d
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1010-pcie-mediatek-fix-clearing-interrupt-status.patch
@@ -0,0 +1,24 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Fri, 4 Sep 2020 18:33:27 +0200
+Subject: [PATCH] pcie-mediatek: fix clearing interrupt status
+
+Clearing the status needs to happen after running the handler, otherwise
+we will get an extra spurious interrupt after the cause has been cleared
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -616,10 +616,10 @@ static void mtk_pcie_intr_handler(struct
+ if (status & INTX_MASK) {
+ for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) {
+ /* Clear the INTx */
+- writel(1 << bit, port->base + PCIE_INT_STATUS);
+ virq = irq_find_mapping(port->irq_domain,
+ bit - INTX_SHIFT);
+ generic_handle_irq(virq);
++ writel(1 << bit, port->base + PCIE_INT_STATUS);
+ }
+ }
+
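The reordering above matters because INTx is level triggered: if the latched status bit is cleared while the endpoint is still driving the line, the bit latches again at once and the CPU takes a second, spurious interrupt for a cause that was already handled. Condensed sketch of the fixed dispatch order (the register offset, loop bound and names are simplified relative to the driver):

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

#define MY_INT_STATUS	0x0	/* illustrative status register offset */

static void dispatch_intx(void __iomem *port_base, unsigned long status,
			  struct irq_domain *domain)
{
	unsigned int bit;

	for_each_set_bit(bit, &status, 4) {
		/* Run the endpoint's handler first so it deasserts INTx... */
		generic_handle_irq(irq_find_mapping(domain, bit));
		/* ...then ack the latched status bit so it cannot re-latch. */
		writel(BIT(bit), port_base + MY_INT_STATUS);
	}
}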
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1015-pcie-add-pcie-gen3-upstream-driver.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1015-pcie-add-pcie-gen3-upstream-driver.patch
new file mode 100644
index 0000000..4b99d9d
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1015-pcie-add-pcie-gen3-upstream-driver.patch
@@ -0,0 +1,36 @@
+diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
+index 70e0782..67988f8 100644
+--- a/drivers/pci/controller/Kconfig
++++ b/drivers/pci/controller/Kconfig
+@@ -241,6 +241,19 @@ config PCIE_MEDIATEK
+ Say Y here if you want to enable PCIe controller support on
+ MediaTek SoCs.
+
++config PCIE_MEDIATEK_GEN3
++ tristate "MediaTek Gen3 PCIe controller"
++ depends on ARCH_MEDIATEK || COMPILE_TEST
++ depends on PCI_MSI_IRQ_DOMAIN
++ help
++ Adds support for PCIe Gen3 MAC controller for MediaTek SoCs.
++ This PCIe controller is compatible with Gen3, Gen2 and Gen1 speed,
++ and supports up to 256 MSI interrupt numbers for
++ multi-function devices.
++
++ Say Y here if you want to enable Gen3 PCIe controller support on
++ MediaTek SoCs.
++
+ config PCIE_MOBIVEIL
+ bool "Mobiveil AXI PCIe controller"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
+index a2a22c9..54a496a 100644
+--- a/drivers/pci/controller/Makefile
++++ b/drivers/pci/controller/Makefile
+@@ -27,6 +27,7 @@ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+ obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
+ obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
++obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
+ obj-$(CONFIG_PCIE_MOBIVEIL) += pcie-mobiveil.o
+ obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
+ obj-$(CONFIG_VMD) += vmd.o
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1020-spi-nor-w25q512jv.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1020-spi-nor-w25q512jv.patch
new file mode 100644
index 0000000..a234555
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1020-spi-nor-w25q512jv.patch
@@ -0,0 +1,25 @@
+From: David Bauer <mail@david-bauer.net>
+Date: Thu, 11 Feb 2021 19:57:26 +0100
+Subject: [PATCH] mtd: spi-nor: add support for Winbond W25Q512
+
+The Winbond W25Q512 is a 512 Mbit (64 MiB) SPI-NOR chip. It supports 4K
+sectors as well as block protection and Dual-/Quad-read.
+
+Tested on: Ubiquiti UniFi 6 LR
+
+Signed-off-by: David Bauer <mail@david-bauer.net>
+
+Ref: https://patchwork.ozlabs.org/project/linux-mtd/patch/20210213151047.11700-1-mail@david-bauer.net/
+
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -2552,6 +2552,9 @@ static const struct flash_info spi_nor_i
+ .fixups = &w25q256_fixups },
+ { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512,
+ SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
++ { "w25q512jv", INFO(0xef4020, 0, 64 * 1024, 1024,
++ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ |
++ SPI_NOR_HAS_TB | SPI_NOR_HAS_LOCK) },
+ { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+ SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1021-ubnt-ledbar-driver.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1021-ubnt-ledbar-driver.patch
new file mode 100644
index 0000000..41ab094
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1021-ubnt-ledbar-driver.patch
@@ -0,0 +1,29 @@
+--- a/drivers/leds/Kconfig
++++ b/drivers/leds/Kconfig
+@@ -824,6 +824,16 @@ config LEDS_LM36274
+ Say Y to enable the LM36274 LED driver for TI LMU devices.
+ This supports the LED device LM36274.
+
++config LEDS_UBNT_LEDBAR
++ tristate "LED support for Ubiquiti UniFi 6 LR"
++ depends on LEDS_CLASS && I2C && OF
++ help
++ This option enables support for the Ubiquiti LEDBAR
++ LED driver.
++
++ To compile this driver as a module, choose M here: the module
++ will be called leds-ubnt-ledbar.
++
+ comment "LED Triggers"
+ source "drivers/leds/trigger/Kconfig"
+
+--- a/drivers/leds/Makefile
++++ b/drivers/leds/Makefile
+@@ -85,6 +85,7 @@ obj-$(CONFIG_LEDS_LM3601X) += leds-lm36
+ obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o
+ obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
+ obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
++obj-$(CONFIG_LEDS_UBNT_LEDBAR) += leds-ubnt-ledbar.o
+
+ # LED SPI Drivers
+ obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1023-kgdb-add-interrupt-control.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1023-kgdb-add-interrupt-control.patch
new file mode 100644
index 0000000..e0ee954
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1023-kgdb-add-interrupt-control.patch
@@ -0,0 +1,42 @@
+diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
+index 1a157ca..258fe4b 100644
+--- a/arch/arm64/kernel/kgdb.c
++++ b/arch/arm64/kernel/kgdb.c
+@@ -18,6 +18,10 @@
+ #include <asm/debug-monitors.h>
+ #include <asm/insn.h>
+ #include <asm/traps.h>
++#include <asm/ptrace.h>
++
++
++static DEFINE_PER_CPU(unsigned int, kgdb_pstate);
+
+ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "x0", 8, offsetof(struct pt_regs, regs[0])},
+@@ -206,6 +210,8 @@ int kgdb_arch_handle_exception(int exception_vector, int signo,
+ err = 0;
+ break;
+ case 's':
++ __this_cpu_write(kgdb_pstate, linux_regs->pstate);
++ linux_regs->pstate |= PSR_I_BIT;
+ /*
+ * Update step address value with address passed
+ * with step packet.
+@@ -249,9 +255,17 @@ NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
+
+ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+ {
++ unsigned int pstate;
++
+ if (!kgdb_single_step)
+ return DBG_HOOK_ERROR;
++ kernel_disable_single_step();
+
++ pstate = __this_cpu_read(kgdb_pstate);
++ if (pstate & PSR_I_BIT)
++ regs->pstate |= PSR_I_BIT;
++ else
++ regs->pstate &= ~PSR_I_BIT;
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
+ return DBG_HOOK_HANDLED;
+ }
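
The hunk above keeps interrupts from firing while kgdb single-steps: when the 's' packet is handled, the current PSTATE is stashed in a per-CPU variable and PSR_I_BIT is set so IRQs stay masked for the step, and the step-breakpoint handler restores the saved I bit before re-entering kgdb, so the step lands on the next instruction rather than inside an interrupt handler. A minimal userspace sketch of that save/mask/restore pattern, assuming a single global instead of the per-CPU kgdb_pstate and with step_enter()/step_exit() as illustrative names only:

#include <stdio.h>

#define PSR_I_BIT (1UL << 7)    /* IRQ-mask bit in the saved PSTATE, as on arm64 */

struct regs {
        unsigned long pstate;
};

static unsigned long saved_pstate;

/* Before single-stepping: remember the caller's I bit, then mask IRQs. */
static void step_enter(struct regs *r)
{
        saved_pstate = r->pstate;
        r->pstate |= PSR_I_BIT;
}

/* At the step breakpoint: put the I bit back the way it was. */
static void step_exit(struct regs *r)
{
        if (saved_pstate & PSR_I_BIT)
                r->pstate |= PSR_I_BIT;
        else
                r->pstate &= ~PSR_I_BIT;
}

int main(void)
{
        struct regs r = { .pstate = 0 };        /* IRQs enabled before the step */

        step_enter(&r);
        printf("during step: IRQs %s\n", (r.pstate & PSR_I_BIT) ? "masked" : "enabled");
        step_exit(&r);
        printf("after step:  IRQs %s\n", (r.pstate & PSR_I_BIT) ? "masked" : "enabled");
        return 0;
}
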
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1024-pcie-add-multi-MSI-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1024-pcie-add-multi-MSI-support.patch
new file mode 100644
index 0000000..5cf486c
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1024-pcie-add-multi-MSI-support.patch
@@ -0,0 +1,64 @@
+diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
+index 2a54fa7a3..132b3204c 100644
+--- a/drivers/pci/controller/pcie-mediatek.c
++++ b/drivers/pci/controller/pcie-mediatek.c
+@@ -446,24 +446,24 @@ static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int vir
+ unsigned int nr_irqs, void *args)
+ {
+ struct mtk_pcie_port *port = domain->host_data;
+- unsigned long bit;
++ int bit, i;
+
+- WARN_ON(nr_irqs != 1);
+ mutex_lock(&port->lock);
+
+- bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM);
+- if (bit >= MTK_MSI_IRQS_NUM) {
++ bit = bitmap_find_free_region(port->msi_irq_in_use, MTK_MSI_IRQS_NUM,
++ order_base_2(nr_irqs));
++ if (bit < 0) {
+ mutex_unlock(&port->lock);
+ return -ENOSPC;
+ }
+
+- __set_bit(bit, port->msi_irq_in_use);
+-
+ mutex_unlock(&port->lock);
+
+- irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip,
+- domain->host_data, handle_edge_irq,
+- NULL, NULL);
++ for (i = 0; i < nr_irqs; i++) {
++ irq_domain_set_info(domain, virq + i, bit + i,
++ &mtk_msi_bottom_irq_chip, domain->host_data,
++ handle_edge_irq, NULL, NULL);
++ }
+
+ return 0;
+ }
+@@ -501,7 +501,7 @@ static struct irq_chip mtk_msi_irq_chip = {
+
+ static struct msi_domain_info mtk_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+- MSI_FLAG_PCI_MSIX),
++ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
+ .chip = &mtk_msi_irq_chip,
+ };
+
+@@ -633,14 +633,14 @@ static void mtk_pcie_intr_handler(struct irq_desc *desc)
+ if (status & MSI_STATUS){
+ unsigned long imsi_status;
+
++ /* Clear MSI interrupt status */
++ writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) {
+ for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) {
+ virq = irq_find_mapping(port->inner_domain, bit);
+ generic_handle_irq(virq);
+ }
+ }
+- /* Clear MSI interrupt status */
+- writel(MSI_STATUS, port->base + PCIE_INT_STATUS);
+ }
+ }
+
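
The irq_domain_alloc change above drops the one-vector assumption: instead of claiming a single free bit, it reserves a naturally aligned block of 2^order_base_2(nr_irqs) hwirqs with bitmap_find_free_region() and then ties virq+i to hwirq bit+i, which is what MSI_FLAG_MULTI_PCI_MSI needs. A minimal userspace sketch of that allocation scheme, assuming a single 32-vector bitmap word; MSI_IRQS_NUM, find_free_region() and the local order_base_2() are illustrative stand-ins, not the kernel helpers:

#include <stdio.h>

#define MSI_IRQS_NUM 32          /* assumption: 32 vectors, i.e. one bitmap word */

static unsigned long msi_in_use; /* bit i set => hwirq i is allocated */

/* Smallest order such that (1 << order) >= n. */
static int order_base_2(unsigned int n)
{
        int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

/*
 * Find a naturally aligned run of (1 << order) clear bits, mark it used and
 * return its first bit, or -1 if none is left -- the same contract as the
 * kernel's bitmap_find_free_region().
 */
static int find_free_region(int order)
{
        unsigned int step = 1u << order;
        unsigned long mask = (step == 8 * sizeof(unsigned long)) ?
                             ~0UL : (1UL << step) - 1;
        unsigned int pos;

        for (pos = 0; pos + step <= MSI_IRQS_NUM; pos += step) {
                if (!(msi_in_use & (mask << pos))) {
                        msi_in_use |= mask << pos;
                        return (int)pos;
                }
        }
        return -1;
}

int main(void)
{
        int nr_irqs = 4;         /* PCI multi-MSI requests are powers of two */
        int bit = find_free_region(order_base_2(nr_irqs));

        printf("allocated hwirqs %d..%d\n", bit, bit + nr_irqs - 1);
        return 0;
}

The block has to be contiguous and aligned to its size because PCI multi-MSI encodes the vector index in the low bits of the message data, so the vectors cannot be handed out one scattered bit at a time.
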
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1661-Add-trngv2-driver-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1661-Add-trngv2-driver-support.patch
new file mode 100644
index 0000000..7c09a71
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/1661-Add-trngv2-driver-support.patch
@@ -0,0 +1,185 @@
+From ae5611b1b7a857edb3d9c8e900b550c76f7c236e Mon Sep 17 00:00:00 2001
+From: "Mingming.Su" <Mingming.Su@mediatek.com>
+Date: Fri, 17 Dec 2021 20:27:34 +0800
+Subject: [PATCH] Add trngv2 driver support
+
+---
+ drivers/char/hw_random/mtk-rng.c | 105 +++++++++++++++++++++++--------
+ 1 file changed, 78 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
+index a8bd06da7..75fca4cef 100644
+--- a/drivers/char/hw_random/mtk-rng.c
++++ b/drivers/char/hw_random/mtk-rng.c
+@@ -6,6 +6,7 @@
+ */
+ #define MTK_RNG_DEV KBUILD_MODNAME
+
++#include <linux/arm-smccc.h>
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/err.h>
+@@ -15,8 +16,12 @@
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
++#include <linux/soc/mediatek/mtk_sip_svc.h>
++
++#define MTK_SIP_KERNEL_GET_RND MTK_SIP_SMC_CMD(0x550)
+
+ /* Runtime PM autosuspend timeout: */
+ #define RNG_AUTOSUSPEND_TIMEOUT 100
+@@ -32,10 +37,15 @@
+
+ #define to_mtk_rng(p) container_of(p, struct mtk_rng, rng)
+
++struct mtk_rng_of_data{
++ unsigned int rng_version;
++};
++
+ struct mtk_rng {
+ void __iomem *base;
+ struct clk *clk;
+ struct hwrng rng;
++ const struct mtk_rng_of_data *soc;
+ };
+
+ static int mtk_rng_init(struct hwrng *rng)
+@@ -103,41 +113,74 @@ static int mtk_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
+ return retval || !wait ? retval : -EIO;
+ }
+
++static int mtk_rngv2_read(struct hwrng *rng, void *buf, size_t max, bool wait)
++{
++ struct arm_smccc_res res;
++ int retval = 0;
++
++ while (max >= sizeof(u32)) {
++ arm_smccc_smc(MTK_SIP_KERNEL_GET_RND, 0, 0, 0, 0, 0, 0, 0,
++ &res);
++ if (res.a0)
++ break;
++
++ *(u32 *)buf = res.a1;
++ retval += sizeof(u32);
++ buf += sizeof(u32);
++ max -= sizeof(u32);
++ }
++
++ return retval || !wait ? retval : -EIO;
++}
++
+ static int mtk_rng_probe(struct platform_device *pdev)
+ {
+ struct resource *res;
+ int ret;
+ struct mtk_rng *priv;
+
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- if (!res) {
+- dev_err(&pdev->dev, "no iomem resource\n");
+- return -ENXIO;
+- }
+-
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+- priv->rng.name = pdev->name;
++ priv->soc = of_device_get_match_data(&pdev->dev);
++ if (priv->soc->rng_version == 1) {
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "no iomem resource\n");
++ return -ENXIO;
++ }
++
++ priv->base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(priv->base))
++ return PTR_ERR(priv->base);
++
++ priv->clk = devm_clk_get(&pdev->dev, "rng");
++ if (IS_ERR(priv->clk)) {
++ ret = PTR_ERR(priv->clk);
++ dev_err(&pdev->dev, "no clock for device: %d\n", ret);
++ return ret;
++ }
++
+ #ifndef CONFIG_PM
+- priv->rng.init = mtk_rng_init;
+- priv->rng.cleanup = mtk_rng_cleanup;
++ priv->rng.init = mtk_rng_init;
++ priv->rng.cleanup = mtk_rng_cleanup;
+ #endif
+- priv->rng.read = mtk_rng_read;
++ priv->rng.read = mtk_rng_read;
++
++ pm_runtime_set_autosuspend_delay(&pdev->dev,
++ RNG_AUTOSUSPEND_TIMEOUT);
++ pm_runtime_use_autosuspend(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++ } else {
++ priv->rng.read = mtk_rngv2_read;
++ }
++
++ priv->rng.name = pdev->name;
+ priv->rng.priv = (unsigned long)&pdev->dev;
+ priv->rng.quality = 900;
+
+- priv->clk = devm_clk_get(&pdev->dev, "rng");
+- if (IS_ERR(priv->clk)) {
+- ret = PTR_ERR(priv->clk);
+- dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+- return ret;
+- }
+-
+- priv->base = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(priv->base))
+- return PTR_ERR(priv->base);
++ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret) {
+@@ -146,11 +189,6 @@ static int mtk_rng_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- dev_set_drvdata(&pdev->dev, priv);
+- pm_runtime_set_autosuspend_delay(&pdev->dev, RNG_AUTOSUSPEND_TIMEOUT);
+- pm_runtime_use_autosuspend(&pdev->dev);
+- pm_runtime_enable(&pdev->dev);
+-
+ dev_info(&pdev->dev, "registered RNG driver\n");
+
+ return 0;
+@@ -185,9 +223,22 @@ static const struct dev_pm_ops mtk_rng_pm_ops = {
+ #define MTK_RNG_PM_OPS NULL
+ #endif /* CONFIG_PM */
+
++static const struct mtk_rng_of_data mt7981_rng_data = {
++ .rng_version = 2,
++};
++
++static const struct mtk_rng_of_data mt7986_rng_data = {
++ .rng_version = 1,
++};
++
++static const struct mtk_rng_of_data mt7623_rng_data = {
++ .rng_version = 1,
++};
++
+ static const struct of_device_id mtk_rng_match[] = {
+- { .compatible = "mediatek,mt7986-rng" },
+- { .compatible = "mediatek,mt7623-rng" },
++ { .compatible = "mediatek,mt7981-rng", .data = &mt7981_rng_data },
++ { .compatible = "mediatek,mt7986-rng", .data = &mt7986_rng_data },
++ { .compatible = "mediatek,mt7623-rng", .data = &mt7623_rng_data },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, mtk_rng_match);
+--
+2.18.0
+
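
With rng_version 2 the patched probe skips the MMIO and clock setup entirely, and mtk_rngv2_read() asks the secure world for entropy one 32-bit word at a time through an SMC call (MTK_SIP_KERNEL_GET_RND), stopping as soon as a call reports failure. A minimal userspace sketch of that read loop, with get_rnd_word() standing in for arm_smccc_smc() and rand() used purely as a placeholder word source:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the SMC call: returns 0 on success plus a 32-bit word,
 * mirroring res.a0 / res.a1 in the driver. */
static int get_rnd_word(uint32_t *word)
{
        *word = (uint32_t)rand();
        return 0;
}

/* Fill buf four bytes at a time; stop early if the backend reports a failure. */
static int rngv2_read(void *buf, size_t max)
{
        int retval = 0;
        uint32_t word;

        while (max >= sizeof(uint32_t)) {
                if (get_rnd_word(&word))
                        break;
                memcpy(buf, &word, sizeof(word));
                buf = (char *)buf + sizeof(word);
                retval += sizeof(word);
                max -= sizeof(word);
        }
        return retval;
}

int main(void)
{
        unsigned char out[16];

        printf("read %d bytes\n", rngv2_read(out, sizeof(out)));
        return 0;
}

In the driver the same loop additionally returns -EIO when wait was requested but no word could be read, which is the contract the hwrng core expects from a read callback.
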
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/2000-misc-add-mtk-platform.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/2000-misc-add-mtk-platform.patch
new file mode 100644
index 0000000..f280e10
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/2000-misc-add-mtk-platform.patch
@@ -0,0 +1,17 @@
+diff -urN a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+--- a/drivers/misc/Kconfig 2021-06-29 15:10:00.970788831 +0800
++++ b/drivers/misc/Kconfig 2021-06-29 15:09:41.579158152 +0800
+@@ -481,4 +481,5 @@
+ source "drivers/misc/ocxl/Kconfig"
+ source "drivers/misc/cardreader/Kconfig"
+ source "drivers/misc/habanalabs/Kconfig"
++source "drivers/misc/mediatek/Kconfig"
+ endmenu
+diff -urN a/drivers/misc/Makefile b/drivers/misc/Makefile
+--- a/drivers/misc/Makefile 2021-06-29 15:10:15.150518461 +0800
++++ b/drivers/misc/Makefile 2021-06-29 15:09:46.939056121 +0800
+@@ -57,3 +57,4 @@
+ obj-$(CONFIG_PVPANIC) += pvpanic.o
+ obj-$(CONFIG_HABANA_AI) += habanalabs/
+ obj-$(CONFIG_XILINX_SDFEC) += xilinx_sdfec.o
++obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/400-mtd-add-mtk-snand-driver.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/400-mtd-add-mtk-snand-driver.patch
new file mode 100644
index 0000000..f283bd2
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/400-mtd-add-mtk-snand-driver.patch
@@ -0,0 +1,21 @@
+--- a/drivers/mtd/Kconfig
++++ b/drivers/mtd/Kconfig
+@@ -230,6 +230,8 @@ source "drivers/mtd/hyperbus/Kconfig"
+
+ source "drivers/mtd/nmbm/Kconfig"
+
++source "drivers/mtd/mtk-snand/Kconfig"
++
+ source "drivers/mtd/composite/Kconfig"
+
+ endif # MTD
+--- a/drivers/mtd/Makefile
++++ b/drivers/mtd/Makefile
+@@ -35,5 +35,7 @@ obj-$(CONFIG_MTD_HYPERBUS) += hyperbus/
+
+ obj-y += nmbm/
+
++obj-$(CONFIG_MTK_SPI_NAND) += mtk-snand/
++
+ # Composite drivers must be loaded last
+ obj-y += composite/
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/401-pinctrl-add-mt7986-driver.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/401-pinctrl-add-mt7986-driver.patch
new file mode 100644
index 0000000..5022e49
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/401-pinctrl-add-mt7986-driver.patch
@@ -0,0 +1,28 @@
+diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
+index 701f9af..9109f91 100644
+--- a/drivers/pinctrl/mediatek/Kconfig
++++ b/drivers/pinctrl/mediatek/Kconfig
+@@ -100,6 +100,11 @@ config PINCTRL_MT7622
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_MOORE
+
++config PINCTRL_MT7986
++ bool "Mediatek MT7986 pin control"
++ depends on OF
++ select PINCTRL_MTK_MOORE
++
+ config PINCTRL_MT8173
+ bool "Mediatek MT8173 pin control"
+ depends on OF
+diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
+index a74325a..d408585 100644
+--- a/drivers/pinctrl/mediatek/Makefile
++++ b/drivers/pinctrl/mediatek/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o
+ obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
+ obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
+ obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
++obj-$(CONFIG_PINCTRL_MT7986) += pinctrl-mt7986.o
+ obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
+ obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
+ obj-$(CONFIG_PINCTRL_MT8516) += pinctrl-mt8516.o
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/402-pinctrl-add-mt7981-driver.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/402-pinctrl-add-mt7981-driver.patch
new file mode 100644
index 0000000..9e67ee7
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/402-pinctrl-add-mt7981-driver.patch
@@ -0,0 +1,41 @@
+From 1b529849f324edec053a34292e3f874bde8f7401 Mon Sep 17 00:00:00 2001
+From: Sam Shih <sam.shih@mediatek.com>
+Date: Fri, 25 Jun 2021 15:43:55 +0800
+Subject: [PATCH] Add mt7981 pinctrl driver support
+
+---
+ drivers/pinctrl/mediatek/Kconfig | 7 +++++++
+ drivers/pinctrl/mediatek/Makefile | 1 +
+ 2 files changed, 8 insertions(+)
+
+diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig
+index 9109f91..d40aee5 100644
+--- a/drivers/pinctrl/mediatek/Kconfig
++++ b/drivers/pinctrl/mediatek/Kconfig
+@@ -100,6 +100,11 @@ config PINCTRL_MT7622
+ default ARM64 && ARCH_MEDIATEK
+ select PINCTRL_MTK_MOORE
+
++config PINCTRL_MT7981
++ bool "Mediatek MT7981 pin control"
++ depends on OF
++ select PINCTRL_MTK_MOORE
++
+ config PINCTRL_MT7986
+ bool "Mediatek MT7986 pin control"
+ depends on OF
+diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile
+index d408585..e6813cf 100644
+--- a/drivers/pinctrl/mediatek/Makefile
++++ b/drivers/pinctrl/mediatek/Makefile
+@@ -15,6 +15,7 @@ obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o
+ obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o
+ obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o
+ obj-$(CONFIG_PINCTRL_MT7629) += pinctrl-mt7629.o
++obj-$(CONFIG_PINCTRL_MT7981) += pinctrl-mt7981.o
+ obj-$(CONFIG_PINCTRL_MT7986) += pinctrl-mt7986.o
+ obj-$(CONFIG_PINCTRL_MT8173) += pinctrl-mt8173.o
+ obj-$(CONFIG_PINCTRL_MT8183) += pinctrl-mt8183.o
+--
+2.6.4
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/500-auxadc-add-auxadc-32k-clk.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/500-auxadc-add-auxadc-32k-clk.patch
new file mode 100644
index 0000000..dc0dd2f
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/500-auxadc-add-auxadc-32k-clk.patch
@@ -0,0 +1,68 @@
+diff --git a/drivers/iio/adc/mt6577_auxadc.c b/drivers/iio/adc/mt6577_auxadc.c
+index 2449d91..b8a43eb 100644
+--- a/drivers/iio/adc/mt6577_auxadc.c
++++ b/drivers/iio/adc/mt6577_auxadc.c
+@@ -42,6 +42,7 @@ struct mtk_auxadc_compatible {
+ struct mt6577_auxadc_device {
+ void __iomem *reg_base;
+ struct clk *adc_clk;
++ struct clk *adc_32k_clk;
+ struct mutex lock;
+ const struct mtk_auxadc_compatible *dev_comp;
+ };
+@@ -214,6 +215,12 @@ static int __maybe_unused mt6577_auxadc_resume(struct device *dev)
+ return ret;
+ }
+
++ ret = clk_prepare_enable(adc_dev->adc_32k_clk);
++ if (ret) {
++ pr_err("failed to enable auxadc 32k clock\n");
++ return ret;
++ }
++
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ MT6577_AUXADC_PDN_EN, 0);
+ mdelay(MT6577_AUXADC_POWER_READY_MS);
+@@ -228,6 +235,8 @@ static int __maybe_unused mt6577_auxadc_suspend(struct device *dev)
+
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ 0, MT6577_AUXADC_PDN_EN);
++
++ clk_disable_unprepare(adc_dev->adc_32k_clk);
+ clk_disable_unprepare(adc_dev->adc_clk);
+
+ return 0;
+@@ -272,6 +281,17 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ adc_dev->adc_32k_clk = devm_clk_get(&pdev->dev, "32k");
++ if (IS_ERR(adc_dev->adc_32k_clk)) {
++ dev_err(&pdev->dev, "failed to get auxadc 32k clock\n");
++ } else {
++ ret = clk_prepare_enable(adc_dev->adc_32k_clk);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to enable auxadc 32k clock\n");
++ return ret;
++ }
++ }
++
+ adc_clk_rate = clk_get_rate(adc_dev->adc_clk);
+ if (!adc_clk_rate) {
+ ret = -EINVAL;
+@@ -301,6 +321,7 @@ static int mt6577_auxadc_probe(struct platform_device *pdev)
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ 0, MT6577_AUXADC_PDN_EN);
+ err_disable_clk:
++ clk_disable_unprepare(adc_dev->adc_32k_clk);
+ clk_disable_unprepare(adc_dev->adc_clk);
+ return ret;
+ }
+@@ -315,6 +336,7 @@ static int mt6577_auxadc_remove(struct platform_device *pdev)
+ mt6577_auxadc_mod_reg(adc_dev->reg_base + MT6577_AUXADC_MISC,
+ 0, MT6577_AUXADC_PDN_EN);
+
++ clk_disable_unprepare(adc_dev->adc_32k_clk);
+ clk_disable_unprepare(adc_dev->adc_clk);
+
+ return 0;
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/730-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/730-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch
new file mode 100644
index 0000000..6b10584
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/730-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch
@@ -0,0 +1,44 @@
+--- linux-5.4.77.orig/net/dsa/tag_mtk.c
++++ linux-5.4.77/net/dsa/tag_mtk.c
+@@ -73,22 +73,28 @@ static struct sk_buff *mtk_tag_rcv(struc
+ bool is_multicast_skb = is_multicast_ether_addr(dest) &&
+ !is_broadcast_ether_addr(dest);
+
+- if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
+- return NULL;
++ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) {
++ hdr = ntohs(skb->vlan_proto);
++ skb->vlan_proto = 0;
++ skb->vlan_tci = 0;
++ } else {
++ if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
++ return NULL;
+
+- /* The MTK header is added by the switch between src addr
+- * and ethertype at this point, skb->data points to 2 bytes
+- * after src addr so header should be 2 bytes right before.
+- */
+- phdr = (__be16 *)(skb->data - 2);
+- hdr = ntohs(*phdr);
++ /* The MTK header is added by the switch between src addr
++ * and ethertype at this point, skb->data points to 2 bytes
++ * after src addr so header should be 2 bytes right before.
++ */
++ phdr = (__be16 *)(skb->data - 2);
++ hdr = ntohs(*phdr);
+
+- /* Remove MTK tag and recalculate checksum. */
+- skb_pull_rcsum(skb, MTK_HDR_LEN);
++ /* Remove MTK tag and recalculate checksum. */
++ skb_pull_rcsum(skb, MTK_HDR_LEN);
+
+- memmove(skb->data - ETH_HLEN,
+- skb->data - ETH_HLEN - MTK_HDR_LEN,
+- 2 * ETH_ALEN);
++ memmove(skb->data - ETH_HLEN,
++ skb->data - ETH_HLEN - MTK_HDR_LEN,
++ 2 * ETH_ALEN);
++ }
+
+ /* Get source port information */
+ port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);
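
The rcv hook above gains a second path: when the MAC has already delivered the switch tag in the VLAN field (NETIF_F_HW_VLAN_CTAG_RX), the 16-bit tag is taken from skb->vlan_proto and the VLAN fields are cleared; otherwise the original path still pulls the 4-byte inline header out of the frame. A minimal sketch of recovering the tag word either way; recover_tag() is an illustrative helper and EXAMPLE_SOURCE_PORT_MASK is an assumption made for the example, not the driver's definition:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MTK_HDR_LEN 4
#define EXAMPLE_SOURCE_PORT_MASK 0x7   /* assumed layout: port in the low bits */

/*
 * With hardware tag offload the tag arrives in the descriptor's VLAN field;
 * without it, it sits inline between the source MAC and the EtherType and is
 * read as a big-endian 16-bit word, as mtk_tag_rcv() does with ntohs().
 */
static uint16_t recover_tag(bool hw_offload, uint16_t vlan_proto,
                            const uint8_t *inline_tag)
{
        if (hw_offload)
                return vlan_proto;
        return (uint16_t)((inline_tag[0] << 8) | inline_tag[1]);
}

int main(void)
{
        const uint8_t tag[MTK_HDR_LEN] = { 0x00, 0x03, 0x00, 0x00 };
        uint16_t hdr = recover_tag(false, 0, tag);

        printf("source port %u\n", hdr & EXAMPLE_SOURCE_PORT_MASK);
        return 0;
}

Either way the source port then falls out of the low bits of the recovered word, as in the unchanged tail of the function.
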
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/738-mt7531-gsw-internal_phy_calibration.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/738-mt7531-gsw-internal_phy_calibration.patch
new file mode 100755
index 0000000..361eca6
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/738-mt7531-gsw-internal_phy_calibration.patch
@@ -0,0 +1,1282 @@
+Index: drivers/net/phy/mtk/mt753x/Makefile
+===================================================================
+--- a/drivers/net/phy/mtk/mt753x/Makefile
++++ b/drivers/net/phy/mtk/mt753x/Makefile
+@@ -7,5 +7,5 @@ obj-$(CONFIG_MT753X_GSW) += mt753x.o
+ mt753x-$(CONFIG_SWCONFIG) += mt753x_swconfig.o
+
+ mt753x-y += mt753x_mdio.o mt7530.o mt7531.o \
+- mt753x_common.o mt753x_vlan.o mt753x_nl.o
++ mt753x_common.o mt753x_vlan.o mt753x_nl.o mt753x_phy.o
+
+Index: drivers/net/phy/mtk/mt753x/mt7531.c
+===================================================================
+--- a/drivers/net/phy/mtk/mt753x/mt7531.c
++++ b/drivers/net/phy/mtk/mt753x/mt7531.c
+@@ -658,6 +658,27 @@ static void mt7531_core_pll_setup(struct
+
+ static int mt7531_internal_phy_calibration(struct gsw_mt753x *gsw)
+ {
++ u32 i, val;
++ int ret;
++
++ dev_info(gsw->dev,">>>>>>>>>>>>>>>>>>>>>>>>>>>>> START CALIBRATION:\n");
++
++ /* gphy value from sw path */
++ val = gsw->mmd_read(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403);
++ val |= GBE_EFUSE_SETTING;
++ gsw->mmd_write(gsw, 0, PHY_DEV1F, PHY_DEV1F_REG_403, val);
++
++ for (i = 0; i < 5; i++) {
++ dev_info(gsw->dev, "-------- gephy-calibration (port:%d) --------\n",
++ i);
++ ret = mt753x_phy_calibration(gsw, i);
++
++ /* set Auto-negotiation with giga extension. */
++ gsw->mii_write(gsw, i, 0, 0x1340);
++ if (ret)
++ return ret;
++ }
++
+ return 0;
+ }
+
+Index: drivers/net/phy/mtk/mt753x/mt753x.h
+===================================================================
+--- a/drivers/net/phy/mtk/mt753x/mt753x.h
++++ b/drivers/net/phy/mtk/mt753x/mt753x.h
+@@ -140,6 +140,8 @@ void mt753x_irq_enable(struct gsw_mt753x
+ int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
+ int extphy_init(struct gsw_mt753x *gsw, int addr);
+
++int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
++
+ /* MDIO Indirect Access Registers */
+ #define MII_MMD_ACC_CTL_REG 0x0d
+ #define MMD_CMD_S 14
+Index: drivers/net/phy/mtk/mt753x/mt753x_phy.c
+===================================================================
+new file mode 100644
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_phy.c
+@@ -0,0 +1,1069 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Common part for MediaTek MT753x gigabit switch
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Weijie Gao <weijie.gao@mediatek.com>
++ */
++
++#include <linux/kernel.h>
++#include <linux/delay.h>
++
++#include "mt753x.h"
++#include "mt753x_regs.h"
++#include "mt753x_phy.h"
++
++u32 tc_phy_read_dev_reg(struct gsw_mt753x *gsw, u32 port_num, u32 dev_addr, u32 reg_addr)
++{
++ u32 phy_val;
++ phy_val = gsw->mmd_read(gsw, port_num, dev_addr, reg_addr);
++
++ //printk("switch phy cl45 r %d 0x%x 0x%x = %x\n",port_num, dev_addr, reg_addr, phy_val);
++ //switch_phy_read_cl45(port_num, dev_addr, reg_addr, &phy_val);
++ return phy_val;
++}
++
++void tc_phy_write_dev_reg(struct gsw_mt753x *gsw, u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
++{
++ u32 phy_val;
++ gsw->mmd_write(gsw, port_num, dev_addr, reg_addr, write_data);
++ phy_val = gsw->mmd_read(gsw, port_num, dev_addr, reg_addr);
++ //printk("switch phy cl45 w %d 0x%x 0x%x 0x%x --> read back 0x%x\n",port_num, dev_addr, reg_addr, write_data, phy_val);
++ //switch_phy_write_cl45(port_num, dev_addr, reg_addr, write_data);
++}
++
++void switch_phy_write(struct gsw_mt753x *gsw, u32 port_num, u32 reg_addr, u32 write_data){
++ gsw->mii_write(gsw, port_num, reg_addr, write_data);
++}
++
++u32 switch_phy_read(struct gsw_mt753x *gsw, u32 port_num, u32 reg_addr){
++ return gsw->mii_read(gsw, port_num, reg_addr);
++}
++
++const u8 MT753x_ZCAL_TO_R50ohm_GE_TBL_100[64] = {
++ 127, 127, 127, 127, 127, 127, 127, 127,
++ 127, 127, 127, 127, 127, 123, 122, 117,
++ 115, 112, 103, 100, 98, 87, 85, 83,
++ 81, 72, 70, 68, 66, 64, 55, 53,
++ 52, 50, 49, 48, 38, 36, 35, 34,
++ 33, 32, 22, 21, 20, 19, 18, 17,
++ 16, 7, 6, 5, 4, 3, 2, 1,
++ 0, 0, 0, 0, 0, 0, 0, 0
++};
++
++const u8 MT753x_TX_OFFSET_TBL[64] = {
++ 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
++ 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
++ 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8,
++ 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
++};
++
++u8 ge_cal_flag;
++
++u8 all_ge_ana_cal_wait(struct gsw_mt753x *gsw, u32 delay, u32 phyaddr) // for EN7512
++{
++ u8 all_ana_cal_status;
++ u32 cnt, tmp_1e_17c;
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017c, 0x0001); // da_calin_flag pull high
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++ //printk("delay = %d\n", delay);
++
++ cnt = 10000;
++ do {
++ udelay(delay);
++ cnt--;
++ all_ana_cal_status = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17b) & 0x1;
++
++ } while ((all_ana_cal_status == 0) && (cnt != 0));
++
++
++ if(all_ana_cal_status == 1) {
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0);
++ return all_ana_cal_status;
++ } else {
++ tmp_1e_17c = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c);
++ if ((tmp_1e_17c & 0x1) != 1) {
++ pr_info("FIRST MDC/MDIO write error\n");
++ pr_info("FIRST 1e_17c = %x\n", tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c));
++
++ }
++ printk("re-K again\n");
++
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++ cnt = 10000;
++ do {
++ udelay(delay);
++ cnt--;
++ tmp_1e_17c = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c);
++ if ((tmp_1e_17c & 0x1) != 1) {
++ pr_info("SECOND MDC/MDIO write error\n");
++ pr_info("SECOND 1e_17c = %x\n", tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17c));
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0x0001);
++ }
++ } while ((cnt != 0) && (tmp_1e_17c == 0));
++
++ cnt = 10000;
++ do {
++ udelay(delay);
++ cnt--;
++ all_ana_cal_status = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x17b) & 0x1;
++
++ } while ((all_ana_cal_status == 0) && (cnt != 0));
++
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x17c, 0);
++ }
++
++ if(all_ana_cal_status == 0){
++ pr_info("!!!!!!!!!!!! dev1Eh_reg17b ERROR\n");
++ }
++
++ return all_ana_cal_status;
++}
++
++
++
++
++int ge_cal_rext(struct gsw_mt753x *gsw, u8 phyaddr, u32 delay)
++{
++ u8 rg_zcal_ctrl, all_ana_cal_status;
++ u16 ad_cal_comp_out_init;
++ u16 dev1e_e0_ana_cal_r5;
++ int calibration_polarity;
++ u8 cnt = 0;
++ u16 dev1e_17a_tmp, dev1e_e0_tmp;
++
++ /* *** Iext/Rext Cal start ************ */
++ all_ana_cal_status = ANACAL_INIT;
++ /* analog calibration enable, Rext calibration enable */
++ /* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
++ /* 1e_dc[0]:rg_txvos_calen */
++ /* 1e_e1[4]:rg_cal_refsel(0:1.2V) */
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x1110)
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x1110);
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0);
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e1, 0x0000);
++ //tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, 0x10);
++
++ rg_zcal_ctrl = 0x20;/* start with 0 dB */
++ dev1e_e0_ana_cal_r5 = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0xe0); // get default value
++ /* 1e_e0[5:0]:rg_zcal_ctrl */
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0xe0, rg_zcal_ctrl);
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr);/* delay 20 usec */
++
++ if (all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk(" GE Rext AnaCal ERROR init! \r\n");
++ return -1;
++ }
++ /* 1e_17a[8]:ad_cal_comp_out */
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a) >> 8) & 0x1;
++ if (ad_cal_comp_out_init == 1)
++ calibration_polarity = -1;
++ else /* ad_cal_comp_out_init == 0 */
++ calibration_polarity = 1;
++ cnt = 0;
++ while (all_ana_cal_status < ANACAL_ERROR) {
++ cnt++;
++ rg_zcal_ctrl += calibration_polarity;
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0xe0, (rg_zcal_ctrl));
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); /* delay 20 usec */
++ dev1e_17a_tmp = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a);
++ if (all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk(" GE Rext AnaCal ERROR 2! \r\n");
++ return -1;
++ } else if (((dev1e_17a_tmp >> 8) & 0x1) != ad_cal_comp_out_init) {
++ all_ana_cal_status = ANACAL_FINISH;
++ //printk(" GE Rext AnaCal Done! (%d)(0x%x) \r\n", cnt, rg_zcal_ctrl);
++ } else {
++ dev1e_17a_tmp = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a);
++ dev1e_e0_tmp = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0xe0);
++ if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
++ all_ana_cal_status = ANACAL_SATURATION; /* need to FT(IC fail?) */
++ printk(" GE Rext AnaCal Saturation! \r\n");
++ rg_zcal_ctrl = 0x20; /* 0 dB */
++ }
++ }
++ }
++
++ if (all_ana_cal_status == ANACAL_ERROR) {
++ rg_zcal_ctrl = 0x20; /* 0 dB */
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ } else if(all_ana_cal_status == ANACAL_FINISH){
++ //tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, ((rg_zcal_ctrl << 8) | rg_zcal_ctrl));
++ printk("0x1e-e0 = %x\n", tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x00e0));
++ /* **** 1f_115[2:0] = rg_zcal_ctrl[5:3] // Mog review */
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1f, 0x0115, ((rg_zcal_ctrl & 0x3f) >> 3));
++ printk("0x1f-115 = %x\n", tc_phy_read_dev_reg(gsw, PHY0, 0x1f, 0x115));
++ printk(" GE Rext AnaCal Done! (%d)(0x%x) \r\n", cnt, rg_zcal_ctrl);
++ ge_cal_flag = 1;
++ } else {
++ printk("GE Rext cal something wrong2\n");
++ }
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);
++
++ return 0;
++}
++
++//-----------------------------------------------------------------
++int ge_cal_r50(struct gsw_mt753x *gsw, u8 phyaddr, u32 delay)
++{
++ u8 rg_zcal_ctrl, all_ana_cal_status, calibration_pair;
++ u16 ad_cal_comp_out_init;
++ u16 dev1e_e0_ana_cal_r5;
++ int calibration_polarity;
++ u8 cnt = 0;
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000); // 1e_dc[0]:rg_txvos_calen
++
++ for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++) {
++ rg_zcal_ctrl = 0x20; // start with 0 dB
++ dev1e_e0_ana_cal_r5 = (tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x00e0) & (~0x003f));
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl)); // 1e_e0[5:0]:rg_zcal_ctrl
++ if(calibration_pair == ANACAL_PAIR_A)
++ {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1101); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);
++ //printk("R50 pair A 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00dc));
++
++ }
++ else if(calibration_pair == ANACAL_PAIR_B)
++ {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x1000); // 1e_dc[12]:rg_zcalen_b
++ //printk("R50 pair B 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00db),tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00dc));
++
++ }
++ else if(calibration_pair == ANACAL_PAIR_C)
++ {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0100); // 1e_dc[8]:rg_zcalen_c
++ //printk("R50 pair C 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00dc));
++
++ }
++ else // if(calibration_pair == ANACAL_PAIR_D)
++ {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0010); // 1e_dc[4]:rg_zcalen_d
++ //printk("R50 pair D 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x00dc));
++
++ }
++
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0)
++ {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( "GE R50 AnaCal ERROR init! \r\n");
++ return -1;
++ }
++
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8) & 0x1; // 1e_17a[8]:ad_cal_comp_out
++ if(ad_cal_comp_out_init == 1)
++ calibration_polarity = -1;
++ else
++ calibration_polarity = 1;
++
++ cnt = 0;
++ while(all_ana_cal_status < ANACAL_ERROR)
++ {
++ cnt ++;
++ rg_zcal_ctrl += calibration_polarity;
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++
++ if(all_ana_cal_status == 0)
++ {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE R50 AnaCal ERROR 2! \r\n");
++ return -1;
++ }
++ else if(((tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init)
++ {
++ all_ana_cal_status = ANACAL_FINISH;
++ }
++ else {
++ if((rg_zcal_ctrl == 0x3F)||(rg_zcal_ctrl == 0x00))
++ {
++ all_ana_cal_status = ANACAL_SATURATION; // need to FT
++ printk( " GE R50 AnaCal Saturation! \r\n");
++ }
++ }
++ }
++
++ if(all_ana_cal_status == ANACAL_ERROR) {
++ rg_zcal_ctrl = 0x20; // 0 dB
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ }
++ else {
++ rg_zcal_ctrl = MT753x_ZCAL_TO_R50ohm_GE_TBL_100[rg_zcal_ctrl - 9]; // wait Mog zcal/r50 mapping table
++ printk( " GE R50 AnaCal Done! (%d) (0x%x)(0x%x) \r\n", cnt, rg_zcal_ctrl, (rg_zcal_ctrl|0x80));
++ }
++
++ if(calibration_pair == ANACAL_PAIR_A) {
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174) & (~0x7f00);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174);
++ //printk( " GE-a 1e_174(0x%x)(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174), ad_cal_comp_out_init, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0174, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<8)&0xff00) | 0x8000))); // 1e_174[15:8]
++ //printk( " GE-a 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175));
++ }
++ else if(calibration_pair == ANACAL_PAIR_B) {
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174) & (~0x007f);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174);
++ //printk( " GE-b 1e_174(0x%x)(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174), ad_cal_comp_out_init, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175));
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0174, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<0)&0x00ff) | 0x0080))); // 1e_174[7:0]
++ //printk( " GE-b 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175));
++ }
++ else if(calibration_pair == ANACAL_PAIR_C) {
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175) & (~0x7f00);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0175, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<8)&0xff00) | 0x8000))); // 1e_175[15:8]
++ //printk( " GE-c 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175));
++ } else {// if(calibration_pair == ANACAL_PAIR_D)
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175) & (~0x007f);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0175, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<0)&0x00ff) | 0x0080))); // 1e_175[7:0]
++ //printk( " GE-d 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175));
++ }
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00e0, ((rg_zcal_ctrl<<8)|rg_zcal_ctrl));
++ }
++
++ printk( " GE 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0175));
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000);
++
++ return 0;
++}
++
++int ge_cal_tx_offset(struct gsw_mt753x *gsw, u8 phyaddr, u32 delay)
++{
++ u8 all_ana_cal_status, calibration_pair;
++ u16 ad_cal_comp_out_init;
++ int calibration_polarity, tx_offset_temp;
++ u8 tx_offset_reg_shift, tabl_idx, i;
++ u8 cnt = 0;
++ u16 tx_offset_reg, reg_temp, cal_temp;
++ //switch_phy_write(phyaddr, R0, 0x2100);//harry tmp
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0001); // 1e_dc[0]:rg_txvos_calen
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0096, 0x8000); // 1e_96[15]:bypass_tx_offset_cal, Hw bypass, Fw cal
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0xf808); // 1e_3e
++ for(i = 0; i <= 4; i++)
++ tc_phy_write_dev_reg(gsw, i, 0x1e, 0x00dd, 0x0000);
++ for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++)
++ {
++ tabl_idx = 31;
++ tx_offset_temp = MT753x_TX_OFFSET_TBL[tabl_idx];
++
++ if(calibration_pair == ANACAL_PAIR_A) {
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5010);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x1000); // 1e_dd[12]:rg_txg_calen_a
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, (0x8000|DAC_IN_0V)); // 1e_17d:dac_in0_a
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, (0x8000|DAC_IN_0V)); // 1e_181:dac_in1_a
++ //printk("tx offset pairA 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0172) & (~0x3f00));
++ tx_offset_reg_shift = 8; // 1e_172[13:8]
++ tx_offset_reg = 0x0172;
++
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else if(calibration_pair == ANACAL_PAIR_B) {
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5018);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0100); // 1e_dd[8]:rg_txg_calen_b
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, (0x8000|DAC_IN_0V)); // 1e_17e:dac_in0_b
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, (0x8000|DAC_IN_0V)); // 1e_182:dac_in1_b
++ //printk("tx offset pairB 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0172) & (~0x003f));
++ tx_offset_reg_shift = 0; // 1e_172[5:0]
++ tx_offset_reg = 0x0172;
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else if(calibration_pair == ANACAL_PAIR_C) {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0010); // 1e_dd[4]:rg_txg_calen_c
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, (0x8000|DAC_IN_0V)); // 1e_17f:dac_in0_c
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, (0x8000|DAC_IN_0V)); // 1e_183:dac_in1_c
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0173) & (~0x3f00));
++ //printk("tx offset pairC 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ tx_offset_reg_shift = 8; // 1e_173[13:8]
++ tx_offset_reg = 0x0173;
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else {// if(calibration_pair == ANACAL_PAIR_D)
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0001); // 1e_dd[0]:rg_txg_calen_d
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, (0x8000|DAC_IN_0V)); // 1e_180:dac_in0_d
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, (0x8000|DAC_IN_0V)); // 1e_184:dac_in1_d
++ //printk("tx offset pairD 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x0173) & (~0x003f));
++ tx_offset_reg_shift = 0; // 1e_173[5:0]
++ tx_offset_reg = 0x0173;
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ }
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift))); // 1e_172, 1e_173
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx offset AnaCal ERROR init! \r\n");
++ return -1;
++ }
++
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8) & 0x1; // 1e_17a[8]:ad_cal_comp_out
++ if(ad_cal_comp_out_init == 1)
++ calibration_polarity = 1;
++ else
++ calibration_polarity = -1;
++
++ cnt = 0;
++ //printk("TX offset cnt = %d, tabl_idx= %x, offset_val = %x\n", cnt, tabl_idx, MT753x_TX_OFFSET_TBL[tabl_idx]);
++ while(all_ana_cal_status < ANACAL_ERROR) {
++
++ cnt ++;
++ tabl_idx += calibration_polarity;
++ //tx_offset_temp += calibration_polarity;
++ //cal_temp = tx_offset_temp;
++ cal_temp = MT753x_TX_OFFSET_TBL[tabl_idx];
++ //printk("TX offset cnt = %d, tabl_idx= %x, offset_val = %x\n", cnt, tabl_idx, MT753x_TX_OFFSET_TBL[tabl_idx]);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(cal_temp<<tx_offset_reg_shift)));
++
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx offset AnaCal ERROR init 2! \r\n");
++ return -1;
++ } else if(((tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init) {
++ all_ana_cal_status = ANACAL_FINISH;
++ } else {
++ if((tabl_idx == 0)||(tabl_idx == 0x3f)) {
++ all_ana_cal_status = ANACAL_SATURATION; // need to FT
++ printk( " GE Tx offset AnaCal Saturation! \r\n");
++ }
++ }
++ }
++
++ if(all_ana_cal_status == ANACAL_ERROR) {
++ tx_offset_temp = TX_AMP_OFFSET_0MV;
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else {
++ printk( " GE Tx offset AnaCal Done! (pair-%d)(%d)(0x%x) 0x1e_%x=0x%x\n", calibration_pair, cnt, MT753x_TX_OFFSET_TBL[tabl_idx], tx_offset_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_offset_reg));
++ }
++ }
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, 0x0000);
++
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000); // disable analog calibration circuit
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000); // disable analog calibration circuit
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0x0000); // disable Tx VLD force mode
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0000); // disable Tx offset/amplitude calibration circuit
++
++ return 0;
++}
++
++int ge_cal_tx_amp(struct gsw_mt753x *gsw, u8 phyaddr, u32 delay)
++{
++ u8 all_ana_cal_status, calibration_pair, i;
++ u16 ad_cal_comp_out_init;
++ int calibration_polarity;
++ u32 tx_amp_reg_shift;
++ u16 reg_temp;
++ u32 tx_amp_temp, tx_amp_reg, cnt=0, tx_amp_reg_100;
++ u32 debug_tmp, reg_backup, reg_tmp;
++ u32 orig_1e_11, orig_1f_300;
++
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0001); // 1e_dc[0]:rg_txvos_calen
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, 0x0010); // 1e_e1[4]:select 1V
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0xf808); // 1e_3e:enable Tx VLD
++
++ orig_1e_11 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x11);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, 0xff00);
++// tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27a, 0x33);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0xc9, 0xffff);
++ orig_1f_300 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x300);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x300, 0x4);
++ for(i = 0; i <= 4; i++)
++ tc_phy_write_dev_reg(gsw, i, 0x1e, 0x00dd, 0x0000);
++ for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++) {
++ tx_amp_temp = 0x20; // start with 0 dB
++
++ if(calibration_pair == ANACAL_PAIR_A) {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x1000); // 1e_dd[12]:tx_a amp calibration enable
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, (0x8000|DAC_IN_2V)); // 1e_17d:dac_in0_a
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, (0x8000|DAC_IN_2V)); // 1e_181:dac_in1_a
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x012) & (~0xfc00));
++ tx_amp_reg_shift = 10; // 1e_12[15:10]
++ tx_amp_reg = 0x12;
++ tx_amp_reg_100 = 0x16;
++ } else if(calibration_pair == ANACAL_PAIR_B) {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0100); // 1e_dd[8]:tx_b amp calibration enable
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, (0x8000|DAC_IN_2V)); // 1e_17e:dac_in0_b
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, (0x8000|DAC_IN_2V)); // 1e_182:dac_in1_b
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x017) & (~0x3f00));
++ tx_amp_reg_shift = 8; // 1e_17[13:8]
++ tx_amp_reg = 0x17;
++ tx_amp_reg_100 = 0x18;
++ } else if(calibration_pair == ANACAL_PAIR_C) {
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0010); // 1e_dd[4]:tx_c amp calibration enable
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, (0x8000|DAC_IN_2V)); // 1e_17f:dac_in0_c
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, (0x8000|DAC_IN_2V)); // 1e_183:dac_in1_c
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x019) & (~0x3f00));
++ tx_amp_reg_shift = 8; // 1e_19[13:8]
++ tx_amp_reg = 0x19;
++ tx_amp_reg_100 = 0x20;
++ } else { //if(calibration_pair == ANACAL_PAIR_D)
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0001); // 1e_dd[0]:tx_d amp calibration enable
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, (0x8000|DAC_IN_2V)); // 1e_180:dac_in0_d
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, (0x8000|DAC_IN_2V)); // 1e_184:dac_in1_d
++ reg_temp = (tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x021) & (~0x3f00));
++ tx_amp_reg_shift = 8; // 1e_21[13:8]
++ tx_amp_reg = 0x21;
++ tx_amp_reg_100 = 0x22;
++ }
++ tc_phy_write_dev_reg( gsw, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift))); // 1e_12, 1e_17, 1e_19, 1e_21
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx amp AnaCal ERROR init init! \r\n");
++ return -1;
++ }
++
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8) & 0x1; // 1e_17a[8]:ad_cal_comp_out
++ if(ad_cal_comp_out_init == 1)
++ calibration_polarity = -1;
++ else
++ calibration_polarity = 1;
++
++ cnt =0;
++ while(all_ana_cal_status < ANACAL_ERROR) {
++ cnt ++;
++ tx_amp_temp += calibration_polarity;
++ //printk("tx_amp : %x, 1e %x = %x\n", tx_amp_temp, tx_amp_reg, (reg_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ tc_phy_write_dev_reg( gsw, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ all_ana_cal_status = all_ge_ana_cal_wait(gsw, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx amp AnaCal ERROR 2! \r\n");
++ return -1;
++ } else if(((tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init) {
++ //printk("TX AMP ANACAL_FINISH\n");
++ all_ana_cal_status = ANACAL_FINISH;
++ if (phyaddr == 0) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp - 2;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp - 2;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp - 1;
++ } else if (phyaddr == 1) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp ;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp - 1;
++ } else if (phyaddr == 2) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp - 1;
++ } else if (phyaddr == 3) {
++ tx_amp_temp = tx_amp_temp;
++ } else if (phyaddr == 4) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp;
++ }
++ reg_temp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg)&(~0xff00);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)));
++ if (phyaddr == 0) {
++ if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 7));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp+1+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 1) {
++ if (tx_amp_reg == 0x12) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 9));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg == 0x17){
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 7));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 2) {
++ if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 6));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if ((tx_amp_reg_100 == 0x16) || (tx_amp_reg_100 == 0x18)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 3) {
++ if (tx_amp_reg == 0x12) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 4));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg == 0x17) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 7));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-2+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+3)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 4) {
++ if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, ((tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)) + 5));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-2+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp-1+4)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ }
++
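++			/* Apply a fixed -8 trim to the calibrated 1000M TX amplitude code and its test-mode counterpart for the selected pair */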
++ if (calibration_pair == ANACAL_PAIR_A){
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x12);
++ reg_tmp = ((reg_backup & 0xfc00) >> 10);
++ reg_tmp -= 8;
++ reg_backup = 0x0000;
++ reg_backup |= ((reg_tmp << 10) | (reg_tmp << 0));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x12, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x12);
++ //printk("PORT[%d] 1e.012 = %x (OFFSET_1000M_PAIR_A)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x16);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ reg_tmp -= 8;
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (reg_tmp << 0);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x16, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x16);
++ //printk("PORT[%d] 1e.016 = %x (OFFSET_TESTMODE_1000M_PAIR_A)\n", phyaddr, reg_backup);
++ }
++ else if(calibration_pair == ANACAL_PAIR_B){
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x17);
++ reg_tmp = ((reg_backup & 0x3f00) >> 8);
++ reg_tmp -= 8;
++ reg_backup = 0x0000;
++ reg_backup |= ((reg_tmp << 8) | (reg_tmp << 0));
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x17, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x17);
++ //printk("PORT[%d] 1e.017 = %x (OFFSET_1000M_PAIR_B)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x18);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ reg_tmp -= 8;
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (reg_tmp << 0);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x18);
++ //printk("PORT[%d] 1e.018 = %x (OFFSET_TESTMODE_1000M_PAIR_B)\n", phyaddr, reg_backup);
++ }
++ else if(calibration_pair == ANACAL_PAIR_C){
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x19);
++ reg_tmp = ((reg_backup & 0x3f00) >> 8);
++ reg_tmp -= 8;
++ reg_backup = (reg_backup & (~0x3f00));
++ reg_backup |= (reg_tmp << 8);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x19, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x19);
++ //printk("PORT[%d] 1e.019 = %x (OFFSET_1000M_PAIR_C)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x20);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ reg_tmp -= 8;
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (reg_tmp << 0);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x20, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x20);
++ //printk("PORT[%d] 1e.020 = %x (OFFSET_TESTMODE_1000M_PAIR_C)\n", phyaddr, reg_backup);
++ }
++ else if(calibration_pair == ANACAL_PAIR_D){
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x21);
++ reg_tmp = ((reg_backup & 0x3f00) >> 8);
++ reg_tmp -= 8;
++ reg_backup = (reg_backup & (~0x3f00));
++ reg_backup |= (reg_tmp << 8);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x21, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x21);
++ //printk("PORT[%d] 1e.021 = %x (OFFSET_1000M_PAIR_D)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x22);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ reg_tmp -= 8;
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (reg_tmp << 0);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x22, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x22);
++ //printk("PORT[%d] 1e.022 = %x (OFFSET_TESTMODE_1000M_PAIR_D)\n", phyaddr, reg_backup);
++ }
++
++ if (calibration_pair == ANACAL_PAIR_A){
++ //printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x12);
++ //printk("1e.012 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x16);
++ //printk("1e.016 = 0x%x\n", debug_tmp);
++ }
++
++ else if(calibration_pair == ANACAL_PAIR_B){
++		//printk("PORT (%d) TX_AMP PAIR (B) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x17);
++ //printk("1e.017 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x18);
++ //printk("1e.018 = 0x%x\n", debug_tmp);
++ }
++ else if(calibration_pair == ANACAL_PAIR_C){
++		//printk("PORT (%d) TX_AMP PAIR (C) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x19);
++ //printk("1e.019 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x20);
++ //printk("1e.020 = 0x%x\n", debug_tmp);
++ }
++ else if(calibration_pair == ANACAL_PAIR_D){
++		//printk("PORT (%d) TX_AMP PAIR (D) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x21);
++ //printk("1e.021 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x22);
++ //printk("1e.022 = 0x%x\n", debug_tmp);
++ }
++
++
++ printk( " GE Tx amp AnaCal Done! (pair-%d)(1e_%x = 0x%x)\n", calibration_pair, tx_amp_reg, tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg));
++
++ } else {
++ if((tx_amp_temp == 0x3f)||(tx_amp_temp == 0x00)) {
++ all_ana_cal_status = ANACAL_SATURATION; // need to FT
++ printk( " GE Tx amp AnaCal Saturation! \r\n");
++ }
++ }
++ }
++
++ if(all_ana_cal_status == ANACAL_ERROR) {
++ tx_amp_temp = 0x20;
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, tx_amp_reg, (reg_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ }
++ }
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017d, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017e, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x017f, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0180, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0181, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0182, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0183, 0x0000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x0184, 0x0000);
++
++ /* disable analog calibration circuit */
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00db, 0x0000); // disable analog calibration circuit
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x003e, 0x0000); // disable Tx VLD force mode
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x00dd, 0x0000); // disable Tx offset/amplitude calibration circuit
++
++
++
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x273, 0x2000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0xc9, 0x0fff);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x145, 0x1000);
++
++ /* Restore CR to default */
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, orig_1e_11);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x300, orig_1f_300);
++
++ return 0;
++}
++
++//-----------------------------------------------------------------
++
++int phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++ //u32 reg_tmp,reg_tmp0, reg_tmp1, i;
++ u32 reg_tmp;
++ u32 CALDLY = 40;
++ u32 orig_1e_11, orig_1e_185, orig_1e_e1, orig_1f_100;
++ int ret;
++ /* set [12]AN disable, [8]full duplex, [13/6]1000Mbps */
++ //tc_phy_write_dev_reg(phyaddr, 0x0, 0x0140);
++ switch_phy_write(gsw, phyaddr, R0, 0x140);
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x145, 0x1010);/* fix mdi */
++ orig_1e_185 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, RG_185);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, RG_185, 0);/* disable tx slew control */
++ orig_1f_100 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x100);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x100, 0xc000);/* BG voltage output */
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x403, 0x1099); //bypass efuse
++
++#if (1)
++ // 1f_27c[12:8] cr_da_tx_i2mpb_10m Trimming TX bias setup(@10M)
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27c, 0x1f1f);
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27c, 0x3300);
++
++ //reg_tmp1 = tc_phy_read_dev_reg(gsw, PHY0, 0x1f, 0x27c);
++ //dev1Fh_reg273h TXVLD DA register - Adjust voltage mode TX amplitude.
++ //tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0);
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x273, 0x1000);
++ //reg_tmp1 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x273);
++ //printk("reg_tmp1273 = %x\n", reg_tmp1);
++ /*1e_11 TX overshoot Enable (PAIR A/B/C/D) in gbe mode*/
++
++ orig_1e_11 = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x11);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x11);
++ reg_tmp = reg_tmp | (0xf << 12);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, reg_tmp);
++ orig_1e_e1 = tc_phy_read_dev_reg(gsw, PHY0, 0x1e, 0x00e1);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, 0x10);
++ /* calibration start ============ */
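++	/* Calibration sequence: REXT (done once, shared via PHY0) -> per-port R50 -> TX offset -> TX amplitude */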
++ printk("CALDLY = %d\n", CALDLY);
++ if(ge_cal_flag == 0){
++ ret = ge_cal_rext(gsw, 0, CALDLY);
++ if (ret == -1){
++ printk("ge_cal_rext error K port =%d\n", phyaddr);
++ return ret;
++ }
++ ge_cal_flag = 1;
++ }
++
++ /* *** R50 Cal start ***************************** */
++ /*phyaddress = 0*/
++ ret = ge_cal_r50(gsw, phyaddr, CALDLY);
++ if (ret == -1){
++ printk("R50 error K port =%d\n", phyaddr);
++ return ret;
++ }
++ /* *** R50 Cal end *** */
++ /* *** Tx offset Cal start *********************** */
++ ret = ge_cal_tx_offset(gsw, phyaddr, CALDLY);
++ if (ret == -1){
++ printk("ge_cal_tx_offset error K port =%d\n", phyaddr);
++ return ret;
++ }
++ /* *** Tx offset Cal end *** */
++
++ /* *** Tx Amp Cal start *** */
++ ret = ge_cal_tx_amp(gsw, phyaddr, CALDLY);
++ if (ret == -1){
++ printk("ge_cal_tx_amp error K port =%d\n", phyaddr);
++ return ret;
++ }
++ /* *** Tx Amp Cal end *** */
++ /*tmp maybe changed*/
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27c, 0x1111);
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x27b, 0x47);
++ //tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x273, 0x2000);
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3a8, 0x0810);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3aa, 0x0008);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3ab, 0x0810);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3ad, 0x0008);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3ae, 0x0106);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3b0, 0x0001);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3b1, 0x0106);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3b3, 0x0001);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18c, 0x0001);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18d, 0x0001);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18e, 0x0001);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x18f, 0x0001);
++
++ /*da_tx_bias1_b_tx_standby = 5'b10 (dev1eh_reg3aah[12:8])*/
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x3aa);
++ reg_tmp = reg_tmp & ~(0x1f00);
++ reg_tmp = reg_tmp | 0x2 << 8;
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3aa, reg_tmp);
++
++ /*da_tx_bias1_a_tx_standby = 5'b10 (dev1eh_reg3a9h[4:0])*/
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1e, 0x3a9);
++ reg_tmp = reg_tmp & ~(0x1f);
++ reg_tmp = reg_tmp | 0x2;
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x3a9, reg_tmp);
++
++ /* Restore CR to default */
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, RG_185, orig_1e_185);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x100, orig_1f_100);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x11, orig_1e_11);
++ tc_phy_write_dev_reg(gsw, PHY0, 0x1e, 0x00e1, orig_1e_e1);
++#endif
++ return 0;
++}
++
++void rx_dc_offset(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++ pr_info("PORT %d RX_DC_OFFSET\n", phyaddr);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x96, 0x8000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x37, 0x3);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x107, 0x4000);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x171, 0x1e5);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x39, 0x200f);
++ udelay(40);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x39, 0x000f);
++ udelay(40);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1e, 0x171, 0x65);
++}
++
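++/* The check_rx_dc_offset_pair_* helpers read the per-pair RX DC offset back through the 1f.15/1f.1a test registers, convert the signed 8-bit value to a magnitude, and report an error if it exceeds 4 LSB. */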
++void check_rx_dc_offset_pair_a(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x114f);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairA output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1142);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairA output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairA RX_DC_OFFSET error");
++}
++
++void check_rx_dc_offset_pair_b(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1151);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairB output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1143);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairB output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairB RX_DC_OFFSET error");
++}
++
++void check_rx_dc_offset_pair_c(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1153);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairC output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1144);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairC output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairC RX_DC_OFFSET error");
++}
++
++void check_rx_dc_offset_pair_d(struct gsw_mt753x *gsw, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1155);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairD output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(gsw, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1145);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(gsw, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairD output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairD RX_DC_OFFSET error");
++}
++
++
++int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr){
++
++ int ret;
++
++ ret = phy_calibration(gsw, phyaddr);
++
++ rx_dc_offset(gsw, phyaddr);
++ check_rx_dc_offset_pair_a(gsw, phyaddr);
++ check_rx_dc_offset_pair_b(gsw, phyaddr);
++ check_rx_dc_offset_pair_c(gsw, phyaddr);
++ check_rx_dc_offset_pair_d(gsw, phyaddr);
++
++ return ret;
++}
+Index: drivers/net/phy/mtk/mt753x/mt753x_phy.h
+===================================================================
+new file mode 100644
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_phy.h
+@@ -0,0 +1,145 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Register definitions for MediaTek MT753x Gigabit switches
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Weijie Gao <weijie.gao@mediatek.com>
++ */
++
++#ifndef _MT753X_PHY_H_
++#define _MT753X_PHY_H_
++
++#include <linux/bitops.h>
++
++/*phy calibration use*/
++#define DEV_1E 0x1E
++/*global device 0x1f, always set P0*/
++#define DEV_1F 0x1F
++
++
++/************IEXT/REXT CAL***************/
++/* bits range: for example BITS(16,23) = 0xFF0000*/
++#define BITS(m, n) (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
++#define ANACAL_INIT 0x01
++#define ANACAL_ERROR 0xFD
++#define ANACAL_SATURATION 0xFE
++#define ANACAL_FINISH 0xFF
++#define ANACAL_PAIR_A 0
++#define ANACAL_PAIR_B 1
++#define ANACAL_PAIR_C 2
++#define ANACAL_PAIR_D 3
++#define DAC_IN_0V 0x00
++#define DAC_IN_2V 0xf0
++#define TX_AMP_OFFSET_0MV 0x20
++#define TX_AMP_OFFSET_VALID_BITS 6
++
++#define R0 0
++#define PHY0 0
++#define PHY1 1
++#define PHY2 2
++#define PHY3 3
++#define PHY4 4
++#define ANA_TEST_MODE BITS(8, 15)
++#define TST_TCLK_SEL			BITS(6, 7)
++#define ANA_TEST_VGA_RG 0x100
++
++#define FORCE_MDI_CROSS_OVER BITS(3, 4)
++#define T10_TEST_CTL_RG 0x145
++#define RG_185 0x185
++#define RG_TX_SLEW BIT(0)
++#define ANA_CAL_0 0xdb
++#define RG_CAL_CKINV BIT(12)
++#define RG_ANA_CALEN BIT(8)
++#define RG_REXT_CALEN BIT(4)
++#define RG_ZCALEN_A BIT(0)
++#define ANA_CAL_1 0xdc
++#define RG_ZCALEN_B BIT(12)
++#define RG_ZCALEN_C BIT(8)
++#define RG_ZCALEN_D BIT(4)
++#define RG_TXVOS_CALEN BIT(0)
++#define ANA_CAL_6 0xe1
++#define RG_CAL_REFSEL BIT(4)
++#define RG_CAL_COMP_PWD BIT(0)
++#define ANA_CAL_5 0xe0
++#define RG_REXT_TRIM			BITS(8, 13)
++#define RG_ZCAL_CTRL			BITS(0, 5)
++#define RG_17A 0x17a
++#define AD_CAL_COMP_OUT BIT(8)
++#define RG_17B 0x17b
++#define AD_CAL_CLK			BIT(0)
++#define RG_17C 0x17c
++#define DA_CALIN_FLAG			BIT(0)
++/************R50 CAL****************************/
++#define RG_174 0x174
++#define RG_R50OHM_RSEL_TX_A_EN		BIT(15)
++#define CR_R50OHM_RSEL_TX_A		BITS(8, 14)
++#define RG_R50OHM_RSEL_TX_B_EN		BIT(7)
++#define CR_R50OHM_RSEL_TX_B		BITS(0, 6)
++#define RG_175				0x175
++#define RG_R50OHM_RSEL_TX_C_EN		BIT(15)
++#define CR_R50OHM_RSEL_TX_C		BITS(8, 14)
++#define RG_R50OHM_RSEL_TX_D_EN		BIT(7)
++#define CR_R50OHM_RSEL_TX_D		BITS(0, 6)
++/**********TX offset Calibration***************************/
++#define RG_95 0x96
++#define BYPASS_TX_OFFSET_CAL BIT(15)
++#define RG_3E 0x3e
++#define BYPASS_PD_TXVLD_A BIT(15)
++#define BYPASS_PD_TXVLD_B BIT(14)
++#define BYPASS_PD_TXVLD_C BIT(13)
++#define BYPASS_PD_TXVLD_D BIT(12)
++#define BYPASS_PD_TX_10M BIT(11)
++#define POWER_DOWN_TXVLD_A BIT(7)
++#define POWER_DOWN_TXVLD_B BIT(6)
++#define POWER_DOWN_TXVLD_C BIT(5)
++#define POWER_DOWN_TXVLD_D BIT(4)
++#define POWER_DOWN_TX_10M BIT(3)
++#define RG_DD 0xdd
++#define RG_TXG_CALEN_A BIT(12)
++#define RG_TXG_CALEN_B BIT(8)
++#define RG_TXG_CALEN_C BIT(4)
++#define RG_TXG_CALEN_D BIT(0)
++#define RG_17D 0x17D
++#define FORCE_DASN_DAC_IN0_A BIT(15)
++#define DASN_DAC_IN0_A BITS(0, 9)
++#define RG_17E 0x17E
++#define FORCE_DASN_DAC_IN0_B BIT(15)
++#define DASN_DAC_IN0_B BITS(0, 9)
++#define RG_17F 0x17F
++
++#define FORCE_DASN_DAC_IN0_C BIT(15)
++#define DASN_DAC_IN0_C BITS(0, 9)
++#define RG_180 0x180
++#define FORCE_DASN_DAC_IN0_D BIT(15)
++#define DASN_DAC_IN0_D BITS(0, 9)
++
++#define RG_181 0x181
++#define FORCE_DASN_DAC_IN1_A BIT(15)
++#define DASN_DAC_IN1_A BITS(0, 9)
++#define RG_182 0x182
++#define FORCE_DASN_DAC_IN1_B BIT(15)
++#define DASN_DAC_IN1_B BITS(0, 9)
++#define RG_183 0x183
++#define FORCE_DASN_DAC_IN1_C		BIT(15)
++#define DASN_DAC_IN1_C BITS(0, 9)
++#define RG_184 0x184
++#define FORCE_DASN_DAC_IN1_D BIT(15)
++#define DASN_DAC_IN1_D BITS(0, 9)
++#define RG_172 0x172
++#define CR_TX_AMP_OFFSET_A BITS(8, 13)
++#define CR_TX_AMP_OFFSET_B BITS(0, 5)
++#define RG_173 0x173
++#define CR_TX_AMP_OFFSET_C BITS(8, 13)
++#define CR_TX_AMP_OFFSET_D BITS(0, 5)
++/**********TX Amp Calibration ***************************/
++#define RG_12 0x12
++#define DA_TX_I2MPB_A_GBE BITS(10, 15)
++#define RG_17 0x17
++#define DA_TX_I2MPB_B_GBE BITS(8, 13)
++#define RG_19 0x19
++#define DA_TX_I2MPB_C_GBE BITS(8, 13)
++#define RG_21 0x21
++#define DA_TX_I2MPB_D_GBE BITS(8, 13)
++
++#endif /* _MT753X_PHY_H_ */
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/739-mt7531-gsw-port5_external_phy_init.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/739-mt7531-gsw-port5_external_phy_init.patch
new file mode 100755
index 0000000..0d88c60
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/739-mt7531-gsw-port5_external_phy_init.patch
@@ -0,0 +1,156 @@
+From 9206472ba03032aea120604e8637b52408ca4b3a Mon Sep 17 00:00:00 2001
+From: Landen Chao <landen.chao@mediatek.com>
+Date: Fri, 29 May 2020 15:12:35 +0800
+Subject: [PATCH 2/2] 740_patch
+
+Change-Id: I7e0164751702f573d5185c4290ff78688f42f603
+---
+ drivers/net/phy/mtk/mt753x/Makefile | 3 +-
+ drivers/net/phy/mtk/mt753x/mt7531.c | 3 +
+ drivers/net/phy/mtk/mt753x/mt753x.h | 1 +
+ drivers/net/phy/mtk/mt753x/mt753x_extphy.c | 69 ++++++++++++++++++++++
+ drivers/net/phy/mtk/mt753x/mt753x_extphy.h | 18 ++++++
+ 5 files changed, 93 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/phy/mtk/mt753x/mt753x_extphy.c
+ create mode 100644 drivers/net/phy/mtk/mt753x/mt753x_extphy.h
+
+diff --git a/drivers/net/phy/mtk/mt753x/Makefile b/drivers/net/phy/mtk/mt753x/Makefile
+index 384b0ff7..694ffa83 100644
+--- a/drivers/net/phy/mtk/mt753x/Makefile
++++ b/drivers/net/phy/mtk/mt753x/Makefile
+@@ -7,5 +7,6 @@ obj-$(CONFIG_MT753X_GSW) += mt753x.o
+ mt753x-$(CONFIG_SWCONFIG) += mt753x_swconfig.o
+
+ mt753x-y += mt753x_mdio.o mt7530.o mt7531.o \
+- mt753x_common.o mt753x_vlan.o mt753x_nl.o mt753x_phy.o
++ mt753x_common.o mt753x_vlan.o mt753x_nl.o mt753x_phy.o \
++ mt753x_extphy.o
+
+diff --git a/drivers/net/phy/mtk/mt753x/mt7531.c b/drivers/net/phy/mtk/mt753x/mt7531.c
+index 04729835..4a2943b1 100644
+--- a/drivers/net/phy/mtk/mt753x/mt7531.c
++++ b/drivers/net/phy/mtk/mt753x/mt7531.c
+@@ -265,6 +265,9 @@ static int mt7531_set_port_sgmii_force_mode(struct gsw_mt753x *gsw, u32 port,
+ return -EINVAL;
+ }
+
++ if (port == 5)
++ extphy_init(gsw, port);
++
+ port_base = port - 5;
+
+ switch (port_cfg->speed) {
+diff --git a/drivers/net/phy/mtk/mt753x/mt753x.h b/drivers/net/phy/mtk/mt753x/mt753x.h
+index 5053a7d7..a3f343cd 100644
+--- a/drivers/net/phy/mtk/mt753x/mt753x.h
++++ b/drivers/net/phy/mtk/mt753x/mt753x.h
+@@ -154,6 +154,7 @@ void mt753x_irq_worker(struct work_struct *work);
+ void mt753x_irq_enable(struct gsw_mt753x *gsw);
+
+ int mt753x_phy_calibration(struct gsw_mt753x *gsw, u8 phyaddr);
++int extphy_init(struct gsw_mt753x *gsw, int addr);
+
+ /* MDIO Indirect Access Registers */
+ #define MII_MMD_ACC_CTL_REG 0x0d
+diff --git a/drivers/net/phy/mtk/mt753x/mt753x_extphy.c b/drivers/net/phy/mtk/mt753x/mt753x_extphy.c
+new file mode 100644
+index 00000000..f58e8a62
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_extphy.c
+@@ -0,0 +1,69 @@
++/*
++ * Driver for MediaTek MT7531 gigabit switch
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Landen Chao <landen.chao@mediatek.com>
++ *
++ * SPDX-License-Identifier: GPL-2.0+
++ */
++
++#include <linux/kernel.h>
++#include <linux/mii.h>
++
++#include "mt753x.h"
++#include "mt753x_regs.h"
++#include "mt753x_extphy.h"
++
++int gpy211_init(struct gsw_mt753x *gsw, int addr)
++{
++ /* Enable rate adaption */
++ gsw->mmd_write(gsw, addr, 0x1e, 0x8, 0x24e2);
++
++ return 0;
++}
++
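++/* Known external PHYs on port 5: matched by (PHYID1 << 16 | PHYID2) under phy_id_mask; extend this table to hook up other external PHYs. */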
++static struct mt753x_extphy_id extphy_tbl[] = {
++	{0x67c9de00, 0xfffffff0, gpy211_init},
++};
++
++static u32 get_cl22_phy_id(struct gsw_mt753x *gsw, int addr)
++{
++ int phy_reg;
++ u32 phy_id = 0;
++
++ phy_reg = gsw->mii_read(gsw, addr, MII_PHYSID1);
++ if (phy_reg < 0)
++ return 0;
++ phy_id = (phy_reg & 0xffff) << 16;
++
++ /* Grab the bits from PHYIR2, and put them in the lower half */
++ phy_reg = gsw->mii_read(gsw, addr, MII_PHYSID2);
++ if (phy_reg < 0)
++ return 0;
++
++ phy_id |= (phy_reg & 0xffff);
++
++ return phy_id;
++}
++
++static inline bool phy_id_is_match(u32 id, struct mt753x_extphy_id *phy)
++{
++ return ((id & phy->phy_id_mask) == (phy->phy_id & phy->phy_id_mask));
++}
++
++int extphy_init(struct gsw_mt753x *gsw, int addr)
++{
++ int i;
++ u32 phy_id;
++ struct mt753x_extphy_id *extphy;
++
++ phy_id = get_cl22_phy_id(gsw, addr);
++ for (i = 0; i < ARRAY_SIZE(extphy_tbl); i++) {
++ extphy = &extphy_tbl[i];
++ if(phy_id_is_match(phy_id, extphy))
++ extphy->init(gsw, addr);
++ }
++
++ return 0;
++}
+diff --git a/drivers/net/phy/mtk/mt753x/mt753x_extphy.h b/drivers/net/phy/mtk/mt753x/mt753x_extphy.h
+new file mode 100644
+index 00000000..2b72c8a9
+--- /dev/null
++++ b/drivers/net/phy/mtk/mt753x/mt753x_extphy.h
+@@ -0,0 +1,18 @@
++/*
++ * Driver for MediaTek MT753x gigabit switch
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Landen Chao <landen.chao@mediatek.com>
++ *
++ * SPDX-License-Identifier: GPL-2.0+
++ */
++
++#ifndef _MT753X_EXTPHY_H_
++#define _MT753X_EXTPHY_H_
++struct mt753x_extphy_id {
++ u32 phy_id;
++ u32 phy_id_mask;
++ int (*init)(struct gsw_mt753x *gsw, int addr);
++};
++#endif
+--
+2.17.1
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/740-add-gpy211-phy-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/740-add-gpy211-phy-support.patch
new file mode 100644
index 0000000..2496084
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/740-add-gpy211-phy-support.patch
@@ -0,0 +1,28 @@
+Index: linux-5.4.119/drivers/net/phy/Kconfig
+===================================================================
+--- linux-5.4.119.orig/drivers/net/phy/Kconfig
++++ linux-5.4.119/drivers/net/phy/Kconfig
+@@ -468,6 +468,11 @@ config FIXED_PHY
+
+ Currently tested with mpc866ads and mpc8349e-mitx.
+
++config GPY211_PHY
++ tristate "GPY211 PHY"
++ ---help---
++	  Supports the Intel GPY211 PHY with rate adaptation.
++
+ config ICPLUS_PHY
+ tristate "ICPlus PHYs"
+ ---help---
+Index: linux-5.4.119/drivers/net/phy/Makefile
+===================================================================
+--- linux-5.4.119.orig/drivers/net/phy/Makefile
++++ linux-5.4.119/drivers/net/phy/Makefile
+@@ -86,6 +86,7 @@ obj-$(CONFIG_DP83TC811_PHY) += dp83tc811
+ obj-$(CONFIG_DP83848_PHY) += dp83848.o
+ obj-$(CONFIG_DP83867_PHY) += dp83867.o
+ obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
++obj-$(CONFIG_GPY211_PHY) += gpy211.o
+ obj-$(CONFIG_ICPLUS_PHY) += icplus.o
+ obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
+ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/741-add-default-setting-to-dsa-unused-port.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/741-add-default-setting-to-dsa-unused-port.patch
new file mode 100644
index 0000000..7769ebd
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/741-add-default-setting-to-dsa-unused-port.patch
@@ -0,0 +1,124 @@
+Index: linux-5.4.124/drivers/net/dsa/mt7530.c
+===================================================================
+--- linux-5.4.124.orig/drivers/net/dsa/mt7530.c
++++ linux-5.4.124/drivers/net/dsa/mt7530.c
+@@ -1021,6 +1021,9 @@ mt7530_stp_state_set(struct dsa_switch *
+ struct mt7530_priv *priv = ds->priv;
+ u32 stp_state;
+
++ if (dsa_is_unused_port(ds, port))
++ return;
++
+ switch (state) {
+ case BR_STATE_DISABLED:
+ stp_state = MT7530_STP_DISABLED;
+@@ -1676,10 +1679,58 @@ mt7530_setup(struct dsa_switch *ds)
+ }
+
+ static int
++setup_unused_ports(struct dsa_switch *ds, u32 pm)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 egtag_mask = 0;
++ u32 egtag_val = 0;
++ int i;
++
++ if (!pm)
++ return 0;
++
++ for (i = 0; i < MT7530_NUM_PORTS; i++) {
++ if (!dsa_is_unused_port(ds, i))
++ continue;
++
++ /* Setup MAC port with maximum capability. */
++ if ((i == 5) || (i == 6))
++ if (priv->info->cpu_port_config)
++ priv->info->cpu_port_config(ds, i);
++
++ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK | PCR_PORT_VLAN_MASK,
++ PCR_MATRIX(pm) | MT7530_PORT_SECURITY_MODE);
++ egtag_mask |= ETAG_CTRL_P_MASK(i);
++ egtag_val |= ETAG_CTRL_P(i, MT7530_VLAN_EGRESS_UNTAG);
++ }
++
++ /* Add unused ports to VLAN2 group for using IVL fdb. */
++ mt7530_write(priv, MT7530_VAWD1,
++ IVL_MAC | VTAG_EN | PORT_MEM(pm) | VLAN_VALID);
++ mt7530_rmw(priv, MT7530_VAWD2, egtag_mask, egtag_val);
++ mt7530_vlan_cmd(priv, MT7530_VTCR_WR_VID, MT753X_RESERVED_VLAN);
++
++ for (i = 0; i < MT7530_NUM_PORTS; i++) {
++ if (!dsa_is_unused_port(ds, i))
++ continue;
++
++ mt7530_rmw(priv, MT7530_PPBV1_P(i), G0_PORT_VID_MASK,
++ G0_PORT_VID(MT753X_RESERVED_VLAN));
++ mt7530_rmw(priv, MT7530_SSP_P(i), FID_PST_MASK, MT7530_STP_FORWARDING);
++
++ dev_dbg(ds->dev, "Add unused port%d to reserved VLAN%d group\n",
++ i, MT753X_RESERVED_VLAN);
++ }
++
++ return 0;
++}
++
++static int
+ mt7531_setup(struct dsa_switch *ds)
+ {
+ struct mt7530_priv *priv = ds->priv;
+ struct mt7530_dummy_poll p;
++ u32 unused_pm = 0;
+ u32 val, id;
+ int ret, i;
+
+@@ -1767,7 +1818,9 @@ mt7531_setup(struct dsa_switch *ds)
+
+ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+
+- if (dsa_is_cpu_port(ds, i))
++ if (dsa_is_unused_port(ds, i))
++ unused_pm |= BIT(i);
++ else if (dsa_is_cpu_port(ds, i))
+ mt753x_cpu_port_enable(ds, i);
+ else
+ mt7530_port_disable(ds, i);
+@@ -1777,6 +1830,9 @@ mt7531_setup(struct dsa_switch *ds)
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
++ /* Group and enable unused ports as a standalone dumb switch. */
++ setup_unused_ports(ds, unused_pm);
++
+ ds->configure_vlan_while_not_filtering = true;
+
+ /* Flush the FDB table */
+@@ -2101,7 +2157,7 @@ mt7531_mac_config(struct dsa_switch *ds,
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dp = dsa_to_port(ds, port);
+- phydev = dp->slave->phydev;
++ phydev = (dp->slave) ? dp->slave->phydev : NULL;
+ return mt7531_rgmii_setup(priv, port, interface, phydev);
+ case PHY_INTERFACE_MODE_SGMII:
+ return mt7531_sgmii_setup_mode_an(priv, port, interface);
+@@ -2641,7 +2697,7 @@ mt7530_probe(struct mdio_device *mdiodev
+ if (!priv)
+ return -ENOMEM;
+
+- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
++ priv->ds = dsa_switch_alloc(&mdiodev->dev, MT7530_NUM_PORTS);
+ if (!priv->ds)
+ return -ENOMEM;
+
+Index: linux-5.4.124/drivers/net/dsa/mt7530.h
+===================================================================
+--- linux-5.4.124.orig/drivers/net/dsa/mt7530.h
++++ linux-5.4.124/drivers/net/dsa/mt7530.h
+@@ -10,6 +10,7 @@
+ #define MT7530_CPU_PORT 6
+ #define MT7530_NUM_FDB_RECORDS 2048
+ #define MT7530_ALL_MEMBERS 0xff
++#define MT753X_RESERVED_VLAN 2
+
+ enum mt753x_id {
+ ID_MT7530 = 0,
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/742-net-dsa-add-MT7531-Gigabit-Ethernet-PHY-setting.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/742-net-dsa-add-MT7531-Gigabit-Ethernet-PHY-setting.patch
new file mode 100644
index 0000000..948bb69
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/742-net-dsa-add-MT7531-Gigabit-Ethernet-PHY-setting.patch
@@ -0,0 +1,1687 @@
+Index: linux-5.4.124/drivers/net/dsa/mt7530.c
+===================================================================
+--- linux-5.4.124.orig/drivers/net/dsa/mt7530.c
++++ linux-5.4.124/drivers/net/dsa/mt7530.c
+@@ -1830,6 +1830,8 @@ mt7531_setup(struct dsa_switch *ds)
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
++ mt7531_phy_setup(ds);
++
+ /* Group and enable unused ports as a standalone dumb switch. */
+ setup_unused_ports(ds, unused_pm);
+
+Index: linux-5.4.124/drivers/net/dsa/mt7530.h
+===================================================================
+--- linux-5.4.124.orig/drivers/net/dsa/mt7530.h
++++ linux-5.4.124/drivers/net/dsa/mt7530.h
+@@ -782,4 +782,5 @@ static inline void INIT_MT7530_DUMMY_POL
+ p->reg = reg;
+ }
+
++int mt7531_phy_setup(struct dsa_switch *ds);
+ #endif /* __MT7530_H */
+Index: linux-5.4.124/drivers/net/dsa/mt7531_phy.c
+===================================================================
+--- /dev/null
++++ linux-5.4.124/drivers/net/dsa/mt7531_phy.c
+@@ -0,0 +1,1378 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Common part for MediaTek MT753x gigabit switch
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Weijie Gao <weijie.gao@mediatek.com>
++ */
++
++#include <linux/delay.h>
++#include <linux/hrtimer.h>
++#include <linux/kernel.h>
++#include <net/dsa.h>
++#include "mt7530.h"
++#include "mt7531_phy.h"
++
++#define MT7531_NUM_PHYS 5
++
++static u32 tc_phy_read_dev_reg(struct dsa_switch *ds, u32 port_num, u32 dev_addr, u32 reg_addr)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 phy_val;
++ u32 addr;
++
++ addr = MII_ADDR_C45 | (dev_addr << 16) | (reg_addr & 0xffff);
++ phy_val = priv->info->phy_read(ds, port_num, addr);
++
++ //printk("switch phy cl45 r %d 0x%x 0x%x = %x\n",port_num, dev_addr, reg_addr, phy_val);
++ return phy_val;
++}
++
++static void tc_phy_write_dev_reg(struct dsa_switch *ds, u32 port_num, u32 dev_addr, u32 reg_addr, u32 write_data)
++{
++ struct mt7530_priv *priv = ds->priv;
++ u32 addr;
++
++ addr = MII_ADDR_C45 | (dev_addr << 16) | (reg_addr & 0xffff);
++
++ priv->info->phy_write(ds, port_num, addr, write_data);
++
++ //u32 phy_val = priv->info->phy_read(ds, port_num, addr);
++ //printk("switch phy cl45 w %d 0x%x 0x%x 0x%x --> read back 0x%x\n",port_num, dev_addr, reg_addr, write_data, phy_val);
++}
++
++static void switch_phy_write(struct dsa_switch *ds, u32 port_num, u32 reg_addr, u32 write_data){
++ struct mt7530_priv *priv = ds->priv;
++
++ priv->info->phy_write(ds, port_num, reg_addr, write_data);
++}
++
++static u32 switch_phy_read(struct dsa_switch *ds, u32 port_num, u32 reg_addr){
++ struct mt7530_priv *priv = ds->priv;
++
++ return priv->info->phy_read(ds, port_num, reg_addr);
++}
++
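++/* mt753x_tr_write()/mt753x_tr_read(): indirect access to the PHY TR register space. Select the TR page via the CL22 page register, wait for the pending transaction, transfer the 32-bit data, then restore page 0. */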
++static void mt753x_tr_write(struct dsa_switch *ds, int addr, u8 ch, u8 node, u8 daddr,
++ u32 data)
++{
++ ktime_t timeout;
++ u32 timeout_us;
++ u32 val;
++
++ switch_phy_write(ds, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);
++
++ val = switch_phy_read(ds, addr, PHY_TR_CTRL);
++
++ timeout_us = 100000;
++ timeout = ktime_add_us(ktime_get(), timeout_us);
++ while (1) {
++ val = switch_phy_read(ds, addr, PHY_TR_CTRL);
++
++ if (!!(val & PHY_TR_PKT_XMT_STA))
++ break;
++
++ if (ktime_compare(ktime_get(), timeout) > 0)
++ goto out;
++ }
++
++ switch_phy_write(ds, addr, PHY_TR_LOW_DATA, PHY_TR_LOW_VAL(data));
++ switch_phy_write(ds, addr, PHY_TR_HIGH_DATA, PHY_TR_HIGH_VAL(data));
++ val = PHY_TR_PKT_XMT_STA | (PHY_TR_WRITE << PHY_TR_WR_S) |
++ (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
++ (daddr << PHY_TR_DATA_ADDR_S);
++ switch_phy_write(ds, addr, PHY_TR_CTRL, val);
++
++ timeout_us = 100000;
++ timeout = ktime_add_us(ktime_get(), timeout_us);
++ while (1) {
++ val = switch_phy_read(ds, addr, PHY_TR_CTRL);
++
++ if (!!(val & PHY_TR_PKT_XMT_STA))
++ break;
++
++ if (ktime_compare(ktime_get(), timeout) > 0)
++ goto out;
++ }
++out:
++ switch_phy_write(ds, addr, PHY_CL22_PAGE_CTRL, 0);
++}
++
++static int mt753x_tr_read(struct dsa_switch *ds, int addr, u8 ch, u8 node, u8 daddr)
++{
++ ktime_t timeout;
++ u32 timeout_us;
++ u32 val;
++ u8 val_h;
++
++ switch_phy_write(ds, addr, PHY_CL22_PAGE_CTRL, PHY_TR_PAGE);
++
++ val = switch_phy_read(ds, addr, PHY_TR_CTRL);
++
++ timeout_us = 100000;
++ timeout = ktime_add_us(ktime_get(), timeout_us);
++ while (1) {
++ val = switch_phy_read(ds, addr, PHY_TR_CTRL);
++
++ if (!!(val & PHY_TR_PKT_XMT_STA))
++ break;
++
++ if (ktime_compare(ktime_get(), timeout) > 0) {
++ switch_phy_write(ds, addr, PHY_CL22_PAGE_CTRL, 0);
++ return -ETIMEDOUT;
++ }
++ }
++
++ val = PHY_TR_PKT_XMT_STA | (PHY_TR_READ << PHY_TR_WR_S) |
++ (ch << PHY_TR_CH_ADDR_S) | (node << PHY_TR_NODE_ADDR_S) |
++ (daddr << PHY_TR_DATA_ADDR_S);
++ switch_phy_write(ds, addr, PHY_TR_CTRL, val);
++
++ timeout_us = 100000;
++ timeout = ktime_add_us(ktime_get(), timeout_us);
++ while (1) {
++ val = switch_phy_read(ds, addr, PHY_TR_CTRL);
++
++ if (!!(val & PHY_TR_PKT_XMT_STA))
++ break;
++
++ if (ktime_compare(ktime_get(), timeout) > 0) {
++ switch_phy_write(ds, addr, PHY_CL22_PAGE_CTRL, 0);
++ return -ETIMEDOUT;
++ }
++ }
++
++ val = switch_phy_read(ds, addr, PHY_TR_LOW_DATA);
++ val_h = switch_phy_read(ds, addr, PHY_TR_HIGH_DATA);
++ val |= (val_h << 16);
++
++ switch_phy_write(ds, addr, PHY_CL22_PAGE_CTRL, 0);
++
++ return val;
++}
++
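++/* Maps the converged ZCAL code (offset by 9 in ge_cal_r50()) to the 7-bit R50 termination trim written into registers 1e.174/1e.175. */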
++static const u8 MT753x_ZCAL_TO_R50ohm_GE_TBL_100[64] = {
++ 127, 127, 127, 127, 127, 127, 127, 127,
++ 127, 127, 127, 127, 127, 123, 122, 117,
++ 115, 112, 103, 100, 98, 87, 85, 83,
++ 81, 72, 70, 68, 66, 64, 55, 53,
++ 52, 50, 49, 48, 38, 36, 35, 34,
++ 33, 32, 22, 21, 20, 19, 18, 17,
++ 16, 7, 6, 5, 4, 3, 2, 1,
++ 0, 0, 0, 0, 0, 0, 0, 0
++};
++
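++/* Search order for the 6-bit TX-offset DAC codes: ge_cal_tx_offset() starts at index 31 (code 0x00) and steps the index by +/-1 until the comparator output flips. */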
++static const u8 MT753x_TX_OFFSET_TBL[64] = {
++ 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
++ 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
++ 0xf, 0xe, 0xd, 0xc, 0xb, 0xa, 0x9, 0x8,
++ 0x7, 0x6, 0x5, 0x4, 0x3, 0x2, 0x1, 0x0,
++ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
++ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
++ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
++ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f
++};
++
++static u8 ge_cal_flag;
++
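++/* Pulse da_calin_flag (1e.17c) and poll the calibration-done bit in 1e.17b; if the first MDIO write did not take effect, retry the sequence once before giving up. */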
++static u8 all_ge_ana_cal_wait(struct dsa_switch *ds, u32 delay, u32 phyaddr)
++{
++ u8 all_ana_cal_status;
++ u32 cnt, tmp_1e_17c;
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017c, 0x0001); // da_calin_flag pull high
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0x0001);
++ //printk("delay = %d\n", delay);
++
++ cnt = 10000;
++ do {
++ udelay(delay);
++ cnt--;
++ all_ana_cal_status = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x17b) & 0x1;
++
++ } while ((all_ana_cal_status == 0) && (cnt != 0));
++
++
++ if(all_ana_cal_status == 1) {
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0);
++ return all_ana_cal_status;
++ } else {
++ tmp_1e_17c = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x17c);
++ if ((tmp_1e_17c & 0x1) != 1) {
++ pr_info("FIRST MDC/MDIO write error\n");
++ pr_info("FIRST 1e_17c = %x\n", tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x17c));
++
++ }
++ printk("re-K again\n");
++
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0x0001);
++ cnt = 10000;
++ do {
++ udelay(delay);
++ cnt--;
++ tmp_1e_17c = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x17c);
++ if ((tmp_1e_17c & 0x1) != 1) {
++ pr_info("SECOND MDC/MDIO write error\n");
++ pr_info("SECOND 1e_17c = %x\n", tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x17c));
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0x0001);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0x0001);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0x0001);
++ }
++ } while ((cnt != 0) && (tmp_1e_17c == 0));
++
++ cnt = 10000;
++ do {
++ udelay(delay);
++ cnt--;
++ all_ana_cal_status = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x17b) & 0x1;
++
++ } while ((all_ana_cal_status == 0) && (cnt != 0));
++
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x17c, 0);
++ }
++
++ if(all_ana_cal_status == 0){
++ pr_info("!!!!!!!!!!!! dev1Eh_reg17b ERROR\n");
++ }
++
++ return all_ana_cal_status;
++}
++
++
++
++
++static int ge_cal_rext(struct dsa_switch *ds, u8 phyaddr, u32 delay)
++{
++ u8 rg_zcal_ctrl, all_ana_cal_status;
++ u16 ad_cal_comp_out_init;
++ u16 dev1e_e0_ana_cal_r5;
++ int calibration_polarity;
++ u8 cnt = 0;
++ u16 dev1e_17a_tmp, dev1e_e0_tmp;
++
++ /* *** Iext/Rext Cal start ************ */
++ all_ana_cal_status = ANACAL_INIT;
++ /* analog calibration enable, Rext calibration enable */
++ /* 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a */
++ /* 1e_dc[0]:rg_txvos_calen */
++ /* 1e_e1[4]:rg_cal_refsel(0:1.2V) */
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00db, 0x1110)
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x1110);
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00dc, 0x0000);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0);
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x00e1, 0x0000);
++ //tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e1, 0x10);
++
++ rg_zcal_ctrl = 0x20;/* start with 0 dB */
++ dev1e_e0_ana_cal_r5 = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0xe0); // get default value
++ /* 1e_e0[5:0]:rg_zcal_ctrl */
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0xe0, rg_zcal_ctrl);
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr);/* delay 20 usec */
++
++ if (all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk(" GE Rext AnaCal ERROR init! \r\n");
++ return -1;
++ }
++ /* 1e_17a[8]:ad_cal_comp_out */
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a) >> 8) & 0x1;
++ if (ad_cal_comp_out_init == 1)
++ calibration_polarity = -1;
++ else /* ad_cal_comp_out_init == 0 */
++ calibration_polarity = 1;
++ cnt = 0;
++ while (all_ana_cal_status < ANACAL_ERROR) {
++ cnt++;
++ rg_zcal_ctrl += calibration_polarity;
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0xe0, (rg_zcal_ctrl));
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr); /* delay 20 usec */
++ dev1e_17a_tmp = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a);
++ if (all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk(" GE Rext AnaCal ERROR 2! \r\n");
++ return -1;
++ } else if (((dev1e_17a_tmp >> 8) & 0x1) != ad_cal_comp_out_init) {
++ all_ana_cal_status = ANACAL_FINISH;
++ //printk(" GE Rext AnaCal Done! (%d)(0x%x) \r\n", cnt, rg_zcal_ctrl);
++ } else {
++ dev1e_17a_tmp = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a);
++ dev1e_e0_tmp = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0xe0);
++ if ((rg_zcal_ctrl == 0x3F) || (rg_zcal_ctrl == 0x00)) {
++ all_ana_cal_status = ANACAL_SATURATION; /* need to FT(IC fail?) */
++ printk(" GE Rext AnaCal Saturation! \r\n");
++ rg_zcal_ctrl = 0x20; /* 0 dB */
++ }
++ }
++ }
++
++ if (all_ana_cal_status == ANACAL_ERROR) {
++ rg_zcal_ctrl = 0x20; /* 0 dB */
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ } else if(all_ana_cal_status == ANACAL_FINISH){
++ //tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e0, ((rg_zcal_ctrl << 8) | rg_zcal_ctrl));
++ printk("0x1e-e0 = %x\n", tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x00e0));
++ /* **** 1f_115[2:0] = rg_zcal_ctrl[5:3] // Mog review */
++ tc_phy_write_dev_reg(ds, PHY0, 0x1f, 0x0115, ((rg_zcal_ctrl & 0x3f) >> 3));
++ printk("0x1f-115 = %x\n", tc_phy_read_dev_reg(ds, PHY0, 0x1f, 0x115));
++ printk(" GE Rext AnaCal Done! (%d)(0x%x) \r\n", cnt, rg_zcal_ctrl);
++ ge_cal_flag = 1;
++ } else {
++ printk("GE Rxet cal something wrong2\n");
++ }
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x0000);
++
++ return 0;
++}
++
++//-----------------------------------------------------------------
++static int ge_cal_r50(struct dsa_switch *ds, u8 phyaddr, u32 delay)
++{
++ u8 rg_zcal_ctrl, all_ana_cal_status, calibration_pair;
++ u16 ad_cal_comp_out_init;
++ u16 dev1e_e0_ana_cal_r5;
++ int calibration_polarity;
++ u8 cnt = 0;
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0x0000); // 1e_dc[0]:rg_txvos_calen
++
++ for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++) {
++ rg_zcal_ctrl = 0x20; // start with 0 dB
++ dev1e_e0_ana_cal_r5 = (tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x00e0) & (~0x003f));
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl)); // 1e_e0[5:0]:rg_zcal_ctrl
++ if(calibration_pair == ANACAL_PAIR_A)
++ {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x1101); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x0000);
++ //printk("R50 pair A 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00dc));
++
++ }
++ else if(calibration_pair == ANACAL_PAIR_B)
++ {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x1000); // 1e_dc[12]:rg_zcalen_b
++ //printk("R50 pair B 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00db),tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00dc));
++
++ }
++ else if(calibration_pair == ANACAL_PAIR_C)
++ {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x0100); // 1e_dc[8]:rg_zcalen_c
++ //printk("R50 pair C 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00dc));
++
++ }
++ else // if(calibration_pair == ANACAL_PAIR_D)
++ {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x0010); // 1e_dc[4]:rg_zcalen_d
++ //printk("R50 pair D 1e_db=%x 1e_db=%x\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00db), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x00dc));
++
++ }
++
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0)
++ {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( "GE R50 AnaCal ERROR init! \r\n");
++ return -1;
++ }
++
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a)>>8) & 0x1; // 1e_17a[8]:ad_cal_comp_out
++ if(ad_cal_comp_out_init == 1)
++ calibration_polarity = -1;
++ else
++ calibration_polarity = 1;
++
++ cnt = 0;
++ while(all_ana_cal_status < ANACAL_ERROR)
++ {
++ cnt ++;
++ rg_zcal_ctrl += calibration_polarity;
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr); // delay 20 usec
++
++ if(all_ana_cal_status == 0)
++ {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE R50 AnaCal ERROR 2! \r\n");
++ return -1;
++ }
++ else if(((tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init)
++ {
++ all_ana_cal_status = ANACAL_FINISH;
++ }
++ else {
++ if((rg_zcal_ctrl == 0x3F)||(rg_zcal_ctrl == 0x00))
++ {
++ all_ana_cal_status = ANACAL_SATURATION; // need to FT
++ printk( " GE R50 AnaCal Saturation! \r\n");
++ }
++ }
++ }
++
++ if(all_ana_cal_status == ANACAL_ERROR) {
++ rg_zcal_ctrl = 0x20; // 0 dB
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00e0, (dev1e_e0_ana_cal_r5 | rg_zcal_ctrl));
++ }
++ else {
++ rg_zcal_ctrl = MT753x_ZCAL_TO_R50ohm_GE_TBL_100[rg_zcal_ctrl - 9]; // wait Mog zcal/r50 mapping table
++ printk( " GE R50 AnaCal Done! (%d) (0x%x)(0x%x) \r\n", cnt, rg_zcal_ctrl, (rg_zcal_ctrl|0x80));
++ }
++
++ if(calibration_pair == ANACAL_PAIR_A) {
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174) & (~0x7f00);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174);
++ //printk( " GE-a 1e_174(0x%x)(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174), ad_cal_comp_out_init, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175));
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0174, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<8)&0xff00) | 0x8000))); // 1e_174[15:8]
++ //printk( " GE-a 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175));
++ }
++ else if(calibration_pair == ANACAL_PAIR_B) {
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174) & (~0x007f);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174);
++ //printk( " GE-b 1e_174(0x%x)(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174), ad_cal_comp_out_init, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175));
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0174, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<0)&0x00ff) | 0x0080))); // 1e_174[7:0]
++ //printk( " GE-b 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175));
++ }
++ else if(calibration_pair == ANACAL_PAIR_C) {
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175) & (~0x7f00);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0175, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<8)&0xff00) | 0x8000))); // 1e_175[15:8]
++ //printk( " GE-c 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175));
++ } else {// if(calibration_pair == ANACAL_PAIR_D)
++ ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175) & (~0x007f);
++ //ad_cal_comp_out_init = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0175, (ad_cal_comp_out_init | (((rg_zcal_ctrl<<0)&0x00ff) | 0x0080))); // 1e_175[7:0]
++ //printk( " GE-d 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175));
++ }
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00e0, ((rg_zcal_ctrl<<8)|rg_zcal_ctrl));
++ }
++
++ printk( " GE 1e_174(0x%x), 1e_175(0x%x) \r\n", tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0174), tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0175));
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x0000);
++
++ return 0;
++}
++
++static int ge_cal_tx_offset(struct dsa_switch *ds, u8 phyaddr, u32 delay)
++{
++ u8 all_ana_cal_status, calibration_pair;
++ u16 ad_cal_comp_out_init;
++ int calibration_polarity, tx_offset_temp;
++ u8 tx_offset_reg_shift, tabl_idx, i;
++ u8 cnt = 0;
++ u16 tx_offset_reg, reg_temp, cal_temp;
++ //switch_phy_write(phyaddr, R0, 0x2100);//harry tmp
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x0100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0x0001); // 1e_dc[0]:rg_txvos_calen
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0096, 0x8000); // 1e_96[15]:bypass_tx_offset_cal, Hw bypass, Fw cal
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x003e, 0xf808); // 1e_3e
++ for(i = 0; i <= 4; i++)
++ tc_phy_write_dev_reg(ds, i, 0x1e, 0x00dd, 0x0000);
++ for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++)
++ {
++ tabl_idx = 31;
++ tx_offset_temp = MT753x_TX_OFFSET_TBL[tabl_idx];
++
++ if(calibration_pair == ANACAL_PAIR_A) {
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5010);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x1000); // 1e_dd[12]:rg_txg_calen_a
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017d, (0x8000|DAC_IN_0V)); // 1e_17d:dac_in0_a
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0181, (0x8000|DAC_IN_0V)); // 1e_181:dac_in1_a
++ //printk("tx offset pairA 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0172) & (~0x3f00));
++ tx_offset_reg_shift = 8; // 1e_172[13:8]
++ tx_offset_reg = 0x0172;
++
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else if(calibration_pair == ANACAL_PAIR_B) {
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, 0x145, 0x5018);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0100); // 1e_dd[8]:rg_txg_calen_b
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017e, (0x8000|DAC_IN_0V)); // 1e_17e:dac_in0_b
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0182, (0x8000|DAC_IN_0V)); // 1e_182:dac_in1_b
++ //printk("tx offset pairB 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0172) & (~0x003f));
++ tx_offset_reg_shift = 0; // 1e_172[5:0]
++ tx_offset_reg = 0x0172;
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else if(calibration_pair == ANACAL_PAIR_C) {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0010); // 1e_dd[4]:rg_txg_calen_c
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017f, (0x8000|DAC_IN_0V)); // 1e_17f:dac_in0_c
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0183, (0x8000|DAC_IN_0V)); // 1e_183:dac_in1_c
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0173) & (~0x3f00));
++ //printk("tx offset pairC 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ tx_offset_reg_shift = 8; // 1e_173[13:8]
++ tx_offset_reg = 0x0173;
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else {// if(calibration_pair == ANACAL_PAIR_D)
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0001); // 1e_dd[0]:rg_txg_calen_d
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0180, (0x8000|DAC_IN_0V)); // 1e_180:dac_in0_d
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0184, (0x8000|DAC_IN_0V)); // 1e_184:dac_in1_d
++ //printk("tx offset pairD 1e_dd = %x, 1e_17d=%x, 1e_181=%x\n", tc_phy_read_dev_reg(phyaddr, 0x1e, 0x00dd), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x017d), tc_phy_read_dev_reg(phyaddr, 0x1e, 0x0181));
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x0173) & (~0x003f));
++ tx_offset_reg_shift = 0; // 1e_173[5:0]
++ tx_offset_reg = 0x0173;
++ //tc_phy_write_dev_reg(phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ }
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift))); // 1e_172, 1e_173
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx offset AnaCal ERROR init! \r\n");
++ return -1;
++ }
++
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a)>>8) & 0x1; // 1e_17a[8]:ad_cal_comp_out
++ if(ad_cal_comp_out_init == 1)
++ calibration_polarity = 1;
++ else
++ calibration_polarity = -1;
++
++ cnt = 0;
++ //printk("TX offset cnt = %d, tabl_idx= %x, offset_val = %x\n", cnt, tabl_idx, MT753x_TX_OFFSET_TBL[tabl_idx]);
++ while(all_ana_cal_status < ANACAL_ERROR) {
++
++ cnt ++;
++ tabl_idx += calibration_polarity;
++ //tx_offset_temp += calibration_polarity;
++ //cal_temp = tx_offset_temp;
++ cal_temp = MT753x_TX_OFFSET_TBL[tabl_idx];
++ //printk("TX offset cnt = %d, tabl_idx= %x, offset_val = %x\n", cnt, tabl_idx, MT753x_TX_OFFSET_TBL[tabl_idx]);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(cal_temp<<tx_offset_reg_shift)));
++
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx offset AnaCal ERROR init 2! \r\n");
++ return -1;
++ } else if(((tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init) {
++ all_ana_cal_status = ANACAL_FINISH;
++ } else {
++ if((tabl_idx == 0)||(tabl_idx == 0x3f)) {
++ all_ana_cal_status = ANACAL_SATURATION; // need to FT
++ printk( " GE Tx offset AnaCal Saturation! \r\n");
++ }
++ }
++ }
++
++ if(all_ana_cal_status == ANACAL_ERROR) {
++ tx_offset_temp = TX_AMP_OFFSET_0MV;
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_offset_reg, (reg_temp|(tx_offset_temp<<tx_offset_reg_shift)));
++ } else {
++ printk( " GE Tx offset AnaCal Done! (pair-%d)(%d)(0x%x) 0x1e_%x=0x%x\n", calibration_pair, cnt, MT753x_TX_OFFSET_TBL[tabl_idx], tx_offset_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_offset_reg));
++ }
++ }
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017d, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017e, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017f, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0180, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0181, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0182, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0183, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0184, 0x0000);
++
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x0000); // disable analog calibration circuit
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x0000); // disable analog calibration circuit
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x003e, 0x0000); // disable Tx VLD force mode
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0000); // disable Tx offset/amplitude calibration circuit
++
++ return 0;
++}
++
++u16 tx_amp_check_thres(int pair, u32 reg, u16 val, s16 offset)
++{
++ if ((offset < 0 && (0 - offset) > TX_AMP_MAX_OFFSET) ||
++ (offset > TX_AMP_MAX_OFFSET)) {
++ pr_info(" offset=%d exceed tx amp max offset=%d\n", offset, TX_AMP_MAX_OFFSET);
++ return val;
++ }
++
++ if (offset < 0 && val < TX_AMP_LOW_TS - offset) {
++ if (val < TX_AMP_LOWEST_TS - offset) {
++ pr_info(" GE Tx amp AnaCal underflow! (pair-%d)(1e_%x) seed 0x%x < 0x%x)\n",
++ pair, reg, val, TX_AMP_LOWEST_TS - offset);
++ }
++ return 0;
++ }
++
++ if (offset >= 0 && val > TX_AMP_HIGH_TS - offset) {
++ if ( val > TX_AMP_HIGHEST_TS - offset) {
++ pr_info(" GE Tx amp AnaCal overflow! (pair-%d)(1e_%x) seed = 0x%x > 0x%x)\n",
++ pair, reg, val, TX_AMP_HIGHEST_TS - offset);
++ }
++ return TX_AMP_MAX;
++ }
++
++ return val + offset;
++}
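++
++/*
++ * Illustrative values for tx_amp_check_thres() with the thresholds defined in
++ * mt7531_phy.h (TX_AMP_MAX = 0x3f, TX_AMP_MAX_OFFSET = 0xb):
++ *   val = 0x05, offset =  -8 -> val < 0 - offset, clamped to 0
++ *   val = 0x3a, offset =  +7 -> val > 0x3f - offset, clamped to TX_AMP_MAX
++ *   val = 0x20, offset =  +7 -> in range, returns 0x27
++ *   val = 0x20, offset = +12 -> |offset| > TX_AMP_MAX_OFFSET, val returned as-is
++ */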
++
++static int ge_cal_tx_amp(struct dsa_switch *ds, u8 phyaddr, u32 delay)
++{
++ u8 all_ana_cal_status, calibration_pair, i;
++ u16 ad_cal_comp_out_init;
++ int calibration_polarity;
++ u32 tx_amp_reg_shift;
++ u16 reg_temp;
++ u32 tx_amp_temp, tx_amp_reg, cnt=0, tx_amp_reg_100;
++ u32 debug_tmp, reg_backup, reg_tmp;
++ u32 orig_1e_11, orig_1f_300;
++
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x1100); // 1e_db[12]:rg_cal_ckinv, [8]:rg_ana_calen, [4]:rg_rext_calen, [0]:rg_zcalen_a
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0x0001); // 1e_dc[0]:rg_txvos_calen
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e1, 0x0010); // 1e_e1[4]:select 1V
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x003e, 0xf808); // 1e_3e:enable Tx VLD
++
++ orig_1e_11 = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x11);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x11, 0xff00);
++// tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x27a, 0x33);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0xc9, 0xffff);
++ orig_1f_300 = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x300);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x300, 0x4);
++ for(i = 0; i <= 4; i++)
++ tc_phy_write_dev_reg(ds, i, 0x1e, 0x00dd, 0x0000);
++ for(calibration_pair = ANACAL_PAIR_A; calibration_pair <= ANACAL_PAIR_D; calibration_pair ++) {
++ tx_amp_temp = 0x20; // start with 0 dB
++
++ if(calibration_pair == ANACAL_PAIR_A) {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x1000); // 1e_dd[12]:tx_a amp calibration enable
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017d, (0x8000|DAC_IN_2V)); // 1e_17d:dac_in0_a
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0181, (0x8000|DAC_IN_2V)); // 1e_181:dac_in1_a
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x012) & (~0xfc00));
++ tx_amp_reg_shift = 10; // 1e_12[15:10]
++ tx_amp_reg = 0x12;
++ tx_amp_reg_100 = 0x16;
++ } else if(calibration_pair == ANACAL_PAIR_B) {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0100); // 1e_dd[8]:tx_b amp calibration enable
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017e, (0x8000|DAC_IN_2V)); // 1e_17e:dac_in0_b
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0182, (0x8000|DAC_IN_2V)); // 1e_182:dac_in1_b
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x017) & (~0x3f00));
++ tx_amp_reg_shift = 8; // 1e_17[13:8]
++ tx_amp_reg = 0x17;
++ tx_amp_reg_100 = 0x18;
++ } else if(calibration_pair == ANACAL_PAIR_C) {
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0010); // 1e_dd[4]:tx_c amp calibration enable
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017f, (0x8000|DAC_IN_2V)); // 1e_17f:dac_in0_c
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0183, (0x8000|DAC_IN_2V)); // 1e_183:dac_in1_c
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x019) & (~0x3f00));
++ tx_amp_reg_shift = 8; // 1e_19[13:8]
++ tx_amp_reg = 0x19;
++ tx_amp_reg_100 = 0x20;
++ } else { //if(calibration_pair == ANACAL_PAIR_D)
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0001); // 1e_dd[0]:tx_d amp calibration enable
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0180, (0x8000|DAC_IN_2V)); // 1e_180:dac_in0_d
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0184, (0x8000|DAC_IN_2V)); // 1e_184:dac_in1_d
++ reg_temp = (tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x021) & (~0x3f00));
++ tx_amp_reg_shift = 8; // 1e_21[13:8]
++ tx_amp_reg = 0x21;
++ tx_amp_reg_100 = 0x22;
++ }
++ tc_phy_write_dev_reg( ds, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift))); // 1e_12, 1e_17, 1e_19, 1e_21
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx amp AnaCal ERROR init init! \r\n");
++ return -1;
++ }
++
++ ad_cal_comp_out_init = (tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a)>>8) & 0x1; // 1e_17a[8]:ad_cal_comp_out
++ if(ad_cal_comp_out_init == 1)
++ calibration_polarity = -1;
++ else
++ calibration_polarity = 1;
++
++ cnt =0;
++ while(all_ana_cal_status < ANACAL_ERROR) {
++ cnt ++;
++ tx_amp_temp += calibration_polarity;
++ //printk("tx_amp : %x, 1e %x = %x\n", tx_amp_temp, tx_amp_reg, (reg_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ tc_phy_write_dev_reg( ds, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100, (tx_amp_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ all_ana_cal_status = all_ge_ana_cal_wait(ds, delay, phyaddr); // delay 20 usec
++ if(all_ana_cal_status == 0) {
++ all_ana_cal_status = ANACAL_ERROR;
++ printk( " GE Tx amp AnaCal ERROR 2! \r\n");
++ return -1;
++ } else if(((tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x017a)>>8)&0x1) != ad_cal_comp_out_init) {
++ //printk("TX AMP ANACAL_FINISH\n");
++ all_ana_cal_status = ANACAL_FINISH;
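++ /*
++  * Port/pair-specific trims: the raw code found by the search is nudged
++  * by a small per-port, per-pair constant before it is written back and
++  * range-checked through tx_amp_check_thres() below.
++  */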
++ if (phyaddr == 0) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp - 2;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp - 2;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp - 1;
++ } else if (phyaddr == 1) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp ;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp - 1;
++ } else if (phyaddr == 2) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp - 1;
++ } else if (phyaddr == 3) {
++ tx_amp_temp = tx_amp_temp;
++ } else if (phyaddr == 4) {
++ if (calibration_pair == ANACAL_PAIR_A)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_B)
++ tx_amp_temp = tx_amp_temp - 1;
++ else if(calibration_pair == ANACAL_PAIR_C)
++ tx_amp_temp = tx_amp_temp;
++ else if(calibration_pair == ANACAL_PAIR_D)
++ tx_amp_temp = tx_amp_temp;
++ }
++ reg_temp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg)&(~0xff00);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)));
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, (tx_amp_temp|((tx_amp_temp)<<tx_amp_reg_shift)));
++ if (phyaddr == 0) {
++ if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg, tx_amp_temp, 7);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, ((reg_tmp|((tx_amp_temp)<<tx_amp_reg_shift))));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, 1+4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, 4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 1) {
++ if (tx_amp_reg == 0x12) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg, tx_amp_temp, 9);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, ((reg_tmp|((tx_amp_temp)<<tx_amp_reg_shift))));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg == 0x17){
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg, tx_amp_temp, 7);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, ((reg_tmp|((tx_amp_temp)<<tx_amp_reg_shift))));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, 4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, -1+4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 2) {
++ if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg, tx_amp_temp, 6);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, ((reg_tmp|((tx_amp_temp)<<tx_amp_reg_shift))));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if ((tx_amp_reg_100 == 0x16) || (tx_amp_reg_100 == 0x18)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, -1+4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 3) {
++ if (tx_amp_reg == 0x12) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg, tx_amp_temp, 4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, ((reg_tmp|((tx_amp_temp)<<tx_amp_reg_shift))));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg == 0x17) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg, tx_amp_temp, 7);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, ((reg_tmp|((tx_amp_temp)<<tx_amp_reg_shift))));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, -2+4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, -1+3);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ } else if (phyaddr == 4) {
++ if ((tx_amp_reg == 0x12) || (tx_amp_reg == 0x17)) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg, tx_amp_temp, 5);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, ((reg_tmp|((tx_amp_temp)<<tx_amp_reg_shift))));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg));
++ }
++ if (tx_amp_reg_100 == 0x16) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, -2+4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ if (tx_amp_reg_100 == 0x18) {
++ //printk("before : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ reg_tmp = tx_amp_check_thres(calibration_pair, tx_amp_reg_100, tx_amp_temp, -1+4);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100,(tx_amp_temp|((reg_tmp)<<tx_amp_reg_shift)));
++ //printk("after : PORT[%d] 1e_%x = %x\n", phyaddr, tx_amp_reg_100, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg_100));
++ }
++ }
++
++ if (calibration_pair == ANACAL_PAIR_A){
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x12);
++ reg_tmp = ((reg_backup & 0xfc00) >> 10);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = 0x0000;
++ reg_backup |= ((tx_amp_temp << 10) | (tx_amp_temp << 0));
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x12, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x12);
++ //printk("PORT[%d] 1e.012 = %x (OFFSET_1000M_PAIR_A)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x16);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (tx_amp_temp << 0);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x16, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x16);
++ //printk("PORT[%d] 1e.016 = %x (OFFSET_TESTMODE_1000M_PAIR_A)\n", phyaddr, reg_backup);
++ }
++ else if(calibration_pair == ANACAL_PAIR_B){
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x17);
++ reg_tmp = ((reg_backup & 0x3f00) >> 8);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = 0x0000;
++ reg_backup |= ((tx_amp_temp << 8) | (tx_amp_temp << 0));
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x17, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x17);
++ //printk("PORT[%d] 1e.017 = %x (OFFSET_1000M_PAIR_B)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x18);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (tx_amp_temp << 0);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x18, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x18);
++ //printk("PORT[%d] 1e.018 = %x (OFFSET_TESTMODE_1000M_PAIR_B)\n", phyaddr, reg_backup);
++ }
++ else if(calibration_pair == ANACAL_PAIR_C){
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x19);
++ reg_tmp = ((reg_backup & 0x3f00) >> 8);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = (reg_backup & (~0x3f00));
++ reg_backup |= (tx_amp_temp << 8);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x19, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x19);
++ //printk("PORT[%d] 1e.019 = %x (OFFSET_1000M_PAIR_C)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x20);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (tx_amp_temp << 0);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x20, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x20);
++ //printk("PORT[%d] 1e.020 = %x (OFFSET_TESTMODE_1000M_PAIR_C)\n", phyaddr, reg_backup);
++ }
++ else if(calibration_pair == ANACAL_PAIR_D){
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x21);
++ reg_tmp = ((reg_backup & 0x3f00) >> 8);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = (reg_backup & (~0x3f00));
++ reg_backup |= (tx_amp_temp << 8);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x21, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x21);
++ //printk("PORT[%d] 1e.021 = %x (OFFSET_1000M_PAIR_D)\n", phyaddr, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x22);
++ reg_tmp = ((reg_backup & 0x3f) >> 0);
++ tx_amp_temp = tx_amp_check_thres(calibration_pair, tx_amp_reg, reg_tmp, -8);
++ reg_backup = (reg_backup & (~0x3f));
++ reg_backup |= (tx_amp_temp << 0);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x22, reg_backup);
++ reg_backup = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x22);
++ //printk("PORT[%d] 1e.022 = %x (OFFSET_TESTMODE_1000M_PAIR_D)\n", phyaddr, reg_backup);
++ }
++
++ if (calibration_pair == ANACAL_PAIR_A){
++ //printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x12);
++ //printk("1e.012 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x16);
++ //printk("1e.016 = 0x%x\n", debug_tmp);
++ }
++
++ else if(calibration_pair == ANACAL_PAIR_B){
++ //printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x17);
++ //printk("1e.017 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x18);
++ //printk("1e.018 = 0x%x\n", debug_tmp);
++ }
++ else if(calibration_pair == ANACAL_PAIR_C){
++ //printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x19);
++ //printk("1e.019 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x20);
++ //printk("1e.020 = 0x%x\n", debug_tmp);
++ }
++ else if(calibration_pair == ANACAL_PAIR_D){
++ //printk("PORT (%d) TX_AMP PAIR (A) FINAL CALIBRATION RESULT\n", phyaddr);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x21);
++ //printk("1e.021 = 0x%x\n", debug_tmp);
++ debug_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x22);
++ //printk("1e.022 = 0x%x\n", debug_tmp);
++ }
++
++
++ printk( " GE Tx amp AnaCal Done! (pair-%d)(1e_%x = 0x%x)(0x%x)\n", calibration_pair, tx_amp_reg, tc_phy_read_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg), reg_tmp);
++
++ } else {
++ if((tx_amp_temp == 0x3f)||(tx_amp_temp == 0x00)) {
++ all_ana_cal_status = ANACAL_SATURATION; // need to FT
++ printk( " GE Tx amp AnaCal Saturation! \r\n");
++ }
++ }
++ }
++
++ if(all_ana_cal_status == ANACAL_ERROR) {
++ tx_amp_temp = 0x20;
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, tx_amp_reg, (reg_temp|(tx_amp_temp<<tx_amp_reg_shift)));
++ }
++ }
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017d, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017e, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x017f, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0180, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0181, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0182, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0183, 0x0000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x0184, 0x0000);
++
++ /* disable analog calibration circuit */
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00db, 0x0000);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00db, 0x0000); // disable analog calibration circuit
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dc, 0x0000); // disable Tx offset calibration circuit
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x003e, 0x0000); // disable Tx VLD force mode
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x00dd, 0x0000); // disable Tx offset/amplitude calibration circuit
++
++
++
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x273, 0x2000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0xc9, 0x0fff);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x145, 0x1000);
++
++ /* Restore CR to default */
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x11, orig_1e_11);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x300, orig_1f_300);
++
++ return 0;
++}
++
++//-----------------------------------------------------------------
++
++static int phy_calibration(struct dsa_switch *ds, u8 phyaddr)
++{
++ //u32 reg_tmp,reg_tmp0, reg_tmp1, i;
++ u32 reg_tmp;
++ u32 CALDLY = 40;
++ u32 orig_1e_11, orig_1e_185, orig_1e_e1, orig_1f_100;
++ int ret;
++
++ /* Use SW calibration data. */
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x403);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x403, reg_tmp | BIT(3));
++ /* set [12]AN disable, [8]full duplex, [13/6]1000Mbps */
++ //tc_phy_write_dev_reg(phyaddr, 0x0, 0x0140);
++ switch_phy_write(ds, phyaddr, R0, 0x140);
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x145, 0x1010);/* fix mdi */
++ orig_1e_185 = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, RG_185);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, RG_185, 0);/* disable tx slew control */
++ orig_1f_100 = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x100);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x100, 0xc000);/* BG voltage output */
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x403, 0x1099); //bypass efuse
++
++#if (1)
++ // 1f_27c[12:8] cr_da_tx_i2mpb_10m Trimming TX bias setup(@10M)
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x27c, 0x1f1f);
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x27c, 0x3300);
++
++ //reg_tmp1 = tc_phy_read_dev_reg(ds, PHY0, 0x1f, 0x27c);
++ //dev1Fh_reg273h TXVLD DA register - Adjust voltage mode TX amplitude.
++ //tc_phy_write_dev_reg(phyaddr, 0x1f, 0x273, 0);
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x273, 0x1000);
++ //reg_tmp1 = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x273);
++ //printk("reg_tmp1273 = %x\n", reg_tmp1);
++ /*1e_11 TX overshoot Enable (PAIR A/B/C/D) in gbe mode*/
++
++ orig_1e_11 = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x11);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x11);
++ reg_tmp = reg_tmp | (0xf << 12);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x11, reg_tmp);
++ orig_1e_e1 = tc_phy_read_dev_reg(ds, PHY0, 0x1e, 0x00e1);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e1, 0x10);
++ /* calibration start ============ */
++ printk("CALDLY = %d\n", CALDLY);
++ if(ge_cal_flag == 0){
++ ret = ge_cal_rext(ds, 0, CALDLY);
++ if (ret == -1){
++ printk("ge_cal_rext error K port =%d\n", phyaddr);
++ return ret;
++ }
++ ge_cal_flag = 1;
++ }
++
++ /* *** R50 Cal start ***************************** */
++ /*phyaddress = 0*/
++ ret = ge_cal_r50(ds, phyaddr, CALDLY);
++ if (ret == -1){
++ printk("R50 error K port =%d\n", phyaddr);
++ return ret;
++ }
++ /* *** R50 Cal end *** */
++ /* *** Tx offset Cal start *********************** */
++ ret = ge_cal_tx_offset(ds, phyaddr, CALDLY);
++ if (ret == -1){
++ printk("ge_cal_tx_offset error K port =%d\n", phyaddr);
++ return ret;
++ }
++ /* *** Tx offset Cal end *** */
++
++ /* *** Tx Amp Cal start *** */
++ ret = ge_cal_tx_amp(ds, phyaddr, CALDLY);
++ if (ret == -1){
++ printk("ge_cal_tx_amp error K port =%d\n", phyaddr);
++ return ret;
++ }
++ /* *** Tx Amp Cal end *** */
++ /*tmp maybe changed*/
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x27c, 0x1111);
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x27b, 0x47);
++ //tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x273, 0x2000);
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3a8, 0x0810);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3aa, 0x0008);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3ab, 0x0810);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3ad, 0x0008);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3ae, 0x0106);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3b0, 0x0001);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3b1, 0x0106);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3b3, 0x0001);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x18c, 0x0001);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x18d, 0x0001);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x18e, 0x0001);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x18f, 0x0001);
++
++ /*da_tx_bias1_b_tx_standby = 5'b10 (dev1eh_reg3aah[12:8])*/
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x3aa);
++ reg_tmp = reg_tmp & ~(0x1f00);
++ reg_tmp = reg_tmp | 0x2 << 8;
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3aa, reg_tmp);
++
++ /*da_tx_bias1_a_tx_standby = 5'b10 (dev1eh_reg3a9h[4:0])*/
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1e, 0x3a9);
++ reg_tmp = reg_tmp & ~(0x1f);
++ reg_tmp = reg_tmp | 0x2;
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x3a9, reg_tmp);
++
++ /* Restore CR to default */
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, RG_185, orig_1e_185);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x100, orig_1f_100);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x11, orig_1e_11);
++ tc_phy_write_dev_reg(ds, PHY0, 0x1e, 0x00e1, orig_1e_e1);
++#endif
++ return 0;
++}
++
++static void rx_dc_offset(struct dsa_switch *ds, u8 phyaddr)
++{
++ pr_info("PORT %d RX_DC_OFFSET\n", phyaddr);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x96, 0x8000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x37, 0x3);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x107, 0x4000);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x171, 0x1e5);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x39, 0x200f);
++ udelay(40);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x39, 0x000f);
++ udelay(40);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1e, 0x171, 0x65);
++}
++
++static void check_rx_dc_offset_pair_a(struct dsa_switch *ds, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x114f);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairA output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1142);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairA output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairA RX_DC_OFFSET error");
++}
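++
++/*
++ * The four check_rx_dc_offset_pair_* helpers read back an 8-bit DC offset via
++ * debug register 1f_1a. Bit 7 set means a negative value, so the code takes
++ * the two's complement before comparing the magnitude against 4, e.g.
++ * 0xfd -> 0x03 (pass) while 0xf0 -> 0x10 (reported as an RX_DC_OFFSET error).
++ */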
++
++static void check_rx_dc_offset_pair_b(struct dsa_switch *ds, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1151);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairB output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1143);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairB output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairB RX_DC_OFFSET error");
++}
++
++static void check_rx_dc_offset_pair_c(struct dsa_switch *ds, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1153);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairC output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1144);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairC output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairC RX_DC_OFFSET error");
++}
++
++static void check_rx_dc_offset_pair_d(struct dsa_switch *ds, u8 phyaddr)
++{
++ u32 reg_tmp;
++
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1155);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("before pairD output = %x\n", reg_tmp);
++ udelay(40);
++ tc_phy_write_dev_reg(ds, phyaddr, 0x1f, 0x15, (phyaddr << 13) | 0x1145);
++ udelay(40);
++ reg_tmp = tc_phy_read_dev_reg(ds, phyaddr, 0x1f, 0x1a);
++ reg_tmp = reg_tmp & 0xff;
++ pr_info("after pairD output = %x\n", reg_tmp);
++ if ((reg_tmp & 0x80) != 0)
++ reg_tmp = (~reg_tmp) + 1;
++ if ((reg_tmp & 0xff) >4)
++ pr_info("pairD RX_DC_OFFSET error");
++}
++
++/* 12 registers for TX_MLT3 waveform tuning.
++ * 012 345 678 9ab
++ * 1 __
++ * _/ \_
++ * 0_/ \
++ * \_ _/
++ * -1 \__/
++ */
++static void mt7531_phy_100m_eye_diag_setting(struct dsa_switch *ds, u32 port)
++{
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x0, 0x187);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x1, 0x1c9);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x2, 0x1c6);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x3, 0x182);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x4, 0x208);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x5, 0x205);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x6, 0x384);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x7, 0x3cb);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x8, 0x3c4);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0x9, 0x30a);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0xa, 0x00b);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_TX_MLT3_BASE + 0xb, 0x002);
++}
++
++static void mt7531_phy_setting(struct dsa_switch *ds)
++{
++ int i;
++ u32 val;
++
++ for (i = 0; i < MT7531_NUM_PHYS; i++) {
++ mt7531_phy_100m_eye_diag_setting(ds, i);
++
++ /* Enable HW auto downshift */
++ switch_phy_write(ds, i, 0x1f, 0x1);
++ val = switch_phy_read(ds, i, PHY_EXT_REG_14);
++ val |= PHY_EN_DOWN_SHFIT;
++ switch_phy_write(ds, i, PHY_EXT_REG_14, val);
++
++ /* Decrease SlvDPSready time */
++ val = mt753x_tr_read(ds, i, PMA_CH, PMA_NOD, PMA_17);
++ val &= ~SLV_DSP_READY_TIME_M;
++ val |= 0xc << SLV_DSP_READY_TIME_S;
++ mt753x_tr_write(ds, i, PMA_CH, PMA_NOD, PMA_17, val);
++
++ /* Enable Random Update Mechanism */
++ val = mt753x_tr_read(ds, i, PMA_CH, PMA_NOD, PMA_18);
++ val |= ENABLE_RANDOM_UPDATE_TRIGGER;
++ mt753x_tr_write(ds, i, PMA_CH, PMA_NOD, PMA_18, val);
++
++ /* PHY link down power saving enable */
++ val = switch_phy_read(ds, i, PHY_EXT_REG_17);
++ val |= PHY_LINKDOWN_POWER_SAVING_EN;
++ switch_phy_write(ds, i, PHY_EXT_REG_17, val);
++
++ val = tc_phy_read_dev_reg(ds, i, PHY_DEV1E, PHY_DEV1E_REG_0C6);
++ val &= ~PHY_POWER_SAVING_M;
++ val |= PHY_POWER_SAVING_TX << PHY_POWER_SAVING_S;
++ tc_phy_write_dev_reg(ds, i, PHY_DEV1E, PHY_DEV1E_REG_0C6, val);
++
++ /* Timing Recovery for GbE slave mode */
++ mt753x_tr_write(ds, i, PMA_CH, PMA_NOD, PMA_01, 0x6fb90a);
++ mt753x_tr_write(ds, i, DSP_CH, DSP_NOD, DSP_06, 0x2ebaef);
++ val = tc_phy_read_dev_reg(ds, i, PHY_DEV1E, PHY_DEV1E_REG_234);
++ val |= TR_OPEN_LOOP_EN;
++ tc_phy_write_dev_reg(ds, i, PHY_DEV1E, PHY_DEV1E_REG_234, val);
++
++ /* Enable Asymmetric Pause Capability */
++ val = switch_phy_read(ds, i, MII_ADVERTISE);
++ val |= ADVERTISE_PAUSE_ASYM;
++ switch_phy_write(ds, i, MII_ADVERTISE, val);
++ }
++}
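++
++/*
++ * The mt753x_tr_read()/mt753x_tr_write() helpers used above access the PHY
++ * through the token-ring window (page PHY_TR_PAGE, 0x52b5). Going by the
++ * PHY_TR_* field definitions in mt7531_phy.h, the control word is presumably
++ * composed roughly as
++ *   PHY_TR_PKT_XMT_STA | (rd/wr << PHY_TR_WR_S) | (ch << PHY_TR_CH_ADDR_S) |
++ *   (node << PHY_TR_NODE_ADDR_S) | (reg << PHY_TR_DATA_ADDR_S)
++ * with the 24-bit payload split over PHY_TR_LOW_DATA/PHY_TR_HIGH_DATA using
++ * PHY_TR_LOW_VAL()/PHY_TR_HIGH_VAL().
++ */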
++
++static void mt7531_adjust_line_driving(struct dsa_switch *ds, u32 port)
++{
++ /* For ADC timing margin window for LDO calibration */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, RXADC_LDO_CONTROL_2, 0x2222);
++
++ /* Adjust AD sample timing */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, RXADC_CONTROL_3, 0x4444);
++
++ /* Adjust Line driver current for different mode */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1F, TXVLD_DA_271, 0x2ca5);
++
++ /* Adjust Line driver current for different mode */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1F, TXVLD_DA_272, 0xc6b);
++
++ /* Adjust Line driver gain for 10BT from 1000BT calibration result */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1F, TXVLD_DA_273, 0x3000);
++
++ /* Adjust RX Echo path filter */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_0FE, 0x2);
++
++ /* Adjust RX HVGA bias current */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_41, 0x3333);
++
++ /* Adjust TX class AB driver 1 */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1F, PHY_DEV1F_REG_268, 0x384);
++
++ /* Adjust TX class AB driver 2 */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1F, PHY_DEV1F_REG_269, 0x1114);
++
++ /* Adjust DAC delay for TX Pairs */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_13, 0x404);
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_14, 0x404);
++
++ /* Adjust DAC digital delay for TX Delay */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1F, PHY_DEV1F_REG_44, 0xc0);
++
++ /* Adjust Line driver compensation cap for stability concern due to
++ * increase current.
++ */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1F, PHY_DEV1F_REG_26A, 0x3333);
++}
++
++static void mt7531_eee_setting(struct dsa_switch *ds, u32 port)
++{
++ u32 val;
++
++ /* Disable EEE */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV07, PHY_DEV07_REG_03C, 0);
++
++ /* Disable generate signal to clear the scramble_lock when lpi mode */
++ val = tc_phy_read_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_189);
++ val &= ~DESCRAMBLER_CLEAR_EN;
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_189, val);
++
++ /* Roll back EEE Slave Mode */
++ tc_phy_write_dev_reg(ds, port, 0x1e, 0x2d1, 0);
++ mt753x_tr_write(ds, port, DSP_CH, DSP_NOD, DSP_08, 0x1b);
++ mt753x_tr_write(ds, port, DSP_CH, DSP_NOD, DSP_0f, 0);
++ mt753x_tr_write(ds, port, DSP_CH, DSP_NOD, DSP_10, 0x5000);
++
++ /* Adjust 100_mse_threshold */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_123, 0xffff);
++
++ /* Disable mcc */
++ tc_phy_write_dev_reg(ds, port, PHY_DEV1E, PHY_DEV1E_REG_A6, 0x300);
++}
++
++int mt7531_phy_setup(struct dsa_switch *ds)
++{
++ int ret = 0;
++ int i;
++
++ mt7531_phy_setting(ds);
++
++ for (i = 0; i < MT7531_NUM_PHYS; i++) {
++ mt7531_adjust_line_driving(ds, i);
++ mt7531_eee_setting(ds, i);
++ }
++
++ /*for (i = 0; i < MT7531_NUM_PHYS; i++) {
++ ret = phy_calibration(ds, i);
++
++ rx_dc_offset(ds, i);
++ check_rx_dc_offset_pair_a(ds, i);
++ check_rx_dc_offset_pair_b(ds, i);
++ check_rx_dc_offset_pair_c(ds, i);
++ check_rx_dc_offset_pair_d(ds, i);
++
++ switch_phy_write(ds, i, 0, 0x1040);
++ }*/
++
++ return ret;
++}
+Index: linux-5.4.124/drivers/net/dsa/mt7531_phy.h
+===================================================================
+--- /dev/null
++++ linux-5.4.124/drivers/net/dsa/mt7531_phy.h
+@@ -0,0 +1,262 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Register definitions for MediaTek MT753x Gigabit switches
++ *
++ * Copyright (C) 2018 MediaTek Inc. All Rights Reserved.
++ *
++ * Author: Weijie Gao <weijie.gao@mediatek.com>
++ */
++
++#ifndef _MT753X_PHY_H_
++#define _MT753X_PHY_H_
++
++#include <linux/bitops.h>
++
++/*phy calibration use*/
++#define DEV_1E 0x1E
++/*global device 0x1f, always set P0*/
++#define DEV_1F 0x1F
++
++
++/************IEXT/REXT CAL***************/
++/* bits range: for example BITS(16,23) = 0xFF0000*/
++#define BITS(m, n) (~(BIT(m) - 1) & ((BIT(n) - 1) | BIT(n)))
++#define ANACAL_INIT 0x01
++#define ANACAL_ERROR 0xFD
++#define ANACAL_SATURATION 0xFE
++#define ANACAL_FINISH 0xFF
++#define ANACAL_PAIR_A 0
++#define ANACAL_PAIR_B 1
++#define ANACAL_PAIR_C 2
++#define ANACAL_PAIR_D 3
++#define DAC_IN_0V 0x00
++#define DAC_IN_2V 0xf0
++#define TX_AMP_OFFSET_0MV 0x20
++#define TX_AMP_OFFSET_VALID_BITS 6
++
++#define R0 0
++#define PHY0 0
++#define PHY1 1
++#define PHY2 2
++#define PHY3 3
++#define PHY4 4
++#define ANA_TEST_MODE BITS(8, 15)
++#define TST_TCLK_SEL BITS(6, 7)
++#define ANA_TEST_VGA_RG 0x100
++
++#define FORCE_MDI_CROSS_OVER BITS(3, 4)
++#define T10_TEST_CTL_RG 0x145
++#define RG_185 0x185
++#define RG_TX_SLEW BIT(0)
++#define ANA_CAL_0 0xdb
++#define RG_CAL_CKINV BIT(12)
++#define RG_ANA_CALEN BIT(8)
++#define RG_REXT_CALEN BIT(4)
++#define RG_ZCALEN_A BIT(0)
++#define ANA_CAL_1 0xdc
++#define RG_ZCALEN_B BIT(12)
++#define RG_ZCALEN_C BIT(8)
++#define RG_ZCALEN_D BIT(4)
++#define RG_TXVOS_CALEN BIT(0)
++#define ANA_CAL_6 0xe1
++#define RG_CAL_REFSEL BIT(4)
++#define RG_CAL_COMP_PWD BIT(0)
++#define ANA_CAL_5 0xe0
++#define RG_REXT_TRIM BITS(8, 13)
++#define RG_ZCAL_CTRL BITS(0, 5)
++#define RG_17A 0x17a
++#define AD_CAL_COMP_OUT BIT(8)
++#define RG_17B 0x17b
++#define AD_CAL_CLK BIT(0)
++#define RG_17C 0x17c
++#define DA_CALIN_FLAG BIT(0)
++/************R50 CAL****************************/
++#define RG_174 0x174
++#define RG_R50OHM_RSEL_TX_A_EN BIT(15)
++#define CR_R50OHM_RSEL_TX_A BITS(8, 14)
++#define RG_R50OHM_RSEL_TX_B_EN BIT(7)
++#define CR_R50OHM_RSEL_TX_B BITS(0, 6)
++#define RG_175 0x175
++#define RG_R50OHM_RSEL_TX_C_EN BIT(15)
++#define CR_R50OHM_RSEL_TX_C BITS(8, 14)
++#define RG_R50OHM_RSEL_TX_D_EN BIT(7)
++#define CR_R50OHM_RSEL_TX_D BITS(0, 6)
++/**********TX offset Calibration***************************/
++#define RG_95 0x96
++#define BYPASS_TX_OFFSET_CAL BIT(15)
++#define RG_3E 0x3e
++#define BYPASS_PD_TXVLD_A BIT(15)
++#define BYPASS_PD_TXVLD_B BIT(14)
++#define BYPASS_PD_TXVLD_C BIT(13)
++#define BYPASS_PD_TXVLD_D BIT(12)
++#define BYPASS_PD_TX_10M BIT(11)
++#define POWER_DOWN_TXVLD_A BIT(7)
++#define POWER_DOWN_TXVLD_B BIT(6)
++#define POWER_DOWN_TXVLD_C BIT(5)
++#define POWER_DOWN_TXVLD_D BIT(4)
++#define POWER_DOWN_TX_10M BIT(3)
++#define RG_DD 0xdd
++#define RG_TXG_CALEN_A BIT(12)
++#define RG_TXG_CALEN_B BIT(8)
++#define RG_TXG_CALEN_C BIT(4)
++#define RG_TXG_CALEN_D BIT(0)
++#define RG_17D 0x17D
++#define FORCE_DASN_DAC_IN0_A BIT(15)
++#define DASN_DAC_IN0_A BITS(0, 9)
++#define RG_17E 0x17E
++#define FORCE_DASN_DAC_IN0_B BIT(15)
++#define DASN_DAC_IN0_B BITS(0, 9)
++#define RG_17F 0x17F
++
++#define FORCE_DASN_DAC_IN0_C BIT(15)
++#define DASN_DAC_IN0_C BITS(0, 9)
++#define RG_180 0x180
++#define FORCE_DASN_DAC_IN0_D BIT(15)
++#define DASN_DAC_IN0_D BITS(0, 9)
++
++#define RG_181 0x181
++#define FORCE_DASN_DAC_IN1_A BIT(15)
++#define DASN_DAC_IN1_A BITS(0, 9)
++#define RG_182 0x182
++#define FORCE_DASN_DAC_IN1_B BIT(15)
++#define DASN_DAC_IN1_B BITS(0, 9)
++#define RG_183 0x183
++#define FORCE_DASN_DAC_IN1_C BIT(15)
++#define DASN_DAC_IN1_C BITS(0, 9)
++#define RG_184 0x184
++#define FORCE_DASN_DAC_IN1_D BIT(15)
++#define DASN_DAC_IN1_D BITS(0, 9)
++#define RG_172 0x172
++#define CR_TX_AMP_OFFSET_A BITS(8, 13)
++#define CR_TX_AMP_OFFSET_B BITS(0, 5)
++#define RG_173 0x173
++#define CR_TX_AMP_OFFSET_C BITS(8, 13)
++#define CR_TX_AMP_OFFSET_D BITS(0, 5)
++/**********TX Amp Calibration ***************************/
++#define RG_12 0x12
++#define DA_TX_I2MPB_A_GBE BITS(10, 15)
++#define RG_17 0x17
++#define DA_TX_I2MPB_B_GBE BITS(8, 13)
++#define RG_19 0x19
++#define DA_TX_I2MPB_C_GBE BITS(8, 13)
++#define RG_21 0x21
++#define DA_TX_I2MPB_D_GBE BITS(8, 13)
++#define TX_AMP_MAX 0x3f
++#define TX_AMP_MAX_OFFSET 0xb
++#define TX_AMP_HIGHEST_TS ((TX_AMP_MAX) + 3)
++#define TX_AMP_LOWEST_TS (0 - 3)
++#define TX_AMP_HIGH_TS (TX_AMP_MAX)
++#define TX_AMP_LOW_TS 0
++
++/* PHY Extend Register 0x14 bitmap of define */
++#define PHY_EXT_REG_14 0x14
++
++/* Fields of PHY_EXT_REG_14 */
++#define PHY_EN_DOWN_SHFIT BIT(4)
++
++/* PHY Extend Register 0x17 bitmap of define */
++#define PHY_EXT_REG_17 0x17
++
++/* Fields of PHY_EXT_REG_17 */
++#define PHY_LINKDOWN_POWER_SAVING_EN BIT(4)
++
++/* PHY PMA Register 0x17 bitmap of define */
++#define SLV_DSP_READY_TIME_S 15
++#define SLV_DSP_READY_TIME_M (0xff << SLV_DSP_READY_TIME_S)
++
++/* PHY PMA Register 0x18 bitmap of define */
++#define ENABLE_RANDOM_UPDATE_TRIGGER BIT(8)
++
++/* PHY EEE Register bitmap of define */
++#define PHY_DEV07 0x07
++#define PHY_DEV07_REG_03C 0x3c
++
++/* PHY DEV 0x1e Register bitmap of define */
++#define PHY_DEV1E 0x1e
++#define PHY_DEV1F 0x1f
++
++/* Proprietory Control Register of Internal Phy device 0x1e */
++#define PHY_TX_MLT3_BASE 0x0
++#define PHY_DEV1E_REG_13 0x13
++#define PHY_DEV1E_REG_14 0x14
++#define PHY_DEV1E_REG_41 0x41
++#define PHY_DEV1E_REG_A6 0xa6
++#define RXADC_CONTROL_3 0xc2
++#define PHY_DEV1E_REG_0C6 0xc6
++#define RXADC_LDO_CONTROL_2 0xd3
++#define PHY_DEV1E_REG_0FE 0xfe
++#define PHY_DEV1E_REG_123 0x123
++#define PHY_DEV1E_REG_189 0x189
++#define PHY_DEV1E_REG_234 0x234
++
++/* Proprietory Control Register of Internal Phy device 0x1f */
++#define PHY_DEV1F_REG_44 0x44
++#define PHY_DEV1F_REG_268 0x268
++#define PHY_DEV1F_REG_269 0x269
++#define PHY_DEV1F_REG_26A 0x26A
++#define TXVLD_DA_271 0x271
++#define TXVLD_DA_272 0x272
++#define TXVLD_DA_273 0x273
++
++/* Fields of PHY_DEV1E_REG_0C6 */
++#define PHY_POWER_SAVING_S 8
++#define PHY_POWER_SAVING_M 0x300
++#define PHY_POWER_SAVING_TX 0x0
++
++/* Fields of PHY_DEV1E_REG_189 */
++#define DESCRAMBLER_CLEAR_EN 0x1
++
++/* Fields of PHY_DEV1E_REG_234 */
++#define TR_OPEN_LOOP_EN BIT(0)
++
++/* Internal GPHY Page Control Register */
++#define PHY_CL22_PAGE_CTRL 0x1f
++#define PHY_TR_PAGE 0x52b5
++
++/* Internal GPHY Token Ring Access Registers */
++#define PHY_TR_CTRL 0x10
++#define PHY_TR_LOW_DATA 0x11
++#define PHY_TR_HIGH_DATA 0x12
++
++/* Fields of PHY_TR_CTRL */
++#define PHY_TR_PKT_XMT_STA BIT(15)
++#define PHY_TR_WR_S 13
++#define PHY_TR_CH_ADDR_S 11
++#define PHY_TR_NODE_ADDR_S 7
++#define PHY_TR_DATA_ADDR_S 1
++
++enum phy_tr_wr {
++ PHY_TR_WRITE = 0,
++ PHY_TR_READ = 1,
++};
++
++/* Helper macro for GPHY Token Ring Access */
++#define PHY_TR_LOW_VAL(x) ((x) & 0xffff)
++#define PHY_TR_HIGH_VAL(x) (((x) & 0xff0000) >> 16)
++
++/* Token Ring Channels */
++#define PMA_CH 0x1
++#define DSP_CH 0x2
++
++/* Token Ring Nodes */
++#define PMA_NOD 0xf
++#define DSP_NOD 0xd
++
++/* Token Ring register range */
++enum tr_pma_reg_addr {
++ PMA_MIN = 0x0,
++ PMA_01 = 0x1,
++ PMA_17 = 0x17,
++ PMA_18 = 0x18,
++ PMA_MAX = 0x3d,
++};
++
++enum tr_dsp_reg_addr {
++ DSP_MIN = 0x0,
++ DSP_06 = 0x6,
++ DSP_08 = 0x8,
++ DSP_0f = 0xf,
++ DSP_10 = 0x10,
++ DSP_MAX = 0x3e,
++};
++#endif /* _MT753X_PHY_H_ */
+Index: linux-5.4.124/drivers/net/dsa/Makefile
+===================================================================
+--- linux-5.4.124.orig/drivers/net/dsa/Makefile
++++ linux-5.4.124/drivers/net/dsa/Makefile
+@@ -6,7 +6,8 @@ ifdef CONFIG_NET_DSA_LOOP
+ obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
+ endif
+ obj-$(CONFIG_NET_DSA_LANTIQ_GSWIP) += lantiq_gswip.o
+-obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
++obj-$(CONFIG_NET_DSA_MT7530) += mt7530-dsa.o
++mt7530-dsa-objs := mt7530.o mt7531_phy.o
+ obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+ obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
+ obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/743-add-mediatek-ge-gphy-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/743-add-mediatek-ge-gphy-support.patch
new file mode 100644
index 0000000..718f324
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/743-add-mediatek-ge-gphy-support.patch
@@ -0,0 +1,24 @@
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -512,6 +512,11 @@ config MESON_GXL_PHY
+ ---help---
+ Currently has a driver for the Amlogic Meson GXL Internal PHY
+
++config MEDIATEK_GE_PHY
++ tristate "MediaTek Gigabit Ethernet PHYs"
++ help
++ Supports the MediaTek Gigabit Ethernet PHYs.
++
+ config MICREL_PHY
+ tristate "Micrel PHYs"
+ ---help---
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -93,6 +93,7 @@ obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c
+ obj-$(CONFIG_LXT_PHY) += lxt.o
+ obj-$(CONFIG_MARVELL_PHY) += marvell.o
+ obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
++obj-$(CONFIG_MEDIATEK_GE_PHY) += mediatek-ge.o
+ obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
+ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
+ obj-$(CONFIG_MICREL_PHY) += micrel.o
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/744-en8801s-gphy-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/744-en8801s-gphy-support.patch
new file mode 100644
index 0000000..23a0333
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/744-en8801s-gphy-support.patch
@@ -0,0 +1,585 @@
+Index: drivers/net/phy/Kconfig
+===================================================================
+--- a/drivers/net/phy/Kconfig
++++ b/drivers/net/phy/Kconfig
+@@ -345,6 +345,11 @@ config SFP
+ depends on HWMON || HWMON=n
+ select MDIO_I2C
+
++config AIROHA_EN8801S_PHY
++ tristate "Drivers for Airoha EN8801S Gigabit PHYs"
++ ---help---
++ Currently supports the Airoha EN8801S PHY.
++
+ config ADIN_PHY
+ tristate "Analog Devices Industrial Ethernet PHYs"
+ help
+Index: drivers/net/phy/Makefile
+===================================================================
+--- a/drivers/net/phy/Makefile
++++ b/drivers/net/phy/Makefile
+@@ -67,6 +67,7 @@ aquantia-objs += aquantia_main.o
+ ifdef CONFIG_HWMON
+ aquantia-objs += aquantia_hwmon.o
+ endif
++obj-$(CONFIG_AIROHA_EN8801S_PHY) += en8801s.o
+ obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
+ obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
+ obj-$(CONFIG_AT803X_PHY) += at803x.o
+Index: drivers/net/phy/en8801s.c
+===================================================================
+--- /dev/null
++++ b/drivers/net/phy/en8801s.c
+@@ -0,0 +1,394 @@
++// SPDX-License-Identifier: GPL-2.0
++/* FILE NAME: en8801s.c
++ * PURPOSE:
++ * EN8801S phy driver for Linux
++ * NOTES:
++ *
++ */
++
++/* INCLUDE FILE DECLARATIONS
++ */
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/unistd.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mii.h>
++#include <linux/ethtool.h>
++#include <linux/phy.h>
++#include <linux/delay.h>
++
++//#include <linux/bitfield.h>
++#include <linux/uaccess.h>
++#include <linux/version.h>
++
++#include "en8801s.h"
++
++/* #define TEST_BOARD */
++
++MODULE_DESCRIPTION("Airoha EN8801S PHY drivers");
++MODULE_AUTHOR("Airoha");
++MODULE_LICENSE("GPL");
++
++static int preSpeed = 0;
++/************************************************************************
++* F U N C T I O N S
++************************************************************************/
++unsigned int mdiobus_write45(struct mii_bus *bus, u32 port, u32 devad, u32 reg, u16 val)
++{
++ mdiobus_write(bus, port, MII_MMD_ACC_CTL_REG, devad);
++ mdiobus_write(bus, port, MII_MMD_ADDR_DATA_REG, reg);
++ mdiobus_write(bus, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
++ mdiobus_write(bus, port, MII_MMD_ADDR_DATA_REG, val);
++ return 0;
++}
++
++unsigned int mdiobus_read45(struct mii_bus *bus, u32 port, u32 devad, u32 reg, u32 *read_data)
++{
++ mdiobus_write(bus, port, MII_MMD_ACC_CTL_REG, devad);
++ mdiobus_write(bus, port, MII_MMD_ADDR_DATA_REG, reg);
++ mdiobus_write(bus, port, MII_MMD_ACC_CTL_REG, MMD_OP_MODE_DATA | devad);
++ *read_data = mdiobus_read(bus, port, MII_MMD_ADDR_DATA_REG);
++ return 0;
++}
++
++/* Airoha MII read function */
++unsigned int ecnt_mii_cl22_read(struct mii_bus *ebus, unsigned int phy_addr,unsigned int phy_register,unsigned int *read_data)
++{
++ *read_data = mdiobus_read(ebus, phy_addr, phy_register);
++ return 0;
++}
++
++/* Airoha MII write function */
++unsigned int ecnt_mii_cl22_write(struct mii_bus *ebus, unsigned int phy_addr, unsigned int phy_register,unsigned int write_data)
++{
++ mdiobus_write(ebus, phy_addr, phy_register, write_data);
++ return 0;
++}
++
++/* EN8801 PBUS write function */
++void En8801_PbusRegWr(struct mii_bus *ebus, unsigned long pbus_address, unsigned long pbus_data)
++{
++ ecnt_mii_cl22_write(ebus, EN8801S_PBUS_PHY_ID, 0x1F, (unsigned int)(pbus_address >> 6));
++ ecnt_mii_cl22_write(ebus, EN8801S_PBUS_PHY_ID, (unsigned int)((pbus_address >> 2) & 0xf), (unsigned int)(pbus_data & 0xFFFF));
++ ecnt_mii_cl22_write(ebus, EN8801S_PBUS_PHY_ID, 0x10, (unsigned int)(pbus_data >> 16));
++ return;
++}
++
++/* EN8801 PBUS read function */
++unsigned long En8801_PbusRegRd(struct mii_bus *ebus, unsigned long pbus_address)
++{
++ unsigned long pbus_data;
++ unsigned int pbus_data_low, pbus_data_high;
++
++ ecnt_mii_cl22_write(ebus, EN8801S_PBUS_PHY_ID, 0x1F, (unsigned int)(pbus_address >> 6));
++ ecnt_mii_cl22_read(ebus, EN8801S_PBUS_PHY_ID, (unsigned int)((pbus_address >> 2) & 0xf), &pbus_data_low);
++ ecnt_mii_cl22_read(ebus, EN8801S_PBUS_PHY_ID, 0x10, &pbus_data_high);
++ pbus_data = (pbus_data_high << 16) + pbus_data_low;
++ return pbus_data;
++}
++
++/* Variants taking an explicit PBUS ID instead of the default PBUS_PHY_ID */
++/* EN8801 PBUS write function */
++void En8801_varPbusRegWr(struct mii_bus *ebus, unsigned long pbus_id,unsigned long pbus_address, unsigned long pbus_data)
++{
++ ecnt_mii_cl22_write(ebus, pbus_id, 0x1F, (unsigned int)(pbus_address >> 6));
++ ecnt_mii_cl22_write(ebus, pbus_id, (unsigned int)((pbus_address >> 2) & 0xf), (unsigned int)(pbus_data & 0xFFFF));
++ ecnt_mii_cl22_write(ebus, pbus_id, 0x10, (unsigned int)(pbus_data >> 16));
++ return;
++}
++
++/* EN8801 PBUS read function */
++unsigned long En8801_varPbusRegRd(struct mii_bus *ebus, unsigned long pbus_id, unsigned long pbus_address)
++{
++ unsigned long pbus_data;
++ unsigned int pbus_data_low, pbus_data_high;
++
++ ecnt_mii_cl22_write(ebus, pbus_id, 0x1F, (unsigned int)(pbus_address >> 6));
++ ecnt_mii_cl22_read(ebus, pbus_id, (unsigned int)((pbus_address >> 2) & 0xf), &pbus_data_low);
++ ecnt_mii_cl22_read(ebus, pbus_id, 0x10, &pbus_data_high);
++ pbus_data = (pbus_data_high << 16) + pbus_data_low;
++ return pbus_data;
++}
++
++/* EN8801 Token Ring Write function */
++void En8801_TR_RegWr(struct mii_bus *ebus, unsigned long tr_address, unsigned long tr_data)
++{
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x1F, 0x52b5); /* page select */
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x11, (unsigned int)(tr_data & 0xffff));
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x12, (unsigned int)(tr_data >> 16));
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x10, (unsigned int)(tr_address | TrReg_WR));
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x1F, 0x0); /* page restore */
++ return;
++}
++
++/* EN8801 Token Ring Read function */
++unsigned long En8801_TR_RegRd(struct mii_bus *ebus, unsigned long tr_address)
++{
++ unsigned long tr_data;
++ unsigned int tr_data_low, tr_data_high;
++
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x1F, 0x52b5); /* page select */
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x10, (unsigned int)(tr_address | TrReg_RD));
++ ecnt_mii_cl22_read(ebus, EN8801S_MDIO_PHY_ID, 0x11, &tr_data_low);
++ ecnt_mii_cl22_read(ebus, EN8801S_MDIO_PHY_ID, 0x12, &tr_data_high);
++ ecnt_mii_cl22_write(ebus, EN8801S_MDIO_PHY_ID, 0x1F, 0x0); /* page restore */
++ tr_data = (tr_data_high << 16) + tr_data_low;
++ return tr_data;
++}
++
++static int en8801s_config_init(struct phy_device *phydev)
++{
++ gephy_all_REG_LpiReg1Ch GPHY_RG_LPI_1C;
++ gephy_all_REG_dev1Eh_reg324h GPHY_RG_1E_324;
++ gephy_all_REG_dev1Eh_reg012h GPHY_RG_1E_012;
++ gephy_all_REG_dev1Eh_reg017h GPHY_RG_1E_017;
++ unsigned long pbus_data;
++ unsigned int pbusAddress;
++ u32 reg_value;
++ int retry;
++ struct mii_bus *mbus;
++
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
++ mbus = phydev->bus;
++ #else
++ mbus = phydev->mdio.bus;
++ #endif
++
++ pbusAddress = EN8801S_PBUS_DEFAULT_ID;
++ retry = MAX_OUI_CHECK;
++ while(1)
++ {
++ pbus_data = En8801_varPbusRegRd(mbus, pbusAddress, EN8801S_RG_ETHER_PHY_OUI); /* PHY OUI */
++ if(EN8801S_PBUS_OUI == pbus_data)
++ {
++ pbus_data = En8801_varPbusRegRd(mbus, pbusAddress, EN8801S_RG_SMI_ADDR); /* SMI ADDR */
++ pbus_data = (pbus_data & 0xffff0000) | (unsigned long)(EN8801S_PBUS_PHY_ID << 8) | (unsigned long)(EN8801S_MDIO_PHY_ID );
++ printk("[Airoha] EN8801S SMI_ADDR=%lx (renew)\n", pbus_data);
++ En8801_varPbusRegWr(mbus, pbusAddress, EN8801S_RG_SMI_ADDR, pbus_data);
++ En8801_varPbusRegWr(mbus, EN8801S_PBUS_PHY_ID, EN8801S_RG_BUCK_CTL, 0x03);
++ mdelay(10);
++ break;
++ }
++ else
++ {
++ pbusAddress = EN8801S_PBUS_PHY_ID;
++ }
++ retry --;
++ if (0 == retry)
++ {
++ printk("[Airoha] EN8801S probe fail !\n");
++ return 0;
++ }
++ }
++
++ reg_value = (En8801_PbusRegRd(mbus, EN8801S_RG_LTR_CTL) & 0xfffffffc) | 0x10 | (EN8801S_RX_POLARITY << 1) | EN8801S_TX_POLARITY;
++ En8801_PbusRegWr(mbus, 0xcf8, reg_value);
++ mdelay(10);
++ reg_value &= 0xffffffef;
++ En8801_PbusRegWr(mbus, 0xcf8, reg_value);
++
++ retry = MAX_RETRY;
++ while (1)
++ {
++ mdelay(10);
++ reg_value = phy_read(phydev, MII_PHYSID2);
++ if (reg_value == EN8801S_PHY_ID2)
++ {
++ break; /* wait GPHY ready */
++ }
++ retry--;
++ if (0 == retry)
++ {
++ printk("[Airoha] EN8801S initialize fail !\n");
++ return 0;
++ }
++ }
++ /* Software Reset PHY */
++ reg_value = phy_read(phydev, MII_BMCR);
++ reg_value |= BMCR_RESET;
++ phy_write(phydev, MII_BMCR, reg_value);
++ retry = MAX_RETRY;
++ do
++ {
++ mdelay(10);
++ reg_value = phy_read(phydev, MII_BMCR);
++ retry--;
++ if (0 == retry)
++ {
++ printk("[Airoha] EN8801S reset fail !\n");
++ return 0;
++ }
++ } while (reg_value & BMCR_RESET);
++
++ En8801_PbusRegWr(mbus, 0x0600, 0x0c000c00);
++ En8801_PbusRegWr(mbus, 0x10, 0xD801);
++ En8801_PbusRegWr(mbus, 0x0, 0x9140);
++
++ En8801_PbusRegWr(mbus, 0x0A14, 0x0003);
++ En8801_PbusRegWr(mbus, 0x0600, 0x0c000c00);
++ /* Set FCM control */
++ En8801_PbusRegWr(mbus, 0x1404, 0x004b);
++ En8801_PbusRegWr(mbus, 0x140c, 0x0007);
++ /* Set GPHY performance */
++ /* Token Ring */
++ En8801_TR_RegWr(mbus, RgAddr_PMA_01h, 0x6FB90A);
++ En8801_TR_RegWr(mbus, RgAddr_PMA_18h, 0x0E2F00);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_06h, 0x2EBAEF);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_11h, 0x040001);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_03h, 0x000004);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_1Ch, 0x003210);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_14h, 0x00024A);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_0Ch, 0x00704D);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_0Dh, 0x02314F);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_10h, 0x005010);
++ En8801_TR_RegWr(mbus, RgAddr_DSPF_0Fh, 0x003028);
++ En8801_TR_RegWr(mbus, RgAddr_TR_26h, 0x444444);
++ En8801_TR_RegWr(mbus, RgAddr_R1000DEC_15h,0x0055A0);
++ /* CL22 & CL45 */
++ phy_write(phydev, 0x1f, 0x03);
++ GPHY_RG_LPI_1C.DATA = phy_read(phydev, RgAddr_LpiReg1Ch);
++ GPHY_RG_LPI_1C.DataBitField.smi_deton_th = 0x0C;
++ phy_write(phydev, RgAddr_LpiReg1Ch, GPHY_RG_LPI_1C.DATA);
++ phy_write(phydev, 0x1f, 0x0);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x122, 0xffff);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x234, 0x0180);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x238, 0x0120);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x120, 0x9014);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x239, 0x0117);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x14A, 0xEE20);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x19B, 0x0111);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1F, 0x268, 0x07F4);
++
++ mdiobus_read45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x324, &reg_value);
++ GPHY_RG_1E_324.DATA=(u16)reg_value;
++ GPHY_RG_1E_324.DataBitField.smi_det_deglitch_off = 0;
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x324, (u32)GPHY_RG_1E_324.DATA);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x19E, 0xC2);
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x013, 0x0);
++
++ /* EFUSE */
++ En8801_PbusRegWr(mbus, 0x1C08, 0x40000040);
++ retry = MAX_RETRY;
++ while (0 != retry)
++ {
++ mdelay(1);
++ reg_value = En8801_PbusRegRd(mbus, 0x1C08);
++ if ((reg_value & (1 << 30)) == 0)
++ {
++ break;
++ }
++ retry--;
++ }
++ reg_value = En8801_PbusRegRd(mbus, 0x1C38); /* RAW#2 */
++ GPHY_RG_1E_012.DataBitField.da_tx_i2mpb_a_tbt = reg_value & 0x03f;
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x12, (u32)GPHY_RG_1E_012.DATA);
++ GPHY_RG_1E_017.DataBitField.da_tx_i2mpb_b_tbt = (reg_value >> 8) & 0x03f;
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x17, (u32)GPHY_RG_1E_017.DATA);
++
++ En8801_PbusRegWr(mbus, 0x1C08, 0x40400040);
++ retry = MAX_RETRY;
++ while (0 != retry)
++ {
++ mdelay(1);
++ reg_value = En8801_PbusRegRd(mbus, 0x1C08);
++ if ((reg_value & (1 << 30)) == 0)
++ {
++ break;
++ }
++ retry--;
++ }
++ reg_value = En8801_PbusRegRd(mbus, 0x1C30); /* RAW#16 */
++ GPHY_RG_1E_324.DataBitField.smi_det_deglitch_off = (reg_value >> 12) & 0x01;
++ mdiobus_write45(mbus, EN8801S_MDIO_PHY_ID, 0x1E, 0x324, (u32)GPHY_RG_1E_324.DATA);
++
++ printk("[Airoha] EN8801S initialize OK ! (1.0.5)\n");
++ return 0;
++}
++
++static int en8801s_read_status(struct phy_device *phydev)
++{
++ int ret;
++ struct mii_bus *mbus;
++
++ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0))
++ mbus = phydev->bus;
++ #else
++ mbus = phydev->mdio.bus;
++ #endif
++
++ ret = genphy_read_status(phydev);
++ if (LINK_DOWN == phydev->link) preSpeed = 0;
++
++ if ((preSpeed != phydev->speed) && (LINK_UP == phydev->link))
++ {
++ preSpeed = phydev->speed;
++ En8801_PbusRegWr(mbus, 0x0600, 0x0c000c00);
++ if (SPEED_1000 == preSpeed)
++ {
++ En8801_PbusRegWr(mbus, 0x10, 0xD801);
++ En8801_PbusRegWr(mbus, 0x0, 0x9140);
++
++ En8801_PbusRegWr(mbus, 0x0A14, 0x0003);
++ En8801_PbusRegWr(mbus, 0x0600, 0x0c000c00);
++ mdelay(2); /* delay 2 ms */
++ En8801_PbusRegWr(mbus, 0x1404, 0x004b);
++ En8801_PbusRegWr(mbus, 0x140c, 0x0007);
++ }
++ else if (SPEED_100 == preSpeed)
++ {
++ En8801_PbusRegWr(mbus, 0x10, 0xD401);
++ En8801_PbusRegWr(mbus, 0x0, 0x9140);
++
++ En8801_PbusRegWr(mbus, 0x0A14, 0x0007);
++ En8801_PbusRegWr(mbus, 0x0600, 0x0c11);
++ mdelay(2); /* delay 2 ms */
++ En8801_PbusRegWr(mbus, 0x1404, 0x0027);
++ En8801_PbusRegWr(mbus, 0x140c, 0x0007);
++ }
++ else if (SPEED_10 == preSpeed)
++ {
++ En8801_PbusRegWr(mbus, 0x10, 0xD001);
++ En8801_PbusRegWr(mbus, 0x0, 0x9140);
++
++ En8801_PbusRegWr(mbus, 0x0A14, 0x000b);
++ En8801_PbusRegWr(mbus, 0x0600, 0x0c11);
++ mdelay(2); /* delay 2 ms */
++ En8801_PbusRegWr(mbus, 0x1404, 0x0027);
++ En8801_PbusRegWr(mbus, 0x140c, 0x0007);
++ }
++ }
++ return ret;
++}
++
++static struct phy_driver Airoha_driver[] = {
++{
++ .phy_id = EN8801S_PHY_ID,
++ .name = "Airoha EN8801S",
++ .phy_id_mask = 0x0ffffff0,
++ .features = PHY_GBIT_FEATURES,
++ .config_init = en8801s_config_init,
++ .config_aneg = genphy_config_aneg,
++ .read_status = en8801s_read_status,
++ .suspend = genphy_suspend,
++ .resume = genphy_resume,
++} };
++
++module_phy_driver(Airoha_driver);
++
++static struct mdio_device_id __maybe_unused Airoha_tbl[] = {
++ { EN8801S_PHY_ID, 0x0ffffff0 },
++ { }
++};
++
++MODULE_DEVICE_TABLE(mdio, Airoha_tbl);
++MODULE_LICENSE("GPL");
+Index: drivers/net/phy/en8801s.h
+===================================================================
+--- /dev/null
++++ b/drivers/net/phy/en8801s.h
+@@ -0,0 +1,153 @@
++// SPDX-License-Identifier: GPL-2.0
++/* FILE NAME: en8801s.h
++ * PURPOSE:
++ * Define EN8801S driver function
++ *
++ * NOTES:
++ *
++ */
++
++#ifndef __AIROHA_H
++#define __AIROHA_H
++
++/* NAMING DECLARATIONS
++ */
++#define PHY_ADDRESS_RANGE 0x18
++#define EN8801S_PBUS_DEFAULT_ID 0x1e
++#define EN8801S_MDIO_PHY_ID 0x18 /* Range PHY_ADDRESS_RANGE .. 0x1e */
++#define EN8801S_PBUS_PHY_ID (EN8801S_MDIO_PHY_ID + 1)
++
++#define EN8801S_RG_ETHER_PHY_OUI 0x19a4
++#define EN8801S_RG_SMI_ADDR 0x19a8
++#define EN8801S_RG_BUCK_CTL 0x1a20
++#define EN8801S_RG_LTR_CTL 0x0cf8
++
++#define EN8801S_PBUS_OUI 0x17a5
++#define EN8801S_PHY_ID1 0x03a2
++#define EN8801S_PHY_ID2 0x9461
++#define EN8801S_PHY_ID (unsigned long)((EN8801S_PHY_ID1 << 16) | EN8801S_PHY_ID2)
++
++#define DEV1E_REG013_VALUE 0
++#define DEV1E_REG19E_VALUE 0xC2
++#define DEV1E_REG324_VALUE 0x200
++
++#define TRUE 1
++#define FALSE 0
++#define LINK_UP 1
++#define LINK_DOWN 0
++
++#if defined(TEST_BOARD)
++#define EN8801S_TX_POLARITY 1
++#define EN8801S_RX_POLARITY 0
++#else
++#define EN8801S_TX_POLARITY 0
++#define EN8801S_RX_POLARITY 1 /* The pin default assignment is set to 1 */
++#endif
++
++#define MAX_RETRY 5
++#define MAX_OUI_CHECK 2
++/* CL45 MDIO control */
++#define MII_MMD_ACC_CTL_REG 0x0d
++#define MII_MMD_ADDR_DATA_REG 0x0e
++#define MMD_OP_MODE_DATA BIT(14)
++
++#define MAX_TRG_COUNTER 5
++
++/* CL22 Reg Support Page Select */
++#define RgAddr_Reg1Fh 0x1f
++#define CL22_Page_Reg 0x0000
++#define CL22_Page_ExtReg 0x0001
++#define CL22_Page_MiscReg 0x0002
++#define CL22_Page_LpiReg 0x0003
++#define CL22_Page_tReg 0x02A3
++#define CL22_Page_TrReg 0x52B5
++
++/* CL45 Reg Support DEVID */
++#define DEVID_03 0x03
++#define DEVID_07 0x07
++#define DEVID_1E 0x1E
++#define DEVID_1F 0x1F
++
++/* TokenRing Reg Access */
++#define TrReg_PKT_XMT_STA 0x8000
++#define TrReg_WR 0x8000
++#define TrReg_RD 0xA000
++
++#define RgAddr_LpiReg1Ch 0x1c
++#define RgAddr_PMA_01h 0x0f82
++#define RgAddr_PMA_18h 0x0fb0
++#define RgAddr_DSPF_03h 0x1686
++#define RgAddr_DSPF_06h 0x168c
++#define RgAddr_DSPF_0Ch 0x1698
++#define RgAddr_DSPF_0Dh 0x169a
++#define RgAddr_DSPF_0Fh 0x169e
++#define RgAddr_DSPF_10h 0x16a0
++#define RgAddr_DSPF_11h 0x16a2
++#define RgAddr_DSPF_14h 0x16a8
++#define RgAddr_DSPF_1Ch 0x16b8
++#define RgAddr_TR_26h 0x0ecc
++#define RgAddr_R1000DEC_15h 0x03aa
++
++/* DATA TYPE DECLARATIONS
++ */
++typedef struct
++{
++ u16 DATA_Lo;
++ u16 DATA_Hi;
++}TR_DATA_T;
++
++typedef union
++{
++ struct
++ {
++ /* b[15:00] */
++ u16 smi_deton_wt : 3;
++ u16 smi_det_mdi_inv : 1;
++ u16 smi_detoff_wt : 3;
++ u16 smi_sigdet_debouncing_en : 1;
++ u16 smi_deton_th : 6;
++ u16 rsv_14 : 2;
++ } DataBitField;
++ u16 DATA;
++} gephy_all_REG_LpiReg1Ch, *Pgephy_all_REG_LpiReg1Ch;
++
++typedef union
++{
++ struct
++ {
++ /* b[15:00] */
++ u16 rg_smi_detcnt_max : 6;
++ u16 rsv_6 : 2;
++ u16 rg_smi_det_max_en : 1;
++ u16 smi_det_deglitch_off : 1;
++ u16 rsv_10 : 6;
++ } DataBitField;
++ u16 DATA;
++} gephy_all_REG_dev1Eh_reg324h, *Pgephy_all_REG_dev1Eh_reg324h;
++
++typedef union
++{
++ struct
++ {
++ /* b[15:00] */
++ u16 da_tx_i2mpb_a_tbt : 6;
++ u16 rsv_6 : 4;
++ u16 da_tx_i2mpb_a_gbe : 6;
++ } DataBitField;
++ u16 DATA;
++} gephy_all_REG_dev1Eh_reg012h, *Pgephy_all_REG_dev1Eh_reg012h;
++
++typedef union
++{
++ struct
++ {
++ /* b[15:00] */
++ u16 da_tx_i2mpb_b_tbt : 6;
++ u16 rsv_6 : 2;
++ u16 da_tx_i2mpb_b_gbe : 6;
++ u16 rsv_14 : 2;
++ } DataBitField;
++ u16 DATA;
++} gephy_all_REG_dev1Eh_reg017h, *Pgephy_all_REG_dev1Eh_reg017h;
++
++#endif /* End of __AIROHA_H */
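
The mdiobus_write45()/mdiobus_read45() helpers called from en8801s_config_init() are presumably built on the standard Clause-45-over-Clause-22 indirect MMD access that the MII_MMD_ACC_CTL_REG, MII_MMD_ADDR_DATA_REG and MMD_OP_MODE_DATA defines above describe. A minimal sketch of that sequence, assuming only the stock mdiobus_read()/mdiobus_write() accessors from <linux/mdio.h>; the helper names are made up and error handling is omitted:

/*
 * Minimal sketch, not the driver's actual helpers: Clause-45 register
 * access tunnelled through Clause-22 registers 13/14.
 */
static void cl22_mmd_write(struct mii_bus *bus, int phy_addr,
                           u16 devad, u16 reg, u16 val)
{
        /* select the MMD device, address mode */
        mdiobus_write(bus, phy_addr, MII_MMD_ACC_CTL_REG, devad);
        /* latch the register address inside that MMD */
        mdiobus_write(bus, phy_addr, MII_MMD_ADDR_DATA_REG, reg);
        /* switch to data mode, same MMD */
        mdiobus_write(bus, phy_addr, MII_MMD_ACC_CTL_REG,
                      MMD_OP_MODE_DATA | devad);
        /* write the payload */
        mdiobus_write(bus, phy_addr, MII_MMD_ADDR_DATA_REG, val);
}

static u16 cl22_mmd_read(struct mii_bus *bus, int phy_addr, u16 devad, u16 reg)
{
        mdiobus_write(bus, phy_addr, MII_MMD_ACC_CTL_REG, devad);
        mdiobus_write(bus, phy_addr, MII_MMD_ADDR_DATA_REG, reg);
        mdiobus_write(bus, phy_addr, MII_MMD_ACC_CTL_REG,
                      MMD_OP_MODE_DATA | devad);
        return mdiobus_read(bus, phy_addr, MII_MMD_ADDR_DATA_REG);
}
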
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8000-PATCH-1-4-tphy-support-type-switch-by-pericfg.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8000-PATCH-1-4-tphy-support-type-switch-by-pericfg.patch
new file mode 100644
index 0000000..4ae4991
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8000-PATCH-1-4-tphy-support-type-switch-by-pericfg.patch
@@ -0,0 +1,168 @@
+From ddeda571f3d79dcccef6541b6413cb184de40afd Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Fri, 17 Sep 2021 15:56:53 +0800
+Subject: [PATCH] phy: phy-mtk-tphy: support type switch by pericfg
+
+Add support for type switching between USB3, PCIe, SATA and SGMII via
+the pericfg register; this is used to take the place of an efuse or
+jumper.
+
+Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+---
+ drivers/phy/mediatek/phy-mtk-tphy.c | 84 +++++++++++++++++++++++++++--
+ 1 file changed, 81 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
+index d1ecf088032b..759e1c0c370a 100644
+--- a/drivers/phy/mediatek/phy-mtk-tphy.c
++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
+@@ -10,12 +10,12 @@
+ #include <linux/delay.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
+ #include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+-#include <linux/mfd/syscon.h>
+ #include <linux/regmap.h>
+
+ /* version V1 sub-banks offset base address */
+@@ -268,6 +268,14 @@
+ #define HIF_SYSCFG1 0x14
+ #define HIF_SYSCFG1_PHY2_MASK (0x3 << 20)
+
++/* PHY switch between pcie/usb3/sgmii/sata */
++#define USB_PHY_SWITCH_CTRL 0x0
++#define RG_PHY_SW_TYPE GENMASK(3, 0)
++#define RG_PHY_SW_PCIE 0x0
++#define RG_PHY_SW_USB3 0x1
++#define RG_PHY_SW_SGMII 0x2
++#define RG_PHY_SW_SATA 0x3
++
+ enum mtk_phy_version {
+ MTK_PHY_V1 = 1,
+ MTK_PHY_V2,
+@@ -301,7 +309,10 @@ struct mtk_phy_instance {
+ };
+ struct clk *ref_clk; /* reference clock of anolog phy */
+ u32 index;
+- u8 type;
++ u32 type;
++ struct regmap *type_sw;
++ u32 type_sw_reg;
++ u32 type_sw_index;
+ int eye_src;
+ int eye_vrt;
+ int eye_term;
+@@ -900,6 +911,64 @@ static void u2_phy_props_set(struct mtk_tphy *tphy,
+ }
+ }
+
++/* type switch for usb3/pcie/sgmii/sata */
++static int phy_type_syscon_get(struct mtk_phy_instance *instance,
++ struct device_node *dn)
++{
++ struct of_phandle_args args;
++ int ret;
++
++ /* type switch function is optional */
++ if (!of_property_read_bool(dn, "mediatek,syscon-type"))
++ return 0;
++
++ ret = of_parse_phandle_with_fixed_args(dn, "mediatek,syscon-type",
++ 2, 0, &args);
++ if (ret)
++ return ret;
++
++ instance->type_sw_reg = args.args[0];
++ instance->type_sw_index = args.args[1] & 0x3; /* <=3 */
++ instance->type_sw = syscon_node_to_regmap(args.np);
++ of_node_put(args.np);
++ dev_info(&instance->phy->dev, "type_sw - reg %#x, index %d\n",
++ instance->type_sw_reg, instance->type_sw_index);
++
++ return PTR_ERR_OR_ZERO(instance->type_sw);
++}
++
++static int phy_type_set(struct mtk_phy_instance *instance)
++{
++ int type;
++ u32 mask;
++
++ if (!instance->type_sw)
++ return 0;
++
++ switch (instance->type) {
++ case PHY_TYPE_USB3:
++ type = RG_PHY_SW_USB3;
++ break;
++ case PHY_TYPE_PCIE:
++ type = RG_PHY_SW_PCIE;
++ break;
++ case PHY_TYPE_SGMII:
++ type = RG_PHY_SW_SGMII;
++ break;
++ case PHY_TYPE_SATA:
++ type = RG_PHY_SW_SATA;
++ break;
++ case PHY_TYPE_USB2:
++ default:
++ return 0;
++ }
++
++ mask = RG_PHY_SW_TYPE << (instance->type_sw_index * BITS_PER_BYTE);
++ regmap_update_bits(instance->type_sw, instance->type_sw_reg, mask, type);
++
++ return 0;
++}
++
+ static int mtk_phy_init(struct phy *phy)
+ {
+ struct mtk_phy_instance *instance = phy_get_drvdata(phy);
+@@ -932,6 +1001,9 @@ static int mtk_phy_init(struct phy *phy)
+ case PHY_TYPE_SATA:
+ sata_phy_instance_init(tphy, instance);
+ break;
++ case PHY_TYPE_SGMII:
++ /* nothing to do, only used to set type */
++ break;
+ default:
+ dev_err(tphy->dev, "incompatible PHY type\n");
+ return -EINVAL;
+@@ -1020,7 +1092,8 @@ static struct phy *mtk_phy_xlate(struct device *dev,
+ if (!(instance->type == PHY_TYPE_USB2 ||
+ instance->type == PHY_TYPE_USB3 ||
+ instance->type == PHY_TYPE_PCIE ||
+- instance->type == PHY_TYPE_SATA)) {
++ instance->type == PHY_TYPE_SATA ||
++ instance->type == PHY_TYPE_SGMII)) {
+ dev_err(dev, "unsupported device type: %d\n", instance->type);
+ return ERR_PTR(-EINVAL);
+ }
+@@ -1035,6 +1108,7 @@ static struct phy *mtk_phy_xlate(struct device *dev,
+ }
+
+ phy_parse_property(tphy, instance);
++ phy_type_set(instance);
+
+ return instance->phy;
+ }
+@@ -1183,6 +1257,10 @@ static int mtk_tphy_probe(struct platform_device *pdev)
+ retval = PTR_ERR(instance->ref_clk);
+ goto put_child;
+ }
++
++ retval = phy_type_syscon_get(instance, child_np);
++ if (retval)
++ goto put_child;
+ }
+
+ provider = devm_of_phy_provider_register(dev, mtk_phy_xlate);
+--
+2.18.0
+
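
For reference, the pericfg register programmed above carries one 4-bit type field per PHY port, spaced one byte apart. A small illustrative sketch of how the per-port mask is formed; the helper name is made up, and only RG_PHY_SW_TYPE and BITS_PER_BYTE come from the patch and the kernel headers:

/* Hypothetical helper, illustration only: port n owns bits [8n+3:8n]. */
static u32 tphy_type_sw_mask(u32 index)
{
        /* index 0 -> 0x0000000f, index 1 -> 0x00000f00, index 2 -> 0x000f0000 */
        return RG_PHY_SW_TYPE << (index * BITS_PER_BYTE);
}
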
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8001-PATCH-2-4-dt-bindings-phy-Add-PHY_TYPE_DP-definition.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8001-PATCH-2-4-dt-bindings-phy-Add-PHY_TYPE_DP-definition.patch
new file mode 100644
index 0000000..f83e220
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8001-PATCH-2-4-dt-bindings-phy-Add-PHY_TYPE_DP-definition.patch
@@ -0,0 +1,29 @@
+From 8a79db5e83a5d52c74e6f3c40d6f312cf899213e Mon Sep 17 00:00:00 2001
+From: Jyri Sarha <jsarha@ti.com>
+Date: Wed, 8 Jan 2020 10:30:07 +0200
+Subject: [PATCH 1/5] dt-bindings: phy: Add PHY_TYPE_DP definition
+
+Add definition for DisplayPort phy type.
+
+Signed-off-by: Jyri Sarha <jsarha@ti.com>
+Reviewed-by: Roger Quadros <rogerq@ti.com>
+Reviewed-by: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+---
+ include/dt-bindings/phy/phy.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
+index b6a1eaf1b339..1f3f866fae7b 100644
+--- a/include/dt-bindings/phy/phy.h
++++ b/include/dt-bindings/phy/phy.h
+@@ -16,5 +16,6 @@
+ #define PHY_TYPE_USB2 3
+ #define PHY_TYPE_USB3 4
+ #define PHY_TYPE_UFS 5
++#define PHY_TYPE_DP 6
+
+ #endif /* _DT_BINDINGS_PHY */
+--
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8002-PATCH-3-4-dt-bindings-phy-Add-PHY_TYPE_XPCS-definition.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8002-PATCH-3-4-dt-bindings-phy-Add-PHY_TYPE_XPCS-definition.patch
new file mode 100644
index 0000000..7bd1ca7
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8002-PATCH-3-4-dt-bindings-phy-Add-PHY_TYPE_XPCS-definition.patch
@@ -0,0 +1,30 @@
+From c5d3cdad688ed75fb311a3a671eb30ba7106d7d3 Mon Sep 17 00:00:00 2001
+From: Dilip Kota <eswara.kota@linux.intel.com>
+Date: Tue, 19 May 2020 14:19:19 +0800
+Subject: [PATCH 2/5] dt-bindings: phy: Add PHY_TYPE_XPCS definition
+
+Add definition for Ethernet PCS phy type.
+
+Signed-off-by: Dilip Kota <eswara.kota@linux.intel.com>
+Acked-by: Rob Herring <robh@kernel.org>
+Acked-By: Vinod Koul <vkoul@kernel.org>
+Link: https://lore.kernel.org/r/6091f0d2a1046f1e3656d9e33b6cc433d5465eaf.1589868358.git.eswara.kota@linux.intel.com
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+---
+ include/dt-bindings/phy/phy.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
+index 1f3f866fae7b..3727ef72138b 100644
+--- a/include/dt-bindings/phy/phy.h
++++ b/include/dt-bindings/phy/phy.h
+@@ -17,5 +17,6 @@
+ #define PHY_TYPE_USB3 4
+ #define PHY_TYPE_UFS 5
+ #define PHY_TYPE_DP 6
++#define PHY_TYPE_XPCS 7
+
+ #endif /* _DT_BINDINGS_PHY */
+--
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8003-PATCH-4-4-dt-bindings-phy-Add-DT-bindings-for-Xilinx-ZynqMP-PS.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8003-PATCH-4-4-dt-bindings-phy-Add-DT-bindings-for-Xilinx-ZynqMP-PS.patch
new file mode 100644
index 0000000..ef5df66
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8003-PATCH-4-4-dt-bindings-phy-Add-DT-bindings-for-Xilinx-ZynqMP-PS.patch
@@ -0,0 +1,33 @@
+From cea0f76a483d1270ac6f6513964e3e75193dda48 Mon Sep 17 00:00:00 2001
+From: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+Date: Mon, 29 Jun 2020 15:00:52 +0300
+Subject: [PATCH 3/5] dt-bindings: phy: Add DT bindings for Xilinx ZynqMP PSGTR
+ PHY
+
+Add DT bindings for the Xilinx ZynqMP PHY. ZynqMP SoCs have a High Speed
+Processing System Gigabit Transceiver which provides PHY capabilities to
+USB, SATA, PCIE, Display Port and Ethernet SGMII controllers.
+
+Signed-off-by: Anurag Kumar Vulisha <anurag.kumar.vulisha@xilinx.com>
+Signed-off-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Rob Herring <robh@kernel.org>
+Link: https://lore.kernel.org/r/20200629120054.29338-2-laurent.pinchart@ideasonboard.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+---
+ include/dt-bindings/phy/phy.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
+index 3727ef72138b..36e8c241cf48 100644
+--- a/include/dt-bindings/phy/phy.h
++++ b/include/dt-bindings/phy/phy.h
+@@ -18,5 +18,6 @@
+ #define PHY_TYPE_UFS 5
+ #define PHY_TYPE_DP 6
+ #define PHY_TYPE_XPCS 7
++#define PHY_TYPE_SGMII 8
+
+ #endif /* _DT_BINDINGS_PHY */
+--
+2.18.0
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8004-nvmem-core-Add-functions-to-make-number-reading-easy.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8004-nvmem-core-Add-functions-to-make-number-reading-easy.patch
new file mode 100644
index 0000000..969ec3f
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8004-nvmem-core-Add-functions-to-make-number-reading-easy.patch
@@ -0,0 +1,307 @@
+From 8dc0b1158dcffd78ea2b3a5604b82ee826de687b Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Mon, 8 Nov 2021 13:58:51 +0800
+Subject: [PATCH 1/5] nvmem: core: Add functions to make number reading easy
+
+Sometimes the clients of nvmem just want to get a number out of
+nvmem. They don't want to think about exactly how many bytes the nvmem
+cell took up. They just want the number. Let's make it easy.
+
+In general this concept is useful because nvmem space is precious and
+usually the fewest bits are allocated that will hold a given value on
+a given system. However, even though small numbers might be fine on
+one system that doesn't mean that logically the number couldn't be
+bigger. Imagine nvmem containing a max frequency for a component. On
+one system perhaps that fits in 16 bits. On another system it might
+fit in 32 bits. The code reading this number doesn't care--it just
+wants the number.
+
+We'll provide two functions: nvmem_cell_read_variable_le_u32() and
+nvmem_cell_read_variable_le_u64().
+
+Comparing these to the existing functions like nvmem_cell_read_u32():
+* These new functions have no problems if the value was stored in
+ nvmem in fewer bytes. It's OK to use these function as long as the
+ value stored will fit in 32-bits (or 64-bits).
+* These functions avoid problems that the earlier APIs had with bit
+ offsets. For instance, you can't use nvmem_cell_read_u32() to read a
+ value has nbits=32 and bit_offset=4 because the nvmem cell must be
+ at least 5 bytes big to hold this value. The new API accounts for
+ this and works fine.
+* These functions make it very explicit that they assume that the
+ number was stored in little endian format. The old functions made
+ this assumption whenever bit_offset was non-zero (see
+ nvmem_shift_read_buffer_in_place()) but didn't whenever the
+ bit_offset was zero.
+
+NOTE: it's assumed that we don't need an 8-bit or 16-bit version of
+this function. The 32-bit version of the function can be used to read
+8-bit or 16-bit data.
+
+At the moment, I'm only adding the "unsigned" versions of these
+functions, but if it ends up being useful someone could add a "signed"
+version that did 2's complement sign extension.
+
+At the moment, I'm only adding the "little endian" versions of these
+functions. Adding the "big endian" version would require adding "big
+endian" support to nvmem_shift_read_buffer_in_place().
+
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+Link: https://lore.kernel.org/r/20210330111241.19401-7-srinivas.kandagatla@linaro.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Change-Id: I3e1d96ec1680812d5e24681c79852c9b36899559
+---
+ drivers/nvmem/core.c | 161 +++++++++++++++++++++++++++------
+ include/linux/nvmem-consumer.h | 15 +++
+ 2 files changed, 150 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index c0f4324d8f7c..e26b25b5c288 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -1102,16 +1102,8 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
+ }
+ EXPORT_SYMBOL_GPL(nvmem_cell_write);
+
+-/**
+- * nvmem_cell_read_u16() - Read a cell value as an u16
+- *
+- * @dev: Device that requests the nvmem cell.
+- * @cell_id: Name of nvmem cell to read.
+- * @val: pointer to output value.
+- *
+- * Return: 0 on success or negative errno.
+- */
+-int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
++static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
++ void *val, size_t count)
+ {
+ struct nvmem_cell *cell;
+ void *buf;
+@@ -1126,21 +1118,50 @@ int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
+ nvmem_cell_put(cell);
+ return PTR_ERR(buf);
+ }
+- if (len != sizeof(*val)) {
++ if (len != count) {
+ kfree(buf);
+ nvmem_cell_put(cell);
+ return -EINVAL;
+ }
+- memcpy(val, buf, sizeof(*val));
++ memcpy(val, buf, count);
+ kfree(buf);
+ nvmem_cell_put(cell);
+
+ return 0;
+ }
++
++/**
++ * nvmem_cell_read_u8() - Read a cell value as a u8
++ *
++ * @dev: Device that requests the nvmem cell.
++ * @cell_id: Name of nvmem cell to read.
++ * @val: pointer to output value.
++ *
++ * Return: 0 on success or negative errno.
++ */
++int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
++{
++ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
++}
++EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
++
++/**
++ * nvmem_cell_read_u16() - Read a cell value as a u16
++ *
++ * @dev: Device that requests the nvmem cell.
++ * @cell_id: Name of nvmem cell to read.
++ * @val: pointer to output value.
++ *
++ * Return: 0 on success or negative errno.
++ */
++int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
++{
++ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
++}
+ EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
+
+ /**
+- * nvmem_cell_read_u32() - Read a cell value as an u32
++ * nvmem_cell_read_u32() - Read a cell value as a u32
+ *
+ * @dev: Device that requests the nvmem cell.
+ * @cell_id: Name of nvmem cell to read.
+@@ -1149,32 +1170,120 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
+ * Return: 0 on success or negative errno.
+ */
+ int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
++{
++ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
++}
++EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
++
++/**
++ * nvmem_cell_read_u64() - Read a cell value as a u64
++ *
++ * @dev: Device that requests the nvmem cell.
++ * @cell_id: Name of nvmem cell to read.
++ * @val: pointer to output value.
++ *
++ * Return: 0 on success or negative errno.
++ */
++int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
++{
++ return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
++}
++EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
++
++static const void *nvmem_cell_read_variable_common(struct device *dev,
++ const char *cell_id,
++ size_t max_len, size_t *len)
+ {
+ struct nvmem_cell *cell;
++ int nbits;
+ void *buf;
+- size_t len;
+
+ cell = nvmem_cell_get(dev, cell_id);
+ if (IS_ERR(cell))
+- return PTR_ERR(cell);
++ return cell;
+
+- buf = nvmem_cell_read(cell, &len);
+- if (IS_ERR(buf)) {
+- nvmem_cell_put(cell);
+- return PTR_ERR(buf);
+- }
+- if (len != sizeof(*val)) {
++ nbits = cell->nbits;
++ buf = nvmem_cell_read(cell, len);
++ nvmem_cell_put(cell);
++ if (IS_ERR(buf))
++ return buf;
++
++ /*
++ * If nbits is set then nvmem_cell_read() can significantly exaggerate
++ * the length of the real data. Throw away the extra junk.
++ */
++ if (nbits)
++ *len = DIV_ROUND_UP(nbits, 8);
++
++ if (*len > max_len) {
+ kfree(buf);
+- nvmem_cell_put(cell);
+- return -EINVAL;
++ return ERR_PTR(-ERANGE);
+ }
+- memcpy(val, buf, sizeof(*val));
++
++ return buf;
++}
++
++/**
++ * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
++ *
++ * @dev: Device that requests the nvmem cell.
++ * @cell_id: Name of nvmem cell to read.
++ * @val: pointer to output value.
++ *
++ * Return: 0 on success or negative errno.
++ */
++int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
++ u32 *val)
++{
++ size_t len;
++ const u8 *buf;
++ int i;
++
++ buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
++ if (IS_ERR(buf))
++ return PTR_ERR(buf);
++
++ /* Copy w/ implicit endian conversion */
++ *val = 0;
++ for (i = 0; i < len; i++)
++ *val |= buf[i] << (8 * i);
+
+ kfree(buf);
+- nvmem_cell_put(cell);
++
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
++EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
++
++/**
++ * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
++ *
++ * @dev: Device that requests the nvmem cell.
++ * @cell_id: Name of nvmem cell to read.
++ * @val: pointer to output value.
++ *
++ * Return: 0 on success or negative errno.
++ */
++int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
++ u64 *val)
++{
++ size_t len;
++ const u8 *buf;
++ int i;
++
++ buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
++ if (IS_ERR(buf))
++ return PTR_ERR(buf);
++
++ /* Copy w/ implicit endian conversion */
++ *val = 0;
++ for (i = 0; i < len; i++)
++ *val |= (uint64_t)buf[i] << (8 * i);
++
++ kfree(buf);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
+
+ /**
+ * nvmem_device_cell_read() - Read a given nvmem device and cell
+diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
+index 5c17cb733224..e328c0f7eef3 100644
+--- a/include/linux/nvmem-consumer.h
++++ b/include/linux/nvmem-consumer.h
+@@ -63,6 +63,10 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
+ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len);
+ int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val);
+ int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val);
++int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
++ u32 *val);
++int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
++ u64 *val);
+
+ /* direct nvmem device read/write interface */
+ struct nvmem_device *nvmem_device_get(struct device *dev, const char *name);
+@@ -134,6 +138,17 @@ static inline int nvmem_cell_read_u32(struct device *dev,
+ {
+ return -EOPNOTSUPP;
+ }
++static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
++ const char *cell_id, u32 *val)
++{
++ return -ENOSYS;
++}
++
++static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
++ const char *cell_id, u64 *val)
++{
++ return -ENOSYS;
++}
+
+ static inline struct nvmem_device *nvmem_device_get(struct device *dev,
+ const char *name)
+--
+2.18.0
+
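
A consumer-side sketch of the API added above, assuming a hypothetical device with an nvmem cell named "calib" whose width may differ between SoCs; the point is that the caller never needs to know whether the cell is 1, 2, 3 or 4 bytes wide:

/* Hedged sketch: "calib" and foo_get_calibration() are made-up names. */
static int foo_get_calibration(struct device *dev, u32 *calib)
{
        int ret;

        ret = nvmem_cell_read_variable_le_u32(dev, "calib", calib);
        if (ret)
                dev_err(dev, "failed to read calib cell: %d\n", ret);

        return ret;
}
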
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8005-nvmem-mtk-efuse-support-minimum-one-byte-access-stri.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8005-nvmem-mtk-efuse-support-minimum-one-byte-access-stri.patch
new file mode 100644
index 0000000..8de4c2a
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8005-nvmem-mtk-efuse-support-minimum-one-byte-access-stri.patch
@@ -0,0 +1,51 @@
+From 44ae4ed142265a6d50a9d3e6f4c395f97b6849ab Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Sat, 6 Nov 2021 20:06:30 +0800
+Subject: [PATCH 2/5] nvmem: mtk-efuse: support minimum one byte access stride
+ and granularity
+
+In order to support the nvmem "bits" property, the driver should support a
+minimum 1-byte read stride and a minimum 1-byte read granularity at the same time.
+
+Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Change-Id: Iafe1ebf195d58a3e9e3518913f795d14a01dfd3b
+---
+ drivers/nvmem/mtk-efuse.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
+index 856d9c3fc38e..2e728fed0b49 100644
+--- a/drivers/nvmem/mtk-efuse.c
++++ b/drivers/nvmem/mtk-efuse.c
+@@ -19,11 +19,12 @@ static int mtk_reg_read(void *context,
+ unsigned int reg, void *_val, size_t bytes)
+ {
+ struct mtk_efuse_priv *priv = context;
+- u32 *val = _val;
+- int i = 0, words = bytes / 4;
++ void __iomem *addr = priv->base + reg;
++ u8 *val = _val;
++ int i;
+
+- while (words--)
+- *val++ = readl(priv->base + reg + (i++ * 4));
++ for (i = 0; i < bytes; i++, val++)
++ *val = readb(addr + i);
+
+ return 0;
+ }
+@@ -58,8 +59,8 @@ static int mtk_efuse_probe(struct platform_device *pdev)
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+- econfig.stride = 4;
+- econfig.word_size = 4;
++ econfig.stride = 1;
++ econfig.word_size = 1;
+ econfig.reg_read = mtk_reg_read;
+ econfig.reg_write = mtk_reg_write;
+ econfig.size = resource_size(res);
+--
+2.18.0
+
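
The 1-byte stride matters because a cell declared with the nvmem "bits" property can start and end on arbitrary bit boundaries. A small illustration of the byte span such a cell occupies; the helper name and the numbers are made up:

/*
 * Illustration only: byte span of an nvmem cell declared as
 * bits = <bit_offset nbits>.  A 12-bit field starting at bit 4 covers
 * DIV_ROUND_UP(4 + 12, 8) = 2 bytes, which a provider restricted to a
 * 4-byte stride and word size cannot serve cleanly.
 */
static unsigned int nvmem_bits_cell_bytes(unsigned int bit_offset,
                                          unsigned int nbits)
{
        return DIV_ROUND_UP(bit_offset + nbits, 8);
}
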
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8006-phy-phy-mtk-tphy-add-support-efuse-setting.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8006-phy-phy-mtk-tphy-add-support-efuse-setting.patch
new file mode 100644
index 0000000..5cc8a65
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8006-phy-phy-mtk-tphy-add-support-efuse-setting.patch
@@ -0,0 +1,312 @@
+From afb123e0f9992d35d0fb28ed875f2b7b7884652f Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Mon, 8 Nov 2021 14:51:38 +0800
+Subject: [PATCH 3/5] phy: phy-mtk-tphy: add support efuse setting
+
+Some SoCs have a bit shift issue that drops a bit for the USB3 PHY or
+PCIe PHY; fix it by adding software efuse reading and setting, and only
+support it optionally per version.
+
+Signed-off-by: Chunfeng Yun <chunfeng.yun@mediatek.com>
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Change-Id: Ibf88868668b3889f18c7930531981400cac732f1
+---
+ drivers/phy/mediatek/phy-mtk-tphy.c | 194 ++++++++++++++++++++++++++++
+ 1 file changed, 194 insertions(+)
+
+diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
+index cb2ed3b25068..05a1ad4ff334 100644
+--- a/drivers/phy/mediatek/phy-mtk-tphy.c
++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
+@@ -11,6 +11,7 @@
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+ #include <linux/module.h>
++#include <linux/nvmem-consumer.h>
+ #include <linux/of_address.h>
+ #include <linux/of_device.h>
+ #include <linux/phy/phy.h>
+@@ -38,11 +39,16 @@
+ #define SSUSB_SIFSLV_V2_U3PHYD 0x200
+ #define SSUSB_SIFSLV_V2_U3PHYA 0x400
+
++#define U3P_MISC_REG1 0x04
++#define MR1_EFUSE_AUTO_LOAD_DIS BIT(6)
++
+ #define U3P_USBPHYACR0 0x000
+ #define PA0_RG_U2PLL_FORCE_ON BIT(15)
+ #define PA0_RG_USB20_INTR_EN BIT(5)
+
+ #define U3P_USBPHYACR1 0x004
++#define PA1_RG_INTR_CAL GENMASK(23, 19)
++#define PA1_RG_INTR_CAL_VAL(x) ((0x1f & (x)) << 19)
+ #define PA1_RG_VRT_SEL GENMASK(14, 12)
+ #define PA1_RG_VRT_SEL_VAL(x) ((0x7 & (x)) << 12)
+ #define PA1_RG_TERM_SEL GENMASK(10, 8)
+@@ -114,6 +120,8 @@
+ #define P3C_RG_SWRST_U3_PHYD_FORCE_EN BIT(24)
+
+ #define U3P_U3_PHYA_REG0 0x000
++#define P3A_RG_IEXT_INTR GENMASK(15, 10)
++#define P3A_RG_IEXT_INTR_VAL(x) ((0x3f & (x)) << 10)
+ #define P3A_RG_CLKDRV_OFF GENMASK(3, 2)
+ #define P3A_RG_CLKDRV_OFF_VAL(x) ((0x3 & (x)) << 2)
+
+@@ -168,6 +176,25 @@
+ #define P3D_RG_FWAKE_TH GENMASK(21, 16)
+ #define P3D_RG_FWAKE_TH_VAL(x) ((0x3f & (x)) << 16)
+
++#define U3P_U3_PHYD_IMPCAL0 0x010
++#define P3D_RG_FORCE_TX_IMPEL BIT(31)
++#define P3D_RG_TX_IMPEL GENMASK(28, 24)
++#define P3D_RG_TX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
++
++#define U3P_U3_PHYD_IMPCAL1 0x014
++#define P3D_RG_FORCE_RX_IMPEL BIT(31)
++#define P3D_RG_RX_IMPEL GENMASK(28, 24)
++#define P3D_RG_RX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
++
++#define U3P_U3_PHYD_RX0 0x02c
++
++#define U3P_U3_PHYD_T2RLB 0x030
++
++#define U3P_U3_PHYD_PIPE0 0x040
++
++#define U3P_U3_PHYD_RSV 0x054
++#define P3D_RG_EFUSE_AUTO_LOAD_DIS BIT(12)
++
+ #define U3P_U3_PHYD_CDR1 0x05c
+ #define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
+ #define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24)
+@@ -266,11 +293,23 @@
+ enum mtk_phy_version {
+ MTK_PHY_V1 = 1,
+ MTK_PHY_V2,
++ MTK_PHY_V3,
+ };
+
+ struct mtk_phy_pdata {
+ /* avoid RX sensitivity level degradation only for mt8173 */
+ bool avoid_rx_sen_degradation;
++ /*
++ * u2phy should use integer mode instead of fractional mode of
++ * 48M PLL, fix it by switching PLL to 26M from default 48M
++ * for mt8195
++ */
++ bool sx_pll_48m_to_26m;
++ /*
++ * Some SoCs (e.g. mt8195) drop a bit when use auto load efuse,
++ * support sw way, also support it for v2/v3 optionally.
++ */
++ bool sw_efuse_supported;
+ enum mtk_phy_version version;
+ };
+
+@@ -295,6 +334,10 @@ struct mtk_phy_instance {
+ struct u3phy_banks u3_banks;
+ };
+ struct clk *ref_clk; /* reference clock of anolog phy */
++ u32 efuse_sw_en;
++ u32 efuse_intr;
++ u32 efuse_tx_imp;
++ u32 efuse_rx_imp;
+ u32 index;
+ u8 type;
+ int eye_src;
+@@ -890,6 +933,138 @@ static void u2_phy_props_set(struct mtk_tphy *tphy,
+ }
+ }
+
++static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instance)
++{
++ struct device *dev = &instance->phy->dev;
++ int ret = 0;
++
++ dev_err(dev, "try to get sw efuse\n");
++
++ /* tphy v1 doesn't support sw efuse, skip it */
++ if (!tphy->pdata->sw_efuse_supported) {
++ instance->efuse_sw_en = 0;
++ return 0;
++ }
++
++ /* software efuse is optional */
++ instance->efuse_sw_en = device_property_read_bool(dev, "nvmem-cells");
++ if (!instance->efuse_sw_en)
++ return 0;
++
++ dev_err(dev, "try to get sw efuse+\n");
++
++ switch (instance->type) {
++ case PHY_TYPE_USB2:
++ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
++ if (ret) {
++ dev_err(dev, "fail to get u2 intr efuse, %d\n", ret);
++ break;
++ }
++
++ /* no efuse, ignore it */
++ if (!instance->efuse_intr) {
++ dev_warn(dev, "no u2 intr efuse, but dts enable it\n");
++ instance->efuse_sw_en = 0;
++ break;
++ }
++
++ dev_info(dev, "u2 efuse - intr %x\n", instance->efuse_intr);
++ break;
++ case PHY_TYPE_USB3:
++ case PHY_TYPE_PCIE:
++ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
++ if (ret) {
++ dev_err(dev, "fail to get u3 intr efuse, %d\n", ret);
++ break;
++ }
++
++ ret = nvmem_cell_read_variable_le_u32(dev, "rx_imp", &instance->efuse_rx_imp);
++ if (ret) {
++ dev_err(dev, "fail to get u3 rx_imp efuse, %d\n", ret);
++ break;
++ }
++
++ ret = nvmem_cell_read_variable_le_u32(dev, "tx_imp", &instance->efuse_tx_imp);
++ if (ret) {
++ dev_err(dev, "fail to get u3 tx_imp efuse, %d\n", ret);
++ break;
++ }
++
++ /* no efuse, ignore it */
++ if (!instance->efuse_intr &&
++ !instance->efuse_rx_imp &&
++ !instance->efuse_tx_imp) {
++ dev_warn(dev, "no u3 intr efuse, but dts enable it\n");
++ instance->efuse_sw_en = 0;
++ break;
++ }
++
++ dev_info(dev, "u3 efuse - intr %x, rx_imp %x, tx_imp %x\n",
++ instance->efuse_intr, instance->efuse_rx_imp,
++ instance->efuse_tx_imp);
++ break;
++ default:
++ dev_err(dev, "no sw efuse for type %d\n", instance->type);
++ ret = -EINVAL;
++ }
++
++ return ret;
++}
++
++static void phy_efuse_set(struct mtk_phy_instance *instance)
++{
++ struct device *dev = &instance->phy->dev;
++ struct u2phy_banks *u2_banks = &instance->u2_banks;
++ struct u3phy_banks *u3_banks = &instance->u3_banks;
++ u32 tmp;
++
++ if (!instance->efuse_sw_en)
++ return;
++
++ switch (instance->type) {
++ case PHY_TYPE_USB2:
++ tmp = readl(u2_banks->misc + U3P_MISC_REG1);
++ tmp |= MR1_EFUSE_AUTO_LOAD_DIS;
++ writel(tmp, u2_banks->misc + U3P_MISC_REG1);
++
++ tmp = readl(u2_banks->com + U3P_USBPHYACR1);
++ tmp &= ~PA1_RG_INTR_CAL;
++ tmp |= PA1_RG_INTR_CAL_VAL(instance->efuse_intr);
++ writel(tmp, u2_banks->com + U3P_USBPHYACR1);
++ pr_err("%s set efuse intr %x\n", __func__, instance->efuse_intr);
++
++ break;
++ case PHY_TYPE_USB3:
++ case PHY_TYPE_PCIE:
++ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RSV);
++ tmp |= P3D_RG_EFUSE_AUTO_LOAD_DIS;
++ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RSV);
++
++ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0);
++ tmp &= ~P3D_RG_TX_IMPEL;
++ tmp |= P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp);
++ tmp |= P3D_RG_FORCE_TX_IMPEL;
++ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_IMPCAL0);
++
++ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1);
++ tmp &= ~P3D_RG_RX_IMPEL;
++ tmp |= P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp);
++ tmp |= P3D_RG_FORCE_RX_IMPEL;
++ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_IMPCAL1);
++
++ tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG0);
++ tmp &= ~P3A_RG_IEXT_INTR;
++ tmp |= P3A_RG_IEXT_INTR_VAL(instance->efuse_intr);
++ writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG0);
++ pr_err("%s set efuse, tx_imp %x, rx_imp %x intr %x\n",
++ __func__, instance->efuse_tx_imp,
++ instance->efuse_rx_imp, instance->efuse_intr);
++ break;
++ default:
++ dev_warn(dev, "no sw efuse for type %d\n", instance->type);
++ }
++}
++
+ static int mtk_phy_init(struct phy *phy)
+ {
+ struct mtk_phy_instance *instance = phy_get_drvdata(phy);
+@@ -908,6 +1083,8 @@ static int mtk_phy_init(struct phy *phy)
+ return ret;
+ }
+
++ phy_efuse_set(instance);
++
+ switch (instance->type) {
+ case PHY_TYPE_USB2:
+ u2_phy_instance_init(tphy, instance);
+@@ -989,6 +1166,7 @@ static struct phy *mtk_phy_xlate(struct device *dev,
+ struct mtk_phy_instance *instance = NULL;
+ struct device_node *phy_np = args->np;
+ int index;
++ int ret;
+
+ if (args->args_count != 1) {
+ dev_err(dev, "invalid number of cells in 'phy' property\n");
+@@ -1024,6 +1202,10 @@ static struct phy *mtk_phy_xlate(struct device *dev,
+ return ERR_PTR(-EINVAL);
+ }
+
++ ret = phy_efuse_get(tphy, instance);
++ if (ret)
++ return ERR_PTR(ret);
++
+ phy_parse_property(tphy, instance);
+
+ return instance->phy;
+@@ -1045,14 +1227,26 @@ static const struct mtk_phy_pdata tphy_v1_pdata = {
+
+ static const struct mtk_phy_pdata tphy_v2_pdata = {
+ .avoid_rx_sen_degradation = false,
++ .sw_efuse_supported = true,
+ .version = MTK_PHY_V2,
+ };
+
++static const struct mtk_phy_pdata tphy_v3_pdata = {
++ .sw_efuse_supported = true,
++ .version = MTK_PHY_V3,
++};
++
+ static const struct mtk_phy_pdata mt8173_pdata = {
+ .avoid_rx_sen_degradation = true,
+ .version = MTK_PHY_V1,
+ };
+
++static const struct mtk_phy_pdata mt8195_pdata = {
++ .sx_pll_48m_to_26m = true,
++ .sw_efuse_supported = true,
++ .version = MTK_PHY_V3,
++};
++
+ static const struct of_device_id mtk_tphy_id_table[] = {
+ { .compatible = "mediatek,mt2701-u3phy", .data = &tphy_v1_pdata },
+ { .compatible = "mediatek,mt2712-u3phy", .data = &tphy_v2_pdata },
+--
+2.18.0
+
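
Every register update in phy_efuse_set() above follows the same read-modify-write shape; a condensed sketch of that pattern, where the helper is hypothetical and the macros are the ones defined in the patch:

/* Hypothetical helper: clear the field mask, then OR in the new value. */
static void tphy_update_field(void __iomem *reg, u32 mask, u32 val)
{
        u32 tmp = readl(reg);

        tmp &= ~mask;
        tmp |= val;
        writel(tmp, reg);
}

/*
 * e.g. programming the U2 calibration field from the software efuse:
 * tphy_update_field(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
 *                   PA1_RG_INTR_CAL_VAL(instance->efuse_intr));
 */
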
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8007-phy-phy-mtk-tphy-Add-PCIe-2-lane-efuse-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8007-phy-phy-mtk-tphy-Add-PCIe-2-lane-efuse-support.patch
new file mode 100644
index 0000000..b710695
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8007-phy-phy-mtk-tphy-Add-PCIe-2-lane-efuse-support.patch
@@ -0,0 +1,229 @@
+From 41ffe32e7ec23f592e21c508b5108899ad393059 Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Tue, 25 Jan 2022 16:50:47 +0800
+Subject: [PATCH 4/5] phy: phy-mtk-tphy: Add PCIe 2 lane efuse support
+
+Add PCIe 2 lane efuse support in tphy driver.
+
+Signed-off-by: Jie Yang <jieyy.yang@mediatek.com>
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+---
+ drivers/phy/mediatek/phy-mtk-tphy.c | 140 ++++++++++++++++++++++++++++
+ 1 file changed, 140 insertions(+)
+
+diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
+index 05a1ad4..59d6ac3 100644
+--- a/drivers/phy/mediatek/phy-mtk-tphy.c
++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
+@@ -39,6 +39,15 @@
+ #define SSUSB_SIFSLV_V2_U3PHYD 0x200
+ #define SSUSB_SIFSLV_V2_U3PHYA 0x400
+
++/* version V4 sub-banks offset base address */
++/* pcie phy banks */
++#define SSUSB_SIFSLV_V4_SPLLC 0x000
++#define SSUSB_SIFSLV_V4_CHIP 0x100
++#define SSUSB_SIFSLV_V4_U3PHYD 0x900
++#define SSUSB_SIFSLV_V4_U3PHYA 0xb00
++
++#define SSUSB_LN1_OFFSET 0x10000
++
+ #define U3P_MISC_REG1 0x04
+ #define MR1_EFUSE_AUTO_LOAD_DIS BIT(6)
+
+@@ -294,6 +303,7 @@ enum mtk_phy_version {
+ MTK_PHY_V1 = 1,
+ MTK_PHY_V2,
+ MTK_PHY_V3,
++ MTK_PHY_V4,
+ };
+
+ struct mtk_phy_pdata {
+@@ -338,6 +348,9 @@ struct mtk_phy_instance {
+ u32 efuse_intr;
+ u32 efuse_tx_imp;
+ u32 efuse_rx_imp;
++ u32 efuse_intr_ln1;
++ u32 efuse_tx_imp_ln1;
++ u32 efuse_rx_imp_ln1;
+ u32 index;
+ u8 type;
+ int eye_src;
+@@ -878,6 +891,36 @@ static void phy_v2_banks_init(struct mtk_tphy *tphy,
+ }
+ }
+
++static void phy_v4_banks_init(struct mtk_tphy *tphy,
++ struct mtk_phy_instance *instance)
++{
++ struct u2phy_banks *u2_banks = &instance->u2_banks;
++ struct u3phy_banks *u3_banks = &instance->u3_banks;
++
++ switch (instance->type) {
++ case PHY_TYPE_USB2:
++ u2_banks->misc = instance->port_base + SSUSB_SIFSLV_V2_MISC;
++ u2_banks->fmreg = instance->port_base + SSUSB_SIFSLV_V2_U2FREQ;
++ u2_banks->com = instance->port_base + SSUSB_SIFSLV_V2_U2PHY_COM;
++ break;
++ case PHY_TYPE_USB3:
++ u3_banks->spllc = instance->port_base + SSUSB_SIFSLV_V2_SPLLC;
++ u3_banks->chip = instance->port_base + SSUSB_SIFSLV_V2_CHIP;
++ u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V2_U3PHYD;
++ u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V2_U3PHYA;
++ break;
++ case PHY_TYPE_PCIE:
++ u3_banks->spllc = instance->port_base + SSUSB_SIFSLV_V4_SPLLC;
++ u3_banks->chip = instance->port_base + SSUSB_SIFSLV_V4_CHIP;
++ u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V4_U3PHYD;
++ u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V4_U3PHYA;
++ break;
++ default:
++ dev_err(tphy->dev, "incompatible PHY type\n");
++ return;
++ }
++}
++
+ static void phy_parse_property(struct mtk_tphy *tphy,
+ struct mtk_phy_instance *instance)
+ {
+@@ -1002,6 +1045,40 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
+ dev_info(dev, "u3 efuse - intr %x, rx_imp %x, tx_imp %x\n",
+ instance->efuse_intr, instance->efuse_rx_imp,
+ instance->efuse_tx_imp);
++
++ if (tphy->pdata->version != MTK_PHY_V4)
++ break;
++
++ ret = nvmem_cell_read_variable_le_u32(dev, "intr_ln1", &instance->efuse_intr_ln1);
++ if (ret) {
++ dev_err(dev, "fail to get u3 lane1 intr efuse, %d\n", ret);
++ break;
++ }
++
++ ret = nvmem_cell_read_variable_le_u32(dev, "rx_imp_ln1", &instance->efuse_rx_imp_ln1);
++ if (ret) {
++ dev_err(dev, "fail to get u3 lane1 rx_imp efuse, %d\n", ret);
++ break;
++ }
++
++ ret = nvmem_cell_read_variable_le_u32(dev, "tx_imp_ln1", &instance->efuse_tx_imp_ln1);
++ if (ret) {
++ dev_err(dev, "fail to get u3 lane1 tx_imp efuse, %d\n", ret);
++ break;
++ }
++
++ /* no efuse, ignore it */
++ if (!instance->efuse_intr_ln1 &&
++ !instance->efuse_rx_imp_ln1 &&
++ !instance->efuse_tx_imp_ln1) {
++ dev_warn(dev, "no u3 lane1 efuse, but dts enable it\n");
++ instance->efuse_sw_en = 0;
++ break;
++ }
++
++ dev_info(dev, "u3 lane1 efuse - intr %x, rx_imp %x, tx_imp %x\n",
++ instance->efuse_intr_ln1, instance->efuse_rx_imp_ln1,
++ instance->efuse_tx_imp_ln1);
+ break;
+ default:
+ dev_err(dev, "no sw efuse for type %d\n", instance->type);
+@@ -1035,6 +1112,31 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
+
+ break;
+ case PHY_TYPE_USB3:
++ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RSV);
++ tmp |= P3D_RG_EFUSE_AUTO_LOAD_DIS;
++ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RSV);
++
++ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0);
++ tmp &= ~P3D_RG_TX_IMPEL;
++ tmp |= P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp);
++ tmp |= P3D_RG_FORCE_TX_IMPEL;
++ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_IMPCAL0);
++
++ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1);
++ tmp &= ~P3D_RG_RX_IMPEL;
++ tmp |= P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp);
++ tmp |= P3D_RG_FORCE_RX_IMPEL;
++ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_IMPCAL1);
++
++ tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG0);
++ tmp &= ~P3A_RG_IEXT_INTR;
++ tmp |= P3A_RG_IEXT_INTR_VAL(instance->efuse_intr);
++ writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG0);
++ pr_err("%s set efuse, tx_imp %x, rx_imp %x intr %x\n",
++ __func__, instance->efuse_tx_imp,
++ instance->efuse_rx_imp, instance->efuse_intr);
++
++ break;
+ case PHY_TYPE_PCIE:
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RSV);
+ tmp |= P3D_RG_EFUSE_AUTO_LOAD_DIS;
+@@ -1059,6 +1161,35 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
+ pr_err("%s set efuse, tx_imp %x, rx_imp %x intr %x\n",
+ __func__, instance->efuse_tx_imp,
+ instance->efuse_rx_imp, instance->efuse_intr);
++
++ if (!instance->efuse_intr_ln1 &&
++ !instance->efuse_rx_imp_ln1 &&
++ !instance->efuse_tx_imp_ln1)
++ break;
++
++ tmp = readl(u3_banks->phyd + SSUSB_LN1_OFFSET + U3P_U3_PHYD_RSV);
++ tmp |= P3D_RG_EFUSE_AUTO_LOAD_DIS;
++ writel(tmp, u3_banks->phyd + SSUSB_LN1_OFFSET + U3P_U3_PHYD_RSV);
++
++ tmp = readl(u3_banks->phyd + SSUSB_LN1_OFFSET + U3P_U3_PHYD_IMPCAL0);
++ tmp &= ~P3D_RG_TX_IMPEL;
++ tmp |= P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp_ln1);
++ tmp |= P3D_RG_FORCE_TX_IMPEL;
++ writel(tmp, u3_banks->phyd + SSUSB_LN1_OFFSET + U3P_U3_PHYD_IMPCAL0);
++
++ tmp = readl(u3_banks->phyd + SSUSB_LN1_OFFSET + U3P_U3_PHYD_IMPCAL1);
++ tmp &= ~P3D_RG_RX_IMPEL;
++ tmp |= P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp_ln1);
++ tmp |= P3D_RG_FORCE_RX_IMPEL;
++ writel(tmp, u3_banks->phyd + SSUSB_LN1_OFFSET + U3P_U3_PHYD_IMPCAL1);
++
++ tmp = readl(u3_banks->phya + SSUSB_LN1_OFFSET + U3P_U3_PHYA_REG0);
++ tmp &= ~P3A_RG_IEXT_INTR;
++ tmp |= P3A_RG_IEXT_INTR_VAL(instance->efuse_intr_ln1);
++ writel(tmp, u3_banks->phya + SSUSB_LN1_OFFSET + U3P_U3_PHYA_REG0);
++ pr_err("%s set LN1 efuse, tx_imp %x, rx_imp %x intr %x\n",
++ __func__, instance->efuse_tx_imp_ln1,
++ instance->efuse_rx_imp_ln1, instance->efuse_intr_ln1);
+ break;
+ default:
+ dev_warn(dev, "no sw efuse for type %d\n", instance->type);
+@@ -1197,6 +1328,8 @@ static struct phy *mtk_phy_xlate(struct device *dev,
+ phy_v1_banks_init(tphy, instance);
+ } else if (tphy->pdata->version == MTK_PHY_V2) {
+ phy_v2_banks_init(tphy, instance);
++ } else if (tphy->pdata->version == MTK_PHY_V4) {
++ phy_v4_banks_init(tphy, instance);
+ } else {
+ dev_err(dev, "phy version is not supported\n");
+ return ERR_PTR(-EINVAL);
+@@ -1247,12 +1380,19 @@ static const struct mtk_phy_pdata mt8195_pdata = {
+ .version = MTK_PHY_V3,
+ };
+
++static const struct mtk_phy_pdata tphy_v4_pdata = {
++ .avoid_rx_sen_degradation = false,
++ .sw_efuse_supported = true,
++ .version = MTK_PHY_V4,
++};
++
+ static const struct of_device_id mtk_tphy_id_table[] = {
+ { .compatible = "mediatek,mt2701-u3phy", .data = &tphy_v1_pdata },
+ { .compatible = "mediatek,mt2712-u3phy", .data = &tphy_v2_pdata },
+ { .compatible = "mediatek,mt8173-u3phy", .data = &mt8173_pdata },
+ { .compatible = "mediatek,generic-tphy-v1", .data = &tphy_v1_pdata },
+ { .compatible = "mediatek,generic-tphy-v2", .data = &tphy_v2_pdata },
++ { .compatible = "mediatek,generic-tphy-v4", .data = &tphy_v4_pdata },
+ { },
+ };
+ MODULE_DEVICE_TABLE(of, mtk_tphy_id_table);
+--
+2.18.0
+
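
Lane 1 of the 2-lane PCIe PHY reuses the lane-0 register layout at a fixed SSUSB_LN1_OFFSET, which is why the hunks above add the offset to the same bank pointers and field macros. A sketch with a hypothetical helper name:

/*
 * Hypothetical helper: lane 1 mirrors the lane-0 PHYD/PHYA layout at
 * SSUSB_LN1_OFFSET (0x10000), so the same U3P_U3_PHYD_* macros apply.
 */
static void __iomem *u3_phyd_lane_reg(struct u3phy_banks *u3_banks,
                                      unsigned int lane, u32 offset)
{
        return u3_banks->phyd + (lane ? SSUSB_LN1_OFFSET : 0) + offset;
}
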
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8008-phy-phy-mtk-tphy-add-auto-load-valid-check-mechanism.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8008-phy-phy-mtk-tphy-add-auto-load-valid-check-mechanism.patch
new file mode 100644
index 0000000..1223fb6
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/8008-phy-phy-mtk-tphy-add-auto-load-valid-check-mechanism.patch
@@ -0,0 +1,153 @@
+From 1d5819e90f2ef6dead11809744372a9863227a92 Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Tue, 25 Jan 2022 19:03:34 +0800
+Subject: [PATCH 5/5] phy: phy-mtk-tphy: add auto-load-valid check mechanism
+ support
+
+add auto-load-valid check mechanism support
+
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+---
+ drivers/phy/mediatek/phy-mtk-tphy.c | 67 +++++++++++++++++++++++++++--
+ 1 file changed, 64 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
+index 59d6ac3..4adc505 100644
+--- a/drivers/phy/mediatek/phy-mtk-tphy.c
++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
+@@ -345,9 +345,13 @@ struct mtk_phy_instance {
+ };
+ struct clk *ref_clk; /* reference clock of anolog phy */
+ u32 efuse_sw_en;
++ bool efuse_alv_en;
++ u32 efuse_autoloadvalid;
+ u32 efuse_intr;
+ u32 efuse_tx_imp;
+ u32 efuse_rx_imp;
++ bool efuse_alv_ln1_en;
++ u32 efuse_ln1_autoloadvalid;
+ u32 efuse_intr_ln1;
+ u32 efuse_tx_imp_ln1;
+ u32 efuse_rx_imp_ln1;
+@@ -980,6 +984,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
+ {
+ struct device *dev = &instance->phy->dev;
+ int ret = 0;
++ bool alv = false;
+
+ dev_err(dev, "try to get sw efuse\n");
+
+@@ -998,6 +1003,20 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
+
+ switch (instance->type) {
+ case PHY_TYPE_USB2:
++ alv = of_property_read_bool(dev->of_node, "auto_load_valid");
++ if (alv) {
++ instance->efuse_alv_en = alv;
++ ret = nvmem_cell_read_variable_le_u32(dev, "auto_load_valid",
++ &instance->efuse_autoloadvalid);
++ if (ret) {
++ dev_err(dev, "fail to get u2 alv efuse, %d\n", ret);
++ break;
++ }
++ dev_info(dev,
++ "u2 auto load valid efuse: ENABLE with value: %u\n",
++ instance->efuse_autoloadvalid);
++ }
++
+ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
+ if (ret) {
+ dev_err(dev, "fail to get u2 intr efuse, %d\n", ret);
+@@ -1015,6 +1034,20 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
+ break;
+ case PHY_TYPE_USB3:
+ case PHY_TYPE_PCIE:
++ alv = of_property_read_bool(dev->of_node, "auto_load_valid");
++ if (alv) {
++ instance->efuse_alv_en = alv;
++ ret = nvmem_cell_read_variable_le_u32(dev, "auto_load_valid",
++ &instance->efuse_autoloadvalid);
++ if (ret) {
++ dev_err(dev, "fail to get u3(pcei) alv efuse, %d\n", ret);
++ break;
++ }
++ dev_info(dev,
++ "u3 auto load valid efuse: ENABLE with value: %u\n",
++ instance->efuse_autoloadvalid);
++ }
++
+ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
+ if (ret) {
+ dev_err(dev, "fail to get u3 intr efuse, %d\n", ret);
+@@ -1049,6 +1082,20 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc
+ if (tphy->pdata->version != MTK_PHY_V4)
+ break;
+
++ alv = of_property_read_bool(dev->of_node, "auto_load_valid_ln1");
++ if (alv) {
++ instance->efuse_alv_ln1_en = alv;
++ ret = nvmem_cell_read_variable_le_u32(dev, "auto_load_valid_ln1",
++ &instance->efuse_ln1_autoloadvalid);
++ if (ret) {
++ dev_err(dev, "fail to get pcie auto_load_valid efuse, %d\n", ret);
++ break;
++ }
++ dev_info(dev,
++ "pcie auto load valid efuse: ENABLE with value: %u\n",
++ instance->efuse_ln1_autoloadvalid);
++ }
++
+ ret = nvmem_cell_read_variable_le_u32(dev, "intr_ln1", &instance->efuse_intr_ln1);
+ if (ret) {
+ dev_err(dev, "fail to get u3 lane1 intr efuse, %d\n", ret);
+@@ -1100,6 +1147,10 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
+
+ switch (instance->type) {
+ case PHY_TYPE_USB2:
++ if (instance->efuse_alv_en &&
++ instance->efuse_autoloadvalid == 1)
++ break;
++
+ tmp = readl(u2_banks->misc + U3P_MISC_REG1);
+ tmp |= MR1_EFUSE_AUTO_LOAD_DIS;
+ writel(tmp, u2_banks->misc + U3P_MISC_REG1);
+@@ -1112,6 +1163,10 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
+
+ break;
+ case PHY_TYPE_USB3:
++ if (instance->efuse_alv_en &&
++ instance->efuse_autoloadvalid == 1)
++ break;
++
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RSV);
+ tmp |= P3D_RG_EFUSE_AUTO_LOAD_DIS;
+ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RSV);
+@@ -1138,6 +1193,10 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
+
+ break;
+ case PHY_TYPE_PCIE:
++ if (instance->efuse_alv_en &&
++ instance->efuse_autoloadvalid == 1)
++ break;
++
+ tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RSV);
+ tmp |= P3D_RG_EFUSE_AUTO_LOAD_DIS;
+ writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RSV);
+@@ -1162,9 +1221,11 @@ static void phy_efuse_set(struct mtk_phy_instance *instance)
+ __func__, instance->efuse_tx_imp,
+ instance->efuse_rx_imp, instance->efuse_intr);
+
+- if (!instance->efuse_intr_ln1 &&
+- !instance->efuse_rx_imp_ln1 &&
+- !instance->efuse_tx_imp_ln1)
++ if ((!instance->efuse_intr_ln1 &&
++ !instance->efuse_rx_imp_ln1 &&
++ !instance->efuse_tx_imp_ln1) ||
++ (instance->efuse_alv_ln1_en &&
++ instance->efuse_ln1_autoloadvalid == 1))
+ break;
+
+ tmp = readl(u3_banks->phyd + SSUSB_LN1_OFFSET + U3P_U3_PHYD_RSV);
+--
+2.18.0
+
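
The net effect of the auto-load-valid checks is a single gate in front of the software efuse writes. A compact sketch of that condition for the lane-0 path; the predicate name is made up, the fields are the ones added to struct mtk_phy_instance:

/* Hypothetical predicate summarising the gating added above. */
static bool tphy_sw_efuse_needed(struct mtk_phy_instance *instance)
{
        if (!instance->efuse_sw_en)
                return false;

        /* fuse block already auto-loaded valid values, nothing to do */
        if (instance->efuse_alv_en && instance->efuse_autoloadvalid == 1)
                return false;

        return true;
}
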
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9001-PATCH-1-2-xHCI-MT7986-USB-2.0-USBIF-compliance-toolkit.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9001-PATCH-1-2-xHCI-MT7986-USB-2.0-USBIF-compliance-toolkit.patch
new file mode 100644
index 0000000..738d9b2
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9001-PATCH-1-2-xHCI-MT7986-USB-2.0-USBIF-compliance-toolkit.patch
@@ -0,0 +1,134 @@
+From b4048b5efd1ac39f85d86dedbf54a9b614d17d64 Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Thu, 27 May 2021 11:44:17 +0800
+Subject: [PATCH 1/2] xHCI: MT7986 USB 2.0 USBIF compliance toolkit
+
+MT7986 USB 2.0 USBIF compliance toolkit
+
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+---
+ drivers/usb/host/Kconfig | 9 +++++++++
+ drivers/usb/host/Makefile | 8 ++++++++
+ drivers/usb/host/xhci-mtk.c | 5 ++++-
+ drivers/usb/host/xhci-mtk.h | 7 +++++++
+ drivers/usb/host/xhci.c | 2 +-
+ drivers/usb/host/xhci.h | 1 +
+ 6 files changed, 30 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
+index 79b2e79dddd0..12b1bf9aa043 100644
+--- a/drivers/usb/host/Kconfig
++++ b/drivers/usb/host/Kconfig
+@@ -69,6 +69,15 @@ config USB_XHCI_MTK
+ found in MediaTek SoCs.
+ If unsure, say N.
+
++config USB_XHCI_MTK_DEBUGFS
++ tristate "xHCI DEBUGFS support for Mediatek MT65xx"
++ depends on USB_XHCI_MTK && DEBUG_FS
++ default y
++ ---help---
++ Say 'Y' to enable the debugfs support for the xHCI host controller
++ found in Mediatek MT65xx SoCs.
++ If don't need, say N.
++
+ config USB_XHCI_MVEBU
+ tristate "xHCI support for Marvell Armada 375/38x/37xx"
+ select USB_XHCI_PLATFORM
+diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
+index b191361257cc..704237831a58 100644
+--- a/drivers/usb/host/Makefile
++++ b/drivers/usb/host/Makefile
+@@ -21,6 +21,14 @@ endif
+
+ ifneq ($(CONFIG_USB_XHCI_MTK), )
+ xhci-hcd-y += xhci-mtk-sch.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-test.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-unusual.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-intr-en.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-vrt-vref.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-term-vref.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-hstx-srctrl.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-discth.o
++ xhci-hcd-$(CONFIG_USB_XHCI_MTK_DEBUGFS) += xhci-mtk-chgdt-en.o
+ endif
+
+ xhci-plat-hcd-y := xhci-plat.o
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index 5c0eb35cd007..8bd4c95a5435 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -18,9 +18,10 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/regmap.h>
+ #include <linux/regulator/consumer.h>
+-
++#include <linux/usb/of.h>
+ #include "xhci.h"
+ #include "xhci-mtk.h"
++#include "xhci-mtk-test.h"
+
+ /* ip_pw_ctrl0 register */
+ #define CTRL0_IP_SW_RST BIT(0)
+@@ -570,6 +571,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto dealloc_usb2_hcd;
++ hqa_create_attr(dev);
+
+ return 0;
+
+@@ -604,6 +606,7 @@ static int xhci_mtk_remove(struct platform_device *dev)
+ struct usb_hcd *hcd = mtk->hcd;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct usb_hcd *shared_hcd = xhci->shared_hcd;
++ hqa_remove_attr(&dev->dev);
+
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
+index 985e7a19f6f6..1540c66799d7 100644
+--- a/drivers/usb/host/xhci-mtk.h
++++ b/drivers/usb/host/xhci-mtk.h
+@@ -158,6 +158,13 @@ struct xhci_hcd_mtk {
+ struct regmap *uwk;
+ u32 uwk_reg_base;
+ u32 uwk_vers;
++
++#ifdef CONFIG_USB_XHCI_MTK_DEBUGFS
++ int test_mode;
++ size_t hqa_size;
++ u32 hqa_pos;
++ char *hqa_buf;
++#endif
+ };
+
+ static inline struct xhci_hcd_mtk *hcd_to_mtk(struct usb_hcd *hcd)
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 4bb850370bb6..710ccbe5a3b8 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -713,7 +713,7 @@ EXPORT_SYMBOL_GPL(xhci_run);
+ * Disable device contexts, disable IRQs, and quiesce the HC.
+ * Reset the HC, finish any completed transactions, and cleanup memory.
+ */
+-static void xhci_stop(struct usb_hcd *hcd)
++void xhci_stop(struct usb_hcd *hcd)
+ {
+ u32 temp;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 02df309e4409..3af400068324 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -2067,6 +2067,7 @@ int xhci_halt(struct xhci_hcd *xhci);
+ int xhci_start(struct xhci_hcd *xhci);
+ int xhci_reset(struct xhci_hcd *xhci);
+ int xhci_run(struct usb_hcd *hcd);
++void xhci_stop(struct usb_hcd *hcd);
+ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
+ void xhci_shutdown(struct usb_hcd *hcd);
+ void xhci_init_driver(struct hc_driver *drv,
+--
+2.18.0
+
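Note on enabling the toolkit above: CONFIG_USB_XHCI_MTK_DEBUGFS only builds the extra xhci-mtk-*.o objects when both USB_XHCI_MTK and DEBUG_FS are selected. A minimal kernel config fragment is sketched below; the fragment file name is illustrative and not part of this change.

    # usbif-compliance.cfg (illustrative fragment name)
    CONFIG_DEBUG_FS=y
    CONFIG_USB_XHCI_HCD=y
    CONFIG_USB_XHCI_MTK=y
    CONFIG_USB_XHCI_MTK_DEBUGFS=y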
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9002-PATCH-1-1-usb-add-embedded-Host-feature-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9002-PATCH-1-1-usb-add-embedded-Host-feature-support.patch
new file mode 100644
index 0000000..356ae99
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9002-PATCH-1-1-usb-add-embedded-Host-feature-support.patch
@@ -0,0 +1,124 @@
+From 801da3c9fd916d3743b8af174f4ef4aefc071981 Mon Sep 17 00:00:00 2001
+From: Zhanyong Wang <zhanyong.wang@mediatek.com>
+Date: Thu, 17 Jun 2021 16:09:04 +0800
+Subject: [PATCH 2/2] usb: add embedded Host feature support
+
+Add EH (Embedded Host) feature support for PET authentication:
+1. CONFIG_USB_OTG_WHITELIST needs to be enabled:
+ CONFIG_USB_OTG_WHITELIST=y
+
+2. the host device-tree node needs to include the "tpl-support" property:
+ &xhci {
+ tpl-support;
+ };
+
+Signed-off-by: Zhanyong Wang <zhanyong.wang@mediatek.com>
+---
+ drivers/usb/core/hub.c | 9 +++++---
+ drivers/usb/core/otg_whitelist.h | 39 ++++++++++++++++++++++++++++++++
+ drivers/usb/host/xhci-mtk.c | 2 ++
+ 3 files changed, 47 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 303e8b3c1bda..b8c96ac26886 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2419,6 +2419,8 @@ static int usb_enumerate_device(struct usb_device *udev)
+ if (err < 0)
+ dev_dbg(&udev->dev, "HNP fail, %d\n", err);
+ }
++
++ dev_info(&udev->dev, "Unsupported Device!\n");
+ return -ENOTSUPP;
+ }
+
+@@ -4778,9 +4780,10 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ goto fail;
+ }
+ if (r) {
+- if (r != -ENODEV)
+- dev_err(&udev->dev, "device descriptor read/64, error %d\n",
+- r);
++ if (r != -ENODEV) {
++ dev_err(&udev->dev, "device descriptor read/64, error %d\n", r);
++ dev_info(&udev->dev, "Device No Respond\n");
++ }
+ retval = -EMSGSIZE;
+ continue;
+ }
+diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
+index 2ae90158ded7..a8dd221334c1 100644
+--- a/drivers/usb/core/otg_whitelist.h
++++ b/drivers/usb/core/otg_whitelist.h
+@@ -39,9 +39,44 @@ static struct usb_device_id whitelist_table[] = {
+ { USB_DEVICE(0x0525, 0xa4a0), },
+ #endif
+
++/* xhci-mtk usb3 root-hub */
++{ USB_DEVICE(0x1d6b, 0x0003), },
++
++/* xhci-mtk usb2 root-hub */
++{ USB_DEVICE(0x1d6b, 0x0002), },
++
++/* any device exposing a mass-storage class interface */
++{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, 0, 0) },
++
+ { } /* Terminating entry */
+ };
+
++static bool usb_match_any_interface(struct usb_device *udev,
++ const struct usb_device_id *id)
++{
++ unsigned int i;
++
++ for (i = 0; i < udev->descriptor.bNumConfigurations; ++i) {
++ struct usb_host_config *cfg = &udev->config[i];
++ unsigned int j;
++
++ for (j = 0; j < cfg->desc.bNumInterfaces; ++j) {
++ struct usb_interface_cache *cache;
++ struct usb_host_interface *intf;
++
++ cache = cfg->intf_cache[j];
++ if (cache->num_altsetting == 0)
++ continue;
++
++ intf = &cache->altsetting[0];
++ if (id->bInterfaceClass == intf->desc.bInterfaceClass)
++ return true;
++ }
++ }
++
++ return false;
++}
++
+ static int is_targeted(struct usb_device *dev)
+ {
+ struct usb_device_id *id = whitelist_table;
+@@ -90,6 +125,10 @@ static int is_targeted(struct usb_device *dev)
+ (id->bDeviceProtocol != dev->descriptor.bDeviceProtocol))
+ continue;
+
++ if ((id->match_flags & USB_DEVICE_ID_MATCH_INT_INFO) &&
++ !usb_match_any_interface(dev, id))
++ continue;
++
+ return 1;
+ }
+
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index 8bd4c95a5435..876e134a01b4 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -560,6 +560,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
+ goto disable_device_wakeup;
+ }
+
++ hcd->tpl_support = of_usb_host_tpl_support(node);
++ xhci->shared_hcd->tpl_support = hcd->tpl_support;
+ ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (ret)
+ goto put_usb3_hcd;
+--
+2.18.0
+
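With CONFIG_USB_OTG_WHITELIST enabled, is_targeted() only accepts devices matched by whitelist_table, so the hunk above admits the MediaTek xHCI root hubs plus any device that exposes a mass-storage class interface. If the targeted peripheral list has to cover another class, one more entry in the same style is enough; the HID entry below is purely illustrative and not part of this change.

    /* entries added by this patch */
    { USB_DEVICE(0x1d6b, 0x0003), }, /* xhci-mtk usb3 root-hub */
    { USB_DEVICE(0x1d6b, 0x0002), }, /* xhci-mtk usb2 root-hub */
    { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, 0, 0) },

    /* hypothetical extension: also target HID-class devices */
    { USB_INTERFACE_INFO(USB_CLASS_HID, 0, 0) },

    { } /* Terminating entry */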
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9009-Add-spi-runtime-PM-support.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9009-Add-spi-runtime-PM-support.patch
new file mode 100644
index 0000000..8371b57
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9009-Add-spi-runtime-PM-support.patch
@@ -0,0 +1,198 @@
+From 0c1e4af01506c913cc54e63f66bb5470f50790c7 Mon Sep 17 00:00:00 2001
+From: Leilk Liu <leilk.liu@mediatek.com>
+Date: Tue, 13 Jul 2021 21:45:59 +0800
+Subject: [PATCH] [Add spi runtime PM support]
+
+[Description]
+Add the ahb clock and enable runtime PM
+
+[Release-log]
+N/A
+
+Change-Id: I0529f6e829f5fc4c5880508971c97b9434820340
+Signed-off-by: Leilk Liu <leilk.liu@mediatek.com>
+---
+ drivers/spi/spi-mt65xx.c | 77 ++++++++++++++++++++++++++++++++++------
+ 1 file changed, 67 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
+index 7e54984..ff2d825 100644
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -119,6 +119,8 @@ struct mtk_spi_compatible {
+ /* the IPM IP design improve some feature, and support dual/quad mode */
+ bool ipm_design;
+ bool support_quad;
++ /* some IC ahb & apb clk is different and also need to be enabled */
++ bool need_ahb_clk;
+ };
+
+ struct mtk_spi {
+@@ -126,7 +128,7 @@ struct mtk_spi {
+ u32 state;
+ int pad_num;
+ u32 *pad_sel;
+- struct clk *parent_clk, *sel_clk, *spi_clk;
++ struct clk *parent_clk, *sel_clk, *spi_clk, *spi_hclk;
+ struct spi_transfer *cur_transfer;
+ u32 xfer_len;
+ u32 num_xfered;
+@@ -147,12 +149,21 @@ static const struct mtk_spi_compatible mt2712_compat = {
+ .must_tx = true,
+ };
+
+-static const struct mtk_spi_compatible ipm_compat = {
++static const struct mtk_spi_compatible ipm_compat_single = {
++ .must_tx = true,
++ .enhance_timing = true,
++ .dma_ext = true,
++ .ipm_design = true,
++ .need_ahb_clk = true,
++};
++
++static const struct mtk_spi_compatible ipm_compat_quad = {
+ .must_tx = true,
+ .enhance_timing = true,
+ .dma_ext = true,
+ .ipm_design = true,
+ .support_quad = true,
++ .need_ahb_clk = true,
+ };
+
+ static const struct mtk_spi_compatible mt6765_compat = {
+@@ -188,8 +199,11 @@ static const struct mtk_chip_config mtk_default_chip_info = {
+ };
+
+ static const struct of_device_id mtk_spi_of_match[] = {
+- { .compatible = "mediatek,ipm-spi",
+- .data = (void *)&ipm_compat,
++ { .compatible = "mediatek,ipm-spi-single",
++ .data = (void *)&ipm_compat_single,
++ },
++ { .compatible = "mediatek,ipm-spi-quad",
++ .data = (void *)&ipm_compat_quad,
+ },
+ { .compatible = "mediatek,mt2701-spi",
+ .data = (void *)&mtk_common_compat,
+@@ -992,7 +1006,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ return -ENOMEM;
+ }
+
+-// master->auto_runtime_pm = true;
++ master->auto_runtime_pm = true;
+ master->dev.of_node = pdev->dev.of_node;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+@@ -1106,22 +1120,40 @@ static int mtk_spi_probe(struct platform_device *pdev)
+ goto err_put_master;
+ }
+
++ if (mdata->dev_comp->need_ahb_clk) {
++ mdata->spi_hclk = devm_clk_get(&pdev->dev, "spi-hclk");
++ if (IS_ERR(mdata->spi_hclk)) {
++ ret = PTR_ERR(mdata->spi_hclk);
++ dev_err(&pdev->dev, "failed to get spi-hclk: %d\n", ret);
++ goto err_put_master;
++ }
++
++ ret = clk_prepare_enable(mdata->spi_hclk);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "failed to enable spi_hclk (%d)\n", ret);
++ goto err_put_master;
++ }
++ }
++
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
+ goto err_put_master;
+ }
+
+- /*ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
++ ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
+ clk_disable_unprepare(mdata->spi_clk);
+ goto err_put_master;
+ }
+
+- clk_disable_unprepare(mdata->sel_clk);*/
++ clk_disable_unprepare(mdata->spi_clk);
++
++ if (mdata->dev_comp->need_ahb_clk)
++ clk_disable_unprepare(mdata->spi_hclk);
+
+- //pm_runtime_enable(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+@@ -1201,8 +1233,11 @@ static int mtk_spi_suspend(struct device *dev)
+ if (ret)
+ return ret;
+
+- if (!pm_runtime_suspended(dev))
++ if (!pm_runtime_suspended(dev)) {
+ clk_disable_unprepare(mdata->spi_clk);
++ if (mdata->dev_comp->need_ahb_clk)
++ clk_disable_unprepare(mdata->spi_hclk);
++ }
+
+ return ret;
+ }
+@@ -1214,6 +1249,14 @@ static int mtk_spi_resume(struct device *dev)
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+
+ if (!pm_runtime_suspended(dev)) {
++ if (mdata->dev_comp->need_ahb_clk) {
++ ret = clk_prepare_enable(mdata->spi_hclk);
++ if (ret < 0) {
++ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
++ return ret;
++ }
++ }
++
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+@@ -1222,8 +1265,11 @@ static int mtk_spi_resume(struct device *dev)
+ }
+
+ ret = spi_master_resume(master);
+- if (ret < 0)
++ if (ret < 0) {
+ clk_disable_unprepare(mdata->spi_clk);
++ if (mdata->dev_comp->need_ahb_clk)
++ clk_disable_unprepare(mdata->spi_hclk);
++ }
+
+ return ret;
+ }
+@@ -1237,6 +1283,9 @@ static int mtk_spi_runtime_suspend(struct device *dev)
+
+ clk_disable_unprepare(mdata->spi_clk);
+
++ if (mdata->dev_comp->need_ahb_clk)
++ clk_disable_unprepare(mdata->spi_hclk);
++
+ return 0;
+ }
+
+@@ -1246,6 +1295,14 @@ static int mtk_spi_runtime_resume(struct device *dev)
+ struct mtk_spi *mdata = spi_master_get_devdata(master);
+ int ret;
+
++ if (mdata->dev_comp->need_ahb_clk) {
++ ret = clk_prepare_enable(mdata->spi_hclk);
++ if (ret < 0) {
++ dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
++ return ret;
++ }
++ }
++
+ ret = clk_prepare_enable(mdata->spi_clk);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+--
+2.18.0
+
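Boards that switch to the new "mediatek,ipm-spi-single" or "mediatek,ipm-spi-quad" compatibles have to provide the extra bus clock the driver now requests as "spi-hclk", next to the existing "parent-clk", "sel-clk" and "spi-clk" inputs. A rough device-tree sketch follows; the unit address and the clock phandles/sources are placeholders, not taken from this change.

    spi0: spi@1100a000 {
        compatible = "mediatek,ipm-spi-quad";
        reg = <0x1100a000 0x100>;
        clocks = <&topckgen CLK_TOP_MPLL_D2>,      /* placeholder sources */
                 <&topckgen CLK_TOP_SPI_SEL>,
                 <&infracfg CLK_INFRA_SPI0_CK>,
                 <&infracfg CLK_INFRA_SPI0_HCK_CK>;
        clock-names = "parent-clk", "sel-clk", "spi-clk", "spi-hclk";
        #address-cells = <1>;
        #size-cells = <0>;
    };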
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9010-iwconfig-wireless-rate-fix.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9010-iwconfig-wireless-rate-fix.patch
new file mode 100644
index 0000000..b29e4cc
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9010-iwconfig-wireless-rate-fix.patch
@@ -0,0 +1,20 @@
+--- a/include/uapi/linux/wireless.h
++++ b/include/uapi/linux/wireless.h
+@@ -678,7 +678,7 @@
+ * Generic format for most parameters that fit in an int
+ */
+ struct iw_param {
+- __s32 value; /* The value of the parameter itself */
++ __u64 value; /* The value of the parameter itself */
+ __u8 fixed; /* Hardware should not use auto select */
+ __u8 disabled; /* Disable the feature */
+ __u16 flags; /* Various specifc flags (if any) */
+@@ -1002,7 +1002,7 @@ struct iw_range {
+
+ /* Rates */
+ __u8 num_bitrates; /* Number of entries in the list */
+- __s32 bitrate[IW_MAX_BITRATES]; /* list, in bps */
++ __u64 bitrate[IW_MAX_BITRATES]; /* list, in bps */
+
+ /* RTS threshold */
+ __s32 min_rts; /* Minimal RTS threshold */
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/999-add_armv7_support_for_panther.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/999-add_armv7_support_for_panther.patch
new file mode 100644
index 0000000..24dadf3
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/999-add_armv7_support_for_panther.patch
@@ -0,0 +1,14 @@
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 8a50efb..f601368 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -576,6 +576,8 @@ config ARCH_MULTI_V7
+ select ARCH_MULTI_V6_V7
+ select CPU_V7
+ select HAVE_SMP
++ select ARM_GIC_V3
++ select HAVE_ARM_ARCH_TIMER
+
+ config ARCH_MULTI_V6_V7
+ bool
+
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9999-null-test.patch b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9999-null-test.patch
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/9999-null-test.patch
diff --git a/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc
new file mode 100644
index 0000000..2fbfbf9
--- /dev/null
+++ b/recipes-kernel/linux/linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc
@@ -0,0 +1,101 @@
+# patches-5.4 patch set (from openwrt/lede/target/linux/mediatek)
+SRC_URI_append = " \
+ file://0001-clk-mtk-add-mt7986-support.patch \
+ file://0001-v5.7-spi-make-spi-max-frequency-optional.patch \
+ file://0002-clk-mtk-add-mt7981-support.patch \
+ file://0002-v5.7-spi-add-support-for-mediatek-spi-nor-controller.patch \
+ file://0003-switch-add-mt7531.patch \
+ file://0005-dts-mt7622-add-gsw.patch \
+ file://0005-dts-mt7629-add-gsw.patch \
+ file://0006-dts-fix-bpi2-console.patch \
+ file://0006-dts-fix-bpi64-console.patch \
+ file://0010-dts-mt7629-rfb-fix-firmware-partition.patch \
+ file://0020-dts-mt7622-enable-new-mtk-snand-for-ubi.patch \
+ file://0021-dts-mt7622-remove-cooling-device.patch \
+ file://0100-hwnat_Kconfig_Makefile.patch \
+ file://0101-add-mtk-wifi-utility-rbus.patch \
+ file://0111-mt7986-trng-add-rng-support.patch \
+ file://0200-show_model_name_in_cpuinfo_on_arm64.patch \
+ file://0226-phy-phy-mtk-tphy-Add-hifsys-support.patch \
+ file://0227-arm-dts-Add-Unielec-U7623-DTS.patch \
+ file://0301-mtd-mtk-ecc-move-mtk-ecc-header-file-to-include-mtd.patch \
+ file://0306-spi-spi-mem-MediaTek-Add-SPI-NAND-Flash-interface-dr.patch \
+ file://0307-dts-mt7629-add-snand-support.patch \
+ file://0308-dts-mt7622-add-snand-support.patch \
+ file://0310-dts-add-wmac-support-for-mt7622-rfb1.patch \
+ file://0400-sound-add-some-helpers-to-control-mtk_memif.patch \
+ file://0401-sound-refine-hw-params-and-hw-prepare.patch \
+ file://0402-sound-add-mt7986-driver-and-slic-driver.patch \
+ file://0500-v5.6-crypto-backport-inside-secure.patch \
+ file://0501-crypto-add-eip97-inside-secure-support.patch \
+ file://0502-dts-mt7623-eip97-inside-secure-support.patch \
+ file://0503-crypto-fix-eip97-cache-incoherent.patch \
+ file://0504-macsec-revert-async-support.patch \
+ file://0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch \
+ file://0601-net-dsa-propagate-resolved-link-config-via-mac_link_.patch \
+ file://0602-net-dsa-mt7530-use-resolved-link-config-in-mac_link_.patch \
+ file://0603-net-dsa-mt7530-Extend-device-data-ready-for-adding-a.patch \
+ file://0604-net-dsa-mt7530-Add-the-support-of-MT7531-switch.patch \
+ file://0605-arm64-dts-mt7622-add-mt7531-dsa-to-bananapi-bpi-r64-board.patch \
+ file://0666-add-spimem-support-to-mtk-spi.patch \
+ file://0666-spi-mtk-nor-fix-timeout-calculation-overflow.patch \
+ file://0667-spi-mediatek-fix-timeout-for-large-data.patch \
+ file://0668-spi-mediatek-fix-dma-unmap-twice.patch \
+ file://0669-fix-SPIM-NAND-and-NOR-probing.patch \
+ file://0670-fix-SPIM-dma-buffer-not-aligned.patch \
+ file://0671-add-micron-MT29F4G01ABAFD-spi-nand-support.patch \
+ file://0672-add-F50L1G41LB-and-GD5F1GQ5UExxG-snand-support.patch \
+ file://0701-fix-mtk-nfi-driver-dependency.patch \
+ file://0801-mtk-sd-add-mt7986-support.patch \
+ file://0900-bt-mtk-serial-fix.patch \
+ file://0900-i2c-busses-add-mt7986-support.patch \
+ file://0901-i2c-busses-add-mt7981-support.patch \
+ file://0930-pwm-add-mt7986-support.patch \
+ file://0931-pwm-add-mt7981-support.patch \
+ file://0960-watchdog-add-mt7986-assert.patch \
+ file://0990-gsw-rtl8367s-mt7622-support.patch \
+ file://0991-dt-bindings-PCI-Mediatek-Update-PCIe-binding.patch \
+ file://0992-PCI-mediatek-Use-regmap-to-get-shared-pcie-cfg-base.patch \
+ file://0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch \
+ file://0994-ARM-dts-mediatek-Update-mt7629-PCIe-node.patch \
+ file://1001-mtkhnat-ipv6-fix-pskb-expand-head-limitation.patch \
+ file://1002-mtkhnat-add-support-for-virtual-interface-acceleration.patch \
+ file://1003-dts-mt7622-rfb-change-to-ax-mtd-layout.patch \
+ file://1004_remove_eth_transmit_timeout_hw_reset.patch \
+ file://1005-mtkhnat-fix-pse-hang-for-multi-stations.patch \
+ file://1010-pcie-mediatek-fix-clearing-interrupt-status.patch \
+ file://1015-pcie-add-pcie-gen3-upstream-driver.patch \
+ file://1020-spi-nor-w25q512jv.patch \
+ file://1021-ubnt-ledbar-driver.patch \
+ file://1023-kgdb-add-interrupt-control.patch \
+ file://1024-pcie-add-multi-MSI-support.patch \
+ file://1661-Add-trngv2-driver-support.patch \
+ file://2000-misc-add-mtk-platform.patch \
+ file://400-mtd-add-mtk-snand-driver.patch \
+ file://401-pinctrl-add-mt7986-driver.patch \
+ file://402-pinctrl-add-mt7981-driver.patch \
+ file://500-auxadc-add-auxadc-32k-clk.patch \
+ file://730-net-ethernet-mtk_eth_soc-add-mtk-dsa-tag-rx-offload.patch \
+ file://738-mt7531-gsw-internal_phy_calibration.patch \
+ file://739-mt7531-gsw-port5_external_phy_init.patch \
+ file://740-add-gpy211-phy-support.patch \
+ file://741-add-default-setting-to-dsa-unused-port.patch \
+ file://742-net-dsa-add-MT7531-Gigabit-Ethernet-PHY-setting.patch \
+ file://743-add-mediatek-ge-gphy-support.patch \
+ file://744-en8801s-gphy-support.patch \
+ file://8000-PATCH-1-4-tphy-support-type-switch-by-pericfg.patch \
+ file://8001-PATCH-2-4-dt-bindings-phy-Add-PHY_TYPE_DP-definition.patch \
+ file://8002-PATCH-3-4-dt-bindings-phy-Add-PHY_TYPE_XPCS-definition.patch \
+ file://8003-PATCH-4-4-dt-bindings-phy-Add-DT-bindings-for-Xilinx-ZynqMP-PS.patch \
+ file://8004-nvmem-core-Add-functions-to-make-number-reading-easy.patch \
+ file://8005-nvmem-mtk-efuse-support-minimum-one-byte-access-stri.patch \
+ file://8006-phy-phy-mtk-tphy-add-support-efuse-setting.patch \
+ file://8007-phy-phy-mtk-tphy-Add-PCIe-2-lane-efuse-support.patch \
+ file://8008-phy-phy-mtk-tphy-add-auto-load-valid-check-mechanism.patch \
+ file://9001-PATCH-1-2-xHCI-MT7986-USB-2.0-USBIF-compliance-toolkit.patch \
+ file://9002-PATCH-1-1-usb-add-embedded-Host-feature-support.patch \
+ file://9009-Add-spi-runtime-PM-support.patch \
+ file://9010-iwconfig-wireless-rate-fix.patch \
+ file://999-add_armv7_support_for_panther.patch \
+ file://9999-null-test.patch \
+ "
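The .inc file above only extends SRC_URI, so the kernel recipe still has to pull it in and make the patch directory searchable for the file:// entries. One possible hook-up is sketched below; the recipe name and layer-relative paths are assumptions rather than part of this change, and the old-style _prepend override syntax matches the dunfell target of this BSP.

    # recipes-kernel/linux/linux-mediatek_5.4.bb (illustrative excerpt)
    FILESEXTRAPATHS_prepend := "${THISDIR}/linux-mediatek-5.4/mediatek/patches-5.4:"
    require linux-mediatek-5.4/mediatek/patches-5.4/patches-5.4.inc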