[][Change spi driver]

[Description]
Update the SPI controller driver to align with the upstream kernel code:
- switch chip-select handling from the legacy of_gpio API to GPIO descriptors
- merge the IPM single/quad variants into one "mediatek,spi-ipm" compatible
  and add mt8192/mt6893 entries
- program the get_tick_dly field for both enhanced-timing and legacy (V1)
  controllers
- rework clock handling (optional spi-hclk, cached spi-clk rate,
  no_need_unprepare support) and clean up the spi-mem error paths

[Release-log]
N/A

Change-Id: I4c40e2600ac51e77b9d57504e6cf57e65bfa4d42
Reviewed-on: https://gerrit.mediatek.inc/c/openwrt/feeds/mtk_openwrt_feeds/+/7195779
diff --git a/target/linux/mediatek/patches-5.4/9102-spi-update-driver.patch b/target/linux/mediatek/patches-5.4/9102-spi-update-driver.patch
new file mode 100644
index 0000000..4900733
--- /dev/null
+++ b/target/linux/mediatek/patches-5.4/9102-spi-update-driver.patch
@@ -0,0 +1,829 @@
+--- a/drivers/spi/spi-mt65xx.c
++++ b/drivers/spi/spi-mt65xx.c
+@@ -12,7 +12,7 @@
+ #include <linux/ioport.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+-#include <linux/of_gpio.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/spi/spi.h>
+@@ -43,9 +43,11 @@
+ #define SPI_CFG1_CS_IDLE_OFFSET           0
+ #define SPI_CFG1_PACKET_LOOP_OFFSET       8
+ #define SPI_CFG1_PACKET_LENGTH_OFFSET     16
+-#define SPI_CFG1_GET_TICKDLY_OFFSET       29
++#define SPI_CFG1_GET_TICK_DLY_OFFSET	29
++#define SPI_CFG1_GET_TICK_DLY_OFFSET_V1	30
+ 
+-#define SPI_CFG1_GET_TICKDLY_MASK	  GENMASK(31, 29)
++#define SPI_CFG1_GET_TICK_DLY_MASK	0xe0000000
++#define SPI_CFG1_GET_TICK_DLY_MASK_V1	0xc0000000
+ #define SPI_CFG1_CS_IDLE_MASK             0xff
+ #define SPI_CFG1_PACKET_LOOP_MASK         0xff00
+ #define SPI_CFG1_PACKET_LENGTH_MASK       0x3ff0000
+@@ -78,7 +80,6 @@
+ 
+ #define PIN_MODE_CFG(x)	((x) / 2)
+ 
+-#define SPI_CFG3_IPM_PIN_MODE_OFFSET		0
+ #define SPI_CFG3_IPM_HALF_DUPLEX_DIR		BIT(2)
+ #define SPI_CFG3_IPM_HALF_DUPLEX_EN		BIT(3)
+ #define SPI_CFG3_IPM_XMODE_EN			BIT(4)
+@@ -94,14 +95,14 @@
+ 
+ #define MTK_SPI_PAUSE_INT_STATUS 0x2
+ 
+-#define MTK_SPI_IDLE 0
+-#define MTK_SPI_PAUSED 1
+-
+ #define MTK_SPI_MAX_FIFO_SIZE 32U
+ #define MTK_SPI_PACKET_SIZE 1024
+ #define MTK_SPI_IPM_PACKET_SIZE SZ_64K
+ #define MTK_SPI_IPM_PACKET_LOOP SZ_256
+ 
++#define MTK_SPI_IDLE			0
++#define MTK_SPI_PAUSED			1
++
+ #define MTK_SPI_32BITS_MASK  (0xffffffff)
+ 
+ #define DMA_ADDR_EXT_BITS (36)
+@@ -115,11 +116,8 @@ struct mtk_spi_compatible {
+ 	bool enhance_timing;
+ 	/* some IC support DMA addr extension */
+ 	bool dma_ext;
+-	/* the IPM IP design improve some feature, and support dual/quad mode */
++	bool no_need_unprepare;
+ 	bool ipm_design;
+-	bool support_quad;
+-	/* some IC ahb & apb clk is different and also need to be enabled */
+-	bool need_ahb_clk;
+ };
+ 
+ struct mtk_spi_config {
+@@ -140,7 +138,7 @@ struct mtk_spi {
+ 	u32 tx_sgl_len, rx_sgl_len;
+ 	const struct mtk_spi_compatible *dev_comp;
+ 	struct mtk_spi_config dev_config;
+-
++	u32 spi_clk_hz;
+ 	struct completion spimem_done;
+ 	bool use_spimem;
+ 	struct device *dev;
+@@ -154,21 +152,10 @@ static const struct mtk_spi_compatible m
+ 	.must_tx = true,
+ };
+ 
+-static const struct mtk_spi_compatible ipm_compat_single = {
+-	.must_tx = true,
++static const struct mtk_spi_compatible mtk_ipm_compat = {
+ 	.enhance_timing = true,
+ 	.dma_ext = true,
+ 	.ipm_design = true,
+-	.need_ahb_clk = true,
+-};
+-
+-static const struct mtk_spi_compatible ipm_compat_quad = {
+-	.must_tx = true,
+-	.enhance_timing = true,
+-	.dma_ext = true,
+-	.ipm_design = true,
+-	.support_quad = true,
+-	.need_ahb_clk = true,
+ };
+ 
+ static const struct mtk_spi_compatible mt6765_compat = {
+@@ -194,13 +181,25 @@ static const struct mtk_spi_compatible m
+ 	.enhance_timing = true,
+ };
+ 
++static const struct mtk_spi_compatible mt6893_compat = {
++	.need_pad_sel = true,
++	.must_tx = true,
++	.enhance_timing = true,
++	.dma_ext = true,
++	.no_need_unprepare = true,
++};
++
+ static const struct of_device_id mtk_spi_of_match[] = {
++	{ .compatible = "mediatek,spi-ipm",
++		.data = (void *)&mtk_ipm_compat,
++	},
+ 	{ .compatible = "mediatek,ipm-spi-single",
+-		.data = (void *)&ipm_compat_single,
++		.data = (void *)&mtk_ipm_compat,
+ 	},
+ 	{ .compatible = "mediatek,ipm-spi-quad",
+-		.data = (void *)&ipm_compat_quad,
++		.data = (void *)&mtk_ipm_compat,
+ 	},
++
+ 	{ .compatible = "mediatek,mt2701-spi",
+ 		.data = (void *)&mtk_common_compat,
+ 	},
+@@ -228,6 +227,12 @@ static const struct of_device_id mtk_spi
+ 	{ .compatible = "mediatek,mt8183-spi",
+ 		.data = (void *)&mt8183_compat,
+ 	},
++	{ .compatible = "mediatek,mt8192-spi",
++		.data = (void *)&mt6765_compat,
++	},
++	{ .compatible = "mediatek,mt6893-spi",
++		.data = (void *)&mt6893_compat,
++	},
+ 	{}
+ };
+ MODULE_DEVICE_TABLE(of, mtk_spi_of_match);
+@@ -256,27 +261,30 @@ static int mtk_spi_hw_init(struct spi_ma
+ 	cpha = spi->mode & SPI_CPHA ? 1 : 0;
+ 	cpol = spi->mode & SPI_CPOL ? 1 : 0;
+ 
++		/* tick delay */
+ 	if (mdata->dev_comp->enhance_timing) {
+ 		if (mdata->dev_comp->ipm_design) {
+-			/* CFG3 reg only used for spi-mem,
+-			 * here write to default value
+-			 */
+-			writel(0x0, mdata->base + SPI_CFG3_IPM_REG);
+-
+ 			reg_val = readl(mdata->base + SPI_CMD_REG);
+ 			reg_val &= ~SPI_CMD_IPM_GET_TICKDLY_MASK;
+-			reg_val |= mdata->dev_config.get_tick_dly
+-				   << SPI_CMD_IPM_GET_TICKDLY_OFFSET;
++			reg_val |= ((mdata->dev_config.get_tick_dly & 0x7)
++				    << SPI_CMD_IPM_GET_TICKDLY_OFFSET);
+ 			writel(reg_val, mdata->base + SPI_CMD_REG);
+ 		} else {
+ 			reg_val = readl(mdata->base + SPI_CFG1_REG);
+-			reg_val &= ~SPI_CFG1_GET_TICKDLY_MASK;
+-			reg_val |= mdata->dev_config.get_tick_dly
+-				   << SPI_CFG1_GET_TICKDLY_OFFSET;
++			reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK;
++			reg_val |= ((mdata->dev_config.get_tick_dly & 0x7)
++				    << SPI_CFG1_GET_TICK_DLY_OFFSET);
+ 			writel(reg_val, mdata->base + SPI_CFG1_REG);
+ 		}
++	} else {
++		reg_val = readl(mdata->base + SPI_CFG1_REG);
++		reg_val &= ~SPI_CFG1_GET_TICK_DLY_MASK_V1;
++		reg_val |= ((mdata->dev_config.get_tick_dly & 0x3)
++			    << SPI_CFG1_GET_TICK_DLY_OFFSET_V1);
++		writel(reg_val, mdata->base + SPI_CFG1_REG);
+ 	}
+ 
++
+ 	reg_val = readl(mdata->base + SPI_CMD_REG);
+ 	if (mdata->dev_comp->ipm_design) {
+ 		/* SPI transfer without idle time until packet length done */
+@@ -375,12 +383,11 @@ static void mtk_spi_set_cs(struct spi_de
+ static void mtk_spi_prepare_transfer(struct spi_master *master,
+ 				     u32 speed_hz)
+ {
+-	u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
++	u32 div, sck_time, cs_time, reg_val;
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 
+-	spi_clk_hz = clk_get_rate(mdata->spi_clk);
+-	if (speed_hz < spi_clk_hz / 2)
+-		div = DIV_ROUND_UP(spi_clk_hz, speed_hz);
++	if (speed_hz < mdata->spi_clk_hz / 2)
++		div = DIV_ROUND_UP(mdata->spi_clk_hz, speed_hz);
+ 	else
+ 		div = 1;
+ 
+@@ -388,13 +395,19 @@ static void mtk_spi_prepare_transfer(str
+ 	cs_time = sck_time * 2;
+ 
+ 	if (mdata->dev_comp->enhance_timing) {
+-		reg_val = (((sck_time - 1) & 0xffff)
++		reg_val = readl(mdata->base + SPI_CFG2_REG);
++		reg_val &= ~(0xffff << SPI_CFG2_SCK_HIGH_OFFSET);
++		reg_val |= (((sck_time - 1) & 0xffff)
+ 			   << SPI_CFG2_SCK_HIGH_OFFSET);
++		reg_val &= ~(0xffff << SPI_CFG2_SCK_LOW_OFFSET);
+ 		reg_val |= (((sck_time - 1) & 0xffff)
+ 			   << SPI_CFG2_SCK_LOW_OFFSET);
+ 		writel(reg_val, mdata->base + SPI_CFG2_REG);
+-		reg_val = (((cs_time - 1) & 0xffff)
++		reg_val = readl(mdata->base + SPI_CFG0_REG);
++		reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
++		reg_val |= (((cs_time - 1) & 0xffff)
+ 			   << SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
++		reg_val &= ~(0xffff << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ 		reg_val |= (((cs_time - 1) & 0xffff)
+ 			   << SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
+ 		writel(reg_val, mdata->base + SPI_CFG0_REG);
+@@ -453,14 +466,17 @@ static void mtk_spi_enable_transfer(stru
+ 	writel(cmd, mdata->base + SPI_CMD_REG);
+ }
+ 
+-static int mtk_spi_get_mult_delta(u32 xfer_len)
++static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len)
+ {
+-	u32 mult_delta;
++	u32 mult_delta = 0;
+ 
+-	if (xfer_len > MTK_SPI_PACKET_SIZE)
+-		mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
+-	else
+-		mult_delta = 0;
++	if (mdata->dev_comp->ipm_design) {
++		if (xfer_len > MTK_SPI_IPM_PACKET_SIZE)
++			mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE;
++	} else {
++		if (xfer_len > MTK_SPI_PACKET_SIZE)
++			mult_delta = xfer_len % MTK_SPI_PACKET_SIZE;
++	}
+ 
+ 	return mult_delta;
+ }
+@@ -472,22 +488,22 @@ static void mtk_spi_update_mdata_len(str
+ 
+ 	if (mdata->tx_sgl_len && mdata->rx_sgl_len) {
+ 		if (mdata->tx_sgl_len > mdata->rx_sgl_len) {
+-			mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
++			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
+ 			mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+ 			mdata->rx_sgl_len = mult_delta;
+ 			mdata->tx_sgl_len -= mdata->xfer_len;
+ 		} else {
+-			mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
++			mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
+ 			mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+ 			mdata->tx_sgl_len = mult_delta;
+ 			mdata->rx_sgl_len -= mdata->xfer_len;
+ 		}
+ 	} else if (mdata->tx_sgl_len) {
+-		mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len);
++		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len);
+ 		mdata->xfer_len = mdata->tx_sgl_len - mult_delta;
+ 		mdata->tx_sgl_len = mult_delta;
+ 	} else if (mdata->rx_sgl_len) {
+-		mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len);
++		mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len);
+ 		mdata->xfer_len = mdata->rx_sgl_len - mult_delta;
+ 		mdata->rx_sgl_len = mult_delta;
+ 	}
+@@ -598,6 +614,19 @@ static int mtk_spi_transfer_one(struct s
+ 				struct spi_device *spi,
+ 				struct spi_transfer *xfer)
+ {
++	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
++	u32 reg_val = 0;
++
++	/* prepare xfer direction and duplex mode */
++	if (mdata->dev_comp->ipm_design) {
++		if (!xfer->tx_buf || !xfer->rx_buf) {
++			reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
++			if (xfer->rx_buf)
++				reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_DIR;
++		}
++		writel(reg_val, mdata->base + SPI_CFG3_IPM_REG);
++	}
++
+ 	if (master->can_dma(master, spi, xfer))
+ 		return mtk_spi_dma_transfer(master, spi, xfer);
+ 	else
+@@ -618,8 +647,9 @@ static int mtk_spi_setup(struct spi_devi
+ {
+ 	struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
+ 
+-	if (mdata->dev_comp->need_pad_sel && gpio_is_valid(spi->cs_gpio))
+-		gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
++	if (mdata->dev_comp->need_pad_sel && spi->cs_gpiod)
++		/* CS de-asserted, gpiolib will handle inversion */
++		gpiod_direction_output(spi->cs_gpiod, 0);
+ 
+ 	return 0;
+ }
+@@ -747,9 +777,6 @@ static int mtk_spi_mem_adjust_op_size(st
+ {
+ 	int opcode_len;
+ 
+-	if(!op->data.nbytes)
+-		return 0;
+-
+ 	if (op->data.dir != SPI_MEM_NO_DATA) {
+ 		opcode_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+ 		if (opcode_len + op->data.nbytes > MTK_SPI_IPM_PACKET_SIZE) {
+@@ -765,8 +792,7 @@ static int mtk_spi_mem_adjust_op_size(st
+ static bool mtk_spi_mem_supports_op(struct spi_mem *mem,
+ 				     const struct spi_mem_op *op)
+ {
+-	if (op->data.buswidth > 4 || op->addr.buswidth > 4 ||
+-	    op->dummy.buswidth > 4 || op->cmd.buswidth > 4)
++	if (!spi_mem_default_supports_op(mem, op))
+ 		return false;
+ 
+ 	if (op->addr.nbytes && op->dummy.nbytes &&
+@@ -814,13 +840,18 @@ static int mtk_spi_transfer_wait(struct
+ 				 const struct spi_mem_op *op)
+ {
+ 	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+-	unsigned long long ms = 1;
++	/*
++	 * For each byte we wait for 8 cycles of the SPI clock.
++	 * Since speed is defined in Hz and we want milliseconds,
++	 * so it should be 8 * 1000.
++	 */
++	u64 ms = 8000LL;
+ 
+ 	if (op->data.dir == SPI_MEM_NO_DATA)
+-		ms = 8LL * 1000LL * 32;
++		ms *= 32; /* prevent we may get 0 for short transfers. */
+ 	else
+-		ms = 8LL * 1000LL * op->data.nbytes;
+-	do_div(ms, mem->spi->max_speed_hz);
++		ms *= op->data.nbytes;
++	ms = div_u64(ms, mem->spi->max_speed_hz);
+ 	ms += ms + 1000; /* 1s tolerance */
+ 
+ 	if (ms > UINT_MAX)
+@@ -839,9 +870,8 @@ static int mtk_spi_mem_exec_op(struct sp
+ 				const struct spi_mem_op *op)
+ {
+ 	struct mtk_spi *mdata = spi_master_get_devdata(mem->spi->master);
+-	u32 reg_val, nio = 1, tx_size;
+-	char *tx_tmp_buf;
+-	char *rx_tmp_buf;
++	u32 reg_val, nio, tx_size;
++	char *tx_tmp_buf, *rx_tmp_buf;
+ 	int ret = 0;
+ 
+ 	mdata->use_spimem = true;
+@@ -887,9 +917,11 @@ static int mtk_spi_mem_exec_op(struct sp
+ 		 op->dummy.buswidth == 4 ||
+ 		 op->data.buswidth == 4)
+ 		nio = 4;
++	else
++		nio = 1;
+ 
+ 	reg_val &= ~SPI_CFG3_IPM_CMD_PIN_MODE_MASK;
+-	reg_val |= PIN_MODE_CFG(nio) << SPI_CFG3_IPM_PIN_MODE_OFFSET;
++	reg_val |= PIN_MODE_CFG(nio);
+ 
+ 	reg_val |= SPI_CFG3_IPM_HALF_DUPLEX_EN;
+ 	if (op->data.dir == SPI_MEM_DATA_IN)
+@@ -902,11 +934,13 @@ static int mtk_spi_mem_exec_op(struct sp
+ 	if (op->data.dir == SPI_MEM_DATA_OUT)
+ 		tx_size += op->data.nbytes;
+ 
+-	tx_size = max(tx_size, (u32)32);
++	tx_size = max_t(u32, tx_size, 32);
+ 
+ 	tx_tmp_buf = kzalloc(tx_size, GFP_KERNEL | GFP_DMA);
+-	if (!tx_tmp_buf)
++	if (!tx_tmp_buf) {
++		mdata->use_spimem = false;
+ 		return -ENOMEM;
++	}
+ 
+ 	tx_tmp_buf[0] = op->cmd.opcode;
+ 
+@@ -937,12 +971,15 @@ static int mtk_spi_mem_exec_op(struct sp
+ 
+ 	if (op->data.dir == SPI_MEM_DATA_IN) {
+ 		if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
+-			rx_tmp_buf = kzalloc(op->data.nbytes, GFP_KERNEL | GFP_DMA);
+-			if (!rx_tmp_buf)
+-				return -ENOMEM;
+-		}
+-		else
++			rx_tmp_buf = kzalloc(op->data.nbytes,
++					     GFP_KERNEL | GFP_DMA);
++			if (!rx_tmp_buf) {
++				ret = -ENOMEM;
++				goto unmap_tx_dma;
++			}
++		} else {
+ 			rx_tmp_buf = op->data.buf.in;
++		}
+ 
+ 		mdata->rx_dma = dma_map_single(mdata->dev,
+ 						   rx_tmp_buf,
+@@ -950,7 +987,7 @@ static int mtk_spi_mem_exec_op(struct sp
+ 						   DMA_FROM_DEVICE);
+ 		if (dma_mapping_error(mdata->dev, mdata->rx_dma)) {
+ 			ret = -ENOMEM;
+-			goto unmap_tx_dma;
++			goto kfree_rx_tmp_buf;
+ 		}
+ 	}
+ 
+@@ -980,11 +1017,13 @@ unmap_rx_dma:
+ 	if (op->data.dir == SPI_MEM_DATA_IN) {
+ 		dma_unmap_single(mdata->dev, mdata->rx_dma,
+ 				 op->data.nbytes, DMA_FROM_DEVICE);
+-		if(!IS_ALIGNED((size_t)op->data.buf.in, 4)) {
++		if(!IS_ALIGNED((size_t)op->data.buf.in, 4))
+ 			memcpy(op->data.buf.in, rx_tmp_buf, op->data.nbytes);
+-			kfree(rx_tmp_buf);
+-		}
+ 	}
++kfree_rx_tmp_buf:
++	if (op->data.dir == SPI_MEM_DATA_IN &&
++	    !IS_ALIGNED((size_t)op->data.buf.in, 4))
++		kfree(rx_tmp_buf);
+ unmap_tx_dma:
+ 	dma_unmap_single(mdata->dev, mdata->tx_dma,
+ 			 tx_size, DMA_TO_DEVICE);
+@@ -1003,19 +1042,19 @@ static const struct spi_controller_mem_o
+ 
+ static int mtk_spi_probe(struct platform_device *pdev)
+ {
++	struct device *dev = &pdev->dev;
+ 	struct spi_master *master;
+ 	struct mtk_spi *mdata;
+-	const struct of_device_id *of_id;
+ 	int i, irq, ret, addr_bits;
+ 
+-	master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
++	master = devm_spi_alloc_master(dev, sizeof(*mdata));
+ 	if (!master) {
+-		dev_err(&pdev->dev, "failed to alloc spi master\n");
++		dev_err(dev, "failed to alloc spi master\n");
+ 		return -ENOMEM;
+ 	}
+ 
+ 	master->auto_runtime_pm = true;
+-	master->dev.of_node = pdev->dev.of_node;
++	master->dev.of_node = dev->of_node;
+ 	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ 
+ 	master->set_cs = mtk_spi_set_cs;
+@@ -1023,23 +1062,16 @@ static int mtk_spi_probe(struct platform
+ 	master->transfer_one = mtk_spi_transfer_one;
+ 	master->can_dma = mtk_spi_can_dma;
+ 	master->setup = mtk_spi_setup;
+-
++	master->use_gpio_descriptors = true;
+ 	master->append_caldata = mtk_spi_append_caldata;
+ 
+-	of_id = of_match_node(mtk_spi_of_match, pdev->dev.of_node);
+-	if (!of_id) {
+-		dev_err(&pdev->dev, "failed to probe of_node\n");
+-		ret = -EINVAL;
+-		goto err_put_master;
+-	}
+-
+ 	mdata = spi_master_get_devdata(master);
+ 
+ 	/* Set device configs to default first. Calibrate it later. */
+ 	mdata->dev_config.sample_sel = 0;
+ 	mdata->dev_config.get_tick_dly = 2;
+ 
+-	mdata->dev_comp = of_id->data;
++	mdata->dev_comp = device_get_match_data(dev);
+ 
+ 	if (mdata->dev_comp->enhance_timing)
+ 		master->mode_bits |= SPI_CS_HIGH;
+@@ -1050,27 +1082,23 @@ static int mtk_spi_probe(struct platform
+ 	if (mdata->dev_comp->ipm_design)
+ 		master->mode_bits |= SPI_LOOP;
+ 
+-	if (mdata->dev_comp->support_quad) {
++	if (mdata->dev_comp->ipm_design) {
++		mdata->dev = dev;
+ 		master->mem_ops = &mtk_spi_mem_ops;
+-		master->mode_bits |= SPI_RX_DUAL | SPI_TX_DUAL |
+-				     SPI_RX_QUAD | SPI_TX_QUAD;
+-
+-		mdata->dev = &pdev->dev;
+ 		init_completion(&mdata->spimem_done);
+ 	}
+ 
+ 	if (mdata->dev_comp->need_pad_sel) {
+-		mdata->pad_num = of_property_count_u32_elems(
+-			pdev->dev.of_node,
++		mdata->pad_num = of_property_count_u32_elems(dev->of_node,
+ 			"mediatek,pad-select");
+ 		if (mdata->pad_num < 0) {
+-			dev_err(&pdev->dev,
++			dev_err(dev,
+ 				"No 'mediatek,pad-select' property\n");
+ 			ret = -EINVAL;
+ 			goto err_put_master;
+ 		}
+ 
+-		mdata->pad_sel = devm_kmalloc_array(&pdev->dev, mdata->pad_num,
++		mdata->pad_sel = devm_kmalloc_array(dev, mdata->pad_num,
+ 						    sizeof(u32), GFP_KERNEL);
+ 		if (!mdata->pad_sel) {
+ 			ret = -ENOMEM;
+@@ -1078,11 +1106,11 @@ static int mtk_spi_probe(struct platform
+ 		}
+ 
+ 		for (i = 0; i < mdata->pad_num; i++) {
+-			of_property_read_u32_index(pdev->dev.of_node,
++			of_property_read_u32_index(dev->of_node,
+ 						   "mediatek,pad-select",
+ 						   i, &mdata->pad_sel[i]);
+ 			if (mdata->pad_sel[i] > MT8173_SPI_MAX_PAD_SEL) {
+-				dev_err(&pdev->dev, "wrong pad-sel[%d]: %u\n",
++				dev_err(dev, "wrong pad-sel[%d]: %u\n",
+ 					i, mdata->pad_sel[i]);
+ 				ret = -EINVAL;
+ 				goto err_put_master;
+@@ -1103,122 +1131,118 @@ static int mtk_spi_probe(struct platform
+ 		goto err_put_master;
+ 	}
+ 
+-	if (!pdev->dev.dma_mask)
+-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
++	if (!dev->dma_mask)
++		dev->dma_mask = &dev->coherent_dma_mask;
++
++	if (mdata->dev_comp->ipm_design)
++		dma_set_max_seg_size(dev, SZ_16M);
++	else
++		dma_set_max_seg_size(dev, SZ_256K);
+ 
+-	ret = devm_request_irq(&pdev->dev, irq, mtk_spi_interrupt,
+-			       IRQF_TRIGGER_NONE, dev_name(&pdev->dev), master);
++	ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
++			       IRQF_TRIGGER_NONE, dev_name(dev), master);
+ 	if (ret) {
+-		dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
++		dev_err(dev, "failed to register irq (%d)\n", ret);
+ 		goto err_put_master;
+ 	}
+ 
+ 
+-	mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
++	mdata->parent_clk = devm_clk_get(dev, "parent-clk");
+ 	if (IS_ERR(mdata->parent_clk)) {
+ 		ret = PTR_ERR(mdata->parent_clk);
+-		dev_err(&pdev->dev, "failed to get parent-clk: %d\n", ret);
++		dev_err(dev, "failed to get parent-clk: %d\n", ret);
+ 		goto err_put_master;
+ 	}
+ 
+-	mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
++	mdata->sel_clk = devm_clk_get(dev, "sel-clk");
+ 	if (IS_ERR(mdata->sel_clk)) {
+ 		ret = PTR_ERR(mdata->sel_clk);
+-		dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
++		dev_err(dev, "failed to get sel-clk: %d\n", ret);
+ 		goto err_put_master;
+ 	}
+ 
+-	mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
++	mdata->spi_clk = devm_clk_get(dev, "spi-clk");
+ 	if (IS_ERR(mdata->spi_clk)) {
+ 		ret = PTR_ERR(mdata->spi_clk);
+-		dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
++		dev_err(dev, "failed to get spi-clk: %d\n", ret);
+ 		goto err_put_master;
+ 	}
+ 
+-	if (mdata->dev_comp->need_ahb_clk) {
+-		mdata->spi_hclk = devm_clk_get(&pdev->dev, "spi-hclk");
+-		if (IS_ERR(mdata->spi_hclk)) {
+-			ret = PTR_ERR(mdata->spi_hclk);
+-			dev_err(&pdev->dev, "failed to get spi-hclk: %d\n", ret);
+-			goto err_put_master;
+-		}
+-
+-		ret = clk_prepare_enable(mdata->spi_hclk);
+-		if (ret < 0) {
+-			dev_err(&pdev->dev, "failed to enable spi_hclk (%d)\n", ret);
+-			goto err_put_master;
+-		}
++	mdata->spi_hclk = devm_clk_get_optional(dev, "spi-hclk");
++	if (IS_ERR(mdata->spi_hclk)) {
++		ret = PTR_ERR(mdata->spi_hclk);
++		dev_err(dev, "failed to get spi-hclk: %d\n", ret);
++		goto err_put_master;
+ 	}
+ 
+-	ret = clk_prepare_enable(mdata->spi_clk);
++	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
+ 	if (ret < 0) {
+-		dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
++		dev_err(dev, "failed to clk_set_parent (%d)\n", ret);
+ 		goto err_put_master;
+ 	}
+-
+-	ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
++
++	ret = clk_prepare_enable(mdata->spi_hclk);
+ 	if (ret < 0) {
+-		dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
+-		clk_disable_unprepare(mdata->spi_clk);
++		dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+ 		goto err_put_master;
+ 	}
+ 
+-	clk_disable_unprepare(mdata->spi_clk);
+-
+-	if (mdata->dev_comp->need_ahb_clk)
++	ret = clk_prepare_enable(mdata->spi_clk);
++	if (ret < 0) {
+ 		clk_disable_unprepare(mdata->spi_hclk);
++		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
++		goto err_put_master;
++	}
+ 
+-	pm_runtime_enable(&pdev->dev);
++	mdata->spi_clk_hz = clk_get_rate(mdata->spi_clk);
+ 
+-	ret = devm_spi_register_master(&pdev->dev, master);
+-	if (ret) {
+-		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+-		goto err_disable_runtime_pm;
++	if (mdata->dev_comp->no_need_unprepare) {
++		clk_disable(mdata->spi_clk);
++		clk_disable(mdata->spi_hclk);
++	} else {
++		clk_disable_unprepare(mdata->spi_clk);
++		clk_disable_unprepare(mdata->spi_hclk);
+ 	}
+ 
+ 	if (mdata->dev_comp->need_pad_sel) {
+ 		if (mdata->pad_num != master->num_chipselect) {
+-			dev_err(&pdev->dev,
++			dev_err(dev,
+ 				"pad_num does not match num_chipselect(%d != %d)\n",
+ 				mdata->pad_num, master->num_chipselect);
+ 			ret = -EINVAL;
+-			goto err_disable_runtime_pm;
++			goto err_put_master;
+ 		}
+ 
+-		if (!master->cs_gpios && master->num_chipselect > 1) {
+-			dev_err(&pdev->dev,
++		if (!master->cs_gpiods && master->num_chipselect > 1) {
++			dev_err(dev,
+ 				"cs_gpios not specified and num_chipselect > 1\n");
+ 			ret = -EINVAL;
+-			goto err_disable_runtime_pm;
++			goto err_put_master;
+ 		}
+ 
+-		if (master->cs_gpios) {
+-			for (i = 0; i < master->num_chipselect; i++) {
+-				ret = devm_gpio_request(&pdev->dev,
+-							master->cs_gpios[i],
+-							dev_name(&pdev->dev));
+-				if (ret) {
+-					dev_err(&pdev->dev,
+-						"can't get CS GPIO %i\n", i);
+-					goto err_disable_runtime_pm;
+-				}
+-			}
+-		}
+ 	}
+ 
+ 	if (mdata->dev_comp->dma_ext)
+ 		addr_bits = DMA_ADDR_EXT_BITS;
+ 	else
+ 		addr_bits = DMA_ADDR_DEF_BITS;
+-	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
++	ret = dma_set_mask(dev, DMA_BIT_MASK(addr_bits));
+ 	if (ret)
+-		dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
++		dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
+ 			   addr_bits, ret);
+ 
++	pm_runtime_enable(dev);
++
++	ret = devm_spi_register_master(dev, master);
++	if (ret) {
++		dev_err(dev, "failed to register master (%d)\n", ret);
++		goto err_disable_runtime_pm;
++	}
++
+ 	return 0;
+ 
+ err_disable_runtime_pm:
+-	pm_runtime_disable(&pdev->dev);
++	pm_runtime_disable(dev);
+ err_put_master:
+ 	spi_master_put(master);
+ 
+@@ -1229,11 +1253,22 @@ static int mtk_spi_remove(struct platfor
+ {
+ 	struct spi_master *master = platform_get_drvdata(pdev);
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
++	int ret;
+ 
+-	pm_runtime_disable(&pdev->dev);
++	ret = pm_runtime_resume_and_get(&pdev->dev);
++	if (ret < 0)
++		return ret;
+ 
+ 	mtk_spi_reset(mdata);
+ 
++	if (mdata->dev_comp->no_need_unprepare) {
++		clk_unprepare(mdata->spi_clk);
++		clk_unprepare(mdata->spi_hclk);
++	}
++
++	pm_runtime_put_noidle(&pdev->dev);
++	pm_runtime_disable(&pdev->dev);
++
+ 	return 0;
+ }
+ 
+@@ -1250,8 +1285,7 @@ static int mtk_spi_suspend(struct device
+ 
+ 	if (!pm_runtime_suspended(dev)) {
+ 		clk_disable_unprepare(mdata->spi_clk);
+-		if (mdata->dev_comp->need_ahb_clk)
+-			clk_disable_unprepare(mdata->spi_hclk);
++		clk_disable_unprepare(mdata->spi_hclk);
+ 	}
+ 
+ 	return ret;
+@@ -1264,26 +1298,24 @@ static int mtk_spi_resume(struct device
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 
+ 	if (!pm_runtime_suspended(dev)) {
+-		if (mdata->dev_comp->need_ahb_clk) {
+-			ret = clk_prepare_enable(mdata->spi_hclk);
+-			if (ret < 0) {
+-				dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
+-				return ret;
+-			}
+-		}
+-
+ 		ret = clk_prepare_enable(mdata->spi_clk);
+ 		if (ret < 0) {
+ 			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+ 			return ret;
+ 		}
++
++		ret = clk_prepare_enable(mdata->spi_hclk);
++		if (ret < 0) {
++			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
++			clk_disable_unprepare(mdata->spi_clk);
++			return ret;
++		}
+ 	}
+ 
+ 	ret = spi_master_resume(master);
+ 	if (ret < 0) {
+ 		clk_disable_unprepare(mdata->spi_clk);
+-		if (mdata->dev_comp->need_ahb_clk)
+-			clk_disable_unprepare(mdata->spi_hclk);
++		clk_disable_unprepare(mdata->spi_hclk);
+ 	}
+ 
+ 	return ret;
+@@ -1296,10 +1328,13 @@ static int mtk_spi_runtime_suspend(struc
+ 	struct spi_master *master = dev_get_drvdata(dev);
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 
+-	clk_disable_unprepare(mdata->spi_clk);
+-
+-	if (mdata->dev_comp->need_ahb_clk)
++	if (mdata->dev_comp->no_need_unprepare) {
++		clk_disable(mdata->spi_clk);
++		clk_disable(mdata->spi_hclk);
++	} else {
++		clk_disable_unprepare(mdata->spi_clk);
+ 		clk_disable_unprepare(mdata->spi_hclk);
++	}
+ 
+ 	return 0;
+ }
+@@ -1310,18 +1345,31 @@ static int mtk_spi_runtime_resume(struct
+ 	struct mtk_spi *mdata = spi_master_get_devdata(master);
+ 	int ret;
+ 
+-	if (mdata->dev_comp->need_ahb_clk) {
+-		ret = clk_prepare_enable(mdata->spi_hclk);
++	if (mdata->dev_comp->no_need_unprepare) {
++		ret = clk_enable(mdata->spi_clk);
++		if (ret < 0) {
++			dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
++			return ret;
++		}
++		ret = clk_enable(mdata->spi_hclk);
+ 		if (ret < 0) {
+ 			dev_err(dev, "failed to enable spi_hclk (%d)\n", ret);
++			clk_disable(mdata->spi_clk);
++			return ret;
++		}
++	} else {
++		ret = clk_prepare_enable(mdata->spi_clk);
++		if (ret < 0) {
++			dev_err(dev, "failed to prepare_enable spi_clk (%d)\n", ret);
+ 			return ret;
+ 		}
+-	}
+ 
+-	ret = clk_prepare_enable(mdata->spi_clk);
+-	if (ret < 0) {
+-		dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
+-		return ret;
++		ret = clk_prepare_enable(mdata->spi_hclk);
++		if (ret < 0) {
++			dev_err(dev, "failed to prepare_enable spi_hclk (%d)\n", ret);
++			clk_disable_unprepare(mdata->spi_clk);
++			return ret;
++		}
+ 	}
+ 
+ 	return 0;