// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 SPI driver
 *
 * Copyright (C) 2019 DENX Software Engineering
 * Lukasz Majewski, DENX Software Engineering, lukma@denx.de
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * NOTE: This driver only supports the SPI-controller chip selects;
 * GPIO-driven chip selects are not supported.
 */

#include <common.h>
#include <dm.h>
#include <dt-structs.h>
#include <cpu_func.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <spi.h>
#include <asm/cache.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/mach-imx/dma.h>

#define MXS_SPI_MAX_TIMEOUT	1000000
#define MXS_SPI_PORT_OFFSET	0x2000
#define MXS_SSP_CHIPSELECT_MASK		0x00300000
#define MXS_SSP_CHIPSELECT_SHIFT	20

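/*
 * Transfers shorter than this are always done in PIO mode; the DMA setup
 * overhead is likely not worth it for such small amounts of data.
 */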
#define MXSSSP_SMALL_TRANSFER	512

/* Base numbers of i.MX2[38] clk for ssp0 IP block */
#define MXS_SSP_IMX23_CLKID_SSP0 33
#define MXS_SSP_IMX28_CLKID_SSP0 46

struct mxs_spi_platdata {
#if CONFIG_IS_ENABLED(OF_PLATDATA)
	struct dtd_fsl_imx23_spi dtplat;
#endif
	s32 frequency;		/* Default clock frequency, -1 for none */
	fdt_addr_t base;	/* SPI IP block base address */
	int num_cs;		/* Number of CSes supported */
	int dma_id;		/* ID of the DMA channel */
	int clk_id;		/* ID of the SSP clock */
};

struct mxs_spi_priv {
	struct mxs_ssp_regs *regs;
	unsigned int dma_channel;
	unsigned int max_freq;
	unsigned int clk_id;
	unsigned int mode;
};

static void mxs_spi_start_xfer(struct mxs_ssp_regs *ssp_regs)
{
	writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_set);
	writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_clr);
}

static void mxs_spi_end_xfer(struct mxs_ssp_regs *ssp_regs)
{
	writel(SSP_CTRL0_LOCK_CS, &ssp_regs->hw_ssp_ctrl0_clr);
	writel(SSP_CTRL0_IGNORE_CRC, &ssp_regs->hw_ssp_ctrl0_set);
}

static int mxs_spi_xfer_pio(struct mxs_spi_priv *priv,
			    char *data, int length, int write,
			    unsigned long flags)
{
	struct mxs_ssp_regs *ssp_regs = priv->regs;

	if (flags & SPI_XFER_BEGIN)
		mxs_spi_start_xfer(ssp_regs);

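	/*
	 * PIO transfer, one byte at a time: program a 1-byte transfer size,
	 * select read or write, set RUN, then move the byte through the
	 * data register and wait for the block to go idle again.
	 */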
	while (length--) {
		/* We transfer 1 byte */
#if defined(CONFIG_MX23)
		writel(SSP_CTRL0_XFER_COUNT_MASK, &ssp_regs->hw_ssp_ctrl0_clr);
		writel(1, &ssp_regs->hw_ssp_ctrl0_set);
#elif defined(CONFIG_MX28)
		writel(1, &ssp_regs->hw_ssp_xfer_size);
#endif

		if ((flags & SPI_XFER_END) && !length)
			mxs_spi_end_xfer(ssp_regs);

		if (write)
			writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_clr);
		else
			writel(SSP_CTRL0_READ, &ssp_regs->hw_ssp_ctrl0_set);

		writel(SSP_CTRL0_RUN, &ssp_regs->hw_ssp_ctrl0_set);

		if (mxs_wait_mask_set(&ssp_regs->hw_ssp_ctrl0_reg,
				      SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
			printf("MXS SPI: Timeout waiting for start\n");
			return -ETIMEDOUT;
		}

		if (write)
			writel(*data++, &ssp_regs->hw_ssp_data);

		writel(SSP_CTRL0_DATA_XFER, &ssp_regs->hw_ssp_ctrl0_set);

		if (!write) {
			if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_status_reg,
					      SSP_STATUS_FIFO_EMPTY,
					      MXS_SPI_MAX_TIMEOUT)) {
				printf("MXS SPI: Timeout waiting for data\n");
				return -ETIMEDOUT;
			}

			*data = readl(&ssp_regs->hw_ssp_data);
			data++;
		}

		if (mxs_wait_mask_clr(&ssp_regs->hw_ssp_ctrl0_reg,
				      SSP_CTRL0_RUN, MXS_SPI_MAX_TIMEOUT)) {
			printf("MXS SPI: Timeout waiting for finish\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

static int mxs_spi_xfer_dma(struct mxs_spi_priv *priv,
			    char *data, int length, int write,
			    unsigned long flags)
{
	struct mxs_ssp_regs *ssp_regs = priv->regs;
	const int xfer_max_sz = 0xff00;
	const int desc_count = DIV_ROUND_UP(length, xfer_max_sz) + 1;
	struct mxs_dma_desc *dp;
	uint32_t ctrl0;
	uint32_t cache_data_count;
	const uint32_t dstart = (uint32_t)data;
	int dmach;
	int tl;
	int ret = 0;

#if defined(CONFIG_MX23)
	const int mxs_spi_pio_words = 1;
#elif defined(CONFIG_MX28)
	const int mxs_spi_pio_words = 4;
#endif

	ALLOC_CACHE_ALIGN_BUFFER(struct mxs_dma_desc, desc, desc_count);

	memset(desc, 0, sizeof(struct mxs_dma_desc) * desc_count);

	ctrl0 = readl(&ssp_regs->hw_ssp_ctrl0);
	ctrl0 |= SSP_CTRL0_DATA_XFER;

	if (flags & SPI_XFER_BEGIN)
		ctrl0 |= SSP_CTRL0_LOCK_CS;
	if (!write)
		ctrl0 |= SSP_CTRL0_READ;

	if (length % ARCH_DMA_MINALIGN)
		cache_data_count = roundup(length, ARCH_DMA_MINALIGN);
	else
		cache_data_count = length;

	/* Flush data to DRAM so DMA can pick them up */
	if (write)
		flush_dcache_range(dstart, dstart + cache_data_count);

	/* Invalidate the area, so no writeback into the RAM races with DMA */
	invalidate_dcache_range(dstart, dstart + cache_data_count);

	dmach = priv->dma_channel;

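	/*
	 * Build one DMA descriptor per chunk of the buffer. Each descriptor
	 * re-programs the SSP through its PIO words and moves up to 64 kB
	 * of data.
	 */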
	dp = desc;
	while (length) {
		dp->address = (dma_addr_t)dp;
		dp->cmd.address = (dma_addr_t)data;

		/*
		 * This is correct, even though it does indeed look insane.
		 * I hereby have to, wholeheartedly, thank Freescale Inc.,
		 * for always inventing insane hardware and keeping me busy
		 * and employed ;-)
		 */
		if (write)
			dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_READ;
		else
			dp->cmd.data = MXS_DMA_DESC_COMMAND_DMA_WRITE;

		/*
		 * The DMA controller can transfer large chunks (64 kB) at a
		 * time by setting the transfer length to 0. Setting tl to
		 * 0x10000 will overflow below and make .data contain 0.
		 * Otherwise, 0xff00 is the transfer maximum.
		 */
		if (length >= 0x10000)
			tl = 0x10000;
		else
			tl = min(length, xfer_max_sz);

		dp->cmd.data |=
			((tl & 0xffff) << MXS_DMA_DESC_BYTES_OFFSET) |
			(mxs_spi_pio_words << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
			MXS_DMA_DESC_HALT_ON_TERMINATE |
			MXS_DMA_DESC_TERMINATE_FLUSH;

		data += tl;
		length -= tl;

		if (!length) {
			dp->cmd.data |= MXS_DMA_DESC_IRQ | MXS_DMA_DESC_DEC_SEM;

			if (flags & SPI_XFER_END) {
				ctrl0 &= ~SSP_CTRL0_LOCK_CS;
				ctrl0 |= SSP_CTRL0_IGNORE_CRC;
			}
		}

		/*
		 * Write CTRL0, CMD0, CMD1 and XFER_SIZE registers in
		 * case of MX28, write only CTRL0 in case of MX23 due
		 * to the difference in register layout. It is utterly
		 * essential that the XFER_SIZE register is written on
		 * a per-descriptor basis with the same size as is the
		 * descriptor!
		 */
		dp->cmd.pio_words[0] = ctrl0;
#ifdef CONFIG_MX28
		dp->cmd.pio_words[1] = 0;
		dp->cmd.pio_words[2] = 0;
		dp->cmd.pio_words[3] = tl;
#endif

		mxs_dma_desc_append(dmach, dp);

		dp++;
	}

	if (mxs_dma_go(dmach))
		ret = -EINVAL;

	/* The data has arrived in DRAM, invalidate the cache over it */
	if (!write)
		invalidate_dcache_range(dstart, dstart + cache_data_count);

	return ret;
}

int mxs_spi_xfer(struct udevice *dev, unsigned int bitlen,
		 const void *dout, void *din, unsigned long flags)
{
	struct udevice *bus = dev_get_parent(dev);
	struct mxs_spi_priv *priv = dev_get_priv(bus);
	struct mxs_ssp_regs *ssp_regs = priv->regs;
	int len = bitlen / 8;
	char dummy;
	int write = 0;
	char *data = NULL;
	int dma = 1;

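	/*
	 * A zero-length transfer that carries SPI_XFER_END still has to
	 * touch the controller so the chip select gets deasserted; turn it
	 * into a single dummy read.
	 */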
	if (bitlen == 0) {
		if (flags & SPI_XFER_END) {
			din = (void *)&dummy;
			len = 1;
		} else {
			return 0;
		}
	}

	/* Half-duplex only */
	if (din && dout)
		return -EINVAL;
	/* No data */
	if (!din && !dout)
		return 0;

	if (dout) {
		data = (char *)dout;
		write = 1;
	} else if (din) {
		data = (char *)din;
		write = 0;
	}

	/*
	 * Check for alignment; if the buffer is aligned, do a DMA transfer,
	 * otherwise fall back to PIO. This is a temporary workaround until
	 * a proper bounce buffer is in place.
	 */
	if (dma) {
		if (((uint32_t)data) & (ARCH_DMA_MINALIGN - 1))
			dma = 0;
		if (((uint32_t)len) & (ARCH_DMA_MINALIGN - 1))
			dma = 0;
	}

	if (!dma || (len < MXSSSP_SMALL_TRANSFER)) {
		writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_clr);
		return mxs_spi_xfer_pio(priv, data, len, write, flags);
	} else {
		writel(SSP_CTRL1_DMA_ENABLE, &ssp_regs->hw_ssp_ctrl1_set);
		return mxs_spi_xfer_dma(priv, data, len, write, flags);
	}
}

static int mxs_spi_probe(struct udevice *bus)
{
	struct mxs_spi_platdata *plat = dev_get_platdata(bus);
	struct mxs_spi_priv *priv = dev_get_priv(bus);
	int ret;

	debug("%s: probe\n", __func__);

#if CONFIG_IS_ENABLED(OF_PLATDATA)
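	/*
	 * With OF_PLATDATA the devicetree has been converted into C
	 * structures at build time; take the register base, DMA channel,
	 * clock ID and maximum frequency straight from them.
	 */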
	struct dtd_fsl_imx23_spi *dtplat = &plat->dtplat;
	struct phandle_1_arg *p1a = &dtplat->clocks[0];

	priv->regs = (struct mxs_ssp_regs *)dtplat->reg[0];
	priv->dma_channel = dtplat->dmas[1];
	priv->clk_id = p1a->arg[0];
	priv->max_freq = dtplat->spi_max_frequency;
	plat->num_cs = dtplat->num_cs;

	debug("OF_PLATDATA: regs: 0x%x max freq: %d clkid: %d\n",
	      (unsigned int)priv->regs, priv->max_freq, priv->clk_id);
#else
	priv->regs = (struct mxs_ssp_regs *)plat->base;
	priv->max_freq = plat->frequency;

	priv->dma_channel = plat->dma_id;
	priv->clk_id = plat->clk_id;
#endif

	mxs_reset_block(&priv->regs->hw_ssp_ctrl0_reg);

	ret = mxs_dma_init_channel(priv->dma_channel);
	if (ret) {
		printf("%s: DMA init channel error %d\n", __func__, ret);
		return ret;
	}

	return 0;
}

static int mxs_spi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct mxs_spi_priv *priv = dev_get_priv(bus);
	struct mxs_ssp_regs *ssp_regs = priv->regs;
	int cs = spi_chip_select(dev);

	/*
	 * i.MX28 supports up to 3 chip selects (SSn0, SSn1, SSn2).
	 * They are selected with the (WAIT_FOR_IRQ, WAIT_FOR_CMD) bit pair,
	 * where:
	 *
	 * WAIT_FOR_IRQ is bit 21 of HW_SSP_CTRL0
	 * WAIT_FOR_CMD is bit 20 (#defined as MXS_SSP_CHIPSELECT_SHIFT here)
	 * of HW_SSP_CTRL0
	 * SSn0 b00
	 * SSn1 b01
	 * SSn2 b10 (which requires setting WAIT_FOR_IRQ)
	 *
	 * However, for now this driver only supports up to 2 chip selects
	 * (SSn0 and SSn1).
	 */

	/* Ungate SSP clock and set active CS */
	clrsetbits_le32(&ssp_regs->hw_ssp_ctrl0,
			BIT(MXS_SSP_CHIPSELECT_SHIFT) |
			SSP_CTRL0_CLKGATE, (cs << MXS_SSP_CHIPSELECT_SHIFT));

	return 0;
}

static int mxs_spi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev_get_parent(dev);
	struct mxs_spi_priv *priv = dev_get_priv(bus);
	struct mxs_ssp_regs *ssp_regs = priv->regs;

	/* Gate SSP clock */
	setbits_le32(&ssp_regs->hw_ssp_ctrl0, SSP_CTRL0_CLKGATE);

	return 0;
}

static int mxs_spi_set_speed(struct udevice *bus, uint speed)
{
	struct mxs_spi_priv *priv = dev_get_priv(bus);
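	/*
	 * priv->clk_id comes from the DT 'clocks' specifier; subtracting the
	 * SoC-specific SSP0 clock ID yields the SSP port index that
	 * mxs_set_ssp_busclock() expects (it takes the frequency in kHz,
	 * hence the division by 1000 below).
	 */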
#ifdef CONFIG_MX28
	int clkid = priv->clk_id - MXS_SSP_IMX28_CLKID_SSP0;
#else /* CONFIG_MX23 */
	int clkid = priv->clk_id - MXS_SSP_IMX23_CLKID_SSP0;
#endif
	if (speed > priv->max_freq)
		speed = priv->max_freq;

	debug("%s speed: %u [Hz] clkid: %d\n", __func__, speed, clkid);
	mxs_set_ssp_busclock(clkid, speed / 1000);

	return 0;
}

static int mxs_spi_set_mode(struct udevice *bus, uint mode)
{
	struct mxs_spi_priv *priv = dev_get_priv(bus);
	struct mxs_ssp_regs *ssp_regs = priv->regs;
	u32 reg;

	priv->mode = mode;
	debug("%s: mode 0x%x\n", __func__, mode);

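	/* Only CPOL/CPHA are handled; they map onto the SSP POLARITY and PHASE bits */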
	reg = SSP_CTRL1_SSP_MODE_SPI | SSP_CTRL1_WORD_LENGTH_EIGHT_BITS;
	reg |= (priv->mode & SPI_CPOL) ? SSP_CTRL1_POLARITY : 0;
	reg |= (priv->mode & SPI_CPHA) ? SSP_CTRL1_PHASE : 0;
	writel(reg, &ssp_regs->hw_ssp_ctrl1);

	/* Single bit SPI support */
	writel(SSP_CTRL0_BUS_WIDTH_ONE_BIT, &ssp_regs->hw_ssp_ctrl0);

	return 0;
}

static const struct dm_spi_ops mxs_spi_ops = {
	.claim_bus	= mxs_spi_claim_bus,
	.release_bus	= mxs_spi_release_bus,
	.xfer		= mxs_spi_xfer,
	.set_speed	= mxs_spi_set_speed,
	.set_mode	= mxs_spi_set_mode,
	/*
	 * cs_info is not needed, since we require all chip selects to be
	 * in the device tree explicitly
	 */
};

#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
static int mxs_ofdata_to_platdata(struct udevice *bus)
{
	struct mxs_spi_platdata *plat = bus->platdata;
	u32 prop[2];
	int ret;

	plat->base = dev_read_addr(bus);
	plat->frequency =
		dev_read_u32_default(bus, "spi-max-frequency", 40000000);
	plat->num_cs = dev_read_u32_default(bus, "num-cs", 2);

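	/*
	 * 'dmas' and 'clocks' are <phandle specifier> pairs; only the second
	 * cell (the DMA channel and the clock ID, respectively) is used here.
	 */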
	ret = dev_read_u32_array(bus, "dmas", prop, ARRAY_SIZE(prop));
	if (ret) {
		printf("%s: Reading 'dmas' property failed!\n", __func__);
		return ret;
	}
	plat->dma_id = prop[1];

	ret = dev_read_u32_array(bus, "clocks", prop, ARRAY_SIZE(prop));
	if (ret) {
		printf("%s: Reading 'clocks' property failed!\n", __func__);
		return ret;
	}
	plat->clk_id = prop[1];

	debug("%s: base=0x%x, max-frequency=%d num-cs=%d dma_id=%d clk_id=%d\n",
	      __func__, (uint)plat->base, plat->frequency, plat->num_cs,
	      plat->dma_id, plat->clk_id);

	return 0;
}

static const struct udevice_id mxs_spi_ids[] = {
	{ .compatible = "fsl,imx23-spi" },
	{ .compatible = "fsl,imx28-spi" },
	{ }
};
#endif

U_BOOT_DRIVER(fsl_imx23_spi) = {
	.name = "fsl_imx23_spi",
	.id	= UCLASS_SPI,
#if CONFIG_IS_ENABLED(OF_CONTROL) && !CONFIG_IS_ENABLED(OF_PLATDATA)
	.of_match = mxs_spi_ids,
	.ofdata_to_platdata = mxs_ofdata_to_platdata,
#endif
	.platdata_auto_alloc_size = sizeof(struct mxs_spi_platdata),
	.ops	= &mxs_spi_ops,
	.priv_auto_alloc_size = sizeof(struct mxs_spi_priv),
	.probe	= mxs_spi_probe,
};

U_BOOT_DRIVER_ALIAS(fsl_imx23_spi, fsl_imx28_spi)