/*
 * TI QSPI driver
 *
 * Copyright (C) 2013, Texas Instruments, Incorporated
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/arch/omap.h>
#include <malloc.h>
#include <spi.h>
#include <asm/gpio.h>
#include <asm/omap_gpio.h>
#include <asm/omap_common.h>
#include <asm/ti-common/ti-edma3.h>

/* ti qspi register bit masks */
#define QSPI_TIMEOUT 2000000
#define QSPI_FCLK 192000000
/* clock control */
#define QSPI_CLK_EN BIT(31)
#define QSPI_CLK_DIV_MAX 0xffff
/* command */
#define QSPI_EN_CS(n) (n << 28)
#define QSPI_WLEN(n) ((n-1) << 19)
#define QSPI_3_PIN BIT(18)
#define QSPI_RD_SNGL BIT(16)
#define QSPI_WR_SNGL (2 << 16)
#define QSPI_INVAL (4 << 16)
#define QSPI_RD_QUAD (7 << 16)
/* device control */
#define QSPI_DD(m, n) (m << (3 + n*8))
#define QSPI_CKPHA(n) (1 << (2 + n*8))
#define QSPI_CSPOL(n) (1 << (1 + n*8))
#define QSPI_CKPOL(n) (1 << (n*8))
/* status */
#define QSPI_WC BIT(1)
#define QSPI_BUSY BIT(0)
#define QSPI_WC_BUSY (QSPI_WC | QSPI_BUSY)
#define QSPI_XFER_DONE QSPI_WC
#define MM_SWITCH 0x01
#define MEM_CS(cs) ((cs + 1) << 8)
#define MEM_CS_UNSELECT 0xfffff0ff
#define MMAP_START_ADDR_DRA 0x5c000000
#define MMAP_START_ADDR_AM43x 0x30000000
#define CORE_CTRL_IO 0x4a002558

#define QSPI_CMD_READ (0x3 << 0)
#define QSPI_CMD_READ_QUAD (0x6b << 0)
#define QSPI_CMD_READ_FAST (0x0b << 0)
#define QSPI_SETUP0_NUM_A_BYTES (0x2 << 8)
#define QSPI_SETUP0_NUM_D_BYTES_NO_BITS (0x0 << 10)
#define QSPI_SETUP0_NUM_D_BYTES_8_BITS (0x1 << 10)
#define QSPI_SETUP0_READ_NORMAL (0x0 << 12)
#define QSPI_SETUP0_READ_QUAD (0x3 << 12)
#define QSPI_CMD_WRITE (0x2 << 16)
#define QSPI_NUM_DUMMY_BITS (0x0 << 24)

/* ti qspi register set */
struct ti_qspi_regs {
	u32 pid;		/* 0x00 */
	u32 pad0[3];
	u32 sysconfig;		/* 0x10 */
	u32 pad1[3];
	u32 int_stat_raw;	/* 0x20 */
	u32 int_stat_en;	/* 0x24 */
	u32 int_en_set;		/* 0x28 */
	u32 int_en_ctlr;	/* 0x2c */
	u32 intc_eoi;		/* 0x30 */
	u32 pad2[3];
	u32 clk_ctrl;		/* 0x40 */
	u32 dc;			/* 0x44 */
	u32 cmd;		/* 0x48 */
	u32 status;		/* 0x4c */
	u32 data;		/* 0x50 */
	u32 setup0;		/* 0x54 */
	u32 setup1;		/* 0x58 */
	u32 setup2;		/* 0x5c */
	u32 setup3;		/* 0x60 */
	u32 memswitch;		/* 0x64 */
	u32 data1;		/* 0x68 */
	u32 data2;		/* 0x6c */
	u32 data3;		/* 0x70 */
};

/* ti qspi priv */
struct ti_qspi_priv {
	struct spi_slave slave;
	struct ti_qspi_regs *base;
	unsigned int mode;
	u32 cmd;
	u32 dc;
};

static inline struct ti_qspi_priv *to_ti_qspi_priv(struct spi_slave *slave)
{
	return container_of(slave, struct ti_qspi_priv, slave);
}

static void ti_spi_setup_spi_register(struct ti_qspi_priv *priv)
{
	struct spi_slave *slave = &priv->slave;
	u32 memval = 0;

#if defined(CONFIG_DRA7XX) || defined(CONFIG_AM57XX)
	slave->memory_map = (void *)MMAP_START_ADDR_DRA;
#else
	slave->memory_map = (void *)MMAP_START_ADDR_AM43x;
#endif

#ifdef CONFIG_QSPI_QUAD_SUPPORT
	memval |= (QSPI_CMD_READ_QUAD | QSPI_SETUP0_NUM_A_BYTES |
			QSPI_SETUP0_NUM_D_BYTES_8_BITS |
			QSPI_SETUP0_READ_QUAD | QSPI_CMD_WRITE |
			QSPI_NUM_DUMMY_BITS);
	slave->mode_rx = SPI_RX_QUAD;
#else
	memval |= QSPI_CMD_READ | QSPI_SETUP0_NUM_A_BYTES |
			QSPI_SETUP0_NUM_D_BYTES_NO_BITS |
			QSPI_SETUP0_READ_NORMAL | QSPI_CMD_WRITE |
			QSPI_NUM_DUMMY_BITS;
#endif

	writel(memval, &priv->base->setup0);
}
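
/*
 * For reference, the SETUP0 value composed above works out as follows;
 * this is a sketch derived purely from the #defines in this file:
 *
 *   quad read path:   0x6b | (0x2 << 8) | (0x1 << 10) | (0x3 << 12) |
 *                     (0x2 << 16) = 0x0002366b
 *   single read path: 0x03 | (0x2 << 8) | (0x0 << 10) | (0x0 << 12) |
 *                     (0x2 << 16) = 0x00020203
 */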

static void ti_spi_set_speed(struct spi_slave *slave, uint hz)
{
	struct ti_qspi_priv *priv = to_ti_qspi_priv(slave);
	uint clk_div;

	if (!hz)
		clk_div = 0;
	else
		clk_div = (QSPI_FCLK / hz) - 1;

	debug("ti_spi_set_speed: hz: %d, clock divider %d\n", hz, clk_div);

	/* disable SCLK */
	writel(readl(&priv->base->clk_ctrl) & ~QSPI_CLK_EN,
	       &priv->base->clk_ctrl);

	/* assign clk_div values */
	if (clk_div < 0)
		clk_div = 0;
	else if (clk_div > QSPI_CLK_DIV_MAX)
		clk_div = QSPI_CLK_DIV_MAX;

	/* enable SCLK */
	writel(QSPI_CLK_EN | clk_div, &priv->base->clk_ctrl);
}

int spi_cs_is_valid(unsigned int bus, unsigned int cs)
{
	return 1;
}

void spi_cs_activate(struct spi_slave *slave)
{
	/* CS handled in xfer */
	return;
}

void spi_cs_deactivate(struct spi_slave *slave)
{
	struct ti_qspi_priv *priv = to_ti_qspi_priv(slave);

	debug("spi_cs_deactivate: 0x%08x\n", (u32)slave);

	writel(priv->cmd | QSPI_INVAL, &priv->base->cmd);
	/* dummy readl to ensure bus sync */
	readl(&priv->base->cmd);
}

void spi_init(void)
{
	/* nothing to do */
}

struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
				  unsigned int max_hz, unsigned int mode)
{
	struct ti_qspi_priv *priv;

#ifdef CONFIG_AM43XX
	gpio_request(CONFIG_QSPI_SEL_GPIO, "qspi_gpio");
	gpio_direction_output(CONFIG_QSPI_SEL_GPIO, 1);
#endif

	priv = spi_alloc_slave(struct ti_qspi_priv, bus, cs);
	if (!priv) {
		printf("SPI_error: Failed to allocate ti_qspi_priv\n");
		return NULL;
	}

	priv->base = (struct ti_qspi_regs *)QSPI_BASE;
	priv->mode = mode;

	ti_spi_set_speed(&priv->slave, max_hz);

#ifdef CONFIG_TI_SPI_MMAP
	ti_spi_setup_spi_register(priv);
#endif

	return &priv->slave;
}
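
/*
 * Illustrative call sequence for this legacy (non-DM) SPI interface,
 * reading the JEDEC ID of a flash as an example; the bus, chip-select
 * and speed values are placeholders, not taken from any particular
 * board:
 *
 *	struct spi_slave *slave = spi_setup_slave(0, 0, 48000000, SPI_MODE_3);
 *
 *	if (slave) {
 *		if (!spi_claim_bus(slave)) {
 *			u8 cmd = 0x9f;
 *			u8 id[3];
 *
 *			spi_xfer(slave, 8, &cmd, NULL, SPI_XFER_BEGIN);
 *			spi_xfer(slave, 24, NULL, id, SPI_XFER_END);
 *			spi_release_bus(slave);
 *		}
 *		spi_free_slave(slave);
 *	}
 */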

void spi_free_slave(struct spi_slave *slave)
{
	struct ti_qspi_priv *priv = to_ti_qspi_priv(slave);
	free(priv);
}

int spi_claim_bus(struct spi_slave *slave)
{
	struct ti_qspi_priv *priv = to_ti_qspi_priv(slave);

	debug("spi_claim_bus: bus:%i cs:%i\n", slave->bus, slave->cs);

	priv->dc = 0;
	if (priv->mode & SPI_CPHA)
		priv->dc |= QSPI_CKPHA(slave->cs);
	if (priv->mode & SPI_CPOL)
		priv->dc |= QSPI_CKPOL(slave->cs);
	if (priv->mode & SPI_CS_HIGH)
		priv->dc |= QSPI_CSPOL(slave->cs);

	writel(priv->dc, &priv->base->dc);
	writel(0, &priv->base->cmd);
	writel(0, &priv->base->data);

	return 0;
}
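
/*
 * Example of the device-control word built in spi_claim_bus(), derived
 * from the QSPI_CKPHA/QSPI_CKPOL/QSPI_CSPOL macros above: for SPI_MODE_3
 * (CPOL | CPHA) on chip select 0,
 * dc = QSPI_CKPHA(0) | QSPI_CKPOL(0) = (1 << 2) | (1 << 0) = 0x5.
 */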

void spi_release_bus(struct spi_slave *slave)
{
	struct ti_qspi_priv *priv = to_ti_qspi_priv(slave);

	debug("spi_release_bus: bus:%i cs:%i\n", slave->bus, slave->cs);

	writel(0, &priv->base->dc);
	writel(0, &priv->base->cmd);
	writel(0, &priv->base->data);
}

int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
	     void *din, unsigned long flags)
{
	struct ti_qspi_priv *priv = to_ti_qspi_priv(slave);
	uint words = bitlen >> 3; /* fixed 8-bit word length */
	const uchar *txp = dout;
	uchar *rxp = din;
	uint status;
	int timeout;

#if defined(CONFIG_DRA7XX) || defined(CONFIG_AM57XX)
	int val;
#endif

	debug("spi_xfer: bus:%i cs:%i bitlen:%i words:%i flags:%lx\n",
	      slave->bus, slave->cs, bitlen, words, flags);

	/* Setup mmap flags */
	if (flags & SPI_XFER_MMAP) {
		writel(MM_SWITCH, &priv->base->memswitch);
#if defined(CONFIG_DRA7XX) || defined(CONFIG_AM57XX)
		val = readl(CORE_CTRL_IO);
		val |= MEM_CS(slave->cs);
		writel(val, CORE_CTRL_IO);
#endif
		return 0;
	} else if (flags & SPI_XFER_MMAP_END) {
		writel(~MM_SWITCH, &priv->base->memswitch);
#if defined(CONFIG_DRA7XX) || defined(CONFIG_AM57XX)
		val = readl(CORE_CTRL_IO);
		val &= MEM_CS_UNSELECT;
		writel(val, CORE_CTRL_IO);
#endif
		return 0;
	}

	if (bitlen == 0)
		return -1;

	if (bitlen % 8) {
		debug("spi_xfer: Non byte aligned SPI transfer\n");
		return -1;
	}

	/* Setup command reg */
	priv->cmd = 0;
	priv->cmd |= QSPI_WLEN(8);
	priv->cmd |= QSPI_EN_CS(slave->cs);
	if (priv->mode & SPI_3WIRE)
		priv->cmd |= QSPI_3_PIN;
	priv->cmd |= 0xfff;

/* FIXME: This delay is required for successful completion of
 * read/write/erase. Once it is root-caused, it will be removed
 * from the driver.
 */
#ifdef CONFIG_AM43XX
	udelay(100);
#endif
	while (words--) {
		if (txp) {
			debug("tx cmd %08x dc %08x data %02x\n",
			      priv->cmd | QSPI_WR_SNGL, priv->dc, *txp);
			writel(*txp++, &priv->base->data);
			writel(priv->cmd | QSPI_WR_SNGL,
			       &priv->base->cmd);
			status = readl(&priv->base->status);
			timeout = QSPI_TIMEOUT;
			while ((status & QSPI_WC_BUSY) != QSPI_XFER_DONE) {
				if (--timeout < 0) {
					printf("spi_xfer: TX timeout!\n");
					return -1;
				}
				status = readl(&priv->base->status);
			}
			debug("tx done, status %08x\n", status);
		}
		if (rxp) {
			priv->cmd |= QSPI_RD_SNGL;
			debug("rx cmd %08x dc %08x\n",
			      priv->cmd, priv->dc);
#ifdef CONFIG_DRA7XX
			udelay(500);
#endif
			writel(priv->cmd, &priv->base->cmd);
			status = readl(&priv->base->status);
			timeout = QSPI_TIMEOUT;
			while ((status & QSPI_WC_BUSY) != QSPI_XFER_DONE) {
				if (--timeout < 0) {
					printf("spi_xfer: RX timeout!\n");
					return -1;
				}
				status = readl(&priv->base->status);
			}
			*rxp++ = readl(&priv->base->data);
			debug("rx done, status %08x, read %02x\n",
			      status, *(rxp-1));
		}
	}

	/* Terminate frame */
	if (flags & SPI_XFER_END)
		spi_cs_deactivate(slave);

	return 0;
}
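
/*
 * For reference, the command word assembled in spi_xfer() for an 8-bit
 * write on chip select 0 works out to (derived from the macros above):
 *
 *	QSPI_WLEN(8) | QSPI_EN_CS(0) | 0xfff | QSPI_WR_SNGL
 *	= (7 << 19) | 0 | 0xfff | (2 << 16) = 0x003a0fff
 *
 * which is the value the "tx cmd" debug line prints.
 */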

/* TODO: control from sf layer to here through dm-spi */
#ifdef CONFIG_TI_EDMA3
void spi_flash_copy_mmap(void *data, void *offset, size_t len)
{
	unsigned int addr = (unsigned int) (data);
	unsigned int edma_slot_num = 1;

	/* Invalidate the area, so no writeback into the RAM races with DMA */
	invalidate_dcache_range(addr, addr + roundup(len, ARCH_DMA_MINALIGN));

	/* enable edma3 clocks */
	enable_edma3_clocks();

	/* Call edma3 api to do actual DMA transfer */
	edma3_transfer(EDMA3_BASE, edma_slot_num, data, offset, len);

	/* disable edma3 clocks */
	disable_edma3_clocks();

	*((unsigned int *)offset) += len;
}
#endif
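
/*
 * Note: spi_flash_copy_mmap() appears to override the SPI flash layer's
 * weak memory-mapped copy hook; 'offset' is expected to point into the
 * memory-mapped window enabled via the SPI_XFER_MMAP flag handled in
 * spi_xfer() above.
 */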