// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Socionext Inc.
 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <fdtdec.h>
#include <mmc.h>
#include <dm.h>
#include <dm/pinctrl.h>
#include <linux/compat.h>
#include <linux/dma-direction.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <power/regulator.h>
#include <asm/unaligned.h>

#include "tmio-common.h"

DECLARE_GLOBAL_DATA_PTR;

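/*
 * Register accessors. The register offset constants are defined for the
 * 32-bit layout: controllers with a 64-bit bus (TMIO_SD_CAP_64BIT) space
 * the registers twice as far apart (offset << 1), while 16-bit controllers
 * (TMIO_SD_CAP_16BIT) pack them at half the distance (offset >> 1).
 */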
static u64 tmio_sd_readq(struct tmio_sd_priv *priv, unsigned int reg)
{
	return readq(priv->regbase + (reg << 1));
}

static void tmio_sd_writeq(struct tmio_sd_priv *priv,
			   u64 val, unsigned int reg)
{
	writeq(val, priv->regbase + (reg << 1));
}

static u16 tmio_sd_readw(struct tmio_sd_priv *priv, unsigned int reg)
{
	return readw(priv->regbase + (reg >> 1));
}

static void tmio_sd_writew(struct tmio_sd_priv *priv,
			   u16 val, unsigned int reg)
{
	writew(val, priv->regbase + (reg >> 1));
}

u32 tmio_sd_readl(struct tmio_sd_priv *priv, unsigned int reg)
{
	u32 val;

	if (priv->caps & TMIO_SD_CAP_64BIT)
		return readl(priv->regbase + (reg << 1));
	else if (priv->caps & TMIO_SD_CAP_16BIT) {
		val = readw(priv->regbase + (reg >> 1)) & 0xffff;
		if ((reg == TMIO_SD_RSP10) || (reg == TMIO_SD_RSP32) ||
		    (reg == TMIO_SD_RSP54) || (reg == TMIO_SD_RSP76)) {
			val |= readw(priv->regbase + (reg >> 1) + 2) << 16;
		}
		return val;
	} else
		return readl(priv->regbase + reg);
}

void tmio_sd_writel(struct tmio_sd_priv *priv,
		    u32 val, unsigned int reg)
{
	if (priv->caps & TMIO_SD_CAP_64BIT)
		writel(val, priv->regbase + (reg << 1));
	else if (priv->caps & TMIO_SD_CAP_16BIT) {
		writew(val & 0xffff, priv->regbase + (reg >> 1));
		if (reg == TMIO_SD_INFO1 || reg == TMIO_SD_INFO1_MASK ||
		    reg == TMIO_SD_INFO2 || reg == TMIO_SD_INFO2_MASK ||
		    reg == TMIO_SD_ARG)
			writew(val >> 16, priv->regbase + (reg >> 1) + 2);
	} else
		writel(val, priv->regbase + reg);
}

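/*
 * Minimal DMA mapping helpers: they assume a 1:1 virtual/physical mapping
 * (the pointer value is used directly as the DMA address) and only perform
 * the cache maintenance required for the transfer direction.
 */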
static dma_addr_t __dma_map_single(void *ptr, size_t size,
				   enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long)ptr;

	if (dir == DMA_FROM_DEVICE)
		invalidate_dcache_range(addr, addr + size);
	else
		flush_dcache_range(addr, addr + size);

	return addr;
}

static void __dma_unmap_single(dma_addr_t addr, size_t size,
			       enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		invalidate_dcache_range(addr, addr + size);
}

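/*
 * Decode the error bits in TMIO_SD_INFO2 into an errno value. A command
 * response timeout is reported silently because the MMC core intentionally
 * issues commands that may be unsupported, e.g. to distinguish SD from MMC.
 */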
static int tmio_sd_check_error(struct udevice *dev, struct mmc_cmd *cmd)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	u32 info2 = tmio_sd_readl(priv, TMIO_SD_INFO2);

	if (info2 & TMIO_SD_INFO2_ERR_RTO) {
		/*
		 * TIMEOUT must be returned for an unsupported command. Do not
		 * display an error log since this might be part of a sequence
		 * to distinguish between SD and MMC.
		 */
		return -ETIMEDOUT;
	}

	if (info2 & TMIO_SD_INFO2_ERR_TO) {
		dev_err(dev, "timeout error\n");
		return -ETIMEDOUT;
	}

	if (info2 & (TMIO_SD_INFO2_ERR_END | TMIO_SD_INFO2_ERR_CRC |
		     TMIO_SD_INFO2_ERR_IDX)) {
		if ((cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK) &&
		    (cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK_HS200))
			dev_err(dev, "communication out of sync\n");
		return -EILSEQ;
	}

	if (info2 & (TMIO_SD_INFO2_ERR_ILA | TMIO_SD_INFO2_ERR_ILR |
		     TMIO_SD_INFO2_ERR_ILW)) {
		dev_err(dev, "illegal access\n");
		return -EIO;
	}

	return 0;
}

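/*
 * Busy-wait until the given flag is set in the status register, bailing out
 * early if the controller reports an error or the loop times out.
 */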
static int tmio_sd_wait_for_irq(struct udevice *dev, struct mmc_cmd *cmd,
				unsigned int reg, u32 flag)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	long wait = 1000000;
	int ret;

	while (!(tmio_sd_readl(priv, reg) & flag)) {
		if (wait-- < 0) {
			dev_err(dev, "timeout\n");
			return -ETIMEDOUT;
		}

		ret = tmio_sd_check_error(dev, cmd);
		if (ret)
			return ret;

		udelay(1);
	}

	return 0;
}

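/*
 * Generate width-specific PIO FIFO readers (64/32/16 bit). Buffers that are
 * not naturally aligned for the access width take the slow path through
 * put_unaligned().
 */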
#define tmio_pio_read_fifo(__width, __suffix)				\
static void tmio_pio_read_fifo_##__width(struct tmio_sd_priv *priv,	\
					 char *pbuf, uint blksz)	\
{									\
	u##__width *buf = (u##__width *)pbuf;				\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			*buf++ = tmio_sd_read##__suffix(priv,		\
							TMIO_SD_BUF);	\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data;				\
			data = tmio_sd_read##__suffix(priv,		\
						      TMIO_SD_BUF);	\
			put_unaligned(data, buf++);			\
		}							\
	}								\
}

tmio_pio_read_fifo(64, q)
tmio_pio_read_fifo(32, l)
tmio_pio_read_fifo(16, w)

static int tmio_sd_pio_read_one_block(struct udevice *dev, struct mmc_cmd *cmd,
				      char *pbuf, uint blocksize)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	int ret;

	/* wait until the buffer is filled with data */
	ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
				   TMIO_SD_INFO2_BRE);
	if (ret)
		return ret;

	/*
	 * Clear the status flag _before_ reading the buffer out because
	 * TMIO_SD_INFO2_BRE is edge-triggered, not level-triggered.
	 */
	tmio_sd_writel(priv, 0, TMIO_SD_INFO2);

	if (priv->caps & TMIO_SD_CAP_64BIT)
		tmio_pio_read_fifo_64(priv, pbuf, blocksize);
	else if (priv->caps & TMIO_SD_CAP_16BIT)
		tmio_pio_read_fifo_16(priv, pbuf, blocksize);
	else
		tmio_pio_read_fifo_32(priv, pbuf, blocksize);

	return 0;
}

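/*
 * Generate the matching width-specific PIO FIFO writers; unaligned source
 * buffers go through get_unaligned() on the slow path.
 */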
#define tmio_pio_write_fifo(__width, __suffix)				\
static void tmio_pio_write_fifo_##__width(struct tmio_sd_priv *priv,	\
					  const char *pbuf, uint blksz)	\
{									\
	const u##__width *buf = (const u##__width *)pbuf;		\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			tmio_sd_write##__suffix(priv, *buf++,		\
						TMIO_SD_BUF);		\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data = get_unaligned(buf++);		\
			tmio_sd_write##__suffix(priv, data,		\
						TMIO_SD_BUF);		\
		}							\
	}								\
}

tmio_pio_write_fifo(64, q)
tmio_pio_write_fifo(32, l)
tmio_pio_write_fifo(16, w)

static int tmio_sd_pio_write_one_block(struct udevice *dev, struct mmc_cmd *cmd,
				       const char *pbuf, uint blocksize)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	int ret;

	/* wait until the buffer becomes empty */
	ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
				   TMIO_SD_INFO2_BWE);
	if (ret)
		return ret;

	tmio_sd_writel(priv, 0, TMIO_SD_INFO2);

	if (priv->caps & TMIO_SD_CAP_64BIT)
		tmio_pio_write_fifo_64(priv, pbuf, blocksize);
	else if (priv->caps & TMIO_SD_CAP_16BIT)
		tmio_pio_write_fifo_16(priv, pbuf, blocksize);
	else
		tmio_pio_write_fifo_32(priv, pbuf, blocksize);

	return 0;
}

static int tmio_sd_pio_xfer(struct udevice *dev, struct mmc_cmd *cmd,
			    struct mmc_data *data)
{
	const char *src = data->src;
	char *dest = data->dest;
	int i, ret;

	for (i = 0; i < data->blocks; i++) {
		if (data->flags & MMC_DATA_READ)
			ret = tmio_sd_pio_read_one_block(dev, cmd, dest,
							 data->blocksize);
		else
			ret = tmio_sd_pio_write_one_block(dev, cmd, src,
							  data->blocksize);
		if (ret)
			return ret;

		if (data->flags & MMC_DATA_READ)
			dest += data->blocksize;
		else
			src += data->blocksize;
	}

	return 0;
}

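/*
 * Clear the DMA status registers, enable DMA in EXTMODE, program the DMA
 * address as low/high 32-bit halves, and start the transfer.
 */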
static void tmio_sd_dma_start(struct tmio_sd_priv *priv,
			      dma_addr_t dma_addr)
{
	u32 tmp;

	tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO1);
	tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO2);

	/* enable DMA */
	tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE);
	tmp |= TMIO_SD_EXTMODE_DMA_EN;
	tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE);

	tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_L);

	/* suppress the warning "right shift count >= width of type" */
	dma_addr >>= min_t(int, 32, 8 * sizeof(dma_addr));

	tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_H);

	tmio_sd_writel(priv, TMIO_SD_DMA_CTL_START, TMIO_SD_DMA_CTL);
}

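/*
 * Poll TMIO_SD_DMA_INFO1 for the requested completion flag, scaling the
 * timeout with the number of blocks, then report any error bits flagged
 * in TMIO_SD_DMA_INFO2.
 */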
static int tmio_sd_dma_wait_for_irq(struct udevice *dev, u32 flag,
				    unsigned int blocks)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	long wait = 1000000 + 10 * blocks;

	while (!(tmio_sd_readl(priv, TMIO_SD_DMA_INFO1) & flag)) {
		if (wait-- < 0) {
			dev_err(dev, "timeout during DMA\n");
			return -ETIMEDOUT;
		}

		udelay(10);
	}

	if (tmio_sd_readl(priv, TMIO_SD_DMA_INFO2)) {
		dev_err(dev, "error during DMA\n");
		return -EIO;
	}

	return 0;
}

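/*
 * Run a data transfer through the internal DMA engine: set the transfer
 * direction, map the buffer (cache maintenance), start the engine and poll
 * for completion.
 */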
static int tmio_sd_dma_xfer(struct udevice *dev, struct mmc_data *data)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	size_t len = data->blocks * data->blocksize;
	void *buf;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	u32 poll_flag, tmp;
	int ret;

	tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);

	if (data->flags & MMC_DATA_READ) {
		buf = data->dest;
		dir = DMA_FROM_DEVICE;
		/*
		 * The DMA READ completion flag position differs on Socionext
		 * and Renesas SoCs. On Socionext SoCs it is bit 20, and using
		 * bit 17 is a hardware bug and forbidden. On Renesas SoCs it
		 * is either bit 17 or bit 20, depending on the SoC.
		 */
		poll_flag = priv->read_poll_flag;
		tmp |= TMIO_SD_DMA_MODE_DIR_RD;
	} else {
		buf = (void *)data->src;
		dir = DMA_TO_DEVICE;
		poll_flag = TMIO_SD_DMA_INFO1_END_WR;
		tmp &= ~TMIO_SD_DMA_MODE_DIR_RD;
	}

	tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);

	dma_addr = __dma_map_single(buf, len, dir);

	tmio_sd_dma_start(priv, dma_addr);

	ret = tmio_sd_dma_wait_for_irq(dev, poll_flag, data->blocks);

	if (poll_flag == TMIO_SD_DMA_INFO1_END_RD)
		udelay(1);

	__dma_unmap_single(dma_addr, len, dir);

	return ret;
}

/* check if the address is DMA'able */
static bool tmio_sd_addr_is_dmaable(const char *src)
{
	uintptr_t addr = (uintptr_t)src;

	if (!IS_ALIGNED(addr, TMIO_SD_DMA_MINALIGN))
		return false;

#if defined(CONFIG_RCAR_GEN3)
	/* Gen3 DMA has 32bit limit */
	if (addr >> 32)
		return false;
#endif

#if defined(CONFIG_ARCH_UNIPHIER) && !defined(CONFIG_ARM64) && \
	defined(CONFIG_SPL_BUILD)
	/*
	 * For UniPhier ARMv7 SoCs, the stack is allocated in the locked ways
	 * of L2, which is unreachable from the DMA engine.
	 */
	if (addr < CONFIG_SPL_STACK)
		return false;
#endif

	return true;
}

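/*
 * Issue a command and an optional data transfer: program the argument,
 * block size/count and command flags, set the response type explicitly,
 * wait for the response, move the data by DMA or PIO, then wait for the
 * completion and SCLKDIVEN status bits.
 */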
int tmio_sd_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		     struct mmc_data *data)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	int ret;
	u32 tmp;

	if (tmio_sd_readl(priv, TMIO_SD_INFO2) & TMIO_SD_INFO2_CBSY) {
		dev_err(dev, "command busy\n");
		return -EBUSY;
	}

	/* clear all status flags */
	tmio_sd_writel(priv, 0, TMIO_SD_INFO1);
	tmio_sd_writel(priv, 0, TMIO_SD_INFO2);

	/* disable DMA once */
	tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE);
	tmp &= ~TMIO_SD_EXTMODE_DMA_EN;
	tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE);

	tmio_sd_writel(priv, cmd->cmdarg, TMIO_SD_ARG);

	tmp = cmd->cmdidx;

	if (data) {
		tmio_sd_writel(priv, data->blocksize, TMIO_SD_SIZE);
		tmio_sd_writel(priv, data->blocks, TMIO_SD_SECCNT);

		/* Do not send CMD12 automatically */
		tmp |= TMIO_SD_CMD_NOSTOP | TMIO_SD_CMD_DATA;

		if (data->blocks > 1)
			tmp |= TMIO_SD_CMD_MULTI;

		if (data->flags & MMC_DATA_READ)
			tmp |= TMIO_SD_CMD_RD;
	}

	/*
	 * Do not use the response type auto-detection on this hardware.
	 * CMD8, for example, has different response types on SD and eMMC,
	 * while this controller always assumes the response type for SD.
	 * Set the response type manually.
	 */
	switch (cmd->resp_type) {
	case MMC_RSP_NONE:
		tmp |= TMIO_SD_CMD_RSP_NONE;
		break;
	case MMC_RSP_R1:
		tmp |= TMIO_SD_CMD_RSP_R1;
		break;
	case MMC_RSP_R1b:
		tmp |= TMIO_SD_CMD_RSP_R1B;
		break;
	case MMC_RSP_R2:
		tmp |= TMIO_SD_CMD_RSP_R2;
		break;
	case MMC_RSP_R3:
		tmp |= TMIO_SD_CMD_RSP_R3;
		break;
	default:
		dev_err(dev, "unknown response type\n");
		return -EINVAL;
	}

	dev_dbg(dev, "sending CMD%d (SD_CMD=%08x, SD_ARG=%08x)\n",
		cmd->cmdidx, tmp, cmd->cmdarg);
	tmio_sd_writel(priv, tmp, TMIO_SD_CMD);

	ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO1,
				   TMIO_SD_INFO1_RSP);
	if (ret)
		return ret;

	if (cmd->resp_type & MMC_RSP_136) {
		u32 rsp_127_104 = tmio_sd_readl(priv, TMIO_SD_RSP76);
		u32 rsp_103_72 = tmio_sd_readl(priv, TMIO_SD_RSP54);
		u32 rsp_71_40 = tmio_sd_readl(priv, TMIO_SD_RSP32);
		u32 rsp_39_8 = tmio_sd_readl(priv, TMIO_SD_RSP10);

		cmd->response[0] = ((rsp_127_104 & 0x00ffffff) << 8) |
				   ((rsp_103_72 & 0xff000000) >> 24);
		cmd->response[1] = ((rsp_103_72 & 0x00ffffff) << 8) |
				   ((rsp_71_40 & 0xff000000) >> 24);
		cmd->response[2] = ((rsp_71_40 & 0x00ffffff) << 8) |
				   ((rsp_39_8 & 0xff000000) >> 24);
		cmd->response[3] = (rsp_39_8 & 0xffffff) << 8;
	} else {
		/* bits 39-8 */
		cmd->response[0] = tmio_sd_readl(priv, TMIO_SD_RSP10);
	}

	if (data) {
		/* use DMA if the HW supports it and the buffer is aligned */
		if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL &&
		    tmio_sd_addr_is_dmaable(data->src))
			ret = tmio_sd_dma_xfer(dev, data);
		else
			ret = tmio_sd_pio_xfer(dev, cmd, data);
		if (ret)
			return ret;

		ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO1,
					   TMIO_SD_INFO1_CMP);
		if (ret)
			return ret;
	}

	return tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
				    TMIO_SD_INFO2_SCLKDIVEN);
}

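/*
 * Map the MMC core's bus width (1/4/8 bits) onto the WIDTH field of the
 * OPTION register.
 */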
static int tmio_sd_set_bus_width(struct tmio_sd_priv *priv,
				 struct mmc *mmc)
{
	u32 val, tmp;

	switch (mmc->bus_width) {
	case 0:
	case 1:
		val = TMIO_SD_OPTION_WIDTH_1;
		break;
	case 4:
		val = TMIO_SD_OPTION_WIDTH_4;
		break;
	case 8:
		val = TMIO_SD_OPTION_WIDTH_8;
		break;
	default:
		return -EINVAL;
	}

	tmp = tmio_sd_readl(priv, TMIO_SD_OPTION);
	tmp &= ~TMIO_SD_OPTION_WIDTH_MASK;
	tmp |= val;
	tmio_sd_writel(priv, tmp, TMIO_SD_OPTION);

	return 0;
}

static void tmio_sd_set_ddr_mode(struct tmio_sd_priv *priv,
				 struct mmc *mmc)
{
	u32 tmp;

	tmp = tmio_sd_readl(priv, TMIO_SD_IF_MODE);
	if (mmc->ddr_mode)
		tmp |= TMIO_SD_IF_MODE_DDR;
	else
		tmp &= ~TMIO_SD_IF_MODE_DDR;
	tmio_sd_writel(priv, tmp, TMIO_SD_IF_MODE);
}

static ulong tmio_sd_clk_get_rate(struct tmio_sd_priv *priv)
{
	return priv->clk_get_rate(priv);
}

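/*
 * Pick the smallest power-of-two divider that keeps the SD clock at or
 * below the requested rate, stop the clock before reprogramming the
 * divider, then enable or disable the clock output as requested.
 */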
static void tmio_sd_set_clk_rate(struct tmio_sd_priv *priv, struct mmc *mmc)
{
	unsigned int divisor;
	u32 tmp, val = 0;
	ulong mclk;

	if (mmc->clock) {
		mclk = tmio_sd_clk_get_rate(priv);

		divisor = DIV_ROUND_UP(mclk, mmc->clock);

		/* Do not set divider to 0xff in DDR mode */
		if (mmc->ddr_mode && (divisor == 1))
			divisor = 2;

		if (divisor <= 1)
			val = (priv->caps & TMIO_SD_CAP_RCAR) ?
			      TMIO_SD_CLKCTL_RCAR_DIV1 : TMIO_SD_CLKCTL_DIV1;
		else if (divisor <= 2)
			val = TMIO_SD_CLKCTL_DIV2;
		else if (divisor <= 4)
			val = TMIO_SD_CLKCTL_DIV4;
		else if (divisor <= 8)
			val = TMIO_SD_CLKCTL_DIV8;
		else if (divisor <= 16)
			val = TMIO_SD_CLKCTL_DIV16;
		else if (divisor <= 32)
			val = TMIO_SD_CLKCTL_DIV32;
		else if (divisor <= 64)
			val = TMIO_SD_CLKCTL_DIV64;
		else if (divisor <= 128)
			val = TMIO_SD_CLKCTL_DIV128;
		else if (divisor <= 256)
			val = TMIO_SD_CLKCTL_DIV256;
		else if (divisor <= 512 || !(priv->caps & TMIO_SD_CAP_DIV1024))
			val = TMIO_SD_CLKCTL_DIV512;
		else
			val = TMIO_SD_CLKCTL_DIV1024;
	}

	tmp = tmio_sd_readl(priv, TMIO_SD_CLKCTL);
	if (mmc->clock &&
	    !((tmp & TMIO_SD_CLKCTL_SCLKEN) &&
	      ((tmp & TMIO_SD_CLKCTL_DIV_MASK) == val))) {
		/*
		 * Stop the clock before changing its rate
		 * to avoid a glitch on the clock signal.
		 */
		tmp &= ~TMIO_SD_CLKCTL_SCLKEN;
		tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

		/* Change the clock rate. */
		tmp &= ~TMIO_SD_CLKCTL_DIV_MASK;
		tmp |= val;
	}

	/* Enable or disable the clock */
	if (mmc->clk_disable) {
		tmp |= TMIO_SD_CLKCTL_OFFEN;
		tmp &= ~TMIO_SD_CLKCTL_SCLKEN;
	} else {
		tmp &= ~TMIO_SD_CLKCTL_OFFEN;
		tmp |= TMIO_SD_CLKCTL_SCLKEN;
	}

	tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

	udelay(1000);
}

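/*
 * Switch the I/O voltage (via the optional vqmmc regulator) and the pinctrl
 * state between the default 3.3V setup and the 1.8V UHS setup.
 */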
static void tmio_sd_set_pins(struct udevice *dev)
{
	__maybe_unused struct mmc *mmc = mmc_get_mmc_dev(dev);

#ifdef CONFIG_DM_REGULATOR
	struct tmio_sd_priv *priv = dev_get_priv(dev);

	if (priv->vqmmc_dev) {
		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(priv->vqmmc_dev, 1800000);
		else
			regulator_set_value(priv->vqmmc_dev, 3300000);
		regulator_set_enable(priv->vqmmc_dev, true);
	}
#endif

#ifdef CONFIG_PINCTRL
	if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
		pinctrl_select_state(dev, "state_uhs");
	else
		pinctrl_select_state(dev, "default");
#endif
}

int tmio_sd_set_ios(struct udevice *dev)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	struct mmc *mmc = mmc_get_mmc_dev(dev);
	int ret;

	dev_dbg(dev, "clock %uHz, DDRmode %d, width %u\n",
		mmc->clock, mmc->ddr_mode, mmc->bus_width);

	tmio_sd_set_clk_rate(priv, mmc);
	ret = tmio_sd_set_bus_width(priv, mmc);
	if (ret)
		return ret;
	tmio_sd_set_ddr_mode(priv, mmc);
	tmio_sd_set_pins(dev);

	return 0;
}

int tmio_sd_get_cd(struct udevice *dev)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);

	if (priv->caps & TMIO_SD_CAP_NONREMOVABLE)
		return 1;

	return !!(tmio_sd_readl(priv, TMIO_SD_INFO1) &
		  TMIO_SD_INFO1_CD);
}

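/*
 * Bring the controller to a known state: soft-reset the host, program the
 * host bus mode register according to the IP version, and enable
 * address-increment mode for the internal DMA engine when available.
 */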
static void tmio_sd_host_init(struct tmio_sd_priv *priv)
{
	u32 tmp;

	/* soft reset of the host */
	tmp = tmio_sd_readl(priv, TMIO_SD_SOFT_RST);
	tmp &= ~TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);
	tmp |= TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);

	/* FIXME: implement eMMC hw_reset */

	tmio_sd_writel(priv, TMIO_SD_STOP_SEC, TMIO_SD_STOP);

	/*
	 * Connected to 32bit AXI.
	 * This register dropped backward compatibility at version 0x10.
	 * Write an appropriate value depending on the IP version.
	 */
	if (priv->version >= 0x10) {
		if (priv->caps & TMIO_SD_CAP_64BIT)
			tmio_sd_writel(priv, 0x000, TMIO_SD_HOST_MODE);
		else
			tmio_sd_writel(priv, 0x101, TMIO_SD_HOST_MODE);
	} else {
		tmio_sd_writel(priv, 0x0, TMIO_SD_HOST_MODE);
	}

	if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL) {
		tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);
		tmp |= TMIO_SD_DMA_MODE_ADDR_INC;
		tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);
	}
}

int tmio_sd_bind(struct udevice *dev)
{
	struct tmio_sd_plat *plat = dev_get_platdata(dev);

	return mmc_bind(dev, &plat->mmc, &plat->cfg);
}

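/*
 * Shared probe helper: map the registers, parse the device tree host caps,
 * merge in the caller-provided quirks, detect the IP version, initialise
 * the host and derive the supported clock range from the module clock.
 */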
int tmio_sd_probe(struct udevice *dev, u32 quirks)
{
	struct tmio_sd_plat *plat = dev_get_platdata(dev);
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	fdt_addr_t base;
	ulong mclk;
	int ret;

	base = devfdt_get_addr(dev);
	if (base == FDT_ADDR_T_NONE)
		return -EINVAL;

	priv->regbase = devm_ioremap(dev, base, SZ_2K);
	if (!priv->regbase)
		return -ENOMEM;

#ifdef CONFIG_DM_REGULATOR
	device_get_supply_regulator(dev, "vqmmc-supply", &priv->vqmmc_dev);
	if (priv->vqmmc_dev)
		regulator_set_value(priv->vqmmc_dev, 3300000);
#endif

	ret = mmc_of_parse(dev, &plat->cfg);
	if (ret < 0) {
		dev_err(dev, "failed to parse host caps\n");
		return ret;
	}

	plat->cfg.name = dev->name;
	plat->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	if (quirks)
		priv->caps = quirks;

	priv->version = tmio_sd_readl(priv, TMIO_SD_VERSION) &
			TMIO_SD_VERSION_IP;
	dev_dbg(dev, "version %x\n", priv->version);
	if (priv->version >= 0x10) {
		priv->caps |= TMIO_SD_CAP_DMA_INTERNAL;
		priv->caps |= TMIO_SD_CAP_DIV1024;
	}

	if (fdt_get_property(gd->fdt_blob, dev_of_offset(dev), "non-removable",
			     NULL))
		priv->caps |= TMIO_SD_CAP_NONREMOVABLE;

	tmio_sd_host_init(priv);

	mclk = tmio_sd_clk_get_rate(priv);

	plat->cfg.voltages = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34;
	plat->cfg.f_min = mclk /
			  (priv->caps & TMIO_SD_CAP_DIV1024 ? 1024 : 512);
	plat->cfg.f_max = mclk;
	if (quirks & TMIO_SD_CAP_16BIT)
		plat->cfg.b_max = U16_MAX; /* max value of TMIO_SD_SECCNT */
	else
		plat->cfg.b_max = U32_MAX; /* max value of TMIO_SD_SECCNT */

	upriv->mmc = &plat->mmc;

	return 0;
}