blob: 719c4830bc30bad9fc4fa1f762cfeb1f44404474 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Marek Vasutfd83e762018-04-13 23:51:33 +02002/*
3 * Copyright (C) 2016 Socionext Inc.
4 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
Marek Vasutfd83e762018-04-13 23:51:33 +02005 */
6
7#include <common.h>
8#include <clk.h>
Simon Glass63334482019-11-14 12:57:39 -07009#include <cpu_func.h>
Marek Vasutfd83e762018-04-13 23:51:33 +020010#include <fdtdec.h>
11#include <mmc.h>
12#include <dm.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060013#include <asm/global_data.h>
Simon Glass9bc15642020-02-03 07:36:16 -070014#include <dm/device_compat.h>
Marek Vasutfd83e762018-04-13 23:51:33 +020015#include <dm/pinctrl.h>
16#include <linux/compat.h>
Simon Glassdbd79542020-05-10 11:40:11 -060017#include <linux/delay.h>
Masahiro Yamada6373a172020-02-14 16:40:19 +090018#include <linux/dma-mapping.h>
Marek Vasutfd83e762018-04-13 23:51:33 +020019#include <linux/io.h>
20#include <linux/sizes.h>
21#include <power/regulator.h>
22#include <asm/unaligned.h>
23
24#include "tmio-common.h"
25
26DECLARE_GLOBAL_DATA_PTR;
27
28static u64 tmio_sd_readq(struct tmio_sd_priv *priv, unsigned int reg)
29{
30 return readq(priv->regbase + (reg << 1));
31}
32
33static void tmio_sd_writeq(struct tmio_sd_priv *priv,
34 u64 val, unsigned int reg)
35{
36 writeq(val, priv->regbase + (reg << 1));
37}
38
39static u16 tmio_sd_readw(struct tmio_sd_priv *priv, unsigned int reg)
40{
41 return readw(priv->regbase + (reg >> 1));
42}
43
44static void tmio_sd_writew(struct tmio_sd_priv *priv,
45 u16 val, unsigned int reg)
46{
47 writew(val, priv->regbase + (reg >> 1));
48}
49
50u32 tmio_sd_readl(struct tmio_sd_priv *priv, unsigned int reg)
51{
52 u32 val;
53
54 if (priv->caps & TMIO_SD_CAP_64BIT)
55 return readl(priv->regbase + (reg << 1));
56 else if (priv->caps & TMIO_SD_CAP_16BIT) {
57 val = readw(priv->regbase + (reg >> 1)) & 0xffff;
58 if ((reg == TMIO_SD_RSP10) || (reg == TMIO_SD_RSP32) ||
59 (reg == TMIO_SD_RSP54) || (reg == TMIO_SD_RSP76)) {
60 val |= readw(priv->regbase + (reg >> 1) + 2) << 16;
61 }
62 return val;
63 } else
64 return readl(priv->regbase + reg);
65}
66
67void tmio_sd_writel(struct tmio_sd_priv *priv,
68 u32 val, unsigned int reg)
69{
70 if (priv->caps & TMIO_SD_CAP_64BIT)
71 writel(val, priv->regbase + (reg << 1));
72 else if (priv->caps & TMIO_SD_CAP_16BIT) {
73 writew(val & 0xffff, priv->regbase + (reg >> 1));
74 if (reg == TMIO_SD_INFO1 || reg == TMIO_SD_INFO1_MASK ||
75 reg == TMIO_SD_INFO2 || reg == TMIO_SD_INFO2_MASK ||
76 reg == TMIO_SD_ARG)
77 writew(val >> 16, priv->regbase + (reg >> 1) + 2);
78 } else
79 writel(val, priv->regbase + reg);
80}
81
/*
 * Decode the TMIO_SD_INFO2 error bits for the command that just ran and
 * map them to a negative errno.  Returns 0 when no error bit is set.
 */
static int tmio_sd_check_error(struct udevice *dev, struct mmc_cmd *cmd)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	u32 info2 = tmio_sd_readl(priv, TMIO_SD_INFO2);

	if (info2 & TMIO_SD_INFO2_ERR_RTO) {
		/*
		 * TIMEOUT must be returned for unsupported command. Do not
		 * display error log since this might be a part of sequence to
		 * distinguish between SD and MMC.
		 */
		return -ETIMEDOUT;
	}

	if (info2 & TMIO_SD_INFO2_ERR_TO) {
		dev_err(dev, "timeout error\n");
		return -ETIMEDOUT;
	}

	if (info2 & (TMIO_SD_INFO2_ERR_END | TMIO_SD_INFO2_ERR_CRC |
		     TMIO_SD_INFO2_ERR_IDX)) {
		/* Stay quiet for the tuning commands, where CRC errors occur */
		if ((cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK) &&
		    (cmd->cmdidx != MMC_CMD_SEND_TUNING_BLOCK_HS200))
			dev_err(dev, "communication out of sync\n");
		return -EILSEQ;
	}

	if (info2 & (TMIO_SD_INFO2_ERR_ILA | TMIO_SD_INFO2_ERR_ILR |
		     TMIO_SD_INFO2_ERR_ILW)) {
		dev_err(dev, "illegal access\n");
		return -EIO;
	}

	return 0;
}
117
Marek Vasutdc86e912018-10-30 22:05:54 +0100118static int tmio_sd_wait_for_irq(struct udevice *dev, struct mmc_cmd *cmd,
119 unsigned int reg, u32 flag)
Marek Vasutfd83e762018-04-13 23:51:33 +0200120{
121 struct tmio_sd_priv *priv = dev_get_priv(dev);
122 long wait = 1000000;
123 int ret;
124
Marek Vasutf0ea8a72023-10-14 23:56:03 +0200125 while (true) {
126 if (tmio_sd_readl(priv, reg) & flag)
127 return tmio_sd_check_error(dev, cmd);
128
Marek Vasutfd83e762018-04-13 23:51:33 +0200129 if (wait-- < 0) {
130 dev_err(dev, "timeout\n");
131 return -ETIMEDOUT;
132 }
133
Marek Vasutdc86e912018-10-30 22:05:54 +0100134 ret = tmio_sd_check_error(dev, cmd);
Marek Vasutfd83e762018-04-13 23:51:33 +0200135 if (ret)
136 return ret;
137
138 udelay(1);
139 }
140
141 return 0;
142}
143
/*
 * Generate a PIO FIFO drain helper for the given access width (64/32/16
 * bits).  The aligned fast path stores FIFO words straight into the
 * buffer; the unaligned path goes through put_unaligned() so that
 * strict-alignment CPUs do not fault.
 */
#define tmio_pio_read_fifo(__width, __suffix)				\
static void tmio_pio_read_fifo_##__width(struct tmio_sd_priv *priv,	\
					 char *pbuf, uint blksz)	\
{									\
	u##__width *buf = (u##__width *)pbuf;				\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			*buf++ = tmio_sd_read##__suffix(priv,		\
							TMIO_SD_BUF);	\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data;				\
			data = tmio_sd_read##__suffix(priv,		\
						      TMIO_SD_BUF);	\
			put_unaligned(data, buf++);			\
		}							\
	}								\
}

tmio_pio_read_fifo(64, q)
tmio_pio_read_fifo(32, l)
tmio_pio_read_fifo(16, w)
169
Marek Vasutdc86e912018-10-30 22:05:54 +0100170static int tmio_sd_pio_read_one_block(struct udevice *dev, struct mmc_cmd *cmd,
171 char *pbuf, uint blocksize)
Marek Vasutfd83e762018-04-13 23:51:33 +0200172{
173 struct tmio_sd_priv *priv = dev_get_priv(dev);
174 int ret;
175
176 /* wait until the buffer is filled with data */
Marek Vasutdc86e912018-10-30 22:05:54 +0100177 ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
178 TMIO_SD_INFO2_BRE);
Marek Vasutfd83e762018-04-13 23:51:33 +0200179 if (ret)
180 return ret;
181
182 /*
183 * Clear the status flag _before_ read the buffer out because
184 * TMIO_SD_INFO2_BRE is edge-triggered, not level-triggered.
185 */
186 tmio_sd_writel(priv, 0, TMIO_SD_INFO2);
187
188 if (priv->caps & TMIO_SD_CAP_64BIT)
189 tmio_pio_read_fifo_64(priv, pbuf, blocksize);
190 else if (priv->caps & TMIO_SD_CAP_16BIT)
191 tmio_pio_read_fifo_16(priv, pbuf, blocksize);
192 else
193 tmio_pio_read_fifo_32(priv, pbuf, blocksize);
194
195 return 0;
196}
197
/*
 * Generate a PIO FIFO fill helper for the given access width (64/32/16
 * bits).  Mirrors tmio_pio_read_fifo(): aligned buffers are streamed
 * directly, unaligned ones are loaded via get_unaligned().
 */
#define tmio_pio_write_fifo(__width, __suffix)				\
static void tmio_pio_write_fifo_##__width(struct tmio_sd_priv *priv,	\
					  const char *pbuf, uint blksz)\
{									\
	const u##__width *buf = (const u##__width *)pbuf;		\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			tmio_sd_write##__suffix(priv, *buf++,		\
						TMIO_SD_BUF);		\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data = get_unaligned(buf++);		\
			tmio_sd_write##__suffix(priv, data,		\
						TMIO_SD_BUF);		\
		}							\
	}								\
}

tmio_pio_write_fifo(64, q)
tmio_pio_write_fifo(32, l)
tmio_pio_write_fifo(16, w)
222
Marek Vasutdc86e912018-10-30 22:05:54 +0100223static int tmio_sd_pio_write_one_block(struct udevice *dev, struct mmc_cmd *cmd,
Marek Vasutfd83e762018-04-13 23:51:33 +0200224 const char *pbuf, uint blocksize)
225{
226 struct tmio_sd_priv *priv = dev_get_priv(dev);
227 int ret;
228
229 /* wait until the buffer becomes empty */
Marek Vasutdc86e912018-10-30 22:05:54 +0100230 ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
231 TMIO_SD_INFO2_BWE);
Marek Vasutfd83e762018-04-13 23:51:33 +0200232 if (ret)
233 return ret;
234
235 tmio_sd_writel(priv, 0, TMIO_SD_INFO2);
236
237 if (priv->caps & TMIO_SD_CAP_64BIT)
238 tmio_pio_write_fifo_64(priv, pbuf, blocksize);
239 else if (priv->caps & TMIO_SD_CAP_16BIT)
240 tmio_pio_write_fifo_16(priv, pbuf, blocksize);
241 else
242 tmio_pio_write_fifo_32(priv, pbuf, blocksize);
243
244 return 0;
245}
246
Marek Vasutdc86e912018-10-30 22:05:54 +0100247static int tmio_sd_pio_xfer(struct udevice *dev, struct mmc_cmd *cmd,
248 struct mmc_data *data)
Marek Vasutfd83e762018-04-13 23:51:33 +0200249{
250 const char *src = data->src;
251 char *dest = data->dest;
252 int i, ret;
253
254 for (i = 0; i < data->blocks; i++) {
255 if (data->flags & MMC_DATA_READ)
Marek Vasutdc86e912018-10-30 22:05:54 +0100256 ret = tmio_sd_pio_read_one_block(dev, cmd, dest,
Marek Vasutfd83e762018-04-13 23:51:33 +0200257 data->blocksize);
258 else
Marek Vasutdc86e912018-10-30 22:05:54 +0100259 ret = tmio_sd_pio_write_one_block(dev, cmd, src,
Marek Vasutfd83e762018-04-13 23:51:33 +0200260 data->blocksize);
261 if (ret)
262 return ret;
263
264 if (data->flags & MMC_DATA_READ)
265 dest += data->blocksize;
266 else
267 src += data->blocksize;
268 }
269
270 return 0;
271}
272
/*
 * Program the internal DMA engine with the (already mapped) buffer
 * address and kick off the transfer.  Direction and bus width must have
 * been configured in TMIO_SD_DMA_MODE by the caller.
 */
static void tmio_sd_dma_start(struct tmio_sd_priv *priv,
			      dma_addr_t dma_addr)
{
	u32 tmp;

	/* Clear stale DMA status from any previous transfer */
	tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO1);
	tmio_sd_writel(priv, 0, TMIO_SD_DMA_INFO2);

	/* enable DMA */
	tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE);
	tmp |= TMIO_SD_EXTMODE_DMA_EN;
	tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE);

	tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_L);

	/* suppress the warning "right shift count >= width of type" */
	dma_addr >>= min_t(int, 32, 8 * sizeof(dma_addr));

	tmio_sd_writel(priv, dma_addr & U32_MAX, TMIO_SD_DMA_ADDR_H);

	tmio_sd_writel(priv, TMIO_SD_DMA_CTL_START, TMIO_SD_DMA_CTL);
}
295
296static int tmio_sd_dma_wait_for_irq(struct udevice *dev, u32 flag,
297 unsigned int blocks)
298{
299 struct tmio_sd_priv *priv = dev_get_priv(dev);
300 long wait = 1000000 + 10 * blocks;
301
Marek Vasutc2275a32024-02-20 09:38:14 +0100302 for (;;) {
303 if (tmio_sd_readl(priv, TMIO_SD_DMA_INFO1) & flag)
304 break;
305
306 if (tmio_sd_readl(priv, TMIO_SD_INFO1) & TMIO_SD_INFO1_CMP)
307 break;
308
Marek Vasutfd83e762018-04-13 23:51:33 +0200309 if (wait-- < 0) {
310 dev_err(dev, "timeout during DMA\n");
311 return -ETIMEDOUT;
312 }
313
314 udelay(10);
315 }
316
317 if (tmio_sd_readl(priv, TMIO_SD_DMA_INFO2)) {
318 dev_err(dev, "error during DMA\n");
319 return -EIO;
320 }
321
322 return 0;
323}
324
/*
 * Run one data transfer through the internal DMA engine: configure
 * direction and bus width, map the buffer, start the engine, wait for
 * completion and unmap.  Returns 0 on success or a negative errno.
 */
static int tmio_sd_dma_xfer(struct udevice *dev, struct mmc_data *data)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	size_t len = data->blocks * data->blocksize;
	void *buf;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	u32 poll_flag, tmp;
	int ret;

	tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);

	/* SoC-specific internal-DMA bus width bits */
	tmp |= priv->idma_bus_width;

	if (data->flags & MMC_DATA_READ) {
		buf = data->dest;
		dir = DMA_FROM_DEVICE;
		/*
		 * The DMA READ completion flag position differs on Socionext
		 * and Renesas SoCs. It is bit 20 on Socionext SoCs and using
		 * bit 17 is a hardware bug and forbidden. It is either bit 17
		 * or bit 20 on Renesas SoCs, depending on SoC.
		 */
		poll_flag = priv->read_poll_flag;
		tmp |= TMIO_SD_DMA_MODE_DIR_RD;
	} else {
		buf = (void *)data->src;
		dir = DMA_TO_DEVICE;
		poll_flag = TMIO_SD_DMA_INFO1_END_WR;
		tmp &= ~TMIO_SD_DMA_MODE_DIR_RD;
	}

	tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);

	dma_addr = dma_map_single(buf, len, dir);

	tmio_sd_dma_start(priv, dma_addr);

	ret = tmio_sd_dma_wait_for_irq(dev, poll_flag, data->blocks);

	/* Brief settle delay after read completion, before unmapping */
	if (poll_flag == TMIO_SD_DMA_INFO1_END_RD)
		udelay(1);

	dma_unmap_single(dma_addr, len, dir);

	return ret;
}
372
/* check if the address is DMA'able */
static bool tmio_sd_addr_is_dmaable(struct mmc_data *data)
{
	uintptr_t addr = (uintptr_t)data->src;

	/* Buffer must be aligned to the engine's minimum alignment */
	if (!IS_ALIGNED(addr, TMIO_SD_DMA_MINALIGN))
		return false;

	if (IS_ENABLED(CONFIG_RCAR_64)) {
		/* Writes additionally require 128-byte alignment here */
		if (!(data->flags & MMC_DATA_READ) && !IS_ALIGNED(addr, 128))
			return false;
		/* Gen3 DMA has 32bit limit */
		if (sizeof(addr) > 4 && addr >> 32)
			return false;
	}

#ifdef CONFIG_SPL_BUILD
	if (IS_ENABLED(CONFIG_ARCH_UNIPHIER) && !IS_ENABLED(CONFIG_ARM64)) {
		/*
		 * For UniPhier ARMv7 SoCs, the stack is allocated in locked
		 * ways of L2, which is unreachable from the DMA engine.
		 */
		if (addr < CONFIG_SPL_STACK)
			return false;
	}
#endif

	return true;
}
402
/*
 * Send one command (with an optional data phase) to the card.
 *
 * Clears stale status, programs argument/block registers, issues the
 * command with an explicitly selected response type, reads back the
 * response and runs the data phase via internal DMA when the hardware
 * supports it and the buffer qualifies, PIO otherwise.
 *
 * Returns 0 on success or a negative errno (-EBUSY when a command is
 * still in flight, -EINVAL for an unknown response type, or whatever
 * the wait/transfer helpers report).
 */
int tmio_sd_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
		     struct mmc_data *data)
{
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	int ret;
	u32 tmp;

	if (tmio_sd_readl(priv, TMIO_SD_INFO2) & TMIO_SD_INFO2_CBSY) {
		dev_err(dev, "command busy\n");
		return -EBUSY;
	}

	/* clear all status flags */
	tmio_sd_writel(priv, 0, TMIO_SD_INFO1);
	tmio_sd_writel(priv, 0, TMIO_SD_INFO2);

	/* disable DMA once */
	tmp = tmio_sd_readl(priv, TMIO_SD_EXTMODE);
	tmp &= ~TMIO_SD_EXTMODE_DMA_EN;
	tmio_sd_writel(priv, tmp, TMIO_SD_EXTMODE);

	tmio_sd_writel(priv, cmd->cmdarg, TMIO_SD_ARG);

	tmp = cmd->cmdidx;

	if (data) {
		tmio_sd_writel(priv, data->blocksize, TMIO_SD_SIZE);
		tmio_sd_writel(priv, data->blocks, TMIO_SD_SECCNT);

		/* Do not send CMD12 automatically */
		tmp |= TMIO_SD_CMD_NOSTOP | TMIO_SD_CMD_DATA;

		if (data->blocks > 1)
			tmp |= TMIO_SD_CMD_MULTI;

		if (data->flags & MMC_DATA_READ)
			tmp |= TMIO_SD_CMD_RD;
	}

	/*
	 * Do not use the response type auto-detection on this hardware.
	 * CMD8, for example, has different response types on SD and eMMC,
	 * while this controller always assumes the response type for SD.
	 * Set the response type manually.
	 */
	switch (cmd->resp_type) {
	case MMC_RSP_NONE:
		tmp |= TMIO_SD_CMD_RSP_NONE;
		break;
	case MMC_RSP_R1:
		tmp |= TMIO_SD_CMD_RSP_R1;
		break;
	case MMC_RSP_R1b:
		tmp |= TMIO_SD_CMD_RSP_R1B;
		break;
	case MMC_RSP_R2:
		tmp |= TMIO_SD_CMD_RSP_R2;
		break;
	case MMC_RSP_R3:
		tmp |= TMIO_SD_CMD_RSP_R3;
		break;
	default:
		dev_err(dev, "unknown response type\n");
		return -EINVAL;
	}

	dev_dbg(dev, "sending CMD%d (SD_CMD=%08x, SD_ARG=%08x)\n",
		cmd->cmdidx, tmp, cmd->cmdarg);
	tmio_sd_writel(priv, tmp, TMIO_SD_CMD);

	/* Wait for the response phase to finish */
	ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO1,
				   TMIO_SD_INFO1_RSP);
	if (ret)
		return ret;

	if (cmd->resp_type & MMC_RSP_136) {
		/*
		 * Reassemble the 136-bit response: the hardware drops the
		 * lowest 8 bits, so each word is shifted up by one byte.
		 */
		u32 rsp_127_104 = tmio_sd_readl(priv, TMIO_SD_RSP76);
		u32 rsp_103_72 = tmio_sd_readl(priv, TMIO_SD_RSP54);
		u32 rsp_71_40 = tmio_sd_readl(priv, TMIO_SD_RSP32);
		u32 rsp_39_8 = tmio_sd_readl(priv, TMIO_SD_RSP10);

		cmd->response[0] = ((rsp_127_104 & 0x00ffffff) << 8) |
				   ((rsp_103_72 & 0xff000000) >> 24);
		cmd->response[1] = ((rsp_103_72 & 0x00ffffff) << 8) |
				   ((rsp_71_40 & 0xff000000) >> 24);
		cmd->response[2] = ((rsp_71_40 & 0x00ffffff) << 8) |
				   ((rsp_39_8 & 0xff000000) >> 24);
		cmd->response[3] = (rsp_39_8 & 0xffffff) << 8;
	} else {
		/* bit 39-8 */
		cmd->response[0] = tmio_sd_readl(priv, TMIO_SD_RSP10);
	}

	if (data) {
		/* use DMA if the HW supports it and the buffer is aligned */
		if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL &&
		    tmio_sd_addr_is_dmaable(data))
			ret = tmio_sd_dma_xfer(dev, data);
		else
			ret = tmio_sd_pio_xfer(dev, cmd, data);
		if (ret)
			return ret;

		/* Wait for the access-complete interrupt */
		ret = tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO1,
					   TMIO_SD_INFO1_CMP);
		if (ret)
			return ret;
	}

	/* Wait until the divided clock may be switched again */
	return tmio_sd_wait_for_irq(dev, cmd, TMIO_SD_INFO2,
				    TMIO_SD_INFO2_SCLKDIVEN);
}
515
516static int tmio_sd_set_bus_width(struct tmio_sd_priv *priv,
517 struct mmc *mmc)
518{
519 u32 val, tmp;
520
521 switch (mmc->bus_width) {
522 case 0:
523 case 1:
524 val = TMIO_SD_OPTION_WIDTH_1;
525 break;
526 case 4:
527 val = TMIO_SD_OPTION_WIDTH_4;
528 break;
529 case 8:
530 val = TMIO_SD_OPTION_WIDTH_8;
531 break;
532 default:
533 return -EINVAL;
534 }
535
536 tmp = tmio_sd_readl(priv, TMIO_SD_OPTION);
537 tmp &= ~TMIO_SD_OPTION_WIDTH_MASK;
538 tmp |= val;
539 tmio_sd_writel(priv, tmp, TMIO_SD_OPTION);
540
541 return 0;
542}
543
544static void tmio_sd_set_ddr_mode(struct tmio_sd_priv *priv,
545 struct mmc *mmc)
546{
547 u32 tmp;
548
549 tmp = tmio_sd_readl(priv, TMIO_SD_IF_MODE);
550 if (mmc->ddr_mode)
551 tmp |= TMIO_SD_IF_MODE_DDR;
552 else
553 tmp &= ~TMIO_SD_IF_MODE_DDR;
554 tmio_sd_writel(priv, tmp, TMIO_SD_IF_MODE);
555}
556
/* Query the clock rate through the SoC-specific callback in priv. */
static ulong tmio_sd_clk_get_rate(struct tmio_sd_priv *priv)
{
	return priv->clk_get_rate(priv);
}
561
/*
 * Configure the SD clock divider and clock enable for the rate
 * requested in mmc->clock.  The clock is stopped before a divider
 * change to avoid glitches, and gated or enabled according to
 * mmc->clk_disable.
 */
static void tmio_sd_set_clk_rate(struct tmio_sd_priv *priv, struct mmc *mmc)
{
	unsigned int divisor;
	u32 tmp, val = 0;
	ulong mclk;

	if (mmc->clock) {
		mclk = tmio_sd_clk_get_rate(priv);

		/* Round up so the resulting rate never exceeds the request */
		divisor = DIV_ROUND_UP(mclk, mmc->clock);

		/* Do not set divider to 0xff in DDR mode */
		if (mmc->ddr_mode && (divisor == 1))
			divisor = 2;

		/* Map the divisor to the nearest supported power of two */
		if (divisor <= 1)
			val = (priv->caps & TMIO_SD_CAP_RCAR) ?
			      TMIO_SD_CLKCTL_RCAR_DIV1 : TMIO_SD_CLKCTL_DIV1;
		else if (divisor <= 2)
			val = TMIO_SD_CLKCTL_DIV2;
		else if (divisor <= 4)
			val = TMIO_SD_CLKCTL_DIV4;
		else if (divisor <= 8)
			val = TMIO_SD_CLKCTL_DIV8;
		else if (divisor <= 16)
			val = TMIO_SD_CLKCTL_DIV16;
		else if (divisor <= 32)
			val = TMIO_SD_CLKCTL_DIV32;
		else if (divisor <= 64)
			val = TMIO_SD_CLKCTL_DIV64;
		else if (divisor <= 128)
			val = TMIO_SD_CLKCTL_DIV128;
		else if (divisor <= 256)
			val = TMIO_SD_CLKCTL_DIV256;
		else if (divisor <= 512 || !(priv->caps & TMIO_SD_CAP_DIV1024))
			val = TMIO_SD_CLKCTL_DIV512;
		else
			val = TMIO_SD_CLKCTL_DIV1024;
	}

	tmp = tmio_sd_readl(priv, TMIO_SD_CLKCTL);
	if (mmc->clock &&
	    !((tmp & TMIO_SD_CLKCTL_SCLKEN) &&
	      ((tmp & TMIO_SD_CLKCTL_DIV_MASK) == val))) {
		/*
		 * Stop the clock before changing its rate
		 * to avoid a glitch signal
		 */
		tmp &= ~TMIO_SD_CLKCTL_SCLKEN;
		tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

		/* Change the clock rate. */
		tmp &= ~TMIO_SD_CLKCTL_DIV_MASK;
		tmp |= val;
	}

	/* Enable or Disable the clock */
	if (mmc->clk_disable) {
		tmp |= TMIO_SD_CLKCTL_OFFEN;
		tmp &= ~TMIO_SD_CLKCTL_SCLKEN;
	} else {
		tmp &= ~TMIO_SD_CLKCTL_OFFEN;
		tmp |= TMIO_SD_CLKCTL_SCLKEN;
	}

	tmio_sd_writel(priv, tmp, TMIO_SD_CLKCTL);

	/* Allow the new clock setting to settle */
	udelay(1000);
}
631
/*
 * Switch the I/O voltage regulator and pinctrl state to match the
 * negotiated signal voltage: 1.8V / "state_uhs" for UHS modes,
 * 3.3V / "default" otherwise.  Both actions are compile-time optional.
 */
static void tmio_sd_set_pins(struct udevice *dev)
{
	__maybe_unused struct mmc *mmc = mmc_get_mmc_dev(dev);
	struct tmio_sd_priv *priv = dev_get_priv(dev);

	if (CONFIG_IS_ENABLED(DM_REGULATOR) && priv->vqmmc_dev) {
		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(priv->vqmmc_dev, 1800000);
		else
			regulator_set_value(priv->vqmmc_dev, 3300000);
		regulator_set_enable(priv->vqmmc_dev, true);
	}

	if (CONFIG_IS_ENABLED(PINCTRL)) {
		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			pinctrl_select_state(dev, "state_uhs");
		else
			pinctrl_select_state(dev, "default");
	}
}
652
653int tmio_sd_set_ios(struct udevice *dev)
654{
655 struct tmio_sd_priv *priv = dev_get_priv(dev);
656 struct mmc *mmc = mmc_get_mmc_dev(dev);
657 int ret;
658
659 dev_dbg(dev, "clock %uHz, DDRmode %d, width %u\n",
660 mmc->clock, mmc->ddr_mode, mmc->bus_width);
661
Marek Vasutbfe04e02018-06-13 08:02:55 +0200662 tmio_sd_set_clk_rate(priv, mmc);
Marek Vasutfd83e762018-04-13 23:51:33 +0200663 ret = tmio_sd_set_bus_width(priv, mmc);
664 if (ret)
665 return ret;
666 tmio_sd_set_ddr_mode(priv, mmc);
Marek Vasutfd83e762018-04-13 23:51:33 +0200667 tmio_sd_set_pins(dev);
668
669 return 0;
670}
671
672int tmio_sd_get_cd(struct udevice *dev)
673{
674 struct tmio_sd_priv *priv = dev_get_priv(dev);
675
676 if (priv->caps & TMIO_SD_CAP_NONREMOVABLE)
677 return 1;
678
679 return !!(tmio_sd_readl(priv, TMIO_SD_INFO1) &
680 TMIO_SD_INFO1_CD);
681}
682
/*
 * Bring the host controller into a known state: soft-reset it, disable
 * the stop-at-block-count feature and program the host bus mode and
 * internal DMA defaults appropriate for the detected IP version.
 */
static void tmio_sd_host_init(struct tmio_sd_priv *priv)
{
	u32 tmp;

	/* soft reset of the host */
	tmp = tmio_sd_readl(priv, TMIO_SD_SOFT_RST);
	tmp &= ~TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);
	tmp |= TMIO_SD_SOFT_RST_RSTX;
	tmio_sd_writel(priv, tmp, TMIO_SD_SOFT_RST);

	/* FIXME: implement eMMC hw_reset */

	tmio_sd_writel(priv, TMIO_SD_STOP_SEC, TMIO_SD_STOP);

	/*
	 * Connected to 32bit AXI.
	 * This register dropped backward compatibility at version 0x10.
	 * Write an appropriate value depending on the IP version.
	 */
	if (priv->version >= 0x10) {
		if (priv->caps & TMIO_SD_CAP_64BIT)
			tmio_sd_writel(priv, 0x000, TMIO_SD_HOST_MODE);
		else
			tmio_sd_writel(priv, 0x101, TMIO_SD_HOST_MODE);
	} else {
		tmio_sd_writel(priv, 0x0, TMIO_SD_HOST_MODE);
	}

	if (priv->caps & TMIO_SD_CAP_DMA_INTERNAL) {
		/* Auto-increment addressing and SoC-specific bus width */
		tmp = tmio_sd_readl(priv, TMIO_SD_DMA_MODE);
		tmp |= TMIO_SD_DMA_MODE_ADDR_INC;
		tmp |= priv->idma_bus_width;
		tmio_sd_writel(priv, tmp, TMIO_SD_DMA_MODE);
	}
}
719
/* Bind callback: register the MMC device backed by this controller. */
int tmio_sd_bind(struct udevice *dev)
{
	struct tmio_sd_plat *plat = dev_get_plat(dev);

	return mmc_bind(dev, &plat->mmc, &plat->cfg);
}
726
/*
 * Common probe routine, called by the SoC-specific drivers with their
 * quirk/capability flags.  Maps the register window, parses the device
 * tree, detects the IP version and derives the host capabilities and
 * min/max clock rates.  Returns 0 on success or a negative errno.
 */
int tmio_sd_probe(struct udevice *dev, u32 quirks)
{
	struct tmio_sd_plat *plat = dev_get_plat(dev);
	struct tmio_sd_priv *priv = dev_get_priv(dev);
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	fdt_addr_t base;
	ulong mclk;
	int ret;

	base = dev_read_addr(dev);
	if (base == FDT_ADDR_T_NONE)
		return -EINVAL;

	priv->regbase = devm_ioremap(dev, base, SZ_2K);
	if (!priv->regbase)
		return -ENOMEM;

	/* I/O voltage regulator is optional; default it to 3.3V */
	if (CONFIG_IS_ENABLED(DM_REGULATOR)) {
		device_get_supply_regulator(dev, "vqmmc-supply",
					    &priv->vqmmc_dev);
		if (priv->vqmmc_dev)
			regulator_set_value(priv->vqmmc_dev, 3300000);
	}

	ret = mmc_of_parse(dev, &plat->cfg);
	if (ret < 0) {
		dev_err(dev, "failed to parse host caps\n");
		return ret;
	}

	plat->cfg.name = dev->name;
	plat->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	if (quirks)
		priv->caps = quirks;

	/* Version >= 0x10 IP always has the internal DMA engine */
	priv->version = tmio_sd_readl(priv, TMIO_SD_VERSION) &
			TMIO_SD_VERSION_IP;
	dev_dbg(dev, "version %x\n", priv->version);
	if (priv->version >= 0x10) {
		priv->caps |= TMIO_SD_CAP_DMA_INTERNAL;
		if (!(priv->caps & TMIO_SD_CAP_RCAR))
			priv->caps |= TMIO_SD_CAP_DIV1024;
	}

	if (fdt_get_property(gd->fdt_blob, dev_of_offset(dev), "non-removable",
			     NULL))
		priv->caps |= TMIO_SD_CAP_NONREMOVABLE;

	tmio_sd_host_init(priv);

	mclk = tmio_sd_clk_get_rate(priv);

	plat->cfg.voltages = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34;
	/* Slowest rate is mclk over the largest available divider */
	plat->cfg.f_min = mclk /
			  (priv->caps & TMIO_SD_CAP_DIV1024 ? 1024 : 512);
	plat->cfg.f_max = mclk;
	if (quirks & TMIO_SD_CAP_16BIT)
		plat->cfg.b_max = U16_MAX; /* max value of TMIO_SD_SECCNT */
	else
		plat->cfg.b_max = U32_MAX; /* max value of TMIO_SD_SECCNT */

	upriv->mmc = &plat->mmc;

	return 0;
}