blob: 7e05b1f3d1c6c198ca27adbfa7057e269294e09c [file] [log] [blame]
Marek Vasut06485cf2018-04-08 15:22:58 +02001/*
2 * Copyright (C) 2016 Socionext Inc.
3 * Author: Masahiro Yamada <yamada.masahiro@socionext.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0+
6 */
7
8#include <common.h>
9#include <clk.h>
10#include <fdtdec.h>
11#include <mmc.h>
12#include <dm.h>
13#include <linux/compat.h>
14#include <linux/dma-direction.h>
15#include <linux/io.h>
16#include <linux/sizes.h>
17#include <power/regulator.h>
18#include <asm/unaligned.h>
19
20#include "matsushita-common.h"
21
22DECLARE_GLOBAL_DATA_PTR;
23
24static u64 matsu_sd_readq(struct matsu_sd_priv *priv, unsigned int reg)
25{
Marek Vasutae5c89e2018-04-08 17:25:49 +020026 return readq(priv->regbase + (reg << 1));
Marek Vasut06485cf2018-04-08 15:22:58 +020027}
28
29static void matsu_sd_writeq(struct matsu_sd_priv *priv,
30 u64 val, unsigned int reg)
31{
Marek Vasutae5c89e2018-04-08 17:25:49 +020032 writeq(val, priv->regbase + (reg << 1));
Marek Vasut06485cf2018-04-08 15:22:58 +020033}
34
Marek Vasut2cddcc12018-04-08 17:41:14 +020035static u16 matsu_sd_readw(struct matsu_sd_priv *priv, unsigned int reg)
36{
37 return readw(priv->regbase + (reg >> 1));
38}
39
40static void matsu_sd_writew(struct matsu_sd_priv *priv,
41 u16 val, unsigned int reg)
42{
43 writew(val, priv->regbase + (reg >> 1));
44}
45
Marek Vasut06485cf2018-04-08 15:22:58 +020046static u32 matsu_sd_readl(struct matsu_sd_priv *priv, unsigned int reg)
47{
Marek Vasut2cddcc12018-04-08 17:41:14 +020048 u32 val;
49
Marek Vasut06485cf2018-04-08 15:22:58 +020050 if (priv->caps & MATSU_SD_CAP_64BIT)
51 return readl(priv->regbase + (reg << 1));
Marek Vasut2cddcc12018-04-08 17:41:14 +020052 else if (priv->caps & MATSU_SD_CAP_16BIT) {
53 val = readw(priv->regbase + (reg >> 1)) & 0xffff;
54 if ((reg == MATSU_SD_RSP10) || (reg == MATSU_SD_RSP32) ||
55 (reg == MATSU_SD_RSP54) || (reg == MATSU_SD_RSP76)) {
56 val |= readw(priv->regbase + (reg >> 1) + 2) << 16;
57 }
58 return val;
59 } else
Marek Vasut06485cf2018-04-08 15:22:58 +020060 return readl(priv->regbase + reg);
61}
62
63static void matsu_sd_writel(struct matsu_sd_priv *priv,
64 u32 val, unsigned int reg)
65{
66 if (priv->caps & MATSU_SD_CAP_64BIT)
67 writel(val, priv->regbase + (reg << 1));
Marek Vasut2cddcc12018-04-08 17:41:14 +020068 if (priv->caps & MATSU_SD_CAP_16BIT) {
69 writew(val & 0xffff, priv->regbase + (reg >> 1));
70 if (val >> 16)
71 writew(val >> 16, priv->regbase + (reg >> 1) + 2);
72 } else
Marek Vasut06485cf2018-04-08 15:22:58 +020073 writel(val, priv->regbase + reg);
74}
75
76static dma_addr_t __dma_map_single(void *ptr, size_t size,
77 enum dma_data_direction dir)
78{
79 unsigned long addr = (unsigned long)ptr;
80
81 if (dir == DMA_FROM_DEVICE)
82 invalidate_dcache_range(addr, addr + size);
83 else
84 flush_dcache_range(addr, addr + size);
85
86 return addr;
87}
88
89static void __dma_unmap_single(dma_addr_t addr, size_t size,
90 enum dma_data_direction dir)
91{
92 if (dir != DMA_TO_DEVICE)
93 invalidate_dcache_range(addr, addr + size);
94}
95
96static int matsu_sd_check_error(struct udevice *dev)
97{
98 struct matsu_sd_priv *priv = dev_get_priv(dev);
99 u32 info2 = matsu_sd_readl(priv, MATSU_SD_INFO2);
100
101 if (info2 & MATSU_SD_INFO2_ERR_RTO) {
102 /*
103 * TIMEOUT must be returned for unsupported command. Do not
104 * display error log since this might be a part of sequence to
105 * distinguish between SD and MMC.
106 */
107 return -ETIMEDOUT;
108 }
109
110 if (info2 & MATSU_SD_INFO2_ERR_TO) {
111 dev_err(dev, "timeout error\n");
112 return -ETIMEDOUT;
113 }
114
115 if (info2 & (MATSU_SD_INFO2_ERR_END | MATSU_SD_INFO2_ERR_CRC |
116 MATSU_SD_INFO2_ERR_IDX)) {
117 dev_err(dev, "communication out of sync\n");
118 return -EILSEQ;
119 }
120
121 if (info2 & (MATSU_SD_INFO2_ERR_ILA | MATSU_SD_INFO2_ERR_ILR |
122 MATSU_SD_INFO2_ERR_ILW)) {
123 dev_err(dev, "illegal access\n");
124 return -EIO;
125 }
126
127 return 0;
128}
129
130static int matsu_sd_wait_for_irq(struct udevice *dev, unsigned int reg,
131 u32 flag)
132{
133 struct matsu_sd_priv *priv = dev_get_priv(dev);
134 long wait = 1000000;
135 int ret;
136
137 while (!(matsu_sd_readl(priv, reg) & flag)) {
138 if (wait-- < 0) {
139 dev_err(dev, "timeout\n");
140 return -ETIMEDOUT;
141 }
142
143 ret = matsu_sd_check_error(dev);
144 if (ret)
145 return ret;
146
147 udelay(1);
148 }
149
150 return 0;
151}
152
/*
 * Generate a PIO FIFO drain helper for a given access width.
 *
 * matsu_pio_read_fifo_<width>() reads one block of @blksz bytes from the
 * data FIFO (MATSU_SD_BUF) into @pbuf using <width>-bit accesses.  The
 * aligned fast path stores directly; the unaligned path goes through
 * put_unaligned() to avoid faulting on architectures that trap misaligned
 * stores.  __suffix selects the matching matsu_sd_read{q,l,w} accessor.
 */
#define matsu_pio_read_fifo(__width, __suffix)				\
static void matsu_pio_read_fifo_##__width(struct matsu_sd_priv *priv,	\
					  char *pbuf, uint blksz)	\
{									\
	u##__width *buf = (u##__width *)pbuf;				\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			*buf++ = matsu_sd_read##__suffix(priv,		\
							 MATSU_SD_BUF);	\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data;				\
			data = matsu_sd_read##__suffix(priv,		\
						       MATSU_SD_BUF);	\
			put_unaligned(data, buf++);			\
		}							\
	}								\
}

/* One helper per supported bus width. */
matsu_pio_read_fifo(64, q)
matsu_pio_read_fifo(32, l)
matsu_pio_read_fifo(16, w)
Marek Vasut215ae0e2018-04-08 17:14:42 +0200178
Marek Vasut06485cf2018-04-08 15:22:58 +0200179static int matsu_sd_pio_read_one_block(struct udevice *dev, char *pbuf,
180 uint blocksize)
181{
182 struct matsu_sd_priv *priv = dev_get_priv(dev);
Marek Vasut215ae0e2018-04-08 17:14:42 +0200183 int ret;
Marek Vasut06485cf2018-04-08 15:22:58 +0200184
185 /* wait until the buffer is filled with data */
186 ret = matsu_sd_wait_for_irq(dev, MATSU_SD_INFO2,
187 MATSU_SD_INFO2_BRE);
188 if (ret)
189 return ret;
190
191 /*
192 * Clear the status flag _before_ read the buffer out because
193 * MATSU_SD_INFO2_BRE is edge-triggered, not level-triggered.
194 */
195 matsu_sd_writel(priv, 0, MATSU_SD_INFO2);
196
Marek Vasut215ae0e2018-04-08 17:14:42 +0200197 if (priv->caps & MATSU_SD_CAP_64BIT)
198 matsu_pio_read_fifo_64(priv, pbuf, blocksize);
Marek Vasut2cddcc12018-04-08 17:41:14 +0200199 else if (priv->caps & MATSU_SD_CAP_16BIT)
200 matsu_pio_read_fifo_16(priv, pbuf, blocksize);
Marek Vasut215ae0e2018-04-08 17:14:42 +0200201 else
202 matsu_pio_read_fifo_32(priv, pbuf, blocksize);
Marek Vasut06485cf2018-04-08 15:22:58 +0200203
204 return 0;
205}
206
/*
 * Generate a PIO FIFO fill helper for a given access width.
 *
 * matsu_pio_write_fifo_<width>() writes one block of @blksz bytes from
 * @pbuf into the data FIFO (MATSU_SD_BUF) using <width>-bit accesses.
 * Mirrors matsu_pio_read_fifo: aligned buffers take the direct path,
 * unaligned ones load via get_unaligned().  __suffix selects the matching
 * matsu_sd_write{q,l,w} accessor.
 */
#define matsu_pio_write_fifo(__width, __suffix)				\
static void matsu_pio_write_fifo_##__width(struct matsu_sd_priv *priv,	\
					   const char *pbuf, uint blksz)\
{									\
	const u##__width *buf = (const u##__width *)pbuf;		\
	int i;								\
									\
	if (likely(IS_ALIGNED((uintptr_t)buf, ((__width) / 8)))) {	\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			matsu_sd_write##__suffix(priv, *buf++,		\
						 MATSU_SD_BUF);		\
		}							\
	} else {							\
		for (i = 0; i < blksz / ((__width) / 8); i++) {		\
			u##__width data = get_unaligned(buf++);		\
			matsu_sd_write##__suffix(priv, data,		\
						 MATSU_SD_BUF);		\
		}							\
	}								\
}

/* One helper per supported bus width. */
matsu_pio_write_fifo(64, q)
matsu_pio_write_fifo(32, l)
matsu_pio_write_fifo(16, w)
Marek Vasut215ae0e2018-04-08 17:14:42 +0200231
Marek Vasut06485cf2018-04-08 15:22:58 +0200232static int matsu_sd_pio_write_one_block(struct udevice *dev,
233 const char *pbuf, uint blocksize)
234{
235 struct matsu_sd_priv *priv = dev_get_priv(dev);
Marek Vasut215ae0e2018-04-08 17:14:42 +0200236 int ret;
Marek Vasut06485cf2018-04-08 15:22:58 +0200237
238 /* wait until the buffer becomes empty */
239 ret = matsu_sd_wait_for_irq(dev, MATSU_SD_INFO2,
Marek Vasut215ae0e2018-04-08 17:14:42 +0200240 MATSU_SD_INFO2_BWE);
Marek Vasut06485cf2018-04-08 15:22:58 +0200241 if (ret)
242 return ret;
243
244 matsu_sd_writel(priv, 0, MATSU_SD_INFO2);
245
Marek Vasut215ae0e2018-04-08 17:14:42 +0200246 if (priv->caps & MATSU_SD_CAP_64BIT)
247 matsu_pio_write_fifo_64(priv, pbuf, blocksize);
Marek Vasut2cddcc12018-04-08 17:41:14 +0200248 else if (priv->caps & MATSU_SD_CAP_16BIT)
249 matsu_pio_write_fifo_16(priv, pbuf, blocksize);
Marek Vasut215ae0e2018-04-08 17:14:42 +0200250 else
251 matsu_pio_write_fifo_32(priv, pbuf, blocksize);
Marek Vasut06485cf2018-04-08 15:22:58 +0200252
253 return 0;
254}
255
256static int matsu_sd_pio_xfer(struct udevice *dev, struct mmc_data *data)
257{
258 const char *src = data->src;
259 char *dest = data->dest;
260 int i, ret;
261
262 for (i = 0; i < data->blocks; i++) {
263 if (data->flags & MMC_DATA_READ)
264 ret = matsu_sd_pio_read_one_block(dev, dest,
265 data->blocksize);
266 else
267 ret = matsu_sd_pio_write_one_block(dev, src,
268 data->blocksize);
269 if (ret)
270 return ret;
271
272 if (data->flags & MMC_DATA_READ)
273 dest += data->blocksize;
274 else
275 src += data->blocksize;
276 }
277
278 return 0;
279}
280
/*
 * Program the internal DMA engine with the buffer bus address and kick off
 * the transfer.  Direction and address-increment mode must already have
 * been configured in MATSU_SD_DMA_MODE by the caller.
 */
static void matsu_sd_dma_start(struct matsu_sd_priv *priv,
			       dma_addr_t dma_addr)
{
	u32 tmp;

	/* clear any stale DMA status before starting a new transfer */
	matsu_sd_writel(priv, 0, MATSU_SD_DMA_INFO1);
	matsu_sd_writel(priv, 0, MATSU_SD_DMA_INFO2);

	/* enable DMA */
	tmp = matsu_sd_readl(priv, MATSU_SD_EXTMODE);
	tmp |= MATSU_SD_EXTMODE_DMA_EN;
	matsu_sd_writel(priv, tmp, MATSU_SD_EXTMODE);

	/* low 32 bits of the buffer address */
	matsu_sd_writel(priv, dma_addr & U32_MAX, MATSU_SD_DMA_ADDR_L);

	/* suppress the warning "right shift count >= width of type" */
	dma_addr >>= min_t(int, 32, 8 * sizeof(dma_addr));

	/* high 32 bits (zero when dma_addr_t is 32-bit) */
	matsu_sd_writel(priv, dma_addr & U32_MAX, MATSU_SD_DMA_ADDR_H);

	matsu_sd_writel(priv, MATSU_SD_DMA_CTL_START, MATSU_SD_DMA_CTL);
}
303
304static int matsu_sd_dma_wait_for_irq(struct udevice *dev, u32 flag,
305 unsigned int blocks)
306{
307 struct matsu_sd_priv *priv = dev_get_priv(dev);
308 long wait = 1000000 + 10 * blocks;
309
310 while (!(matsu_sd_readl(priv, MATSU_SD_DMA_INFO1) & flag)) {
311 if (wait-- < 0) {
312 dev_err(dev, "timeout during DMA\n");
313 return -ETIMEDOUT;
314 }
315
316 udelay(10);
317 }
318
319 if (matsu_sd_readl(priv, MATSU_SD_DMA_INFO2)) {
320 dev_err(dev, "error during DMA\n");
321 return -EIO;
322 }
323
324 return 0;
325}
326
/*
 * Transfer all blocks of @data using the controller's internal DMA engine.
 * Handles cache maintenance around the transfer via the __dma_map/unmap
 * helpers.  Returns 0 or a negative errno from the completion wait.
 */
static int matsu_sd_dma_xfer(struct udevice *dev, struct mmc_data *data)
{
	struct matsu_sd_priv *priv = dev_get_priv(dev);
	size_t len = data->blocks * data->blocksize;
	void *buf;
	enum dma_data_direction dir;
	dma_addr_t dma_addr;
	u32 poll_flag, tmp;
	int ret;

	tmp = matsu_sd_readl(priv, MATSU_SD_DMA_MODE);

	/* pick buffer, cache direction, completion flag and DMA direction bit */
	if (data->flags & MMC_DATA_READ) {
		buf = data->dest;
		dir = DMA_FROM_DEVICE;
		poll_flag = MATSU_SD_DMA_INFO1_END_RD2;
		tmp |= MATSU_SD_DMA_MODE_DIR_RD;
	} else {
		buf = (void *)data->src;
		dir = DMA_TO_DEVICE;
		poll_flag = MATSU_SD_DMA_INFO1_END_WR;
		tmp &= ~MATSU_SD_DMA_MODE_DIR_RD;
	}

	matsu_sd_writel(priv, tmp, MATSU_SD_DMA_MODE);

	/* flush/invalidate caches and get the bus address before starting */
	dma_addr = __dma_map_single(buf, len, dir);

	matsu_sd_dma_start(priv, dma_addr);

	ret = matsu_sd_dma_wait_for_irq(dev, poll_flag, data->blocks);

	/* for reads, invalidate again so the CPU sees DMA-written data */
	__dma_unmap_single(dma_addr, len, dir);

	return ret;
}
363
364/* check if the address is DMA'able */
365static bool matsu_sd_addr_is_dmaable(unsigned long addr)
366{
367 if (!IS_ALIGNED(addr, MATSU_SD_DMA_MINALIGN))
368 return false;
369
370#if defined(CONFIG_ARCH_UNIPHIER) && !defined(CONFIG_ARM64) && \
371 defined(CONFIG_SPL_BUILD)
372 /*
373 * For UniPhier ARMv7 SoCs, the stack is allocated in the locked ways
374 * of L2, which is unreachable from the DMA engine.
375 */
376 if (addr < CONFIG_SPL_STACK)
377 return false;
378#endif
379
380 return true;
381}
382
383int matsu_sd_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
384 struct mmc_data *data)
385{
386 struct matsu_sd_priv *priv = dev_get_priv(dev);
387 int ret;
388 u32 tmp;
389
390 if (matsu_sd_readl(priv, MATSU_SD_INFO2) & MATSU_SD_INFO2_CBSY) {
391 dev_err(dev, "command busy\n");
392 return -EBUSY;
393 }
394
395 /* clear all status flags */
396 matsu_sd_writel(priv, 0, MATSU_SD_INFO1);
397 matsu_sd_writel(priv, 0, MATSU_SD_INFO2);
398
399 /* disable DMA once */
400 tmp = matsu_sd_readl(priv, MATSU_SD_EXTMODE);
401 tmp &= ~MATSU_SD_EXTMODE_DMA_EN;
402 matsu_sd_writel(priv, tmp, MATSU_SD_EXTMODE);
403
404 matsu_sd_writel(priv, cmd->cmdarg, MATSU_SD_ARG);
405
406 tmp = cmd->cmdidx;
407
408 if (data) {
409 matsu_sd_writel(priv, data->blocksize, MATSU_SD_SIZE);
410 matsu_sd_writel(priv, data->blocks, MATSU_SD_SECCNT);
411
412 /* Do not send CMD12 automatically */
413 tmp |= MATSU_SD_CMD_NOSTOP | MATSU_SD_CMD_DATA;
414
415 if (data->blocks > 1)
416 tmp |= MATSU_SD_CMD_MULTI;
417
418 if (data->flags & MMC_DATA_READ)
419 tmp |= MATSU_SD_CMD_RD;
420 }
421
422 /*
423 * Do not use the response type auto-detection on this hardware.
424 * CMD8, for example, has different response types on SD and eMMC,
425 * while this controller always assumes the response type for SD.
426 * Set the response type manually.
427 */
428 switch (cmd->resp_type) {
429 case MMC_RSP_NONE:
430 tmp |= MATSU_SD_CMD_RSP_NONE;
431 break;
432 case MMC_RSP_R1:
433 tmp |= MATSU_SD_CMD_RSP_R1;
434 break;
435 case MMC_RSP_R1b:
436 tmp |= MATSU_SD_CMD_RSP_R1B;
437 break;
438 case MMC_RSP_R2:
439 tmp |= MATSU_SD_CMD_RSP_R2;
440 break;
441 case MMC_RSP_R3:
442 tmp |= MATSU_SD_CMD_RSP_R3;
443 break;
444 default:
445 dev_err(dev, "unknown response type\n");
446 return -EINVAL;
447 }
448
449 dev_dbg(dev, "sending CMD%d (SD_CMD=%08x, SD_ARG=%08x)\n",
450 cmd->cmdidx, tmp, cmd->cmdarg);
451 matsu_sd_writel(priv, tmp, MATSU_SD_CMD);
452
453 ret = matsu_sd_wait_for_irq(dev, MATSU_SD_INFO1,
454 MATSU_SD_INFO1_RSP);
455 if (ret)
456 return ret;
457
458 if (cmd->resp_type & MMC_RSP_136) {
459 u32 rsp_127_104 = matsu_sd_readl(priv, MATSU_SD_RSP76);
460 u32 rsp_103_72 = matsu_sd_readl(priv, MATSU_SD_RSP54);
461 u32 rsp_71_40 = matsu_sd_readl(priv, MATSU_SD_RSP32);
462 u32 rsp_39_8 = matsu_sd_readl(priv, MATSU_SD_RSP10);
463
464 cmd->response[0] = ((rsp_127_104 & 0x00ffffff) << 8) |
465 ((rsp_103_72 & 0xff000000) >> 24);
466 cmd->response[1] = ((rsp_103_72 & 0x00ffffff) << 8) |
467 ((rsp_71_40 & 0xff000000) >> 24);
468 cmd->response[2] = ((rsp_71_40 & 0x00ffffff) << 8) |
469 ((rsp_39_8 & 0xff000000) >> 24);
470 cmd->response[3] = (rsp_39_8 & 0xffffff) << 8;
471 } else {
472 /* bit 39-8 */
473 cmd->response[0] = matsu_sd_readl(priv, MATSU_SD_RSP10);
474 }
475
476 if (data) {
477 /* use DMA if the HW supports it and the buffer is aligned */
478 if (priv->caps & MATSU_SD_CAP_DMA_INTERNAL &&
479 matsu_sd_addr_is_dmaable((long)data->src))
480 ret = matsu_sd_dma_xfer(dev, data);
481 else
482 ret = matsu_sd_pio_xfer(dev, data);
483
484 ret = matsu_sd_wait_for_irq(dev, MATSU_SD_INFO1,
485 MATSU_SD_INFO1_CMP);
486 if (ret)
487 return ret;
488 }
489
490 return ret;
491}
492
493static int matsu_sd_set_bus_width(struct matsu_sd_priv *priv,
494 struct mmc *mmc)
495{
496 u32 val, tmp;
497
498 switch (mmc->bus_width) {
499 case 1:
500 val = MATSU_SD_OPTION_WIDTH_1;
501 break;
502 case 4:
503 val = MATSU_SD_OPTION_WIDTH_4;
504 break;
505 case 8:
506 val = MATSU_SD_OPTION_WIDTH_8;
507 break;
508 default:
509 return -EINVAL;
510 }
511
512 tmp = matsu_sd_readl(priv, MATSU_SD_OPTION);
513 tmp &= ~MATSU_SD_OPTION_WIDTH_MASK;
514 tmp |= val;
515 matsu_sd_writel(priv, tmp, MATSU_SD_OPTION);
516
517 return 0;
518}
519
520static void matsu_sd_set_ddr_mode(struct matsu_sd_priv *priv,
521 struct mmc *mmc)
522{
523 u32 tmp;
524
525 tmp = matsu_sd_readl(priv, MATSU_SD_IF_MODE);
526 if (mmc->ddr_mode)
527 tmp |= MATSU_SD_IF_MODE_DDR;
528 else
529 tmp &= ~MATSU_SD_IF_MODE_DDR;
530 matsu_sd_writel(priv, tmp, MATSU_SD_IF_MODE);
531}
532
/*
 * Derive the SD clock from the host clock (priv->mclk) by picking the
 * smallest power-of-two divider that does not exceed the requested rate,
 * then program it glitch-free: stop the clock, change the divider, restart.
 */
static void matsu_sd_set_clk_rate(struct matsu_sd_priv *priv,
				  struct mmc *mmc)
{
	unsigned int divisor;
	u32 val, tmp;

	/* a target rate of 0 means "leave the clock alone" */
	if (!mmc->clock)
		return;

	divisor = DIV_ROUND_UP(priv->mclk, mmc->clock);

	if (divisor <= 1)
		/* R-Car encodes the 1:1 divider differently */
		val = (priv->caps & MATSU_SD_CAP_RCAR) ?
		      MATSU_SD_CLKCTL_RCAR_DIV1 : MATSU_SD_CLKCTL_DIV1;
	else if (divisor <= 2)
		val = MATSU_SD_CLKCTL_DIV2;
	else if (divisor <= 4)
		val = MATSU_SD_CLKCTL_DIV4;
	else if (divisor <= 8)
		val = MATSU_SD_CLKCTL_DIV8;
	else if (divisor <= 16)
		val = MATSU_SD_CLKCTL_DIV16;
	else if (divisor <= 32)
		val = MATSU_SD_CLKCTL_DIV32;
	else if (divisor <= 64)
		val = MATSU_SD_CLKCTL_DIV64;
	else if (divisor <= 128)
		val = MATSU_SD_CLKCTL_DIV128;
	else if (divisor <= 256)
		val = MATSU_SD_CLKCTL_DIV256;
	else if (divisor <= 512 || !(priv->caps & MATSU_SD_CAP_DIV1024))
		/* /512 is the largest divider without the DIV1024 cap */
		val = MATSU_SD_CLKCTL_DIV512;
	else
		val = MATSU_SD_CLKCTL_DIV1024;

	/* nothing to do if the clock is running at the wanted divider */
	tmp = matsu_sd_readl(priv, MATSU_SD_CLKCTL);
	if (tmp & MATSU_SD_CLKCTL_SCLKEN &&
	    (tmp & MATSU_SD_CLKCTL_DIV_MASK) == val)
		return;

	/* stop the clock before changing its rate to avoid a glitch signal */
	tmp &= ~MATSU_SD_CLKCTL_SCLKEN;
	matsu_sd_writel(priv, tmp, MATSU_SD_CLKCTL);

	tmp &= ~MATSU_SD_CLKCTL_DIV_MASK;
	tmp |= val | MATSU_SD_CLKCTL_OFFEN;
	matsu_sd_writel(priv, tmp, MATSU_SD_CLKCTL);

	/* re-enable the clock with the new divider */
	tmp |= MATSU_SD_CLKCTL_SCLKEN;
	matsu_sd_writel(priv, tmp, MATSU_SD_CLKCTL);

	/* allow the clock to stabilize before any further access */
	udelay(1000);
}
586
587int matsu_sd_set_ios(struct udevice *dev)
588{
589 struct matsu_sd_priv *priv = dev_get_priv(dev);
590 struct mmc *mmc = mmc_get_mmc_dev(dev);
591 int ret;
592
593 dev_dbg(dev, "clock %uHz, DDRmode %d, width %u\n",
594 mmc->clock, mmc->ddr_mode, mmc->bus_width);
595
596 ret = matsu_sd_set_bus_width(priv, mmc);
597 if (ret)
598 return ret;
599 matsu_sd_set_ddr_mode(priv, mmc);
600 matsu_sd_set_clk_rate(priv, mmc);
601
602 return 0;
603}
604
605int matsu_sd_get_cd(struct udevice *dev)
606{
607 struct matsu_sd_priv *priv = dev_get_priv(dev);
608
609 if (priv->caps & MATSU_SD_CAP_NONREMOVABLE)
610 return 1;
611
612 return !!(matsu_sd_readl(priv, MATSU_SD_INFO1) &
613 MATSU_SD_INFO1_CD);
614}
615
/*
 * One-time controller initialization: soft reset, stop-at-sector-count
 * setup, host bus mode selection and DMA address-increment mode.
 */
static void matsu_sd_host_init(struct matsu_sd_priv *priv)
{
	u32 tmp;

	/* soft reset of the host: assert (clear RSTX) then deassert */
	tmp = matsu_sd_readl(priv, MATSU_SD_SOFT_RST);
	tmp &= ~MATSU_SD_SOFT_RST_RSTX;
	matsu_sd_writel(priv, tmp, MATSU_SD_SOFT_RST);
	tmp |= MATSU_SD_SOFT_RST_RSTX;
	matsu_sd_writel(priv, tmp, MATSU_SD_SOFT_RST);

	/* FIXME: implement eMMC hw_reset */

	/* stop the transfer automatically when SECCNT blocks are done */
	matsu_sd_writel(priv, MATSU_SD_STOP_SEC, MATSU_SD_STOP);

	/*
	 * Connected to 32bit AXI.
	 * This register dropped backward compatibility at version 0x10.
	 * Write an appropriate value depending on the IP version.
	 */
	if (priv->version >= 0x10)
		matsu_sd_writel(priv, 0x101, MATSU_SD_HOST_MODE);
	else if (priv->caps & MATSU_SD_CAP_16BIT)
		matsu_sd_writel(priv, 0x1, MATSU_SD_HOST_MODE);
	else
		matsu_sd_writel(priv, 0x0, MATSU_SD_HOST_MODE);

	/* let the internal DMA engine auto-increment the buffer address */
	if (priv->caps & MATSU_SD_CAP_DMA_INTERNAL) {
		tmp = matsu_sd_readl(priv, MATSU_SD_DMA_MODE);
		tmp |= MATSU_SD_DMA_MODE_ADDR_INC;
		matsu_sd_writel(priv, tmp, MATSU_SD_DMA_MODE);
	}
}
649
650int matsu_sd_bind(struct udevice *dev)
651{
652 struct matsu_sd_plat *plat = dev_get_platdata(dev);
653
654 return mmc_bind(dev, &plat->mmc, &plat->cfg);
655}
656
/*
 * Common probe routine, called by the SoC-specific drivers.
 *
 * @quirks: when non-zero, used verbatim as the capability mask instead of
 *          auto-detecting capabilities from the IP version register.
 *
 * Maps the register space, optionally powers the vqmmc regulator, sets the
 * host clock to its maximum rate, parses DT host caps and initializes the
 * controller.  Returns 0 on success or a negative errno.
 */
int matsu_sd_probe(struct udevice *dev, u32 quirks)
{
	struct matsu_sd_plat *plat = dev_get_platdata(dev);
	struct matsu_sd_priv *priv = dev_get_priv(dev);
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(dev);
	fdt_addr_t base;
	struct clk clk;
	int ret;
#ifdef CONFIG_DM_REGULATOR
	struct udevice *vqmmc_dev;
#endif

	base = devfdt_get_addr(dev);
	if (base == FDT_ADDR_T_NONE)
		return -EINVAL;

	priv->regbase = devm_ioremap(dev, base, SZ_2K);
	if (!priv->regbase)
		return -ENOMEM;

#ifdef CONFIG_DM_REGULATOR
	/* best-effort: a missing vqmmc supply is not an error */
	ret = device_get_supply_regulator(dev, "vqmmc-supply", &vqmmc_dev);
	if (!ret) {
		/* Set the regulator to 3.3V until we support 1.8V modes */
		regulator_set_value(vqmmc_dev, 3300000);
		regulator_set_enable(vqmmc_dev, true);
	}
#endif

	ret = clk_get_by_index(dev, 0, &clk);
	if (ret < 0) {
		dev_err(dev, "failed to get host clock\n");
		return ret;
	}

	/* set to max rate */
	priv->mclk = clk_set_rate(&clk, ULONG_MAX);
	if (IS_ERR_VALUE(priv->mclk)) {
		dev_err(dev, "failed to set rate for host clock\n");
		clk_free(&clk);
		return priv->mclk;
	}

	ret = clk_enable(&clk);
	clk_free(&clk);
	if (ret) {
		dev_err(dev, "failed to enable host clock\n");
		return ret;
	}

	/* pull bus-width / speed capabilities from the device tree */
	ret = mmc_of_parse(dev, &plat->cfg);
	if (ret < 0) {
		dev_err(dev, "failed to parse host caps\n");
		return ret;
	}

	plat->cfg.name = dev->name;
	plat->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	if (quirks) {
		/* caller-supplied quirks override auto-detection */
		priv->caps = quirks;
	} else {
		/* IP >= 0x10 gained internal DMA and the /1024 divider */
		priv->version = matsu_sd_readl(priv, MATSU_SD_VERSION) &
							MATSU_SD_VERSION_IP;
		dev_dbg(dev, "version %x\n", priv->version);
		if (priv->version >= 0x10) {
			priv->caps |= MATSU_SD_CAP_DMA_INTERNAL;
			priv->caps |= MATSU_SD_CAP_DIV1024;
		}
	}

	if (fdt_get_property(gd->fdt_blob, dev_of_offset(dev), "non-removable",
			     NULL))
		priv->caps |= MATSU_SD_CAP_NONREMOVABLE;

	matsu_sd_host_init(priv);

	plat->cfg.voltages = MMC_VDD_165_195 | MMC_VDD_32_33 | MMC_VDD_33_34;
	/* f_min follows the largest divider the IP supports */
	plat->cfg.f_min = priv->mclk /
			  (priv->caps & MATSU_SD_CAP_DIV1024 ? 1024 : 512);
	plat->cfg.f_max = priv->mclk;
	plat->cfg.b_max = U32_MAX; /* max value of MATSU_SD_SECCNT */

	upriv->mmc = &plat->mmc;

	return 0;
}