blob: 47dcc37c2c7cb8c7c8c92c221a86ca847b55cf49 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Jaehoon Chung7cf73072012-10-15 19:10:29 +00002/*
3 * (C) Copyright 2012 SAMSUNG Electronics
4 * Jaehoon Chung <jh80.chung@samsung.com>
5 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
Jaehoon Chung7cf73072012-10-15 19:10:29 +00006 */
7
Alexey Brodkin55bab5e2013-12-26 15:29:07 +04008#include <bouncebuf.h>
Simon Glass63334482019-11-14 12:57:39 -07009#include <cpu_func.h>
Simon Glass4c9b9482015-08-06 20:16:27 -060010#include <errno.h>
Simon Glass0f2af882020-05-10 11:40:05 -060011#include <log.h>
Jaehoon Chung7cf73072012-10-15 19:10:29 +000012#include <malloc.h>
Simon Glass2dd337a2015-09-02 17:24:58 -060013#include <memalign.h>
Jaehoon Chung7cf73072012-10-15 19:10:29 +000014#include <mmc.h>
15#include <dwmmc.h>
Ley Foon Tanb98e8922018-12-20 17:55:41 +080016#include <wait_bit.h>
Simon Glass274e0b02020-05-10 11:39:56 -060017#include <asm/cache.h>
Simon Glassdbd79542020-05-10 11:40:11 -060018#include <linux/delay.h>
Urja Rannikko9932a012019-05-13 13:25:27 +000019#include <power/regulator.h>
Jaehoon Chung7cf73072012-10-15 19:10:29 +000020
21#define PAGE_SIZE 4096
22
/*
 * Internal DMA Controller (IDMAC) descriptor for 32-bit addressing mode.
 * Aligned to ARCH_DMA_MINALIGN so each descriptor can be cache-maintained
 * without clobbering neighboring data.
 */
struct dwmci_idmac32 {
	u32 des0;	/* Control descriptor */
	u32 des1;	/* Buffer size */
	u32 des2;	/* Buffer physical address */
	u32 des3;	/* Next descriptor physical address */
} __aligned(ARCH_DMA_MINALIGN);
30
/*
 * Internal DMA Controller (IDMAC) descriptor for 64-bit addressing mode.
 * Aligned to ARCH_DMA_MINALIGN so each descriptor can be cache-maintained
 * without clobbering neighboring data.
 */
struct dwmci_idmac64 {
	u32 des0;	/* Control descriptor */
	u32 des1;	/* Reserved */
	u32 des2;	/* Buffer sizes */
	u32 des3;	/* Reserved */
	u32 des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32 des5;	/* Upper 32-bits of Buffer Address Pointer 1 */
	u32 des6;	/* Lower 32-bits of Next Descriptor Address */
	u32 des7;	/* Upper 32-bits of Next Descriptor Address */
} __aligned(ARCH_DMA_MINALIGN);
42
/*
 * Register offsets for DW MMC blocks with 32-bit IDMAC.
 * Upper-half fields (dbaddru, dscaddru, bufaddru) are not set and stay
 * zero-initialized; 32-bit mode never writes them.
 */
static const struct dwmci_idmac_regs dwmci_idmac_regs32 = {
	.dbaddrl = DWMCI_DBADDR,
	.idsts = DWMCI_IDSTS,
	.idinten = DWMCI_IDINTEN,
	.dscaddrl = DWMCI_DSCADDR,
	.bufaddrl = DWMCI_BUFADDR,
};
51
/* Register offsets for DW MMC blocks with 64-bit IDMAC */
static const struct dwmci_idmac_regs dwmci_idmac_regs64 = {
	.dbaddrl = DWMCI_DBADDRL,
	.dbaddru = DWMCI_DBADDRU,
	.idsts = DWMCI_IDSTS64,
	.idinten = DWMCI_IDINTEN64,
	.dscaddrl = DWMCI_DSCADDRL,
	.dscaddru = DWMCI_DSCADDRU,
	.bufaddrl = DWMCI_BUFADDRL,
	.bufaddru = DWMCI_BUFADDRU,
};
63
Jaehoon Chung7cf73072012-10-15 19:10:29 +000064static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
65{
66 unsigned long timeout = 1000;
67 u32 ctrl;
68
69 dwmci_writel(host, DWMCI_CTRL, value);
70
71 while (timeout--) {
72 ctrl = dwmci_readl(host, DWMCI_CTRL);
73 if (!(ctrl & DWMCI_RESET_ALL))
74 return 1;
75 }
76 return 0;
77}
78
Sam Protsenko6384a1f2024-08-07 22:14:15 -050079static void dwmci_set_idma_desc32(struct dwmci_idmac32 *desc, u32 control,
80 u32 buf_size, u32 buf_addr)
Jaehoon Chung7cf73072012-10-15 19:10:29 +000081{
Sam Protsenko6384a1f2024-08-07 22:14:15 -050082 phys_addr_t desc_phys = virt_to_phys(desc);
83 u32 next_desc_phys = desc_phys + sizeof(struct dwmci_idmac32);
Jaehoon Chung7cf73072012-10-15 19:10:29 +000084
Sam Protsenko6384a1f2024-08-07 22:14:15 -050085 desc->des0 = control;
86 desc->des1 = buf_size;
87 desc->des2 = buf_addr;
88 desc->des3 = next_desc_phys;
Jaehoon Chung7cf73072012-10-15 19:10:29 +000089}
90
Sam Protsenko7c991612024-08-07 22:14:16 -050091static void dwmci_set_idma_desc64(struct dwmci_idmac64 *desc, u32 control,
92 u32 buf_size, u64 buf_addr)
93{
94 phys_addr_t desc_phys = virt_to_phys(desc);
95 u64 next_desc_phys = desc_phys + sizeof(struct dwmci_idmac64);
96
97 desc->des0 = control;
98 desc->des1 = 0;
99 desc->des2 = buf_size;
100 desc->des3 = 0;
101 desc->des4 = buf_addr & 0xffffffff;
102 desc->des5 = buf_addr >> 32;
103 desc->des6 = next_desc_phys & 0xffffffff;
104 desc->des7 = next_desc_phys >> 32;
105}
106
/*
 * Build the IDMAC descriptor chain for a transfer and flush it to memory.
 *
 * Each descriptor covers at most 8 blocks of the bounce buffer; the chunk
 * stride is PAGE_SIZE (assumes blocksize * 8 == PAGE_SIZE, i.e. 512-byte
 * blocks -- NOTE(review): confirm for other block sizes).  The first
 * descriptor gets FS, the last gets LD.  The whole chain is flushed from
 * the data cache so the controller's DMA engine sees it.
 */
static void dwmci_prepare_desc(struct dwmci_host *host, struct mmc_data *data,
			       void *cur_idmac, void *bounce_buffer)
{
	/* Two typed views of the same area; only one is advanced below */
	struct dwmci_idmac32 *desc32 = cur_idmac;
	struct dwmci_idmac64 *desc64 = cur_idmac;
	ulong data_start, data_end;
	unsigned int blk_cnt, i;

	data_start = (ulong)cur_idmac;
	blk_cnt = data->blocks;

	for (i = 0;; i++) {
		phys_addr_t buf_phys = virt_to_phys(bounce_buffer);
		unsigned int flags, cnt;

		/* OWN: descriptor belongs to the IDMAC; CH: des3 chains */
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		if (i == 0)
			flags |= DWMCI_IDMAC_FS;	/* first segment */
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;	/* last segment */
			cnt = data->blocksize * blk_cnt;
		} else
			cnt = data->blocksize * 8;

		if (host->dma_64bit_address) {
			dwmci_set_idma_desc64(desc64, flags, cnt,
					      buf_phys + i * PAGE_SIZE);
			desc64++;
		} else {
			dwmci_set_idma_desc32(desc32, flags, cnt,
					      buf_phys + i * PAGE_SIZE);
			desc32++;
		}

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
	}

	/* data_end is one past the last descriptor actually written */
	if (host->dma_64bit_address)
		data_end = (ulong)desc64;
	else
		data_end = (ulong)desc32;
	/* Make the descriptor chain visible to the DMA engine */
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));
}
152
/*
 * Program the controller for an IDMAC transfer: reset the FIFO, point the
 * hardware at the descriptor chain, build the descriptors, enable DMA in
 * CTRL/BMOD, and set the block size and total byte count.
 */
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       void *cur_idmac,
			       void *bounce_buffer)
{
	/* Descriptor base address split into lo/hi 32-bit halves */
	const u32 idmacl = virt_to_phys(cur_idmac) & 0xffffffff;
	const u32 idmacu = (u64)virt_to_phys(cur_idmac) >> 32;
	unsigned long ctrl;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, host->regs->idsts, 0xffffffff);

	dwmci_writel(host, host->regs->dbaddrl, idmacl);
	if (host->dma_64bit_address)
		dwmci_writel(host, host->regs->dbaddru, idmacu);

	dwmci_prepare_desc(host, data, cur_idmac, bounce_buffer);

	/* Enable the DMA data path */
	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}
184
Heiko Stuebner46b7a4f2018-09-21 10:59:45 +0200185static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
186{
187 u32 timeout = 20000;
188
189 *len = dwmci_readl(host, DWMCI_STATUS);
190 while (--timeout && (*len & bit)) {
191 udelay(200);
192 *len = dwmci_readl(host, DWMCI_STATUS);
193 }
194
195 if (!timeout) {
196 debug("%s: FIFO underflow timeout\n", __func__);
197 return -ETIMEDOUT;
198 }
199
200 return 0;
201}
202
Marek Vasutffac5122019-03-23 03:32:24 +0100203static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
204{
205 unsigned int timeout;
206
Kever Yang4889d832019-08-29 15:42:41 +0800207 timeout = size * 8; /* counting in bits */
208 timeout *= 10; /* wait 10 times as long */
Marek Vasutffac5122019-03-23 03:32:24 +0100209 timeout /= mmc->clock;
210 timeout /= mmc->bus_width;
211 timeout /= mmc->ddr_mode ? 2 : 1;
Kever Yang4889d832019-08-29 15:42:41 +0800212 timeout *= 1000; /* counting in msec */
Marek Vasutffac5122019-03-23 03:32:24 +0100213 timeout = (timeout < 1000) ? 1000 : timeout;
214
215 return timeout;
216}
217
/*
 * Drain or fill the controller FIFO for a PIO (non-DMA) transfer.
 *
 * No-op unless the host is in FIFO mode and there is data to move.
 * @mask is the caller's RINTSTS snapshot: reads are gated on RXDR/DTO,
 * writes on TXDR.  Returns 0 on success or a negative error from
 * dwmci_fifo_ready().
 */
static int dwmci_data_transfer_fifo(struct dwmci_host *host,
				    struct mmc_data *data, u32 mask)
{
	const u32 int_rx = mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO);
	const u32 int_tx = mask & DWMCI_INTMSK_TXDR;
	int ret = 0;
	u32 len = 0, size, i;
	u32 *buf;

	/* Remaining transfer size, in 32-bit FIFO words */
	size = (data->blocksize * data->blocks) / 4;
	if (!host->fifo_mode || !size)
		return 0;

	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	if (data->flags == MMC_DATA_READ && int_rx) {
		/* Ack the RX interrupts being serviced */
		dwmci_writel(host, DWMCI_RINTSTS, int_rx);
		while (size) {
			/* Wait until the FIFO holds at least one word */
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_EMPTY, &len);
			if (ret < 0)
				break;

			/* Decode the current FIFO fill level (words) */
			len = (len >> DWMCI_FIFO_SHIFT) & DWMCI_FIFO_MASK;
			len = min(size, len);
			for (i = 0; i < len; i++)
				*buf++ = dwmci_readl(host, DWMCI_DATA);
			size = size > len ? (size - len) : 0;
		}
	} else if (data->flags == MMC_DATA_WRITE && int_tx) {
		while (size) {
			/* Wait for room in the FIFO */
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_FULL, &len);
			if (ret < 0)
				break;

			/* Free space = depth minus current fill level */
			len = host->fifo_depth - ((len >> DWMCI_FIFO_SHIFT) &
						  DWMCI_FIFO_MASK);
			len = min(size, len);
			for (i = 0; i < len; i++)
				dwmci_writel(host, DWMCI_DATA, *buf++);
			size = size > len ? (size - len) : 0;
		}
		/* Ack the TX-request interrupt after draining */
		dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_TXDR);
	}

	return ret;
}
Marek Vasutffac5122019-03-23 03:32:24 +0100267
/*
 * Wait for a data transfer to finish, servicing the FIFO in PIO mode.
 *
 * Polls RINTSTS for errors and the DTO (data transfer over) flag, bounded
 * by a timeout scaled to the transfer size and bus speed.  Returns 0 on
 * success, -EINVAL on a data error, -ETIMEDOUT on timeout.
 */
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size;
	ulong start = get_timer(0);

	size = data->blocksize * data->blocks;
	timeout = dwmci_get_timeout(mmc, size);

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		/* PIO mode: move whatever the FIFO has ready right now */
		ret = dwmci_data_transfer_fifo(host, data, mask);

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n",
			      __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	/* Ack every interrupt seen in the last RINTSTS snapshot */
	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}
308
Sam Protsenko3350c202024-08-07 22:14:13 -0500309static int dwmci_dma_transfer(struct dwmci_host *host, uint flags,
310 struct bounce_buffer *bbstate)
311{
312 int ret;
313 u32 mask, ctrl;
314
315 if (flags == MMC_DATA_READ)
316 mask = DWMCI_IDINTEN_RI;
317 else
318 mask = DWMCI_IDINTEN_TI;
319
Sam Protsenko7c991612024-08-07 22:14:16 -0500320 ret = wait_for_bit_le32(host->ioaddr + host->regs->idsts,
Sam Protsenko3350c202024-08-07 22:14:13 -0500321 mask, true, 1000, false);
322 if (ret)
323 debug("%s: DWMCI_IDINTEN mask 0x%x timeout\n", __func__, mask);
324
325 /* Clear interrupts */
Sam Protsenko7c991612024-08-07 22:14:16 -0500326 dwmci_writel(host, host->regs->idsts, DWMCI_IDINTEN_MASK);
Sam Protsenko3350c202024-08-07 22:14:13 -0500327
328 ctrl = dwmci_readl(host, DWMCI_CTRL);
329 ctrl &= ~DWMCI_DMA_EN;
330 dwmci_writel(host, DWMCI_CTRL, ctrl);
331
332 bounce_buffer_stop(bbstate);
333 return ret;
334}
335
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000336static int dwmci_set_transfer_mode(struct dwmci_host *host,
337 struct mmc_data *data)
338{
339 unsigned long mode;
340
341 mode = DWMCI_CMD_DATA_EXP;
342 if (data->flags & MMC_DATA_WRITE)
343 mode |= DWMCI_CMD_RW;
344
345 return mode;
346}
347
Sam Protsenkocf812042024-08-07 22:14:09 -0500348static void dwmci_wait_while_busy(struct dwmci_host *host, struct mmc_cmd *cmd)
349{
350 unsigned int timeout = 500; /* msec */
351 ulong start;
352
353 start = get_timer(0);
354 while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
355 if (get_timer(start) > timeout) {
356 debug("%s: Timeout on data busy, continue anyway\n",
357 __func__);
358 break;
359 }
360 }
361}
362
Sam Protsenko7c991612024-08-07 22:14:16 -0500363static int dwmci_send_cmd_common(struct dwmci_host *host, struct mmc_cmd *cmd,
364 struct mmc_data *data, void *cur_idmac)
Simon Glassff5c1b72016-06-12 23:30:23 -0600365{
Sam Protsenko7c991612024-08-07 22:14:16 -0500366 int ret, flags = 0, i;
Alexander Graf61c2a662016-03-04 01:09:52 +0100367 u32 retry = 100000;
Sam Protsenko3350c202024-08-07 22:14:13 -0500368 u32 mask;
Alexey Brodkin55bab5e2013-12-26 15:29:07 +0400369 struct bounce_buffer bbstate;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000370
Sam Protsenkocf812042024-08-07 22:14:09 -0500371 dwmci_wait_while_busy(host, cmd);
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000372 dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);
373
Alexey Brodkin55bab5e2013-12-26 15:29:07 +0400374 if (data) {
huang lin50b73752015-11-17 14:20:22 +0800375 if (host->fifo_mode) {
376 dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
377 dwmci_writel(host, DWMCI_BYTCNT,
378 data->blocksize * data->blocks);
379 dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
Alexey Brodkin55bab5e2013-12-26 15:29:07 +0400380 } else {
huang lin50b73752015-11-17 14:20:22 +0800381 if (data->flags == MMC_DATA_READ) {
Marek Vasut72d37b62019-03-23 18:45:27 +0100382 ret = bounce_buffer_start(&bbstate,
383 (void*)data->dest,
huang lin50b73752015-11-17 14:20:22 +0800384 data->blocksize *
385 data->blocks, GEN_BB_WRITE);
386 } else {
Marek Vasut72d37b62019-03-23 18:45:27 +0100387 ret = bounce_buffer_start(&bbstate,
388 (void*)data->src,
huang lin50b73752015-11-17 14:20:22 +0800389 data->blocksize *
390 data->blocks, GEN_BB_READ);
391 }
Marek Vasut72d37b62019-03-23 18:45:27 +0100392
393 if (ret)
394 return ret;
395
huang lin50b73752015-11-17 14:20:22 +0800396 dwmci_prepare_data(host, data, cur_idmac,
397 bbstate.bounce_buffer);
Alexey Brodkin55bab5e2013-12-26 15:29:07 +0400398 }
Alexey Brodkin55bab5e2013-12-26 15:29:07 +0400399 }
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000400
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000401 dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);
402
403 if (data)
404 flags = dwmci_set_transfer_mode(host, data);
405
406 if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
John Keepingfeb7fa32021-12-07 16:09:35 +0000407 return -EBUSY;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000408
409 if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
410 flags |= DWMCI_CMD_ABORT_STOP;
411 else
412 flags |= DWMCI_CMD_PRV_DAT_WAIT;
413
414 if (cmd->resp_type & MMC_RSP_PRESENT) {
415 flags |= DWMCI_CMD_RESP_EXP;
416 if (cmd->resp_type & MMC_RSP_136)
417 flags |= DWMCI_CMD_RESP_LENGTH;
418 }
419
420 if (cmd->resp_type & MMC_RSP_CRC)
421 flags |= DWMCI_CMD_CHECK_CRC;
422
423 flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);
424
425 debug("Sending CMD%d\n",cmd->cmdidx);
426
427 dwmci_writel(host, DWMCI_CMD, flags);
428
429 for (i = 0; i < retry; i++) {
430 mask = dwmci_readl(host, DWMCI_RINTSTS);
431 if (mask & DWMCI_INTMSK_CDONE) {
432 if (!data)
433 dwmci_writel(host, DWMCI_RINTSTS, mask);
434 break;
435 }
436 }
437
Pavel Macheka425f5d2014-09-05 12:49:48 +0200438 if (i == retry) {
Simon Glass4c9b9482015-08-06 20:16:27 -0600439 debug("%s: Timeout.\n", __func__);
Jaehoon Chung7825d202016-07-19 16:33:36 +0900440 return -ETIMEDOUT;
Pavel Macheka425f5d2014-09-05 12:49:48 +0200441 }
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000442
443 if (mask & DWMCI_INTMSK_RTO) {
Pavel Macheka425f5d2014-09-05 12:49:48 +0200444 /*
445 * Timeout here is not necessarily fatal. (e)MMC cards
446 * will splat here when they receive CMD55 as they do
447 * not support this command and that is exactly the way
448 * to tell them apart from SD cards. Thus, this output
449 * below shall be debug(). eMMC cards also do not favor
450 * CMD8, please keep that in mind.
451 */
452 debug("%s: Response Timeout.\n", __func__);
Jaehoon Chung7825d202016-07-19 16:33:36 +0900453 return -ETIMEDOUT;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000454 } else if (mask & DWMCI_INTMSK_RE) {
Simon Glass4c9b9482015-08-06 20:16:27 -0600455 debug("%s: Response Error.\n", __func__);
456 return -EIO;
Marek Vasuta6d91992018-11-06 23:42:11 +0100457 } else if ((cmd->resp_type & MMC_RSP_CRC) &&
458 (mask & DWMCI_INTMSK_RCRC)) {
459 debug("%s: Response CRC Error.\n", __func__);
460 return -EIO;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000461 }
462
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000463 if (cmd->resp_type & MMC_RSP_PRESENT) {
464 if (cmd->resp_type & MMC_RSP_136) {
465 cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
466 cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
467 cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
468 cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
469 } else {
470 cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
471 }
472 }
473
474 if (data) {
huang lin50b73752015-11-17 14:20:22 +0800475 ret = dwmci_data_transfer(host, data);
Sam Protsenko3350c202024-08-07 22:14:13 -0500476 if (!host->fifo_mode)
477 ret = dwmci_dma_transfer(host, data->flags, &bbstate);
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000478 }
479
480 udelay(100);
481
Marek Vasut81e093f2015-07-27 22:39:38 +0200482 return ret;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000483}
484
/*
 * mmc send_cmd entry point (DM and legacy builds): allocate a
 * cache-aligned IDMAC descriptor array sized for the transfer and hand
 * off to dwmci_send_cmd_common().
 */
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	/* One IDMAC descriptor covers up to 8 blocks */
	const size_t buf_size = data ? DIV_ROUND_UP(data->blocks, 8) : 0;

	if (host->dma_64bit_address) {
		ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac64, idmac, buf_size);
		return dwmci_send_cmd_common(host, cmd, data, idmac);
	} else {
		ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac32, idmac, buf_size);
		return dwmci_send_cmd_common(host, cmd, data, idmac);
	}
}
Sam Protsenkofd5387f2024-08-07 22:14:11 -0500507static int dwmci_control_clken(struct dwmci_host *host, bool on)
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000508{
Sam Protsenkofd5387f2024-08-07 22:14:11 -0500509 const u32 val = on ? DWMCI_CLKEN_ENABLE | DWMCI_CLKEN_LOW_PWR : 0;
510 const u32 cmd_only_clk = DWMCI_CMD_PRV_DAT_WAIT | DWMCI_CMD_UPD_CLK;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000511 int timeout = 10000;
Sam Protsenkofd5387f2024-08-07 22:14:11 -0500512 u32 status;
513
514 dwmci_writel(host, DWMCI_CLKENA, val);
515
516 /* Inform CIU */
517 dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_START | cmd_only_clk);
518 do {
519 status = dwmci_readl(host, DWMCI_CMD);
520 if (timeout-- < 0) {
521 debug("%s: Timeout!\n", __func__);
522 return -ETIMEDOUT;
523 }
524 } while (status & DWMCI_CMD_START);
525
526 return 0;
527}
528
529/*
530 * Update the clock divider.
531 *
532 * To prevent a clock glitch keep the clock stopped during the update of
533 * clock divider and clock source.
534 */
535static int dwmci_update_div(struct dwmci_host *host, u32 div)
536{
537 int ret;
538
539 /* Disable clock */
540 ret = dwmci_control_clken(host, false);
541 if (ret)
542 return ret;
543
544 /* Set clock to desired speed */
545 dwmci_writel(host, DWMCI_CLKDIV, div);
546 dwmci_writel(host, DWMCI_CLKSRC, 0);
547
548 /* Enable clock */
549 return dwmci_control_clken(host, true);
550}
551
552static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
553{
554 u32 div;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000555 unsigned long sclk;
Sam Protsenkofd5387f2024-08-07 22:14:11 -0500556 int ret;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000557
Amar902664c2013-04-27 11:42:54 +0530558 if ((freq == host->clock) || (freq == 0))
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000559 return 0;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000560 /*
Pavel Macheka425f5d2014-09-05 12:49:48 +0200561 * If host->get_mmc_clk isn't defined,
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000562 * then assume that host->bus_hz is source clock value.
Pavel Macheka425f5d2014-09-05 12:49:48 +0200563 * host->bus_hz should be set by user.
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000564 */
Jaehoon Chungd94735b2013-10-06 18:59:31 +0900565 if (host->get_mmc_clk)
Simon Glasseff76682015-08-30 16:55:15 -0600566 sclk = host->get_mmc_clk(host, freq);
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000567 else if (host->bus_hz)
568 sclk = host->bus_hz;
569 else {
Simon Glass4c9b9482015-08-06 20:16:27 -0600570 debug("%s: Didn't get source clock value.\n", __func__);
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000571 return -EINVAL;
572 }
573
Chin Liang See4cfff952014-06-10 01:26:52 -0500574 if (sclk == freq)
575 div = 0; /* bypass mode */
576 else
577 div = DIV_ROUND_UP(sclk, 2 * freq);
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000578
Sam Protsenkofd5387f2024-08-07 22:14:11 -0500579 ret = dwmci_update_div(host, div);
580 if (ret)
581 return ret;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000582
583 host->clock = freq;
584
585 return 0;
586}
587
/*
 * mmc set_ios entry point (DM and legacy builds): apply the requested
 * clock, bus width, DDR mode, platform clock-select hook, and -- when the
 * regulator framework is enabled -- the signalling voltage.
 */
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	/* Reflect the DDR setting in the UHS register */
	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	/* Optional platform-specific clock selection hook */
	if (host->clksel) {
		int ret;

		ret = host->clksel(host);
		if (ret)
			return ret;
	}

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	/* Switch VQMMC off, set 1.8V or 3.3V, then switch it back on */
	if (mmc->vqmmc_supply) {
		int ret;

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, false);
		if (ret)
			return ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}
653
Sam Protsenko0845f4f2024-08-07 22:14:10 -0500654static void dwmci_init_fifo(struct dwmci_host *host)
655{
Sam Protsenko751fdf12024-08-07 22:14:17 -0500656 u32 fifo_thr, fifoth_val;
657
658 if (!host->fifo_depth) {
Sam Protsenko0845f4f2024-08-07 22:14:10 -0500659 u32 fifo_size;
660
Sam Protsenko751fdf12024-08-07 22:14:17 -0500661 /*
662 * Automatically detect FIFO depth from FIFOTH register.
663 * Power-on value of RX_WMark is FIFO_DEPTH-1.
664 */
Sam Protsenko0845f4f2024-08-07 22:14:10 -0500665 fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
666 fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
Sam Protsenko751fdf12024-08-07 22:14:17 -0500667 host->fifo_depth = fifo_size;
Sam Protsenko0845f4f2024-08-07 22:14:10 -0500668 }
669
Sam Protsenko751fdf12024-08-07 22:14:17 -0500670 fifo_thr = host->fifo_depth / 2;
671 fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_thr - 1) | TX_WMARK(fifo_thr);
672 dwmci_writel(host, DWMCI_FIFOTH, fifoth_val);
Sam Protsenko0845f4f2024-08-07 22:14:10 -0500673}
674
Sam Protsenko7c991612024-08-07 22:14:16 -0500675static void dwmci_init_dma(struct dwmci_host *host)
676{
677 int addr_config;
678
679 if (host->fifo_mode)
680 return;
681
682 addr_config = (dwmci_readl(host, DWMCI_HCON) >> 27) & 0x1;
683 if (addr_config == 1) {
684 host->dma_64bit_address = true;
685 host->regs = &dwmci_idmac_regs64;
686 debug("%s: IDMAC supports 64-bit address mode\n", __func__);
687 } else {
688 host->dma_64bit_address = false;
689 host->regs = &dwmci_idmac_regs32;
690 debug("%s: IDMAC supports 32-bit address mode\n", __func__);
691 }
692
693 dwmci_writel(host, host->regs->idinten, DWMCI_IDINTEN_MASK);
694}
695
/*
 * One-time controller initialization: power on, full reset, 400KHz
 * enumeration clock, interrupt and timeout defaults, FIFO watermarks and
 * DMA detection.  Returns 0 on success, -EIO if the reset never completes.
 */
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	/* Optional board hook runs before the controller is touched */
	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	/* Clear pending interrupts and mask them all */
	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	/* Maximum data/response timeout value */
	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	/* NOTE(review): BMOD bit 0 appears to be the IDMAC soft reset -- confirm */
	dwmci_writel(host, DWMCI_BMOD, 1);
	dwmci_init_fifo(host);
	dwmci_init_dma(host);

	/* Leave the card clock gated until set_ios programs it */
	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}
727
#ifdef CONFIG_DM_MMC
/* Driver-model probe: run the common init path on the bound mmc device */
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

/* Driver-model ops; initialization happens in dwmci_probe() */
const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
/* Legacy (non-DM) ops table; init is called through the ops entry */
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif
Pantelis Antoniouc9e75912014-02-26 19:28:45 +0200748
Jaehoon Chungbf819d02016-09-23 19:13:16 +0900749void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
750 u32 max_clk, u32 min_clk)
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000751{
Jaehoon Chungbf819d02016-09-23 19:13:16 +0900752 cfg->name = host->name;
Simon Glasseba48f92017-07-29 11:35:31 -0600753#ifndef CONFIG_DM_MMC
Simon Glass82682542016-05-14 14:03:07 -0600754 cfg->ops = &dwmci_ops;
Simon Glassff5c1b72016-06-12 23:30:23 -0600755#endif
Simon Glass82682542016-05-14 14:03:07 -0600756 cfg->f_min = min_clk;
757 cfg->f_max = max_clk;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000758
Simon Glass82682542016-05-14 14:03:07 -0600759 cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000760
Jaehoon Chungbf819d02016-09-23 19:13:16 +0900761 cfg->host_caps = host->caps;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000762
Jaehoon Chungbf819d02016-09-23 19:13:16 +0900763 if (host->buswidth == 8) {
Simon Glass82682542016-05-14 14:03:07 -0600764 cfg->host_caps |= MMC_MODE_8BIT;
765 cfg->host_caps &= ~MMC_MODE_4BIT;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000766 } else {
Simon Glass82682542016-05-14 14:03:07 -0600767 cfg->host_caps |= MMC_MODE_4BIT;
768 cfg->host_caps &= ~MMC_MODE_8BIT;
Jaehoon Chung7cf73072012-10-15 19:10:29 +0000769 }
Simon Glass82682542016-05-14 14:03:07 -0600770 cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;
771
772 cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
773}
Pantelis Antoniou2c850462014-03-11 19:34:20 +0200774
#ifdef CONFIG_BLK
/* Driver-model build: bind the mmc device to the block layer */
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
/* Legacy registration: fill the config and create the mmc device */
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif