// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 */

#include <bouncebuf.h>
#include <cpu_func.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <power/regulator.h>

#define PAGE_SIZE 4096

/* Internal DMA Controller (IDMAC) descriptor for 32-bit addressing mode */
struct dwmci_idmac32 {
	u32 des0;	/* Control descriptor */
	u32 des1;	/* Buffer size */
	u32 des2;	/* Buffer physical address */
	u32 des3;	/* Next descriptor physical address */
} __aligned(ARCH_DMA_MINALIGN);

/* Internal DMA Controller (IDMAC) descriptor for 64-bit addressing mode */
struct dwmci_idmac64 {
	u32 des0;	/* Control descriptor */
	u32 des1;	/* Reserved */
	u32 des2;	/* Buffer sizes */
	u32 des3;	/* Reserved */
	u32 des4;	/* Lower 32-bits of Buffer Address Pointer 1 */
	u32 des5;	/* Upper 32-bits of Buffer Address Pointer 1 */
	u32 des6;	/* Lower 32-bits of Next Descriptor Address */
	u32 des7;	/* Upper 32-bits of Next Descriptor Address */
} __aligned(ARCH_DMA_MINALIGN);

/* Register offsets for DW MMC blocks with 32-bit IDMAC */
static const struct dwmci_idmac_regs dwmci_idmac_regs32 = {
	.dbaddrl	= DWMCI_DBADDR,
	.idsts		= DWMCI_IDSTS,
	.idinten	= DWMCI_IDINTEN,
	.dscaddrl	= DWMCI_DSCADDR,
	.bufaddrl	= DWMCI_BUFADDR,
};

/* Register offsets for DW MMC blocks with 64-bit IDMAC */
static const struct dwmci_idmac_regs dwmci_idmac_regs64 = {
	.dbaddrl	= DWMCI_DBADDRL,
	.dbaddru	= DWMCI_DBADDRU,
	.idsts		= DWMCI_IDSTS64,
	.idinten	= DWMCI_IDINTEN64,
	.dscaddrl	= DWMCI_DSCADDRL,
	.dscaddru	= DWMCI_DSCADDRU,
	.bufaddrl	= DWMCI_BUFADDRL,
	.bufaddru	= DWMCI_BUFADDRU,
};

static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

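/* Fill one 32-bit IDMAC descriptor and chain it to the next one in memory */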
static void dwmci_set_idma_desc32(struct dwmci_idmac32 *desc, u32 control,
				  u32 buf_size, u32 buf_addr)
{
	phys_addr_t desc_phys = virt_to_phys(desc);
	u32 next_desc_phys = desc_phys + sizeof(struct dwmci_idmac32);

	desc->des0 = control;
	desc->des1 = buf_size;
	desc->des2 = buf_addr;
	desc->des3 = next_desc_phys;
}

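/* Fill one 64-bit IDMAC descriptor and chain it to the next one in memory */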
static void dwmci_set_idma_desc64(struct dwmci_idmac64 *desc, u32 control,
				  u32 buf_size, u64 buf_addr)
{
	phys_addr_t desc_phys = virt_to_phys(desc);
	u64 next_desc_phys = desc_phys + sizeof(struct dwmci_idmac64);

	desc->des0 = control;
	desc->des1 = 0;
	desc->des2 = buf_size;
	desc->des3 = 0;
	desc->des4 = buf_addr & 0xffffffff;
	desc->des5 = buf_addr >> 32;
	desc->des6 = next_desc_phys & 0xffffffff;
	desc->des7 = next_desc_phys >> 32;
}

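/*
 * Build the IDMAC descriptor chain for a transfer (up to 8 blocks per
 * descriptor) and flush it to memory so the DMA engine sees it.
 */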
static void dwmci_prepare_desc(struct dwmci_host *host, struct mmc_data *data,
			       void *cur_idmac, void *bounce_buffer)
{
	struct dwmci_idmac32 *desc32 = cur_idmac;
	struct dwmci_idmac64 *desc64 = cur_idmac;
	ulong data_start, data_end;
	unsigned int blk_cnt, i;

	data_start = (ulong)cur_idmac;
	blk_cnt = data->blocks;

	for (i = 0;; i++) {
		phys_addr_t buf_phys = virt_to_phys(bounce_buffer);
		unsigned int flags, cnt;

		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		if (i == 0)
			flags |= DWMCI_IDMAC_FS;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else {
			cnt = data->blocksize * 8;
		}

		if (host->dma_64bit_address) {
			dwmci_set_idma_desc64(desc64, flags, cnt,
					      buf_phys + i * PAGE_SIZE);
			desc64++;
		} else {
			dwmci_set_idma_desc32(desc32, flags, cnt,
					      buf_phys + i * PAGE_SIZE);
			desc32++;
		}

		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
	}

	if (host->dma_64bit_address)
		data_end = (ulong)desc64;
	else
		data_end = (ulong)desc32;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));
}

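/*
 * Program the IDMAC descriptor base address, build the descriptor chain and
 * enable internal DMA for the upcoming transfer.
 */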
static void dwmci_prepare_data(struct dwmci_host *host, struct mmc_data *data,
			       void *cur_idmac, void *bounce_buffer)
{
	const u32 idmacl = virt_to_phys(cur_idmac) & 0xffffffff;
	const u32 idmacu = (u64)virt_to_phys(cur_idmac) >> 32;
	unsigned long ctrl;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, host->regs->idsts, 0xffffffff);

	dwmci_writel(host, host->regs->dbaddrl, idmacl);
	if (host->dma_64bit_address)
		dwmci_writel(host, host->regs->dbaddru, idmacu);

	dwmci_prepare_desc(host, data, cur_idmac, bounce_buffer);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

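/*
 * Poll the STATUS register until the given FIFO flag clears; the last STATUS
 * value read is returned via @len.
 */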
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	*len = dwmci_readl(host, DWMCI_STATUS);
	while (--timeout && (*len & bit)) {
		udelay(200);
		*len = dwmci_readl(host, DWMCI_STATUS);
	}

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

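/*
 * Estimate a generous data transfer timeout in milliseconds (at least one
 * second) from the transfer size, bus clock, bus width and DDR mode.
 */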
static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= mmc->clock;
	timeout /= mmc->bus_width;
	timeout /= mmc->ddr_mode ? 2 : 1;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 1000) ? 1000 : timeout;

	return timeout;
}

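/*
 * Wait for the data transfer to complete. In FIFO (PIO) mode the data is
 * moved to/from the FIFO here; in DMA mode only the status bits are polled.
 */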
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size, i, len = 0;
	u32 *buf = NULL;
	ulong start = get_timer(0);

	size = data->blocksize * data->blocks;
	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	timeout = dwmci_get_timeout(mmc, size);

	size /= 4;

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		if (host->fifo_mode && size) {
			len = 0;
			if (data->flags == MMC_DATA_READ &&
			    (mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO))) {
				dwmci_writel(host, DWMCI_RINTSTS,
					     mask & (DWMCI_INTMSK_RXDR |
						     DWMCI_INTMSK_DTO));
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_EMPTY,
							&len);
					if (ret < 0)
						break;

					len = (len >> DWMCI_FIFO_SHIFT) &
						    DWMCI_FIFO_MASK;
					len = min(size, len);
					for (i = 0; i < len; i++)
						*buf++ =
						dwmci_readl(host, DWMCI_DATA);
					size = size > len ? (size - len) : 0;
				}
			} else if (data->flags == MMC_DATA_WRITE &&
				   (mask & DWMCI_INTMSK_TXDR)) {
				while (size) {
					ret = dwmci_fifo_ready(host,
							DWMCI_FIFO_FULL,
							&len);
					if (ret < 0)
						break;

					len = host->fifo_depth - ((len >>
						   DWMCI_FIFO_SHIFT) &
						   DWMCI_FIFO_MASK);
					len = min(size, len);
					for (i = 0; i < len; i++)
						dwmci_writel(host, DWMCI_DATA,
							     *buf++);
					size = size > len ? (size - len) : 0;
				}
				dwmci_writel(host, DWMCI_RINTSTS,
					     DWMCI_INTMSK_TXDR);
			}
		}

		/* Data arrived correctly */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n", __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

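/*
 * Wait for the IDMAC to signal receive/transmit completion, then disable DMA
 * and release the bounce buffer.
 */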
static int dwmci_dma_transfer(struct dwmci_host *host, uint flags,
			      struct bounce_buffer *bbstate)
{
	int ret;
	u32 mask, ctrl;

	if (flags == MMC_DATA_READ)
		mask = DWMCI_IDINTEN_RI;
	else
		mask = DWMCI_IDINTEN_TI;

	ret = wait_for_bit_le32(host->ioaddr + host->regs->idsts, mask, true,
				1000, false);
	if (ret)
		debug("%s: DWMCI_IDINTEN mask 0x%x timeout\n", __func__, mask);

	/* Clear interrupts */
	dwmci_writel(host, host->regs->idsts, DWMCI_IDINTEN_MASK);

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl &= ~DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	bounce_buffer_stop(bbstate);
	return ret;
}

static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

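/* Wait for the data busy flag to clear before issuing a new command */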
static void dwmci_wait_while_busy(struct dwmci_host *host, struct mmc_cmd *cmd)
{
	unsigned int timeout = 500; /* msec */
	ulong start;

	start = get_timer(0);
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy, continue anyway\n",
			      __func__);
			break;
		}
	}
}

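/* Send one command (and optional data) using either FIFO or IDMAC transfers */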
static int dwmci_send_cmd_common(struct dwmci_host *host, struct mmc_cmd *cmd,
				 struct mmc_data *data, void *cur_idmac)
{
	int ret = 0, flags = 0, i;
	u32 retry = 100000;
	u32 mask;
	struct bounce_buffer bbstate;

	dwmci_wait_while_busy(host, cmd);
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->dest,
						data->blocksize *
						data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
						(void *)data->src,
						data->blocksize *
						data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -EBUSY;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG;

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);
		if (!host->fifo_mode)
			ret = dwmci_dma_transfer(host, data->flags, &bbstate);
	}

	udelay(100);

	return ret;
}

#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	const size_t buf_size = data ? DIV_ROUND_UP(data->blocks, 8) : 0;

	if (host->dma_64bit_address) {
		ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac64, idmac, buf_size);
		return dwmci_send_cmd_common(host, cmd, data, idmac);
	} else {
		ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac32, idmac, buf_size);
		return dwmci_send_cmd_common(host, cmd, data, idmac);
	}
}

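/*
 * Gate or ungate the card clock and notify the CIU with a clock-update-only
 * command.
 */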
static int dwmci_control_clken(struct dwmci_host *host, bool on)
{
	const u32 val = on ? DWMCI_CLKEN_ENABLE | DWMCI_CLKEN_LOW_PWR : 0;
	const u32 cmd_only_clk = DWMCI_CMD_PRV_DAT_WAIT | DWMCI_CMD_UPD_CLK;
	int timeout = 10000;
	u32 status;

	dwmci_writel(host, DWMCI_CLKENA, val);

	/* Inform CIU */
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_START | cmd_only_clk);
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	return 0;
}

/*
 * Update the clock divider.
 *
 * To prevent a clock glitch keep the clock stopped during the update of
 * clock divider and clock source.
 */
static int dwmci_update_div(struct dwmci_host *host, u32 div)
{
	int ret;

	/* Disable clock */
	ret = dwmci_control_clken(host, false);
	if (ret)
		return ret;

	/* Set clock to desired speed */
	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	/* Enable clock */
	return dwmci_control_clken(host, true);
}

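/* Set the card clock to the requested frequency, derived from the source clock */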
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div;
	unsigned long sclk;
	int ret;

	if (freq == host->clock || freq == 0)
		return 0;

	/*
	 * If host->get_mmc_clk isn't defined, then assume that host->bus_hz
	 * is the source clock value; host->bus_hz should be set by the user.
	 */
	if (host->get_mmc_clk) {
		sclk = host->get_mmc_clk(host, freq);
	} else if (host->bus_hz) {
		sclk = host->bus_hz;
	} else {
		debug("%s: Didn't get source clock value\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0; /* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	ret = dwmci_update_div(host, div);
	if (ret)
		return ret;

	host->clock = freq;

	return 0;
}

#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Bus width = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel) {
		int ret;

		ret = host->clksel(host);
		if (ret)
			return ret;
	}

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, false);
		if (ret)
			return ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}

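/* Detect the FIFO depth if not provided and program the FIFO watermarks */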
static void dwmci_init_fifo(struct dwmci_host *host)
{
	u32 fifo_thr, fifoth_val;

	if (!host->fifo_depth) {
		u32 fifo_size;

		/*
		 * Automatically detect FIFO depth from FIFOTH register.
		 * Power-on value of RX_WMark is FIFO_DEPTH-1.
		 */
		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifo_depth = fifo_size;
	}

	fifo_thr = host->fifo_depth / 2;
	fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_thr - 1) | TX_WMARK(fifo_thr);
	dwmci_writel(host, DWMCI_FIFOTH, fifoth_val);
}

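/*
 * Select the 32-bit or 64-bit IDMAC register layout based on the HCON
 * register and enable the IDMAC interrupts.
 */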
static void dwmci_init_dma(struct dwmci_host *host)
{
	int addr_config;

	if (host->fifo_mode)
		return;

	addr_config = (dwmci_readl(host, DWMCI_HCON) >> 27) & 0x1;
	if (addr_config == 1) {
		host->dma_64bit_address = true;
		host->regs = &dwmci_idmac_regs64;
		debug("%s: IDMAC supports 64-bit address mode\n", __func__);
	} else {
		host->dma_64bit_address = false;
		host->regs = &dwmci_idmac_regs32;
		debug("%s: IDMAC supports 32-bit address mode\n", __func__);
	}

	dwmci_writel(host, host->regs->idinten, DWMCI_IDINTEN_MASK);
}

static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xffffffff);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xffffffff);

	dwmci_writel(host, DWMCI_BMOD, 1);
	dwmci_init_fifo(host);
	dwmci_init_dma(host);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	return 0;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd	= dwmci_send_cmd,
	.set_ios	= dwmci_set_ios,
	.init		= dwmci_init,
};
#endif

void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (!host->mmc)
		return -1;

	return 0;
}
#endif