// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2012 SAMSUNG Electronics
 * Jaehoon Chung <jh80.chung@samsung.com>
 * Rajeshawari Shinde <rajeshwari.s@samsung.com>
 */

#include <bouncebuf.h>
#include <cpu_func.h>
#include <errno.h>
#include <log.h>
#include <malloc.h>
#include <memalign.h>
#include <mmc.h>
#include <dwmmc.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <linux/delay.h>
#include <power/regulator.h>

#define PAGE_SIZE 4096

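/*
 * Internal DMA controller (IDMAC) descriptor. Each descriptor covers up to
 * eight blocks (one PAGE_SIZE chunk of the bounce buffer) and is chained to
 * the descriptor that follows it in memory via next_addr.
 */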
struct dwmci_idmac {
	u32 flags;
	u32 cnt;
	u32 addr;
	u32 next_addr;
} __aligned(ARCH_DMA_MINALIGN);

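/*
 * Request a controller reset by writing @value to CTRL and poll until the
 * reset bits have cleared. Returns 1 on success, 0 on timeout.
 */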
static int dwmci_wait_reset(struct dwmci_host *host, u32 value)
{
	unsigned long timeout = 1000;
	u32 ctrl;

	dwmci_writel(host, DWMCI_CTRL, value);

	while (timeout--) {
		ctrl = dwmci_readl(host, DWMCI_CTRL);
		if (!(ctrl & DWMCI_RESET_ALL))
			return 1;
	}
	return 0;
}

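/* Fill one IDMAC descriptor and link it to the descriptor following it in memory. */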
static void dwmci_set_idma_desc(struct dwmci_idmac *idmac,
				u32 desc0, u32 desc1, u32 desc2)
{
	struct dwmci_idmac *desc = idmac;

	desc->flags = desc0;
	desc->cnt = desc1;
	desc->addr = desc2;
	desc->next_addr = (ulong)desc + sizeof(struct dwmci_idmac);
}

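/*
 * Build the IDMAC descriptor chain describing @data in the bounce buffer,
 * flush it to memory and enable internal DMA in the CTRL and BMOD registers.
 */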
static void dwmci_prepare_data(struct dwmci_host *host,
			       struct mmc_data *data,
			       struct dwmci_idmac *cur_idmac,
			       void *bounce_buffer)
{
	unsigned long ctrl;
	unsigned int i = 0, flags, cnt, blk_cnt;
	ulong data_start, data_end;

	blk_cnt = data->blocks;

	dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);

	/* Clear IDMAC interrupt */
	dwmci_writel(host, DWMCI_IDSTS, 0xFFFFFFFF);

	data_start = (ulong)cur_idmac;
	dwmci_writel(host, DWMCI_DBADDR, (ulong)cur_idmac);

	do {
		flags = DWMCI_IDMAC_OWN | DWMCI_IDMAC_CH;
		flags |= (i == 0) ? DWMCI_IDMAC_FS : 0;
		if (blk_cnt <= 8) {
			flags |= DWMCI_IDMAC_LD;
			cnt = data->blocksize * blk_cnt;
		} else {
			cnt = data->blocksize * 8;
		}

		dwmci_set_idma_desc(cur_idmac, flags, cnt,
				    (ulong)bounce_buffer + (i * PAGE_SIZE));

		cur_idmac++;
		if (blk_cnt <= 8)
			break;
		blk_cnt -= 8;
		i++;
	} while (1);

	data_end = (ulong)cur_idmac;
	flush_dcache_range(data_start, roundup(data_end, ARCH_DMA_MINALIGN));

	ctrl = dwmci_readl(host, DWMCI_CTRL);
	ctrl |= DWMCI_IDMAC_EN | DWMCI_DMA_EN;
	dwmci_writel(host, DWMCI_CTRL, ctrl);

	ctrl = dwmci_readl(host, DWMCI_BMOD);
	ctrl |= DWMCI_BMOD_IDMAC_FB | DWMCI_BMOD_IDMAC_EN;
	dwmci_writel(host, DWMCI_BMOD, ctrl);

	dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
	dwmci_writel(host, DWMCI_BYTCNT, data->blocksize * data->blocks);
}

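/*
 * Poll the STATUS register until @bit is deasserted, storing the last value
 * read in @len. Returns 0 when the FIFO is ready, -ETIMEDOUT otherwise.
 */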
static int dwmci_fifo_ready(struct dwmci_host *host, u32 bit, u32 *len)
{
	u32 timeout = 20000;

	*len = dwmci_readl(host, DWMCI_STATUS);
	while (--timeout && (*len & bit)) {
		udelay(200);
		*len = dwmci_readl(host, DWMCI_STATUS);
	}

	if (!timeout) {
		debug("%s: FIFO underflow timeout\n", __func__);
		return -ETIMEDOUT;
	}

	return 0;
}

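/*
 * Estimate the data transfer timeout in milliseconds: the time needed to
 * clock @size bytes at the current bus clock and width, with a 10x margin
 * and a lower bound of one second.
 */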
static unsigned int dwmci_get_timeout(struct mmc *mmc, const unsigned int size)
{
	unsigned int timeout;

	timeout = size * 8;	/* counting in bits */
	timeout *= 10;		/* wait 10 times as long */
	timeout /= mmc->clock;
	timeout /= mmc->bus_width;
	timeout /= mmc->ddr_mode ? 2 : 1;
	timeout *= 1000;	/* counting in msec */
	timeout = (timeout < 1000) ? 1000 : timeout;

	return timeout;
}

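/*
 * Service the data FIFO in PIO mode: drain it on reads when RXDR/DTO is
 * signalled, or refill it on writes when TXDR is signalled. Does nothing
 * in internal DMA mode.
 */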
static int dwmci_data_transfer_fifo(struct dwmci_host *host,
				    struct mmc_data *data, u32 mask)
{
	const u32 fifo_depth = (((host->fifoth_val & RX_WMARK_MASK) >>
				 RX_WMARK_SHIFT) + 1) * 2;
	const u32 int_rx = mask & (DWMCI_INTMSK_RXDR | DWMCI_INTMSK_DTO);
	const u32 int_tx = mask & DWMCI_INTMSK_TXDR;
	int ret = 0;
	u32 len = 0, size, i;
	u32 *buf;

	size = (data->blocksize * data->blocks) / 4;
	if (!host->fifo_mode || !size)
		return 0;

	if (data->flags == MMC_DATA_READ)
		buf = (unsigned int *)data->dest;
	else
		buf = (unsigned int *)data->src;

	if (data->flags == MMC_DATA_READ && int_rx) {
		dwmci_writel(host, DWMCI_RINTSTS, int_rx);
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_EMPTY, &len);
			if (ret < 0)
				break;

			len = (len >> DWMCI_FIFO_SHIFT) & DWMCI_FIFO_MASK;
			len = min(size, len);
			for (i = 0; i < len; i++)
				*buf++ = dwmci_readl(host, DWMCI_DATA);
			size = size > len ? (size - len) : 0;
		}
	} else if (data->flags == MMC_DATA_WRITE && int_tx) {
		while (size) {
			ret = dwmci_fifo_ready(host, DWMCI_FIFO_FULL, &len);
			if (ret < 0)
				break;

			len = fifo_depth - ((len >> DWMCI_FIFO_SHIFT) &
					    DWMCI_FIFO_MASK);
			len = min(size, len);
			for (i = 0; i < len; i++)
				dwmci_writel(host, DWMCI_DATA, *buf++);
			size = size > len ? (size - len) : 0;
		}
		dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_TXDR);
	}

	return ret;
}

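/*
 * Wait for the data phase of a transfer to finish, servicing the FIFO in
 * PIO mode along the way. Returns 0 on data-transfer-over, -EINVAL on a
 * data error and -ETIMEDOUT if the computed timeout expires.
 */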
static int dwmci_data_transfer(struct dwmci_host *host, struct mmc_data *data)
{
	struct mmc *mmc = host->mmc;
	int ret = 0;
	u32 timeout, mask, size;
	ulong start = get_timer(0);

	size = data->blocksize * data->blocks;
	timeout = dwmci_get_timeout(mmc, size);

	for (;;) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		/* Error during data transfer. */
		if (mask & (DWMCI_DATA_ERR | DWMCI_DATA_TOUT)) {
			debug("%s: DATA ERROR!\n", __func__);
			ret = -EINVAL;
			break;
		}

		ret = dwmci_data_transfer_fifo(host, data, mask);

		/* Data arrived correctly. */
		if (mask & DWMCI_INTMSK_DTO) {
			ret = 0;
			break;
		}

		/* Check for timeout. */
		if (get_timer(start) > timeout) {
			debug("%s: Timeout waiting for data!\n", __func__);
			ret = -ETIMEDOUT;
			break;
		}
	}

	dwmci_writel(host, DWMCI_RINTSTS, mask);

	return ret;
}

static int dwmci_set_transfer_mode(struct dwmci_host *host,
				   struct mmc_data *data)
{
	unsigned long mode;

	mode = DWMCI_CMD_DATA_EXP;
	if (data->flags & MMC_DATA_WRITE)
		mode |= DWMCI_CMD_RW;

	return mode;
}

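/*
 * Wait for the card to deassert its busy indication (DWMCI_BUSY in STATUS)
 * before issuing the next command; after 500 ms the wait is abandoned and
 * the command is sent anyway.
 */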
static void dwmci_wait_while_busy(struct dwmci_host *host, struct mmc_cmd *cmd)
{
	unsigned int timeout = 500; /* msec */
	ulong start;

	start = get_timer(0);
	while (dwmci_readl(host, DWMCI_STATUS) & DWMCI_BUSY) {
		if (get_timer(start) > timeout) {
			debug("%s: Timeout on data busy, continue anyway\n",
			      __func__);
			break;
		}
	}
}

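/*
 * Send one MMC command and, if @data is set, run the associated data
 * transfer either through the internal DMA engine (via a bounce buffer)
 * or in FIFO/PIO mode.
 */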
#ifdef CONFIG_DM_MMC
static int dwmci_send_cmd(struct udevice *dev, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd,
			  struct mmc_data *data)
{
#endif
	struct dwmci_host *host = mmc->priv;
	ALLOC_CACHE_ALIGN_BUFFER(struct dwmci_idmac, cur_idmac,
				 data ? DIV_ROUND_UP(data->blocks, 8) : 0);
	int ret = 0, flags = 0, i;
	u32 retry = 100000;
	u32 mask, ctrl;
	struct bounce_buffer bbstate;

	dwmci_wait_while_busy(host, cmd);
	dwmci_writel(host, DWMCI_RINTSTS, DWMCI_INTMSK_ALL);

	if (data) {
		if (host->fifo_mode) {
			dwmci_writel(host, DWMCI_BLKSIZ, data->blocksize);
			dwmci_writel(host, DWMCI_BYTCNT,
				     data->blocksize * data->blocks);
			dwmci_wait_reset(host, DWMCI_CTRL_FIFO_RESET);
		} else {
			if (data->flags == MMC_DATA_READ) {
				ret = bounce_buffer_start(&bbstate,
							  (void *)data->dest,
							  data->blocksize *
							  data->blocks, GEN_BB_WRITE);
			} else {
				ret = bounce_buffer_start(&bbstate,
							  (void *)data->src,
							  data->blocksize *
							  data->blocks, GEN_BB_READ);
			}

			if (ret)
				return ret;

			dwmci_prepare_data(host, data, cur_idmac,
					   bbstate.bounce_buffer);
		}
	}

	dwmci_writel(host, DWMCI_CMDARG, cmd->cmdarg);

	if (data)
		flags = dwmci_set_transfer_mode(host, data);

	if ((cmd->resp_type & MMC_RSP_136) && (cmd->resp_type & MMC_RSP_BUSY))
		return -EBUSY;

	if (cmd->cmdidx == MMC_CMD_STOP_TRANSMISSION)
		flags |= DWMCI_CMD_ABORT_STOP;
	else
		flags |= DWMCI_CMD_PRV_DAT_WAIT;

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		flags |= DWMCI_CMD_RESP_EXP;
		if (cmd->resp_type & MMC_RSP_136)
			flags |= DWMCI_CMD_RESP_LENGTH;
	}

	if (cmd->resp_type & MMC_RSP_CRC)
		flags |= DWMCI_CMD_CHECK_CRC;

	flags |= (cmd->cmdidx | DWMCI_CMD_START | DWMCI_CMD_USE_HOLD_REG);

	debug("Sending CMD%d\n", cmd->cmdidx);

	dwmci_writel(host, DWMCI_CMD, flags);

	for (i = 0; i < retry; i++) {
		mask = dwmci_readl(host, DWMCI_RINTSTS);
		if (mask & DWMCI_INTMSK_CDONE) {
			if (!data)
				dwmci_writel(host, DWMCI_RINTSTS, mask);
			break;
		}
	}

	if (i == retry) {
		debug("%s: Timeout.\n", __func__);
		return -ETIMEDOUT;
	}

	if (mask & DWMCI_INTMSK_RTO) {
		/*
		 * Timeout here is not necessarily fatal. (e)MMC cards
		 * will splat here when they receive CMD55 as they do
		 * not support this command and that is exactly the way
		 * to tell them apart from SD cards. Thus, this output
		 * below shall be debug(). eMMC cards also do not favor
		 * CMD8, please keep that in mind.
		 */
		debug("%s: Response Timeout.\n", __func__);
		return -ETIMEDOUT;
	} else if (mask & DWMCI_INTMSK_RE) {
		debug("%s: Response Error.\n", __func__);
		return -EIO;
	} else if ((cmd->resp_type & MMC_RSP_CRC) &&
		   (mask & DWMCI_INTMSK_RCRC)) {
		debug("%s: Response CRC Error.\n", __func__);
		return -EIO;
	}

	if (cmd->resp_type & MMC_RSP_PRESENT) {
		if (cmd->resp_type & MMC_RSP_136) {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP3);
			cmd->response[1] = dwmci_readl(host, DWMCI_RESP2);
			cmd->response[2] = dwmci_readl(host, DWMCI_RESP1);
			cmd->response[3] = dwmci_readl(host, DWMCI_RESP0);
		} else {
			cmd->response[0] = dwmci_readl(host, DWMCI_RESP0);
		}
	}

	if (data) {
		ret = dwmci_data_transfer(host, data);

		/* Only DMA mode needs this. */
		if (!host->fifo_mode) {
			if (data->flags == MMC_DATA_READ)
				mask = DWMCI_IDINTEN_RI;
			else
				mask = DWMCI_IDINTEN_TI;
			ret = wait_for_bit_le32(host->ioaddr + DWMCI_IDSTS,
						mask, true, 1000, false);
			if (ret)
				debug("%s: DWMCI_IDINTEN mask 0x%x timeout.\n",
				      __func__, mask);
			/* Clear interrupts. */
			dwmci_writel(host, DWMCI_IDSTS, DWMCI_IDINTEN_MASK);

			ctrl = dwmci_readl(host, DWMCI_CTRL);
			ctrl &= ~(DWMCI_DMA_EN);
			dwmci_writel(host, DWMCI_CTRL, ctrl);
			bounce_buffer_stop(&bbstate);
		}
	}

	udelay(100);

	return ret;
}

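/*
 * Gate or ungate the card clock (with low-power mode when enabling) and
 * inform the CIU of the change by issuing a clock-update-only command.
 */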
static int dwmci_control_clken(struct dwmci_host *host, bool on)
{
	const u32 val = on ? DWMCI_CLKEN_ENABLE | DWMCI_CLKEN_LOW_PWR : 0;
	const u32 cmd_only_clk = DWMCI_CMD_PRV_DAT_WAIT | DWMCI_CMD_UPD_CLK;
	int timeout = 10000;
	u32 status;

	dwmci_writel(host, DWMCI_CLKENA, val);

	/* Inform CIU */
	dwmci_writel(host, DWMCI_CMD, DWMCI_CMD_START | cmd_only_clk);
	do {
		status = dwmci_readl(host, DWMCI_CMD);
		if (timeout-- < 0) {
			debug("%s: Timeout!\n", __func__);
			return -ETIMEDOUT;
		}
	} while (status & DWMCI_CMD_START);

	return 0;
}

/*
 * Update the clock divider.
 *
 * To prevent a clock glitch keep the clock stopped during the update of
 * clock divider and clock source.
 */
static int dwmci_update_div(struct dwmci_host *host, u32 div)
{
	int ret;

	/* Disable clock */
	ret = dwmci_control_clken(host, false);
	if (ret)
		return ret;

	/* Set clock to desired speed */
	dwmci_writel(host, DWMCI_CLKDIV, div);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	/* Enable clock */
	return dwmci_control_clken(host, true);
}

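/*
 * Program the clock divider for the requested card clock @freq. The card
 * clock is the source clock divided by 2 * CLKDIV, so a divider of 0
 * selects bypass mode and any other value is rounded up so the resulting
 * clock does not exceed the requested frequency.
 */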
static int dwmci_setup_bus(struct dwmci_host *host, u32 freq)
{
	u32 div;
	unsigned long sclk;
	int ret;

	if ((freq == host->clock) || (freq == 0))
		return 0;

	/*
	 * If host->get_mmc_clk isn't defined, assume that host->bus_hz is
	 * the source clock value; it must then be set by the user.
	 */
	if (host->get_mmc_clk) {
		sclk = host->get_mmc_clk(host, freq);
	} else if (host->bus_hz) {
		sclk = host->bus_hz;
	} else {
		debug("%s: Didn't get source clock value.\n", __func__);
		return -EINVAL;
	}

	if (sclk == freq)
		div = 0;	/* bypass mode */
	else
		div = DIV_ROUND_UP(sclk, 2 * freq);

	ret = dwmci_update_div(host, div);
	if (ret)
		return ret;

	host->clock = freq;

	return 0;
}

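/*
 * Apply the current mmc settings to the controller: bus clock, bus width,
 * DDR mode, an optional host-specific clock selection hook and, when a
 * vqmmc regulator is available, the I/O signalling voltage.
 */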
#ifdef CONFIG_DM_MMC
static int dwmci_set_ios(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);
#else
static int dwmci_set_ios(struct mmc *mmc)
{
#endif
	struct dwmci_host *host = (struct dwmci_host *)mmc->priv;
	u32 ctype, regs;

	debug("Buswidth = %d, clock: %d\n", mmc->bus_width, mmc->clock);

	dwmci_setup_bus(host, mmc->clock);
	switch (mmc->bus_width) {
	case 8:
		ctype = DWMCI_CTYPE_8BIT;
		break;
	case 4:
		ctype = DWMCI_CTYPE_4BIT;
		break;
	default:
		ctype = DWMCI_CTYPE_1BIT;
		break;
	}

	dwmci_writel(host, DWMCI_CTYPE, ctype);

	regs = dwmci_readl(host, DWMCI_UHS_REG);
	if (mmc->ddr_mode)
		regs |= DWMCI_DDR_MODE;
	else
		regs &= ~DWMCI_DDR_MODE;

	dwmci_writel(host, DWMCI_UHS_REG, regs);

	if (host->clksel) {
		int ret;

		ret = host->clksel(host);
		if (ret)
			return ret;
	}

#if CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vqmmc_supply) {
		int ret;

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, false);
		if (ret)
			return ret;

		if (mmc->signal_voltage == MMC_SIGNAL_VOLTAGE_180)
			regulator_set_value(mmc->vqmmc_supply, 1800000);
		else
			regulator_set_value(mmc->vqmmc_supply, 3300000);

		ret = regulator_set_enable_if_allowed(mmc->vqmmc_supply, true);
		if (ret)
			return ret;
	}
#endif

	return 0;
}

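/*
 * Program the FIFO watermarks. If the board did not provide fifoth_val,
 * derive one from the FIFOTH reset value, whose RX_WMARK field is assumed
 * to encode the FIFO depth minus one, and set the RX/TX watermarks to half
 * the FIFO depth.
 */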
static void dwmci_init_fifo(struct dwmci_host *host)
{
	if (!host->fifoth_val) {
		u32 fifo_size;

		fifo_size = dwmci_readl(host, DWMCI_FIFOTH);
		fifo_size = ((fifo_size & RX_WMARK_MASK) >> RX_WMARK_SHIFT) + 1;
		host->fifoth_val = MSIZE(0x2) | RX_WMARK(fifo_size / 2 - 1) |
				   TX_WMARK(fifo_size / 2);
	}

	dwmci_writel(host, DWMCI_FIFOTH, host->fifoth_val);
}

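/*
 * Basic controller bring-up: run the optional board hook, power on the
 * card, reset the controller, switch to the enumeration clock, mask and
 * clear interrupts and set up the FIFO and internal DMA.
 */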
static int dwmci_init(struct mmc *mmc)
{
	struct dwmci_host *host = mmc->priv;

	if (host->board_init)
		host->board_init(host);

	dwmci_writel(host, DWMCI_PWREN, 1);

	if (!dwmci_wait_reset(host, DWMCI_RESET_ALL)) {
		debug("%s[%d] Fail-reset!!\n", __func__, __LINE__);
		return -EIO;
	}

	/* Enumerate at 400KHz */
	dwmci_setup_bus(host, mmc->cfg->f_min);

	dwmci_writel(host, DWMCI_RINTSTS, 0xFFFFFFFF);
	dwmci_writel(host, DWMCI_INTMASK, 0);

	dwmci_writel(host, DWMCI_TMOUT, 0xFFFFFFFF);

	dwmci_writel(host, DWMCI_IDINTEN, 0);
	dwmci_writel(host, DWMCI_BMOD, 1);
	dwmci_init_fifo(host);

	dwmci_writel(host, DWMCI_CLKENA, 0);
	dwmci_writel(host, DWMCI_CLKSRC, 0);

	if (!host->fifo_mode)
		dwmci_writel(host, DWMCI_IDINTEN, DWMCI_IDINTEN_MASK);

	return 0;
}

#ifdef CONFIG_DM_MMC
int dwmci_probe(struct udevice *dev)
{
	struct mmc *mmc = mmc_get_mmc_dev(dev);

	return dwmci_init(mmc);
}

const struct dm_mmc_ops dm_dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
};

#else
static const struct mmc_ops dwmci_ops = {
	.send_cmd = dwmci_send_cmd,
	.set_ios = dwmci_set_ios,
	.init = dwmci_init,
};
#endif

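/*
 * Fill in the mmc_config for a DWMMC host: clock limits, supported
 * voltages, bus-width capabilities and the maximum block count per
 * transfer.
 */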
void dwmci_setup_cfg(struct mmc_config *cfg, struct dwmci_host *host,
		     u32 max_clk, u32 min_clk)
{
	cfg->name = host->name;
#ifndef CONFIG_DM_MMC
	cfg->ops = &dwmci_ops;
#endif
	cfg->f_min = min_clk;
	cfg->f_max = max_clk;

	cfg->voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;

	cfg->host_caps = host->caps;

	if (host->buswidth == 8) {
		cfg->host_caps |= MMC_MODE_8BIT;
		cfg->host_caps &= ~MMC_MODE_4BIT;
	} else {
		cfg->host_caps |= MMC_MODE_4BIT;
		cfg->host_caps &= ~MMC_MODE_8BIT;
	}
	cfg->host_caps |= MMC_MODE_HS | MMC_MODE_HS_52MHz;

	cfg->b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;
}

#ifdef CONFIG_BLK
int dwmci_bind(struct udevice *dev, struct mmc *mmc, struct mmc_config *cfg)
{
	return mmc_bind(dev, mmc, cfg);
}
#else
int add_dwmci(struct dwmci_host *host, u32 max_clk, u32 min_clk)
{
	dwmci_setup_cfg(&host->cfg, host, max_clk, min_clk);

	host->mmc = mmc_create(&host->cfg, host);
	if (host->mmc == NULL)
		return -1;

	return 0;
}
#endif