// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Copyright 2020 NXP
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 */

#include <config.h>
#include <common.h>
#include <blk.h>
#include <command.h>
#include <dm.h>
#include <log.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

#define DEFAULT_CMD6_TIMEOUT_MS  500

static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);

#if !CONFIG_IS_ENABLED(DM_MMC)

static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
{
	return -ENOSYS;
}

__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif

#ifdef CONFIG_MMC_TRACE
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
}

void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
				cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
				cmd->response[0]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[1]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[2]);
			printf("\t\t          \t\t 0x%08x \n",
				cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02x ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
				cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif

#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	      [MMC_HS_400]	= "HS400 (200MHz)",
	      [MMC_HS_400_ES]	= "HS400ES (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
#endif

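/*
 * Return the nominal bus clock frequency for @mode. MMC_LEGACY has no fixed
 * frequency, so the card's recorded legacy speed is used instead.
 */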
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [MMC_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
	      [UHS_SDR104]	= 208000000,
	      [MMC_HS_200]	= 200000000,
	      [MMC_HS_400]	= 200000000,
	      [MMC_HS_400_ES]	= 200000000,
	};

	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}

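/*
 * Record @mode as the selected bus mode and update the derived transfer
 * speed and DDR flag accordingly.
 */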
static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
		 mmc->tran_speed / 1000000);
	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif

/**
 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
 *
 * @dev:	device to receive the command
 * @cmd:	command to send
 * @data:	additional data to send/receive
 * @retries:	how many times to retry; mmc_send_cmd is always called at least
 *		once
 * @return 0 if ok, -ve on error
 */
static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data, uint retries)
{
	int ret;

	do {
		ret = mmc_send_cmd(mmc, cmd, data);
	} while (ret && retries--);

	return ret;
}

/**
 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
 *			   specific quirk is enabled
 *
 * @dev:	device to receive the command
 * @cmd:	command to send
 * @data:	additional data to send/receive
 * @quirk:	retry only if this quirk is enabled
 * @retries:	how many times to retry; mmc_send_cmd is always called at least
 *		once
 * @return 0 if ok, -ve on error
 */
static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
			       struct mmc_data *data, u32 quirk, uint retries)
{
	if (CONFIG_IS_ENABLED(MMC_QUIRKS) && mmc->quirks & quirk)
		return mmc_send_cmd_retry(mmc, cmd, data, retries);
	else
		return mmc_send_cmd(mmc, cmd, data);
}

int mmc_send_status(struct mmc *mmc, unsigned int *status)
{
	struct mmc_cmd cmd;
	int ret;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
	mmc_trace_state(mmc, &cmd);
	if (!ret)
		*status = cmd.response[0];

	return ret;
}

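/*
 * Wait for the card to leave the programming state, using the host's dat0
 * polling hook when available and falling back to CMD13 status polling.
 */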
int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
{
	unsigned int status;
	int err;

	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (err != -ENOSYS)
		return err;

	while (1) {
		err = mmc_send_status(mmc, &status);
		if (err)
			return err;

		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) !=
		     MMC_STATE_PRG)
			break;

		if (status & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Status Error: 0x%08x\n", status);
#endif
			return -ECOMM;
		}

		if (timeout_ms-- <= 0)
			break;

		udelay(1000);
	}

	if (timeout_ms <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}

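/* Set the transfer block length with CMD16; DDR modes use a fixed length. */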
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	return mmc_send_cmd_quirks(mmc, &cmd, NULL,
				   MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
}

#ifdef MMC_SUPPORTS_TUNING
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
#endif

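/*
 * Read @blkcnt blocks into @dst using CMD17/CMD18, stopping an open-ended
 * multiple-block transfer with CMD12. Returns the number of blocks read,
 * or 0 on error.
 */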
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
{
	if (mmc->cfg->ops->get_b_max)
		return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
	else
		return mmc->cfg->b_max;
}
#endif

#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_plat(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;
	uint b_max;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	b_max = mmc_get_b_max(mmc, dst, blkcnt);

	do {
		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

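/* Reset the card to the idle state with CMD0. */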
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
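/*
 * Switch SD signalling to 1.8V with CMD11, gating the clock around the
 * voltage change as the SD spec requires.
 */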
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif

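/*
 * Send ACMD41 until the card reports that power-up is complete, then record
 * the OCR (reading it explicitly on SPI hosts) and, if requested, start the
 * UHS voltage switch.
 */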
591static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
Andy Flemingad347bb2008-10-30 16:41:01 -0500592{
593 int timeout = 1000;
594 int err;
595 struct mmc_cmd cmd;
596
Andrew Gabbasov034857c2015-03-19 07:44:06 -0500597 while (1) {
Andy Flemingad347bb2008-10-30 16:41:01 -0500598 cmd.cmdidx = MMC_CMD_APP_CMD;
599 cmd.resp_type = MMC_RSP_R1;
600 cmd.cmdarg = 0;
Andy Flemingad347bb2008-10-30 16:41:01 -0500601
602 err = mmc_send_cmd(mmc, &cmd, NULL);
603
604 if (err)
605 return err;
606
607 cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
608 cmd.resp_type = MMC_RSP_R3;
Stefano Babicf8e9a212010-01-20 18:20:39 +0100609
		/*
		 * Most cards do not answer if some reserved bits
		 * in the ocr are set. However, some controllers
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low-voltage SD cards is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}

static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;
	int timeout = 1000;
	uint start;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	start = get_timer(0);
	/* Ask the card for its capabilities */
	for (i = 0; ; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;

		if (get_timer(start) > timeout)
			return -ETIMEDOUT;
		udelay(100);
	}
	mmc->op_cond_pending = 1;
	return 0;
}

static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}

int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Get the card's Extended CSD register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}

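/*
 * Issue CMD6 to change an EXT_CSD byte and wait for the card to finish the
 * switch, preferring dat0 busy polling and falling back to CMD13 status
 * polling within the derived timeout.
 */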
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	unsigned int status, start;
	struct mmc_cmd cmd;
	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
			      (index == EXT_CSD_PART_CONF);
	int ret;

	if (mmc->gen_cmd6_time)
		timeout_ms = mmc->gen_cmd6_time * 10;

	if (is_part_switch && mmc->part_switch_time)
		timeout_ms = mmc->part_switch_time * 10;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
	if (ret)
		return ret;

	start = get_timer(0);

	/* poll dat0 for rdy/busy status */
	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (ret && ret != -ENOSYS)
		return ret;

	/*
	 * In cases when not allowed to poll by using CMD13 or because we aren't
	 * capable of polling by using mmc_wait_dat0, then rely on waiting the
	 * stated timeout to be sufficient.
	 */
	if (ret == -ENOSYS && !send_status) {
		mdelay(timeout_ms);
		return 0;
	}

	/* Finally wait until the card is ready or indicates a failure
	 * to switch. It doesn't hurt to use CMD13 here even if send_status
	 * is false, because by now (after 'timeout_ms' ms) the bus should be
	 * reliable.
	 */
	do {
		ret = mmc_send_status(mmc, &status);

		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
				 value);
			return -EIO;
		}
		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) == MMC_STATE_TRANS)
			return 0;
		udelay(100);
	} while (get_timer(start) < timeout_ms);

	return -ETIMEDOUT;
}

int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}

int mmc_boot_wp(struct mmc *mmc)
{
	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
}

#if !CONFIG_IS_ENABLED(MMC_TINY)
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	case MMC_HS_400_ES:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   speed_bits, !hsdowngrade);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock is still running much faster than
	 * the supported HS mode clock, so we can not reliably read out
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}

static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (mmc->card_caps & MMC_MODE_HS400)) {
		mmc->card_caps |= MMC_MODE_HS400_ES;
	}
#endif

	return 0;
}
#endif

static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

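/*
 * Select a hardware partition via EXT_CSD_PART_CONF, retrying a few times,
 * and update the block device capacity when the switch succeeds.
 */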
int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;
	int retry = 3;

	do {
		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONF,
				 (mmc->part_config & ~PART_ACCESS_MASK)
				 | (part_num & PART_ACCESS_MASK));
	} while (ret && retry--);

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && !(mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;
	}

	return cd;
}
#endif

#if !CONFIG_IS_ENABLED(MMC_TINY)
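/*
 * Issue the SD switch-function command (CMD6) for the given mode, group and
 * value and read back the 64-byte switch status block into @resp.
 */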
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}

static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);

	if (err)
		return err;

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

	/* If high-speed is supported, note the capability */
	if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
		mmc->card_caps |= MMC_CAP(SD_HS);

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001364	/* Versions before 3.0 don't support UHS modes */
1365 if (mmc->version < SD_VERSION_3)
1366 return 0;
1367
1368 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1369 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1370 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1371 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1372 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1373 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1374 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1375 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1376 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1377 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1378 mmc->card_caps |= MMC_CAP(UHS_DDR50);
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001379#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001380
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001381 return 0;
1382}
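
/*
 * Worked example of the SCR decode above (the register value is invented
 * purely for illustration): with scr[0] == 0x02858403,
 *
 *	(scr[0] >> 24) & 0xf == 2	-> at least SD_VERSION_2
 *	(scr[0] >> 15) & 0x1 == 1	-> promoted to SD_VERSION_3
 *	scr[0] & SD_DATA_4BIT != 0	-> MMC_MODE_4BIT added to card_caps
 *
 * Only a card reporting SD_VERSION_3 then has its UHS bus modes decoded
 * from the CMD6 switch status.
 */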
1383
1384static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1385{
1386 int err;
1387
1388 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001389 int speed;
Macpaul Lin24e92ec2011-11-28 16:31:09 +00001390
Marek Vasut4105e972018-11-18 03:25:08 +01001391	/* SD versions 1.00 and 1.01 do not support CMD6 */
1392 if (mmc->version == SD_VERSION_1_0)
1393 return 0;
1394
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001395 switch (mode) {
Faiz Abbas01db77e2020-02-26 13:44:32 +05301396 case MMC_LEGACY:
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001397 speed = UHS_SDR12_BUS_SPEED;
1398 break;
1399 case SD_HS:
Jean-Jacques Hiblot74c98b22018-01-04 15:23:30 +01001400 speed = HIGH_SPEED_BUS_SPEED;
1401 break;
1402#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1403 case UHS_SDR12:
1404 speed = UHS_SDR12_BUS_SPEED;
1405 break;
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001406 case UHS_SDR25:
1407 speed = UHS_SDR25_BUS_SPEED;
1408 break;
1409 case UHS_SDR50:
1410 speed = UHS_SDR50_BUS_SPEED;
1411 break;
1412 case UHS_DDR50:
1413 speed = UHS_DDR50_BUS_SPEED;
1414 break;
1415 case UHS_SDR104:
1416 speed = UHS_SDR104_BUS_SPEED;
1417 break;
Jean-Jacques Hiblot74c98b22018-01-04 15:23:30 +01001418#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001419 default:
1420 return -EINVAL;
1421 }
1422
1423 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001424 if (err)
1425 return err;
1426
Jean-Jacques Hiblote7f664e2018-02-09 12:09:27 +01001427 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001428 return -ENOTSUPP;
1429
1430 return 0;
1431}
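
/*
 * Illustration of the switch above, relying on the sd_switch() helper
 * defined earlier in this file: selecting SD_HS sends CMD6 with argument
 * 0x80fffff1, i.e. the "set" bit plus function 0x1 in group 1. The card
 * reports the group 1 function it actually selected in bits 379:376 of the
 * 64-byte switch status, which is the nibble compared against 'speed'
 * above.
 */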
Andy Flemingad347bb2008-10-30 16:41:01 -05001432
Marek Vasut8ff55fb2018-04-15 00:36:45 +02001433static int sd_select_bus_width(struct mmc *mmc, int w)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001434{
1435 int err;
1436 struct mmc_cmd cmd;
1437
1438 if ((w != 4) && (w != 1))
1439 return -EINVAL;
1440
1441 cmd.cmdidx = MMC_CMD_APP_CMD;
1442 cmd.resp_type = MMC_RSP_R1;
1443 cmd.cmdarg = mmc->rca << 16;
1444
1445 err = mmc_send_cmd(mmc, &cmd, NULL);
Andy Flemingad347bb2008-10-30 16:41:01 -05001446 if (err)
1447 return err;
1448
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001449 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1450 cmd.resp_type = MMC_RSP_R1;
1451 if (w == 4)
1452 cmd.cmdarg = 2;
1453 else if (w == 1)
1454 cmd.cmdarg = 0;
1455 err = mmc_send_cmd(mmc, &cmd, NULL);
1456 if (err)
1457 return err;
Andy Flemingad347bb2008-10-30 16:41:01 -05001458
1459 return 0;
1460}
Marek Vasuta318a7a2018-04-15 00:37:11 +02001461#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05001462
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001463#if CONFIG_IS_ENABLED(MMC_WRITE)
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001464static int sd_read_ssr(struct mmc *mmc)
1465{
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001466 static const unsigned int sd_au_size[] = {
1467 0, SZ_16K / 512, SZ_32K / 512,
1468 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1469 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1470 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1471 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1472 SZ_64M / 512,
1473 };
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001474 int err, i;
1475 struct mmc_cmd cmd;
1476 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1477 struct mmc_data data;
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001478 unsigned int au, eo, et, es;
1479
1480 cmd.cmdidx = MMC_CMD_APP_CMD;
1481 cmd.resp_type = MMC_RSP_R1;
1482 cmd.cmdarg = mmc->rca << 16;
1483
Sean Anderson86325092020-10-17 08:36:27 -04001484 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001485 if (err)
1486 return err;
1487
1488 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1489 cmd.resp_type = MMC_RSP_R1;
1490 cmd.cmdarg = 0;
1491
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001492 data.dest = (char *)ssr;
1493 data.blocksize = 64;
1494 data.blocks = 1;
1495 data.flags = MMC_DATA_READ;
1496
Sean Anderson86325092020-10-17 08:36:27 -04001497 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1498 if (err)
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001499 return err;
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001500
1501 for (i = 0; i < 16; i++)
1502 ssr[i] = be32_to_cpu(ssr[i]);
1503
1504 au = (ssr[2] >> 12) & 0xF;
1505 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1506 mmc->ssr.au = sd_au_size[au];
1507 es = (ssr[3] >> 24) & 0xFF;
1508 es |= (ssr[2] & 0xFF) << 8;
1509 et = (ssr[3] >> 18) & 0x3F;
1510 if (es && et) {
1511 eo = (ssr[3] >> 16) & 0x3;
1512 mmc->ssr.erase_timeout = (et * 1000) / es;
1513 mmc->ssr.erase_offset = eo * 1000;
1514 }
1515 } else {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001516 pr_debug("Invalid Allocation Unit Size.\n");
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001517 }
1518
1519 return 0;
1520}
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001521#endif
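
/*
 * Hedged example of the SD Status decode in sd_read_ssr() above (field
 * values invented for illustration): an AU_SIZE nibble of 0x9 selects
 * sd_au_size[9] = SZ_4M / 512 = 8192 sectors per allocation unit; with
 * ERASE_SIZE es = 100 and ERASE_TIMEOUT et = 30 the per-AU erase timeout
 * becomes (30 * 1000) / 100 = 300 ms, plus erase_offset = eo * 1000 ms.
 */
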
Andy Flemingad347bb2008-10-30 16:41:01 -05001522/* frequency bases */
1523/* divided by 10 to be nice to platforms without floating point */
Mike Frysingerb588caf2010-10-20 01:15:53 +00001524static const int fbase[] = {
Andy Flemingad347bb2008-10-30 16:41:01 -05001525 10000,
1526 100000,
1527 1000000,
1528 10000000,
1529};
1530
1531/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1532 * to platforms without floating point.
1533 */
Simon Glass03317cc2016-05-14 14:02:57 -06001534static const u8 multipliers[] = {
Andy Flemingad347bb2008-10-30 16:41:01 -05001535 0, /* reserved */
1536 10,
1537 12,
1538 13,
1539 15,
1540 20,
1541 25,
1542 30,
1543 35,
1544 40,
1545 45,
1546 50,
1547 55,
1548 60,
1549 70,
1550 80,
1551};
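
/*
 * Worked example: a CSD TRAN_SPEED byte of 0x32 (typical 25 MHz SD card)
 * decodes with the two tables above as
 *
 *	freq  = fbase[0x32 & 0x7]              = fbase[2]       = 1000000
 *	mult  = multipliers[(0x32 >> 3) & 0xf] = multipliers[6] = 25
 *	clock = freq * mult                    = 25000000 Hz
 *
 * Both tables are pre-scaled by 10, so the product needs no floating point.
 */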
1552
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001553static inline int bus_width(uint cap)
1554{
1555 if (cap == MMC_MODE_8BIT)
1556 return 8;
1557 if (cap == MMC_MODE_4BIT)
1558 return 4;
1559 if (cap == MMC_MODE_1BIT)
1560 return 1;
Jean-Jacques Hiblot678b6082017-11-30 17:44:00 +01001561	pr_warn("invalid bus width capability 0x%x\n", cap);
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001562 return 0;
1563}
1564
Simon Glasseba48f92017-07-29 11:35:31 -06001565#if !CONFIG_IS_ENABLED(DM_MMC)
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001566#ifdef MMC_SUPPORTS_TUNING
Kishon Vijay Abraham Iae7174f2017-09-21 16:30:05 +02001567static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1568{
1569 return -ENOTSUPP;
1570}
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001571#endif
Kishon Vijay Abraham Iae7174f2017-09-21 16:30:05 +02001572
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001573static int mmc_set_ios(struct mmc *mmc)
Andy Flemingad347bb2008-10-30 16:41:01 -05001574{
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001575 int ret = 0;
1576
Pantelis Antoniou2c850462014-03-11 19:34:20 +02001577 if (mmc->cfg->ops->set_ios)
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001578 ret = mmc->cfg->ops->set_ios(mmc);
1579
1580 return ret;
Andy Flemingad347bb2008-10-30 16:41:01 -05001581}
Yann Gautier6f558332019-09-19 17:56:12 +02001582
1583static int mmc_host_power_cycle(struct mmc *mmc)
1584{
1585 int ret = 0;
1586
1587 if (mmc->cfg->ops->host_power_cycle)
1588 ret = mmc->cfg->ops->host_power_cycle(mmc);
1589
1590 return ret;
1591}
Simon Glass394dfc02016-06-12 23:30:22 -06001592#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05001593
Kishon Vijay Abraham Id6246bf2017-09-21 16:30:03 +02001594int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
Andy Flemingad347bb2008-10-30 16:41:01 -05001595{
Jaehoon Chungab4d4052018-01-23 14:04:30 +09001596 if (!disable) {
Jaehoon Chung8a933292018-01-17 19:36:58 +09001597 if (clock > mmc->cfg->f_max)
1598 clock = mmc->cfg->f_max;
Andy Flemingad347bb2008-10-30 16:41:01 -05001599
Jaehoon Chung8a933292018-01-17 19:36:58 +09001600 if (clock < mmc->cfg->f_min)
1601 clock = mmc->cfg->f_min;
1602 }
Andy Flemingad347bb2008-10-30 16:41:01 -05001603
1604 mmc->clock = clock;
Kishon Vijay Abraham Id6246bf2017-09-21 16:30:03 +02001605 mmc->clk_disable = disable;
Andy Flemingad347bb2008-10-30 16:41:01 -05001606
Jaehoon Chungc8477d62018-01-26 19:25:30 +09001607 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1608
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001609 return mmc_set_ios(mmc);
Andy Flemingad347bb2008-10-30 16:41:01 -05001610}
1611
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001612static int mmc_set_bus_width(struct mmc *mmc, uint width)
Andy Flemingad347bb2008-10-30 16:41:01 -05001613{
1614 mmc->bus_width = width;
1615
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001616 return mmc_set_ios(mmc);
Andy Flemingad347bb2008-10-30 16:41:01 -05001617}
1618
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001619#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1620/*
1621 * helper function to display the capabilities in a human
1622 * friendly manner. The capabilities include bus width and
1623 * supported modes.
1624 */
1625void mmc_dump_capabilities(const char *text, uint caps)
1626{
1627 enum bus_mode mode;
1628
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001629 pr_debug("%s: widths [", text);
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001630 if (caps & MMC_MODE_8BIT)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001631 pr_debug("8, ");
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001632 if (caps & MMC_MODE_4BIT)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001633 pr_debug("4, ");
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001634 if (caps & MMC_MODE_1BIT)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001635 pr_debug("1, ");
1636 pr_debug("\b\b] modes [");
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001637 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1638 if (MMC_CAP(mode) & caps)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001639 pr_debug("%s, ", mmc_mode_name(mode));
1640 pr_debug("\b\b]\n");
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001641}
1642#endif
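
/*
 * Usage sketch (illustrative):
 *
 *	mmc_dump_capabilities("sd card", mmc->card_caps);
 *
 * prints the supported widths and, via mmc_mode_name(), the name of every
 * mode whose MMC_CAP() bit is set; the "\b\b" pairs backspace over the
 * trailing ", " separators on the console.
 */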
1643
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001644struct mode_width_tuning {
1645 enum bus_mode mode;
1646 uint widths;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001647#ifdef MMC_SUPPORTS_TUNING
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02001648 uint tuning;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001649#endif
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001650};
1651
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001652#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001653int mmc_voltage_to_mv(enum mmc_voltage voltage)
1654{
1655 switch (voltage) {
1656 case MMC_SIGNAL_VOLTAGE_000: return 0;
1657 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1658 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1659 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1660 }
1661 return -EINVAL;
1662}
1663
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001664static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1665{
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001666 int err;
1667
1668 if (mmc->signal_voltage == signal_voltage)
1669 return 0;
1670
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001671 mmc->signal_voltage = signal_voltage;
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001672 err = mmc_set_ios(mmc);
1673 if (err)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001674 pr_debug("unable to set voltage (err %d)\n", err);
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001675
1676 return err;
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001677}
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001678#else
1679static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1680{
1681 return 0;
1682}
1683#endif
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001684
Marek Vasuta318a7a2018-04-15 00:37:11 +02001685#if !CONFIG_IS_ENABLED(MMC_TINY)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001686static const struct mode_width_tuning sd_modes_by_pref[] = {
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001687#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1688#ifdef MMC_SUPPORTS_TUNING
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001689 {
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001690 .mode = UHS_SDR104,
1691 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1692 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1693 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001694#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001695 {
1696 .mode = UHS_SDR50,
1697 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1698 },
1699 {
1700 .mode = UHS_DDR50,
1701 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1702 },
1703 {
1704 .mode = UHS_SDR25,
1705 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1706 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001707#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001708 {
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001709 .mode = SD_HS,
1710 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1711 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001712#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001713 {
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001714 .mode = UHS_SDR12,
1715 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1716 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001717#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001718 {
Faiz Abbas01db77e2020-02-26 13:44:32 +05301719 .mode = MMC_LEGACY,
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001720 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1721 }
1722};
1723
1724#define for_each_sd_mode_by_pref(caps, mwt) \
1725 for (mwt = sd_modes_by_pref;\
1726 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1727 mwt++) \
1728 if (caps & MMC_CAP(mwt->mode))
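
/*
 * The helper above walks sd_modes_by_pref from fastest to slowest and runs
 * its body only for modes present in 'caps'. The trailing 'if' belongs to
 * the loop body, so the macro must be followed by exactly one statement or
 * a braced block, as in sd_select_mode_and_width() below.
 */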
1729
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02001730static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001731{
1732 int err;
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001733 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1734 const struct mode_width_tuning *mwt;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001735#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001736 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001737#else
1738 bool uhs_en = false;
1739#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001740 uint caps;
1741
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01001742#ifdef DEBUG
1743 mmc_dump_capabilities("sd card", card_caps);
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01001744 mmc_dump_capabilities("host", mmc->host_caps);
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01001745#endif
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001746
Anup Pateld9c92c72019-07-08 04:10:43 +00001747 if (mmc_host_is_spi(mmc)) {
1748 mmc_set_bus_width(mmc, 1);
Faiz Abbas01db77e2020-02-26 13:44:32 +05301749 mmc_select_mode(mmc, MMC_LEGACY);
Anup Pateld9c92c72019-07-08 04:10:43 +00001750 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
Pragnesh Patela01f57e2020-06-29 15:17:26 +05301751#if CONFIG_IS_ENABLED(MMC_WRITE)
1752 err = sd_read_ssr(mmc);
1753 if (err)
1754 pr_warn("unable to read ssr\n");
1755#endif
Anup Pateld9c92c72019-07-08 04:10:43 +00001756 return 0;
1757 }
1758
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001759 /* Restrict card's capabilities by what the host can do */
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01001760 caps = card_caps & mmc->host_caps;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001761
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001762 if (!uhs_en)
1763 caps &= ~UHS_CAPS;
1764
1765 for_each_sd_mode_by_pref(caps, mwt) {
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001766 uint *w;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001767
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001768 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001769 if (*w & caps & mwt->widths) {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001770 pr_debug("trying mode %s width %d (at %d MHz)\n",
1771 mmc_mode_name(mwt->mode),
1772 bus_width(*w),
1773 mmc_mode2freq(mmc, mwt->mode) / 1000000);
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001774
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001775 /* configure the bus width (card + host) */
1776 err = sd_select_bus_width(mmc, bus_width(*w));
1777 if (err)
1778 goto error;
1779 mmc_set_bus_width(mmc, bus_width(*w));
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001780
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001781 /* configure the bus mode (card) */
1782 err = sd_set_card_speed(mmc, mwt->mode);
1783 if (err)
1784 goto error;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001785
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001786 /* configure the bus mode (host) */
1787 mmc_select_mode(mmc, mwt->mode);
Jaehoon Chung239cb2f2018-01-26 19:25:29 +09001788 mmc_set_clock(mmc, mmc->tran_speed,
1789 MMC_CLK_ENABLE);
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001790
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001791#ifdef MMC_SUPPORTS_TUNING
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001792 /* execute tuning if needed */
1793 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1794 err = mmc_execute_tuning(mmc,
1795 mwt->tuning);
1796 if (err) {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001797 pr_debug("tuning failed\n");
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001798 goto error;
1799 }
1800 }
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001801#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001802
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001803#if CONFIG_IS_ENABLED(MMC_WRITE)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001804 err = sd_read_ssr(mmc);
Peng Fan2d2fe8e2018-03-05 16:20:40 +08001805 if (err)
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001806 pr_warn("unable to read ssr\n");
1807#endif
1808 if (!err)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001809 return 0;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001810
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001811error:
1812 /* revert to a safer bus speed */
Faiz Abbas01db77e2020-02-26 13:44:32 +05301813 mmc_select_mode(mmc, MMC_LEGACY);
Jaehoon Chung239cb2f2018-01-26 19:25:29 +09001814 mmc_set_clock(mmc, mmc->tran_speed,
1815 MMC_CLK_ENABLE);
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001816 }
1817 }
1818 }
1819
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001820 pr_err("unable to select a mode\n");
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001821 return -ENOTSUPP;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001822}
1823
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001824/*
 1825 * Read and compare the part of the ext_csd that is constant.
1826 * This can be used to check that the transfer is working
1827 * as expected.
1828 */
1829static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001830{
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001831 int err;
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02001832 const u8 *ext_csd = mmc->ext_csd;
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001833 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1834
Jean-Jacques Hiblot7ab1b622017-11-30 17:43:58 +01001835 if (mmc->version < MMC_VERSION_4)
1836 return 0;
1837
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001838 err = mmc_send_ext_csd(mmc, test_csd);
1839 if (err)
1840 return err;
1841
1842 /* Only compare read only fields */
1843 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1844 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1845 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1846 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1847 ext_csd[EXT_CSD_REV]
1848 == test_csd[EXT_CSD_REV] &&
1849 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1850 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1851 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1852 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1853 return 0;
1854
1855 return -EBADMSG;
1856}
1857
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001858#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001859static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1860 uint32_t allowed_mask)
1861{
1862 u32 card_mask = 0;
1863
1864 switch (mode) {
Peng Faneede83b2019-07-10 14:43:07 +08001865 case MMC_HS_400_ES:
Peng Fan46801252018-08-10 14:07:54 +08001866 case MMC_HS_400:
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001867 case MMC_HS_200:
Peng Fan46801252018-08-10 14:07:54 +08001868 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1869 EXT_CSD_CARD_TYPE_HS400_1_8V))
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001870 card_mask |= MMC_SIGNAL_VOLTAGE_180;
Peng Fan46801252018-08-10 14:07:54 +08001871 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1872 EXT_CSD_CARD_TYPE_HS400_1_2V))
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001873 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1874 break;
1875 case MMC_DDR_52:
1876 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1877 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1878 MMC_SIGNAL_VOLTAGE_180;
1879 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1880 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1881 break;
1882 default:
1883 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1884 break;
1885 }
1886
1887 while (card_mask & allowed_mask) {
1888 enum mmc_voltage best_match;
1889
1890 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1891 if (!mmc_set_signal_voltage(mmc, best_match))
1892 return 0;
1893
1894 allowed_mask &= ~best_match;
1895 }
1896
1897 return -ENOTSUPP;
1898}
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001899#else
1900static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1901 uint32_t allowed_mask)
1902{
1903 return 0;
1904}
1905#endif
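
/*
 * Sketch of the selection logic in mmc_set_lowest_voltage(), assuming the
 * MMC_SIGNAL_VOLTAGE_* constants are single-bit flags with lower voltages
 * in lower bits: for MMC_DDR_52 on a card supporting 3.3V and 1.8V, with
 * every voltage allowed, ffs() picks the 1.8V bit first; 3.3V is only
 * attempted if switching to 1.8V fails.
 */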
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001906
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001907static const struct mode_width_tuning mmc_modes_by_pref[] = {
Peng Faneede83b2019-07-10 14:43:07 +08001908#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1909 {
1910 .mode = MMC_HS_400_ES,
1911 .widths = MMC_MODE_8BIT,
1912 },
1913#endif
Peng Fan46801252018-08-10 14:07:54 +08001914#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1915 {
1916 .mode = MMC_HS_400,
1917 .widths = MMC_MODE_8BIT,
1918 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1919 },
1920#endif
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001921#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001922 {
1923 .mode = MMC_HS_200,
1924 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02001925 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001926 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001927#endif
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001928 {
1929 .mode = MMC_DDR_52,
1930 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1931 },
1932 {
1933 .mode = MMC_HS_52,
1934 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1935 },
1936 {
1937 .mode = MMC_HS,
1938 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1939 },
1940 {
1941 .mode = MMC_LEGACY,
1942 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1943 }
1944};
1945
1946#define for_each_mmc_mode_by_pref(caps, mwt) \
1947 for (mwt = mmc_modes_by_pref;\
1948 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1949 mwt++) \
1950 if (caps & MMC_CAP(mwt->mode))
1951
1952static const struct ext_csd_bus_width {
1953 uint cap;
1954 bool is_ddr;
1955 uint ext_csd_bits;
1956} ext_csd_bus_width[] = {
1957 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1958 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1959 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1960 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1961 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1962};
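
/*
 * Each entry above pairs a host width capability with the value written to
 * the EXT_CSD BUS_WIDTH byte via CMD6 (mmc_switch), DDR variants first.
 * for_each_supported_width() below relies on this widest-first ordering so
 * that an 8-bit bus is attempted before a 4-bit one for a given mode.
 */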
1963
Peng Fan46801252018-08-10 14:07:54 +08001964#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1965static int mmc_select_hs400(struct mmc *mmc)
1966{
1967 int err;
1968
1969 /* Set timing to HS200 for tuning */
Marek Vasut111572f2019-01-03 21:19:24 +01001970 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
Peng Fan46801252018-08-10 14:07:54 +08001971 if (err)
1972 return err;
1973
1974 /* configure the bus mode (host) */
1975 mmc_select_mode(mmc, MMC_HS_200);
1976 mmc_set_clock(mmc, mmc->tran_speed, false);
1977
1978 /* execute tuning if needed */
Yangbo Lu3ed53ac2020-09-01 16:58:03 +08001979 mmc->hs400_tuning = 1;
Peng Fan46801252018-08-10 14:07:54 +08001980 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
Yangbo Lu3ed53ac2020-09-01 16:58:03 +08001981 mmc->hs400_tuning = 0;
Peng Fan46801252018-08-10 14:07:54 +08001982 if (err) {
1983 debug("tuning failed\n");
1984 return err;
1985 }
1986
1987 /* Set back to HS */
BOUGH CHEN8702bbc2019-03-26 06:24:17 +00001988 mmc_set_card_speed(mmc, MMC_HS, true);
Peng Fan46801252018-08-10 14:07:54 +08001989
Yangbo Lu5347aea2020-09-01 16:58:04 +08001990 err = mmc_hs400_prepare_ddr(mmc);
1991 if (err)
1992 return err;
1993
Peng Fan46801252018-08-10 14:07:54 +08001994 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1995 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1996 if (err)
1997 return err;
1998
Marek Vasut111572f2019-01-03 21:19:24 +01001999 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
Peng Fan46801252018-08-10 14:07:54 +08002000 if (err)
2001 return err;
2002
2003 mmc_select_mode(mmc, MMC_HS_400);
2004 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2005 if (err)
2006 return err;
2007
2008 return 0;
2009}
2010#else
2011static int mmc_select_hs400(struct mmc *mmc)
2012{
2013 return -ENOTSUPP;
2014}
2015#endif
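
/*
 * Summary of the HS400 bring-up implemented above: tune the interface in
 * HS200, drop the card back to HS, switch it to an 8-bit DDR bus, then
 * switch the timing to HS400 and raise the clock. mmc_hs400_prepare_ddr()
 * gives the host controller a hook between the HS downgrade and the DDR
 * bus width switch.
 */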
2016
Peng Faneede83b2019-07-10 14:43:07 +08002017#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2018#if !CONFIG_IS_ENABLED(DM_MMC)
2019static int mmc_set_enhanced_strobe(struct mmc *mmc)
2020{
2021 return -ENOTSUPP;
2022}
2023#endif
2024static int mmc_select_hs400es(struct mmc *mmc)
2025{
2026 int err;
2027
2028 err = mmc_set_card_speed(mmc, MMC_HS, true);
2029 if (err)
2030 return err;
2031
2032 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2033 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2034 EXT_CSD_BUS_WIDTH_STROBE);
2035 if (err) {
 2036		printf("switching to 8-bit bus width for HS400ES failed\n");
2037 return err;
2038 }
2039 /* TODO: driver strength */
2040 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2041 if (err)
2042 return err;
2043
2044 mmc_select_mode(mmc, MMC_HS_400_ES);
2045 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2046 if (err)
2047 return err;
2048
2049 return mmc_set_enhanced_strobe(mmc);
2050}
2051#else
2052static int mmc_select_hs400es(struct mmc *mmc)
2053{
2054 return -ENOTSUPP;
2055}
2056#endif
2057
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002058#define for_each_supported_width(caps, ddr, ecbv) \
2059 for (ecbv = ext_csd_bus_width;\
2060 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2061 ecbv++) \
2062 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2063
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002064static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02002065{
Jaehoon Chung6b3431c2020-12-04 06:36:00 +09002066 int err = 0;
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002067 const struct mode_width_tuning *mwt;
2068 const struct ext_csd_bus_width *ecbw;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002069
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01002070#ifdef DEBUG
2071 mmc_dump_capabilities("mmc", card_caps);
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01002072 mmc_dump_capabilities("host", mmc->host_caps);
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01002073#endif
2074
Anup Pateld9c92c72019-07-08 04:10:43 +00002075 if (mmc_host_is_spi(mmc)) {
2076 mmc_set_bus_width(mmc, 1);
2077 mmc_select_mode(mmc, MMC_LEGACY);
2078 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2079 return 0;
2080 }
2081
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002082 /* Restrict card's capabilities by what the host can do */
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01002083 card_caps &= mmc->host_caps;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002084
2085 /* Only version 4 of MMC supports wider bus widths */
2086 if (mmc->version < MMC_VERSION_4)
2087 return 0;
2088
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002089 if (!mmc->ext_csd) {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002090		pr_debug("No ext_csd found!\n"); /* this should never happen */
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002091 return -ENOTSUPP;
2092 }
2093
Marek Vasut111572f2019-01-03 21:19:24 +01002094#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2095 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2096 /*
2097 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2098 * before doing anything else, since a transition from either of
 2099	 * the HS200/HS400 modes directly to legacy mode is not supported.
2100 */
2101 if (mmc->selected_mode == MMC_HS_200 ||
2102 mmc->selected_mode == MMC_HS_400)
2103 mmc_set_card_speed(mmc, MMC_HS, true);
2104 else
2105#endif
2106 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002107
2108 for_each_mmc_mode_by_pref(card_caps, mwt) {
2109 for_each_supported_width(card_caps & mwt->widths,
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002110 mmc_is_mode_ddr(mwt->mode), ecbw) {
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02002111 enum mmc_voltage old_voltage;
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002112 pr_debug("trying mode %s width %d (at %d MHz)\n",
2113 mmc_mode_name(mwt->mode),
2114 bus_width(ecbw->cap),
2115 mmc_mode2freq(mmc, mwt->mode) / 1000000);
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02002116 old_voltage = mmc->signal_voltage;
2117 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2118 MMC_ALL_SIGNAL_VOLTAGE);
2119 if (err)
2120 continue;
2121
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002122 /* configure the bus width (card + host) */
2123 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2124 EXT_CSD_BUS_WIDTH,
2125 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2126 if (err)
2127 goto error;
2128 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002129
Peng Fan46801252018-08-10 14:07:54 +08002130 if (mwt->mode == MMC_HS_400) {
2131 err = mmc_select_hs400(mmc);
2132 if (err) {
2133 printf("Select HS400 failed %d\n", err);
2134 goto error;
2135 }
Peng Faneede83b2019-07-10 14:43:07 +08002136 } else if (mwt->mode == MMC_HS_400_ES) {
2137 err = mmc_select_hs400es(mmc);
2138 if (err) {
2139 printf("Select HS400ES failed %d\n",
2140 err);
2141 goto error;
2142 }
Peng Fan46801252018-08-10 14:07:54 +08002143 } else {
2144 /* configure the bus speed (card) */
Marek Vasut111572f2019-01-03 21:19:24 +01002145 err = mmc_set_card_speed(mmc, mwt->mode, false);
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002146 if (err)
2147 goto error;
Peng Fan46801252018-08-10 14:07:54 +08002148
2149 /*
2150 * configure the bus width AND the ddr mode
2151 * (card). The host side will be taken care
2152 * of in the next step
2153 */
2154 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2155 err = mmc_switch(mmc,
2156 EXT_CSD_CMD_SET_NORMAL,
2157 EXT_CSD_BUS_WIDTH,
2158 ecbw->ext_csd_bits);
2159 if (err)
2160 goto error;
2161 }
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002162
Peng Fan46801252018-08-10 14:07:54 +08002163 /* configure the bus mode (host) */
2164 mmc_select_mode(mmc, mwt->mode);
2165 mmc_set_clock(mmc, mmc->tran_speed,
2166 MMC_CLK_ENABLE);
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01002167#ifdef MMC_SUPPORTS_TUNING
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002168
Peng Fan46801252018-08-10 14:07:54 +08002169 /* execute tuning if needed */
2170 if (mwt->tuning) {
2171 err = mmc_execute_tuning(mmc,
2172 mwt->tuning);
2173 if (err) {
Jaehoon Chungad9f7ce2020-11-17 07:04:59 +09002174 pr_debug("tuning failed : %d\n", err);
Peng Fan46801252018-08-10 14:07:54 +08002175 goto error;
2176 }
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02002177 }
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01002178#endif
Peng Fan46801252018-08-10 14:07:54 +08002179 }
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02002180
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002181 /* do a transfer to check the configuration */
2182 err = mmc_read_and_compare_ext_csd(mmc);
2183 if (!err)
2184 return 0;
2185error:
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02002186 mmc_set_signal_voltage(mmc, old_voltage);
Naoki Hayama3110dcb2020-10-12 18:35:22 +09002187 /* if an error occurred, revert to a safer bus mode */
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002188 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2189 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2190 mmc_select_mode(mmc, MMC_LEGACY);
2191 mmc_set_bus_width(mmc, 1);
2192 }
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002193 }
2194
Jaehoon Chungad9f7ce2020-11-17 07:04:59 +09002195 pr_err("unable to select a mode : %d\n", err);
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002196
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002197 return -ENOTSUPP;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002198}
Marek Vasuta318a7a2018-04-15 00:37:11 +02002199#endif
2200
2201#if CONFIG_IS_ENABLED(MMC_TINY)
2202DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2203#endif
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002204
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002205static int mmc_startup_v4(struct mmc *mmc)
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002206{
2207 int err, i;
2208 u64 capacity;
2209 bool has_parts = false;
2210 bool part_completed;
Jean-Jacques Hiblotfa6c5772018-01-04 15:23:31 +01002211 static const u32 mmc_versions[] = {
2212 MMC_VERSION_4,
2213 MMC_VERSION_4_1,
2214 MMC_VERSION_4_2,
2215 MMC_VERSION_4_3,
Jean-Jacques Hiblotc64862b2018-02-09 12:09:28 +01002216 MMC_VERSION_4_4,
Jean-Jacques Hiblotfa6c5772018-01-04 15:23:31 +01002217 MMC_VERSION_4_41,
2218 MMC_VERSION_4_5,
2219 MMC_VERSION_5_0,
2220 MMC_VERSION_5_1
2221 };
2222
Marek Vasuta318a7a2018-04-15 00:37:11 +02002223#if CONFIG_IS_ENABLED(MMC_TINY)
2224 u8 *ext_csd = ext_csd_bkup;
2225
2226 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2227 return 0;
2228
2229 if (!mmc->ext_csd)
2230 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2231
2232 err = mmc_send_ext_csd(mmc, ext_csd);
2233 if (err)
2234 goto error;
2235
2236 /* store the ext csd for future reference */
2237 if (!mmc->ext_csd)
2238 mmc->ext_csd = ext_csd;
2239#else
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002240 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002241
2242 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2243 return 0;
2244
2245 /* check ext_csd version and capacity */
2246 err = mmc_send_ext_csd(mmc, ext_csd);
2247 if (err)
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002248 goto error;
2249
2250 /* store the ext csd for future reference */
2251 if (!mmc->ext_csd)
2252 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2253 if (!mmc->ext_csd)
2254 return -ENOMEM;
2255 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
Marek Vasuta318a7a2018-04-15 00:37:11 +02002256#endif
Alexander Kochetkovf1133c92018-02-20 14:35:55 +03002257 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
Jean-Jacques Hiblotfa6c5772018-01-04 15:23:31 +01002258 return -EINVAL;
2259
2260 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2261
2262 if (mmc->version >= MMC_VERSION_4_2) {
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002263 /*
2264 * According to the JEDEC Standard, the value of
2265 * ext_csd's capacity is valid if the value is more
2266 * than 2GB
2267 */
2268 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2269 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2270 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2271 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2272 capacity *= MMC_MAX_BLOCK_LEN;
2273 if ((capacity >> 20) > 2 * 1024)
2274 mmc->capacity_user = capacity;
2275 }
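
	/*
	 * Hedged example (SEC_CNT value invented): a SEC_CNT of 0x00e90000
	 * gives 15269888 sectors * 512 B, roughly 7.3 GiB; that is above
	 * the 2 GiB threshold, so capacity_user is taken from EXT_CSD
	 * instead of the CSD fields parsed in mmc_startup().
	 */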
2276
Jean-Jacques Hiblot201559c2019-07-02 10:53:54 +02002277 if (mmc->version >= MMC_VERSION_4_5)
2278 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2279
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002280 /* The partition data may be non-zero but it is only
2281 * effective if PARTITION_SETTING_COMPLETED is set in
2282 * EXT_CSD, so ignore any data if this bit is not set,
2283 * except for enabling the high-capacity group size
2284 * definition (see below).
2285 */
2286 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2287 EXT_CSD_PARTITION_SETTING_COMPLETED);
2288
Jean-Jacques Hiblot7f5b1692019-07-02 10:53:55 +02002289 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2290 /* Some eMMC set the value too low so set a minimum */
2291 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2292 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2293
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002294 /* store the partition info of emmc */
2295 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2296 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2297 ext_csd[EXT_CSD_BOOT_MULT])
2298 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2299 if (part_completed &&
2300 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2301 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2302
2303 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2304
2305 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2306
2307 for (i = 0; i < 4; i++) {
2308 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2309 uint mult = (ext_csd[idx + 2] << 16) +
2310 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2311 if (mult)
2312 has_parts = true;
2313 if (!part_completed)
2314 continue;
2315 mmc->capacity_gp[i] = mult;
2316 mmc->capacity_gp[i] *=
2317 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2318 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2319 mmc->capacity_gp[i] <<= 19;
2320 }
2321
Jean-Jacques Hiblotc94c5472018-01-04 15:23:35 +01002322#ifndef CONFIG_SPL_BUILD
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002323 if (part_completed) {
2324 mmc->enh_user_size =
2325 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2326 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2327 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2328 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2329 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2330 mmc->enh_user_size <<= 19;
2331 mmc->enh_user_start =
2332 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2333 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2334 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2335 ext_csd[EXT_CSD_ENH_START_ADDR];
2336 if (mmc->high_capacity)
2337 mmc->enh_user_start <<= 9;
2338 }
Jean-Jacques Hiblotc94c5472018-01-04 15:23:35 +01002339#endif
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002340
2341 /*
2342 * Host needs to enable ERASE_GRP_DEF bit if device is
2343 * partitioned. This bit will be lost every time after a reset
2344 * or power off. This will affect erase size.
2345 */
2346 if (part_completed)
2347 has_parts = true;
2348 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2349 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2350 has_parts = true;
2351 if (has_parts) {
2352 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2353 EXT_CSD_ERASE_GROUP_DEF, 1);
2354
2355 if (err)
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002356 goto error;
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002357
2358 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2359 }
2360
2361 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002362#if CONFIG_IS_ENABLED(MMC_WRITE)
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002363 /* Read out group size from ext_csd */
2364 mmc->erase_grp_size =
2365 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002366#endif
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002367 /*
2368 * if high capacity and partition setting completed
2369 * SEC_COUNT is valid even if it is smaller than 2 GiB
2370 * JEDEC Standard JESD84-B45, 6.2.4
2371 */
2372 if (mmc->high_capacity && part_completed) {
2373 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2374 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2375 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2376 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2377 capacity *= MMC_MAX_BLOCK_LEN;
2378 mmc->capacity_user = capacity;
2379 }
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002380 }
2381#if CONFIG_IS_ENABLED(MMC_WRITE)
2382 else {
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002383 /* Calculate the group size from the csd value. */
2384 int erase_gsz, erase_gmul;
2385
2386 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2387 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2388 mmc->erase_grp_size = (erase_gsz + 1)
2389 * (erase_gmul + 1);
2390 }
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002391#endif
Jean-Jacques Hiblotba54ab82018-01-04 15:23:36 +01002392#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002393 mmc->hc_wp_grp_size = 1024
2394 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2395 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
Jean-Jacques Hiblotba54ab82018-01-04 15:23:36 +01002396#endif
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002397
2398 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2399
2400 return 0;
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002401error:
2402 if (mmc->ext_csd) {
Marek Vasuta318a7a2018-04-15 00:37:11 +02002403#if !CONFIG_IS_ENABLED(MMC_TINY)
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002404 free(mmc->ext_csd);
Marek Vasuta318a7a2018-04-15 00:37:11 +02002405#endif
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002406 mmc->ext_csd = NULL;
2407 }
2408 return err;
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002409}
2410
Kim Phillips87ea3892012-10-29 13:34:43 +00002411static int mmc_startup(struct mmc *mmc)
Andy Flemingad347bb2008-10-30 16:41:01 -05002412{
Stephen Warrene315ae82013-06-11 15:14:01 -06002413 int err, i;
Andy Flemingad347bb2008-10-30 16:41:01 -05002414 uint mult, freq;
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002415 u64 cmult, csize;
Andy Flemingad347bb2008-10-30 16:41:01 -05002416 struct mmc_cmd cmd;
Simon Glasse5db1152016-05-01 13:52:35 -06002417 struct blk_desc *bdesc;
Andy Flemingad347bb2008-10-30 16:41:01 -05002418
Thomas Chou1254c3d2010-12-24 13:12:21 +00002419#ifdef CONFIG_MMC_SPI_CRC_ON
2420 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2421 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2422 cmd.resp_type = MMC_RSP_R1;
2423 cmd.cmdarg = 1;
Thomas Chou1254c3d2010-12-24 13:12:21 +00002424 err = mmc_send_cmd(mmc, &cmd, NULL);
Thomas Chou1254c3d2010-12-24 13:12:21 +00002425 if (err)
2426 return err;
2427 }
2428#endif
2429
Andy Flemingad347bb2008-10-30 16:41:01 -05002430 /* Put the Card in Identify Mode */
Thomas Chou1254c3d2010-12-24 13:12:21 +00002431 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2432 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
Andy Flemingad347bb2008-10-30 16:41:01 -05002433 cmd.resp_type = MMC_RSP_R2;
2434 cmd.cmdarg = 0;
Andy Flemingad347bb2008-10-30 16:41:01 -05002435
Sean Anderson86325092020-10-17 08:36:27 -04002436 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
Andy Flemingad347bb2008-10-30 16:41:01 -05002437 if (err)
2438 return err;
2439
2440 memcpy(mmc->cid, cmd.response, 16);
2441
2442 /*
2443 * For MMC cards, set the Relative Address.
 2444	 * For SD cards, get the Relative Address.
2445 * This also puts the cards into Standby State
2446 */
Thomas Chou1254c3d2010-12-24 13:12:21 +00002447 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2448 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2449 cmd.cmdarg = mmc->rca << 16;
2450 cmd.resp_type = MMC_RSP_R6;
Andy Flemingad347bb2008-10-30 16:41:01 -05002451
Thomas Chou1254c3d2010-12-24 13:12:21 +00002452 err = mmc_send_cmd(mmc, &cmd, NULL);
Andy Flemingad347bb2008-10-30 16:41:01 -05002453
Thomas Chou1254c3d2010-12-24 13:12:21 +00002454 if (err)
2455 return err;
Andy Flemingad347bb2008-10-30 16:41:01 -05002456
Thomas Chou1254c3d2010-12-24 13:12:21 +00002457 if (IS_SD(mmc))
2458 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2459 }
Andy Flemingad347bb2008-10-30 16:41:01 -05002460
2461 /* Get the Card-Specific Data */
2462 cmd.cmdidx = MMC_CMD_SEND_CSD;
2463 cmd.resp_type = MMC_RSP_R2;
2464 cmd.cmdarg = mmc->rca << 16;
Andy Flemingad347bb2008-10-30 16:41:01 -05002465
2466 err = mmc_send_cmd(mmc, &cmd, NULL);
2467
2468 if (err)
2469 return err;
2470
Rabin Vincentb6eed942009-04-05 13:30:56 +05302471 mmc->csd[0] = cmd.response[0];
2472 mmc->csd[1] = cmd.response[1];
2473 mmc->csd[2] = cmd.response[2];
2474 mmc->csd[3] = cmd.response[3];
Andy Flemingad347bb2008-10-30 16:41:01 -05002475
2476 if (mmc->version == MMC_VERSION_UNKNOWN) {
Rabin Vincentbdf7a682009-04-05 13:30:55 +05302477 int version = (cmd.response[0] >> 26) & 0xf;
Andy Flemingad347bb2008-10-30 16:41:01 -05002478
2479 switch (version) {
Bin Meng4a4ef872016-03-17 21:53:13 -07002480 case 0:
2481 mmc->version = MMC_VERSION_1_2;
2482 break;
2483 case 1:
2484 mmc->version = MMC_VERSION_1_4;
2485 break;
2486 case 2:
2487 mmc->version = MMC_VERSION_2_2;
2488 break;
2489 case 3:
2490 mmc->version = MMC_VERSION_3;
2491 break;
2492 case 4:
2493 mmc->version = MMC_VERSION_4;
2494 break;
2495 default:
2496 mmc->version = MMC_VERSION_1_2;
2497 break;
Andy Flemingad347bb2008-10-30 16:41:01 -05002498 }
2499 }
2500
2501 /* divide frequency by 10, since the mults are 10x bigger */
Rabin Vincentbdf7a682009-04-05 13:30:55 +05302502 freq = fbase[(cmd.response[0] & 0x7)];
2503 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
Andy Flemingad347bb2008-10-30 16:41:01 -05002504
Jean-Jacques Hiblota94fb412017-09-21 16:29:53 +02002505 mmc->legacy_speed = freq * mult;
Jean-Jacques Hiblota94fb412017-09-21 16:29:53 +02002506 mmc_select_mode(mmc, MMC_LEGACY);
Andy Flemingad347bb2008-10-30 16:41:01 -05002507
Markus Niebel03951412013-12-16 13:40:46 +01002508 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
Rabin Vincentb6eed942009-04-05 13:30:56 +05302509 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002510#if CONFIG_IS_ENABLED(MMC_WRITE)
Andy Flemingad347bb2008-10-30 16:41:01 -05002511
2512 if (IS_SD(mmc))
2513 mmc->write_bl_len = mmc->read_bl_len;
2514 else
Rabin Vincentb6eed942009-04-05 13:30:56 +05302515 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002516#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002517
2518 if (mmc->high_capacity) {
2519 csize = (mmc->csd[1] & 0x3f) << 16
2520 | (mmc->csd[2] & 0xffff0000) >> 16;
2521 cmult = 8;
2522 } else {
2523 csize = (mmc->csd[1] & 0x3ff) << 2
2524 | (mmc->csd[2] & 0xc0000000) >> 30;
2525 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2526 }
2527
Stephen Warrene315ae82013-06-11 15:14:01 -06002528 mmc->capacity_user = (csize + 1) << (cmult + 2);
2529 mmc->capacity_user *= mmc->read_bl_len;
2530 mmc->capacity_boot = 0;
2531 mmc->capacity_rpmb = 0;
2532 for (i = 0; i < 4; i++)
2533 mmc->capacity_gp[i] = 0;
Andy Flemingad347bb2008-10-30 16:41:01 -05002534
Simon Glassa09c2b72013-04-03 08:54:30 +00002535 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2536 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
Andy Flemingad347bb2008-10-30 16:41:01 -05002537
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002538#if CONFIG_IS_ENABLED(MMC_WRITE)
Simon Glassa09c2b72013-04-03 08:54:30 +00002539 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2540 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002541#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002542
Markus Niebel03951412013-12-16 13:40:46 +01002543 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2544 cmd.cmdidx = MMC_CMD_SET_DSR;
2545 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2546 cmd.resp_type = MMC_RSP_NONE;
2547 if (mmc_send_cmd(mmc, &cmd, NULL))
Jean-Jacques Hiblot678b6082017-11-30 17:44:00 +01002548 pr_warn("MMC: SET_DSR failed\n");
Markus Niebel03951412013-12-16 13:40:46 +01002549 }
2550
Andy Flemingad347bb2008-10-30 16:41:01 -05002551 /* Select the card, and put it into Transfer Mode */
Thomas Chou1254c3d2010-12-24 13:12:21 +00002552 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2553 cmd.cmdidx = MMC_CMD_SELECT_CARD;
Ajay Bhargav4a32fba2011-10-05 03:13:23 +00002554 cmd.resp_type = MMC_RSP_R1;
Thomas Chou1254c3d2010-12-24 13:12:21 +00002555 cmd.cmdarg = mmc->rca << 16;
Thomas Chou1254c3d2010-12-24 13:12:21 +00002556 err = mmc_send_cmd(mmc, &cmd, NULL);
Andy Flemingad347bb2008-10-30 16:41:01 -05002557
Thomas Chou1254c3d2010-12-24 13:12:21 +00002558 if (err)
2559 return err;
2560 }
Andy Flemingad347bb2008-10-30 16:41:01 -05002561
Lei Wenea526762011-06-22 17:03:31 +00002562 /*
 2563	 * For SD, the erase group is always one sector
2564 */
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002565#if CONFIG_IS_ENABLED(MMC_WRITE)
Lei Wenea526762011-06-22 17:03:31 +00002566 mmc->erase_grp_size = 1;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002567#endif
Lei Wen31b99802011-05-02 16:26:26 +00002568 mmc->part_config = MMCPART_NOAVAILABLE;
Diego Santa Cruza7a75992014-12-23 10:50:27 +01002569
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002570 err = mmc_startup_v4(mmc);
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002571 if (err)
2572 return err;
Sukumar Ghorai232293c2010-09-20 18:29:29 +05302573
Simon Glasse5db1152016-05-01 13:52:35 -06002574 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
Stephen Warrene315ae82013-06-11 15:14:01 -06002575 if (err)
2576 return err;
2577
Marek Vasuta318a7a2018-04-15 00:37:11 +02002578#if CONFIG_IS_ENABLED(MMC_TINY)
2579 mmc_set_clock(mmc, mmc->legacy_speed, false);
Faiz Abbas01db77e2020-02-26 13:44:32 +05302580 mmc_select_mode(mmc, MMC_LEGACY);
Marek Vasuta318a7a2018-04-15 00:37:11 +02002581 mmc_set_bus_width(mmc, 1);
2582#else
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002583 if (IS_SD(mmc)) {
2584 err = sd_get_capabilities(mmc);
2585 if (err)
2586 return err;
2587 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2588 } else {
2589 err = mmc_get_capabilities(mmc);
2590 if (err)
2591 return err;
Masahiro Yamadabf1f25c2020-01-23 14:31:12 +09002592 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002593 }
Marek Vasuta318a7a2018-04-15 00:37:11 +02002594#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002595 if (err)
2596 return err;
2597
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002598 mmc->best_mode = mmc->selected_mode;
Jaehoon Chunge1d4c7b2012-03-26 21:16:03 +00002599
Andrew Gabbasov532663b2014-12-01 06:59:11 -06002600 /* Fix the block length for DDR mode */
2601 if (mmc->ddr_mode) {
2602 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002603#if CONFIG_IS_ENABLED(MMC_WRITE)
Andrew Gabbasov532663b2014-12-01 06:59:11 -06002604 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002605#endif
Andrew Gabbasov532663b2014-12-01 06:59:11 -06002606 }
2607
Andy Flemingad347bb2008-10-30 16:41:01 -05002608 /* fill in device description */
Simon Glasse5db1152016-05-01 13:52:35 -06002609 bdesc = mmc_get_blk_desc(mmc);
2610 bdesc->lun = 0;
2611 bdesc->hwpart = 0;
2612 bdesc->type = 0;
2613 bdesc->blksz = mmc->read_bl_len;
2614 bdesc->log2blksz = LOG2(bdesc->blksz);
2615 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
Sjoerd Simonsd67754f2015-12-04 23:27:40 +01002616#if !defined(CONFIG_SPL_BUILD) || \
2617 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
Simon Glass7611ac62019-09-25 08:56:27 -06002618 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
Simon Glasse5db1152016-05-01 13:52:35 -06002619 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
Taylor Hutt7367ec22012-10-20 17:15:59 +00002620 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2621 (mmc->cid[3] >> 16) & 0xffff);
Simon Glasse5db1152016-05-01 13:52:35 -06002622 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
Taylor Hutt7367ec22012-10-20 17:15:59 +00002623 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2624 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2625 (mmc->cid[2] >> 24) & 0xff);
Simon Glasse5db1152016-05-01 13:52:35 -06002626 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
Taylor Hutt7367ec22012-10-20 17:15:59 +00002627 (mmc->cid[2] >> 16) & 0xf);
Paul Burton6a7c5ba2013-09-04 16:12:25 +01002628#else
Simon Glasse5db1152016-05-01 13:52:35 -06002629 bdesc->vendor[0] = 0;
2630 bdesc->product[0] = 0;
2631 bdesc->revision[0] = 0;
Paul Burton6a7c5ba2013-09-04 16:12:25 +01002632#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002633
Andre Przywara17798042018-12-17 10:05:45 +00002634#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2635 part_init(bdesc);
2636#endif
2637
Andy Flemingad347bb2008-10-30 16:41:01 -05002638 return 0;
2639}
2640
Kim Phillips87ea3892012-10-29 13:34:43 +00002641static int mmc_send_if_cond(struct mmc *mmc)
Andy Flemingad347bb2008-10-30 16:41:01 -05002642{
2643 struct mmc_cmd cmd;
2644 int err;
2645
2646 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2647 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
Pantelis Antoniou2c850462014-03-11 19:34:20 +02002648 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
Andy Flemingad347bb2008-10-30 16:41:01 -05002649 cmd.resp_type = MMC_RSP_R7;
Andy Flemingad347bb2008-10-30 16:41:01 -05002650
2651 err = mmc_send_cmd(mmc, &cmd, NULL);
2652
2653 if (err)
2654 return err;
2655
Rabin Vincentb6eed942009-04-05 13:30:56 +05302656 if ((cmd.response[0] & 0xff) != 0xaa)
Jaehoon Chung7825d202016-07-19 16:33:36 +09002657 return -EOPNOTSUPP;
Andy Flemingad347bb2008-10-30 16:41:01 -05002658 else
2659 mmc->version = SD_VERSION_2;
2660
2661 return 0;
2662}
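
/*
 * Example: with a host advertising any voltage in the 2.7-3.6V window, the
 * CMD8 argument built above is 0x1aa (VHS = 0x1, check pattern 0xaa). An
 * SD 2.0+ card echoes the pattern back, which is what the response check
 * verifies; older cards simply fail the command.
 */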

#if !CONFIG_IS_ENABLED(DM_MMC)
/* board-specific MMC power initializations. */
__weak void board_mmc_power_init(void)
{
}
#endif

static int mmc_power_init(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC)
#if CONFIG_IS_ENABLED(DM_REGULATOR)
	int ret;

	ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
					  &mmc->vmmc_supply);
	if (ret)
		pr_debug("%s: No vmmc supply\n", mmc->dev->name);

	ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
					  &mmc->vqmmc_supply);
	if (ret)
		pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
#endif
#else /* !CONFIG_DM_MMC */
	/*
	 * Driver model should use a regulator, as above, rather than calling
	 * out to board code.
	 */
	board_mmc_power_init();
#endif
	return 0;
}

/*
 * put the host in the initial state:
 * - turn on Vdd (card power supply)
 * - configure the bus width and clock to minimal values
 */
static void mmc_set_initial_state(struct mmc *mmc)
{
	int err;

	/* First try to set 3.3V. If it fails set to 1.8V */
	err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
	if (err != 0)
		err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
	if (err != 0)
		pr_warn("mmc: failed to set signal voltage\n");

	mmc_select_mode(mmc, MMC_LEGACY);
	mmc_set_bus_width(mmc, 1);
	mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
}

static int mmc_power_on(struct mmc *mmc)
{
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, true);

		if (ret && ret != -EACCES) {
			printf("Error enabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}

static int mmc_power_off(struct mmc *mmc)
{
	mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
	if (mmc->vmmc_supply) {
		int ret = regulator_set_enable(mmc->vmmc_supply, false);

		if (ret && ret != -EACCES) {
			pr_debug("Error disabling VMMC supply : %d\n", ret);
			return ret;
		}
	}
#endif
	return 0;
}

static int mmc_power_cycle(struct mmc *mmc)
{
	int ret;

	ret = mmc_power_off(mmc);
	if (ret)
		return ret;

	ret = mmc_host_power_cycle(mmc);
	if (ret)
		return ret;

	/*
	 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
	 * to be on the safer side.
	 */
	udelay(2000);
	return mmc_power_on(mmc);
}

int mmc_get_op_cond(struct mmc *mmc)
{
	bool uhs_en = supports_uhs(mmc->cfg->host_caps);
	int err;

	if (mmc->has_init)
		return 0;

	err = mmc_power_init(mmc);
	if (err)
		return err;

#ifdef CONFIG_MMC_QUIRKS
	mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
		      MMC_QUIRK_RETRY_SEND_CID |
		      MMC_QUIRK_RETRY_APP_CMD;
#endif

	err = mmc_power_cycle(mmc);
	if (err) {
		/*
		 * if power cycling is not supported, we should not try
		 * to use the UHS modes, because we wouldn't be able to
		 * recover from an error during the UHS initialization.
		 */
		pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
		uhs_en = false;
		mmc->host_caps &= ~UHS_CAPS;
		err = mmc_power_on(mmc);
	}
	if (err)
		return err;

#if CONFIG_IS_ENABLED(DM_MMC)
	/*
	 * Re-initialization is needed to clear old configuration for
	 * mmc rescan.
	 */
	err = mmc_reinit(mmc);
#else
	/* made sure it's not NULL earlier */
	err = mmc->cfg->ops->init(mmc);
#endif
	if (err)
		return err;
	mmc->ddr_mode = 0;

retry:
	mmc_set_initial_state(mmc);

	/* Reset the Card */
	err = mmc_go_idle(mmc);

	if (err)
		return err;

	/* The internal partition is reset to the user partition (0) by every CMD0 */
	mmc_get_blk_desc(mmc)->hwpart = 0;

	/* Test for SD version 2 */
	err = mmc_send_if_cond(mmc);

	/* Now try to get the SD card's operating condition */
	err = sd_send_op_cond(mmc, uhs_en);
	if (err && uhs_en) {
		uhs_en = false;
		mmc_power_cycle(mmc);
		goto retry;
	}

	/* If the command timed out, we check for an MMC card */
	if (err == -ETIMEDOUT) {
		err = mmc_send_op_cond(mmc);

		if (err) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Card did not respond to voltage select! : %d\n", err);
#endif
			return -EOPNOTSUPP;
		}
	}

	return err;
}
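
/*
 * Sketch of a caller (illustrative, not part of this file; device number 0
 * is an assumption): code that only needs to know whether a device answers
 * its operating-condition query (ACMD41 for SD, CMD1 for eMMC), for example
 * to pick a boot device early, can use mmc_get_op_cond() directly and leave
 * the full bus/mode negotiation to a later mmc_init():
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (mmc && !mmc_get_op_cond(mmc))
 *		printf("%s answered its op-cond query\n", mmc->cfg->name);
 */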

int mmc_start_init(struct mmc *mmc)
{
	bool no_card;
	int err = 0;

	/*
	 * All hosts are capable of 1-bit bus width and of using the legacy
	 * timings.
	 */
	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
			 MMC_MODE_1BIT;
#if CONFIG_IS_ENABLED(DM_MMC)
	mmc_deferred_probe(mmc);
#endif
#if !defined(CONFIG_MMC_BROKEN_CD)
	no_card = mmc_getcd(mmc) == 0;
#else
	no_card = 0;
#endif
#if !CONFIG_IS_ENABLED(DM_MMC)
	/* we pretend there's no card when init is NULL */
	no_card = no_card || (mmc->cfg->ops->init == NULL);
#endif
	if (no_card) {
		mmc->has_init = 0;
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: no card present\n");
#endif
		return -ENOMEDIUM;
	}

	err = mmc_get_op_cond(mmc);

	if (!err)
		mmc->init_in_progress = 1;

	return err;
}

static int mmc_complete_init(struct mmc *mmc)
{
	int err = 0;

	mmc->init_in_progress = 0;
	if (mmc->op_cond_pending)
		err = mmc_complete_op_cond(mmc);

	if (!err)
		err = mmc_startup(mmc);
	if (err)
		mmc->has_init = 0;
	else
		mmc->has_init = 1;
	return err;
}

int mmc_init(struct mmc *mmc)
{
	int err = 0;
	__maybe_unused ulong start;
#if CONFIG_IS_ENABLED(DM_MMC)
	struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);

	upriv->mmc = mmc;
#endif
	if (mmc->has_init)
		return 0;

	start = get_timer(0);

	if (!mmc->init_in_progress)
		err = mmc_start_init(mmc);

	if (!err)
		err = mmc_complete_init(mmc);
	if (err)
		pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));

	return err;
}
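
/*
 * Usage sketch (illustrative only; device number 0 and the single-block read
 * are assumptions, not something this file mandates):
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *	ALLOC_CACHE_ALIGN_BUFFER(u8, buf, MMC_MAX_BLOCK_LEN);
 *
 *	if (mmc && !mmc_init(mmc))
 *		blk_dread(mmc_get_blk_desc(mmc), 0, 1, buf);
 *
 * mmc_init() is idempotent: it returns 0 immediately once has_init is set,
 * and a prior mmc_start_init() (see mmc_set_preinit() below) lets the
 * blocking part of the initialization overlap with other boot work.
 */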

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
int mmc_deinit(struct mmc *mmc)
{
	u32 caps_filtered;

	if (!mmc->has_init)
		return 0;

	if (IS_SD(mmc)) {
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
			  MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
			  MMC_CAP(UHS_SDR104));

		return sd_select_mode_and_width(mmc, caps_filtered);
	} else {
		caps_filtered = mmc->card_caps &
			~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));

		return mmc_select_mode_and_width(mmc, caps_filtered);
	}
}
#endif

int mmc_set_dsr(struct mmc *mmc, u16 val)
{
	mmc->dsr = val;
	return 0;
}

/* CPU-specific MMC initializations */
__weak int cpu_mmc_init(struct bd_info *bis)
{
	return -1;
}

/* board-specific MMC initializations. */
__weak int board_mmc_init(struct bd_info *bis)
{
	return -1;
}

void mmc_set_preinit(struct mmc *mmc, int preinit)
{
	mmc->preinit = preinit;
}
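
/*
 * Illustrative use (an assumption, not a call made from this file): a driver
 * or board that knows a device will be needed early can request pre-init so
 * that mmc_initialize() -> mmc_do_preinit() kicks off mmc_start_init() for
 * it before the first access:
 *
 *	mmc_set_preinit(mmc, 1);
 */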

#if CONFIG_IS_ENABLED(DM_MMC)
static int mmc_probe(struct bd_info *bis)
{
	int ret, i;
	struct uclass *uc;
	struct udevice *dev;

	ret = uclass_get(UCLASS_MMC, &uc);
	if (ret)
		return ret;

	/*
	 * Try to add them in sequence order. Really with driver model we
	 * should allow holes, but the current MMC list does not allow that.
	 * So if we request 0, 1, 3 we will get 0, 1, 2.
	 */
	for (i = 0; ; i++) {
		ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
		if (ret == -ENODEV)
			break;
	}
	uclass_foreach_dev(dev, uc) {
		ret = device_probe(dev);
		if (ret)
			pr_err("%s - probe failed: %d\n", dev->name, ret);
	}

	return 0;
}
#else
static int mmc_probe(struct bd_info *bis)
{
	if (board_mmc_init(bis) < 0)
		cpu_mmc_init(bis);

	return 0;
}
#endif

int mmc_initialize(struct bd_info *bis)
{
	static int initialized = 0;
	int ret;

	if (initialized) /* Avoid initializing mmc multiple times */
		return 0;
	initialized = 1;

#if !CONFIG_IS_ENABLED(BLK)
#if !CONFIG_IS_ENABLED(MMC_TINY)
	mmc_list_init();
#endif
#endif
	ret = mmc_probe(bis);
	if (ret)
		return ret;

#ifndef CONFIG_SPL_BUILD
	print_mmc_devices(',');
#endif

	mmc_do_preinit();
	return 0;
}

#if CONFIG_IS_ENABLED(DM_MMC)
int mmc_init_device(int num)
{
	struct udevice *dev;
	struct mmc *m;
	int ret;

	if (uclass_get_device_by_seq(UCLASS_MMC, num, &dev)) {
		ret = uclass_get_device(UCLASS_MMC, num, &dev);
		if (ret)
			return ret;
	}

	m = mmc_get_mmc_dev(dev);
	if (!m)
		return 0;
	if (m->preinit)
		mmc_start_init(m);

	return 0;
}
#endif
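
/*
 * Hypothetical caller (an assumption for illustration, not a call made in
 * this file): code that wants a specific controller probed and, if it is
 * marked for pre-init, already initializing before the first block access
 * could do:
 *
 *	mmc_init_device(mmc_get_env_dev());
 */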

#ifdef CONFIG_CMD_BKOPS_ENABLE
int mmc_set_bkops_enable(struct mmc *mmc)
{
	int err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err) {
		puts("Could not get ext_csd register values\n");
		return err;
	}

	if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
		puts("Background operations not supported on device\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
		puts("Background operations already enabled\n");
		return 0;
	}

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
	if (err) {
		puts("Failed to enable manual background operations\n");
		return err;
	}

	puts("Enabled manual background operations\n");

	return 0;
}
#endif
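
/*
 * Sketch of how this is normally driven (assumption: the shell command built
 * with CONFIG_CMD_BKOPS_ENABLE is available) -- from the U-Boot prompt one
 * would run something like:
 *
 *	mmc dev 0
 *	mmc bkops-enable
 *
 * which ends up calling mmc_set_bkops_enable() on the selected device. Note
 * that EXT_CSD_BKOPS_EN is a non-volatile EXT_CSD field, so the setting
 * persists across power cycles.
 */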

__weak int mmc_get_env_dev(void)
{
#ifdef CONFIG_SYS_MMC_ENV_DEV
	return CONFIG_SYS_MMC_ENV_DEV;
#else
	return 0;
#endif
}
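
/*
 * Boards that need a runtime decision can override the weak default above.
 * A minimal sketch (the boot-source helper is an assumption made up for the
 * example, not an API of this file):
 *
 *	int mmc_get_env_dev(void)
 *	{
 *		return board_booted_from_emmc() ? 1 : 0;
 *	}
 */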