// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, Freescale Semiconductor, Inc
 * Copyright 2020 NXP
 * Andy Fleming
 *
 * Based vaguely on the Linux code
 */

#include <config.h>
#include <common.h>
#include <blk.h>
#include <command.h>
#include <dm.h>
#include <log.h>
#include <dm/device-internal.h>
#include <errno.h>
#include <mmc.h>
#include <part.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <power/regulator.h>
#include <malloc.h>
#include <memalign.h>
#include <linux/list.h>
#include <div64.h>
#include "mmc_private.h"

#define DEFAULT_CMD6_TIMEOUT_MS  500

static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage);

#if !CONFIG_IS_ENABLED(DM_MMC)

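/*
 * Without DM_MMC there is no way to monitor the DAT0 line, so report
 * -ENOSYS and let callers fall back to CMD13 polling or fixed delays.
 */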
static int mmc_wait_dat0(struct mmc *mmc, int state, int timeout_us)
{
	return -ENOSYS;
}

__weak int board_mmc_getwp(struct mmc *mmc)
{
	return -1;
}

int mmc_getwp(struct mmc *mmc)
{
	int wp;

	wp = board_mmc_getwp(mmc);

	if (wp < 0) {
		if (mmc->cfg->ops->getwp)
			wp = mmc->cfg->ops->getwp(mmc);
		else
			wp = 0;
	}

	return wp;
}

__weak int board_mmc_getcd(struct mmc *mmc)
{
	return -1;
}
#endif

#ifdef CONFIG_MMC_TRACE
void mmmc_trace_before_send(struct mmc *mmc, struct mmc_cmd *cmd)
{
	printf("CMD_SEND:%d\n", cmd->cmdidx);
	printf("\t\tARG\t\t\t 0x%08x\n", cmd->cmdarg);
}

void mmmc_trace_after_send(struct mmc *mmc, struct mmc_cmd *cmd, int ret)
{
	int i;
	u8 *ptr;

	if (ret) {
		printf("\t\tRET\t\t\t %d\n", ret);
	} else {
		switch (cmd->resp_type) {
		case MMC_RSP_NONE:
			printf("\t\tMMC_RSP_NONE\n");
			break;
		case MMC_RSP_R1:
			printf("\t\tMMC_RSP_R1,5,6,7 \t 0x%08x \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R1b:
			printf("\t\tMMC_RSP_R1b\t\t 0x%08x \n",
			       cmd->response[0]);
			break;
		case MMC_RSP_R2:
			printf("\t\tMMC_RSP_R2\t\t 0x%08x \n",
			       cmd->response[0]);
			printf("\t\t \t\t 0x%08x \n",
			       cmd->response[1]);
			printf("\t\t \t\t 0x%08x \n",
			       cmd->response[2]);
			printf("\t\t \t\t 0x%08x \n",
			       cmd->response[3]);
			printf("\n");
			printf("\t\t\t\t\tDUMPING DATA\n");
			for (i = 0; i < 4; i++) {
				int j;
				printf("\t\t\t\t\t%03d - ", i*4);
				ptr = (u8 *)&cmd->response[i];
				ptr += 3;
				for (j = 0; j < 4; j++)
					printf("%02x ", *ptr--);
				printf("\n");
			}
			break;
		case MMC_RSP_R3:
			printf("\t\tMMC_RSP_R3,4\t\t 0x%08x \n",
			       cmd->response[0]);
			break;
		default:
			printf("\t\tERROR MMC rsp not supported\n");
			break;
		}
	}
}

void mmc_trace_state(struct mmc *mmc, struct mmc_cmd *cmd)
{
	int status;

	status = (cmd->response[0] & MMC_STATUS_CURR_STATE) >> 9;
	printf("CURR STATE:%d\n", status);
}
#endif

#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
const char *mmc_mode_name(enum bus_mode mode)
{
	static const char *const names[] = {
	      [MMC_LEGACY]	= "MMC legacy",
	      [MMC_HS]		= "MMC High Speed (26MHz)",
	      [SD_HS]		= "SD High Speed (50MHz)",
	      [UHS_SDR12]	= "UHS SDR12 (25MHz)",
	      [UHS_SDR25]	= "UHS SDR25 (50MHz)",
	      [UHS_SDR50]	= "UHS SDR50 (100MHz)",
	      [UHS_SDR104]	= "UHS SDR104 (208MHz)",
	      [UHS_DDR50]	= "UHS DDR50 (50MHz)",
	      [MMC_HS_52]	= "MMC High Speed (52MHz)",
	      [MMC_DDR_52]	= "MMC DDR52 (52MHz)",
	      [MMC_HS_200]	= "HS200 (200MHz)",
	      [MMC_HS_400]	= "HS400 (200MHz)",
	      [MMC_HS_400_ES]	= "HS400ES (200MHz)",
	};

	if (mode >= MMC_MODES_END)
		return "Unknown mode";
	else
		return names[mode];
}
#endif

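/*
 * Nominal clock frequency for each bus mode; MMC_LEGACY uses the speed
 * reported by the card instead of a fixed value.
 */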
static uint mmc_mode2freq(struct mmc *mmc, enum bus_mode mode)
{
	static const int freqs[] = {
	      [MMC_LEGACY]	= 25000000,
	      [MMC_HS]		= 26000000,
	      [SD_HS]		= 50000000,
	      [MMC_HS_52]	= 52000000,
	      [MMC_DDR_52]	= 52000000,
	      [UHS_SDR12]	= 25000000,
	      [UHS_SDR25]	= 50000000,
	      [UHS_SDR50]	= 100000000,
	      [UHS_DDR50]	= 50000000,
	      [UHS_SDR104]	= 208000000,
	      [MMC_HS_200]	= 200000000,
	      [MMC_HS_400]	= 200000000,
	      [MMC_HS_400_ES]	= 200000000,
	};

	if (mode == MMC_LEGACY)
		return mmc->legacy_speed;
	else if (mode >= MMC_MODES_END)
		return 0;
	else
		return freqs[mode];
}

static int mmc_select_mode(struct mmc *mmc, enum bus_mode mode)
{
	mmc->selected_mode = mode;
	mmc->tran_speed = mmc_mode2freq(mmc, mode);
	mmc->ddr_mode = mmc_is_mode_ddr(mode);
	pr_debug("selecting mode %s (freq : %d MHz)\n", mmc_mode_name(mode),
		 mmc->tran_speed / 1000000);
	return 0;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_send_cmd(struct mmc *mmc, struct mmc_cmd *cmd, struct mmc_data *data)
{
	int ret;

	mmmc_trace_before_send(mmc, cmd);
	ret = mmc->cfg->ops->send_cmd(mmc, cmd, data);
	mmmc_trace_after_send(mmc, cmd, ret);

	return ret;
}
#endif

/**
 * mmc_send_cmd_retry() - send a command to the mmc device, retrying on error
 *
 * @mmc: device to receive the command
 * @cmd: command to send
 * @data: additional data to send/receive
 * @retries: how many times to retry; mmc_send_cmd is always called at least
 *	     once
 * @return 0 if ok, -ve on error
 */
static int mmc_send_cmd_retry(struct mmc *mmc, struct mmc_cmd *cmd,
			      struct mmc_data *data, uint retries)
{
	int ret;

	do {
		ret = mmc_send_cmd(mmc, cmd, data);
	} while (ret && retries--);

	return ret;
}

/**
 * mmc_send_cmd_quirks() - send a command to the mmc device, retrying if a
 *			   specific quirk is enabled
 *
 * @mmc: device to receive the command
 * @cmd: command to send
 * @data: additional data to send/receive
 * @quirk: retry only if this quirk is enabled
 * @retries: how many times to retry; mmc_send_cmd is always called at least
 *	     once
 * @return 0 if ok, -ve on error
 */
static int mmc_send_cmd_quirks(struct mmc *mmc, struct mmc_cmd *cmd,
			       struct mmc_data *data, u32 quirk, uint retries)
{
	if (CONFIG_IS_ENABLED(MMC_QUIRKS) && mmc->quirks & quirk)
		return mmc_send_cmd_retry(mmc, cmd, data, retries);
	else
		return mmc_send_cmd(mmc, cmd, data);
}

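/* Read the card status register with CMD13, retrying a few times on error */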
int mmc_send_status(struct mmc *mmc, unsigned int *status)
{
	struct mmc_cmd cmd;
	int ret;

	cmd.cmdidx = MMC_CMD_SEND_STATUS;
	cmd.resp_type = MMC_RSP_R1;
	if (!mmc_host_is_spi(mmc))
		cmd.cmdarg = mmc->rca << 16;

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 4);
	mmc_trace_state(mmc, &cmd);
	if (!ret)
		*status = cmd.response[0];

	return ret;
}

int mmc_poll_for_busy(struct mmc *mmc, int timeout_ms)
{
	unsigned int status;
	int err;

	err = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (err != -ENOSYS)
		return err;

	while (1) {
		err = mmc_send_status(mmc, &status);
		if (err)
			return err;

		if ((status & MMC_STATUS_RDY_FOR_DATA) &&
		    (status & MMC_STATUS_CURR_STATE) !=
		     MMC_STATE_PRG)
			break;

		if (status & MMC_STATUS_MASK) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("Status Error: 0x%08x\n", status);
#endif
			return -ECOMM;
		}

		if (timeout_ms-- <= 0)
			break;

		udelay(1000);
	}

	if (timeout_ms <= 0) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("Timeout waiting card ready\n");
#endif
		return -ETIMEDOUT;
	}

	return 0;
}

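/*
 * Set the block length with CMD16; skipped in DDR mode, where the block
 * length cannot be changed.
 */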
int mmc_set_blocklen(struct mmc *mmc, int len)
{
	struct mmc_cmd cmd;

	if (mmc->ddr_mode)
		return 0;

	cmd.cmdidx = MMC_CMD_SET_BLOCKLEN;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = len;

	return mmc_send_cmd_quirks(mmc, &cmd, NULL,
				   MMC_QUIRK_RETRY_SET_BLOCKLEN, 4);
}

#ifdef MMC_SUPPORTS_TUNING
static const u8 tuning_blk_pattern_4bit[] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};

static const u8 tuning_blk_pattern_8bit[] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};

int mmc_send_tuning(struct mmc *mmc, u32 opcode, int *cmd_error)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	const u8 *tuning_block_pattern;
	int size, err;

	if (mmc->bus_width == 8) {
		tuning_block_pattern = tuning_blk_pattern_8bit;
		size = sizeof(tuning_blk_pattern_8bit);
	} else if (mmc->bus_width == 4) {
		tuning_block_pattern = tuning_blk_pattern_4bit;
		size = sizeof(tuning_blk_pattern_4bit);
	} else {
		return -EINVAL;
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, data_buf, size);

	cmd.cmdidx = opcode;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	data.dest = (void *)data_buf;
	data.blocks = 1;
	data.blocksize = size;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);
	if (err)
		return err;

	if (memcmp(data_buf, tuning_block_pattern, size))
		return -EIO;

	return 0;
}
#endif

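/*
 * Read 'blkcnt' blocks starting at 'start' with CMD17/CMD18; multiple-block
 * reads are terminated with CMD12. Returns the number of blocks read, or 0
 * on error.
 */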
static int mmc_read_blocks(struct mmc *mmc, void *dst, lbaint_t start,
			   lbaint_t blkcnt)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	if (blkcnt > 1)
		cmd.cmdidx = MMC_CMD_READ_MULTIPLE_BLOCK;
	else
		cmd.cmdidx = MMC_CMD_READ_SINGLE_BLOCK;

	if (mmc->high_capacity)
		cmd.cmdarg = start;
	else
		cmd.cmdarg = start * mmc->read_bl_len;

	cmd.resp_type = MMC_RSP_R1;

	data.dest = dst;
	data.blocks = blkcnt;
	data.blocksize = mmc->read_bl_len;
	data.flags = MMC_DATA_READ;

	if (mmc_send_cmd(mmc, &cmd, &data))
		return 0;

	if (blkcnt > 1) {
		cmd.cmdidx = MMC_CMD_STOP_TRANSMISSION;
		cmd.cmdarg = 0;
		cmd.resp_type = MMC_RSP_R1b;
		if (mmc_send_cmd(mmc, &cmd, NULL)) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
			pr_err("mmc fail to send stop cmd\n");
#endif
			return 0;
		}
	}

	return blkcnt;
}

#if !CONFIG_IS_ENABLED(DM_MMC)
static int mmc_get_b_max(struct mmc *mmc, void *dst, lbaint_t blkcnt)
{
	if (mmc->cfg->ops->get_b_max)
		return mmc->cfg->ops->get_b_max(mmc, dst, blkcnt);
	else
		return mmc->cfg->b_max;
}
#endif

#if CONFIG_IS_ENABLED(BLK)
ulong mmc_bread(struct udevice *dev, lbaint_t start, lbaint_t blkcnt, void *dst)
#else
ulong mmc_bread(struct blk_desc *block_dev, lbaint_t start, lbaint_t blkcnt,
		void *dst)
#endif
{
#if CONFIG_IS_ENABLED(BLK)
	struct blk_desc *block_dev = dev_get_uclass_plat(dev);
#endif
	int dev_num = block_dev->devnum;
	int err;
	lbaint_t cur, blocks_todo = blkcnt;
	uint b_max;

	if (blkcnt == 0)
		return 0;

	struct mmc *mmc = find_mmc_device(dev_num);
	if (!mmc)
		return 0;

	if (CONFIG_IS_ENABLED(MMC_TINY))
		err = mmc_switch_part(mmc, block_dev->hwpart);
	else
		err = blk_dselect_hwpart(block_dev, block_dev->hwpart);

	if (err < 0)
		return 0;

	if ((start + blkcnt) > block_dev->lba) {
#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
		pr_err("MMC: block number 0x" LBAF " exceeds max(0x" LBAF ")\n",
		       start + blkcnt, block_dev->lba);
#endif
		return 0;
	}

	if (mmc_set_blocklen(mmc, mmc->read_bl_len)) {
		pr_debug("%s: Failed to set blocklen\n", __func__);
		return 0;
	}

	b_max = mmc_get_b_max(mmc, dst, blkcnt);

	do {
		cur = (blocks_todo > b_max) ? b_max : blocks_todo;
		if (mmc_read_blocks(mmc, dst, start, cur) != cur) {
			pr_debug("%s: Failed to read blocks\n", __func__);
			return 0;
		}
		blocks_todo -= cur;
		start += cur;
		dst += cur * mmc->read_bl_len;
	} while (blocks_todo > 0);

	return blkcnt;
}

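/* CMD0: reset the card to idle state, with small settling delays around it */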
static int mmc_go_idle(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int err;

	udelay(1000);

	cmd.cmdidx = MMC_CMD_GO_IDLE_STATE;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_NONE;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	udelay(2000);

	return 0;
}

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
static int mmc_switch_voltage(struct mmc *mmc, int signal_voltage)
{
	struct mmc_cmd cmd;
	int err = 0;

	/*
	 * Send CMD11 only if the request is to switch the card to
	 * 1.8V signalling.
	 */
	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
		return mmc_set_signal_voltage(mmc, signal_voltage);

	cmd.cmdidx = SD_CMD_SWITCH_UHS18V;
	cmd.cmdarg = 0;
	cmd.resp_type = MMC_RSP_R1;

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;

	if (!mmc_host_is_spi(mmc) && (cmd.response[0] & MMC_STATUS_ERROR))
		return -EIO;

	/*
	 * The card should drive cmd and dat[0:3] low immediately
	 * after the response of cmd11, but wait 100 us to be sure
	 */
	err = mmc_wait_dat0(mmc, 0, 100);
	if (err == -ENOSYS)
		udelay(100);
	else if (err)
		return -ETIMEDOUT;

	/*
	 * During a signal voltage level switch, the clock must be gated
	 * for 5 ms according to the SD spec
	 */
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_DISABLE);

	err = mmc_set_signal_voltage(mmc, signal_voltage);
	if (err)
		return err;

	/* Keep clock gated for at least 10 ms, though spec only says 5 ms */
	mdelay(10);
	mmc_set_clock(mmc, mmc->clock, MMC_CLK_ENABLE);

	/*
	 * Failure to switch is indicated by the card holding
	 * dat[0:3] low. Wait for at least 1 ms according to spec
	 */
	err = mmc_wait_dat0(mmc, 1, 1000);
	if (err == -ENOSYS)
		udelay(1000);
	else if (err)
		return -ETIMEDOUT;

	return 0;
}
#endif

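/*
 * ACMD41: negotiate the operating voltage and poll until the card leaves the
 * busy state. Also requests high-capacity support (HCS) and, optionally, the
 * 1.8V signalling switch (S18R) for UHS.
 */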
static int sd_send_op_cond(struct mmc *mmc, bool uhs_en)
{
	int timeout = 1000;
	int err;
	struct mmc_cmd cmd;

	while (1) {
		cmd.cmdidx = MMC_CMD_APP_CMD;
		cmd.resp_type = MMC_RSP_R1;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		cmd.cmdidx = SD_CMD_APP_SEND_OP_COND;
		cmd.resp_type = MMC_RSP_R3;

		/*
		 * Most cards do not answer if some reserved bits
		 * in the OCR are set. However, some controllers
		 * can set bit 7 (reserved for low voltages), but
		 * how to manage low-voltage SD cards is not yet
		 * specified.
		 */
		cmd.cmdarg = mmc_host_is_spi(mmc) ? 0 :
			(mmc->cfg->voltages & 0xff8000);

		if (mmc->version == SD_VERSION_2)
			cmd.cmdarg |= OCR_HCS;

		if (uhs_en)
			cmd.cmdarg |= OCR_S18R;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		if (cmd.response[0] & OCR_BUSY)
			break;

		if (timeout-- <= 0)
			return -EOPNOTSUPP;

		udelay(1000);
	}

	if (mmc->version != SD_VERSION_2)
		mmc->version = SD_VERSION_1_0;

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;
	}

	mmc->ocr = cmd.response[0];

#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	if (uhs_en && !(mmc_host_is_spi(mmc)) && (cmd.response[0] & 0x41000000)
	    == 0x41000000) {
		err = mmc_switch_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
		if (err)
			return err;
	}
#endif

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 0;

	return 0;
}

static int mmc_send_op_cond_iter(struct mmc *mmc, int use_arg)
{
	struct mmc_cmd cmd;
	int err;

	cmd.cmdidx = MMC_CMD_SEND_OP_COND;
	cmd.resp_type = MMC_RSP_R3;
	cmd.cmdarg = 0;
	if (use_arg && !mmc_host_is_spi(mmc))
		cmd.cmdarg = OCR_HCS |
			(mmc->cfg->voltages &
			(mmc->ocr & OCR_VOLTAGE_MASK)) |
			(mmc->ocr & OCR_ACCESS_MODE);

	err = mmc_send_cmd(mmc, &cmd, NULL);
	if (err)
		return err;
	mmc->ocr = cmd.response[0];
	return 0;
}

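/*
 * CMD1 loop: query the operating conditions until the card reports it is no
 * longer busy, then defer final completion to mmc_complete_op_cond().
 */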
static int mmc_send_op_cond(struct mmc *mmc)
{
	int err, i;
	int timeout = 1000;
	uint start;

	/* Some cards seem to need this */
	mmc_go_idle(mmc);

	start = get_timer(0);
	/* Ask the card about its capabilities */
	for (i = 0; ; i++) {
		err = mmc_send_op_cond_iter(mmc, i != 0);
		if (err)
			return err;

		/* exit if not busy (flag seems to be inverted) */
		if (mmc->ocr & OCR_BUSY)
			break;

		if (get_timer(start) > timeout)
			return -ETIMEDOUT;
		udelay(100);
	}
	mmc->op_cond_pending = 1;
	return 0;
}

static int mmc_complete_op_cond(struct mmc *mmc)
{
	struct mmc_cmd cmd;
	int timeout = 1000;
	ulong start;
	int err;

	mmc->op_cond_pending = 0;
	if (!(mmc->ocr & OCR_BUSY)) {
		/* Some cards seem to need this */
		mmc_go_idle(mmc);

		start = get_timer(0);
		while (1) {
			err = mmc_send_op_cond_iter(mmc, 1);
			if (err)
				return err;
			if (mmc->ocr & OCR_BUSY)
				break;
			if (get_timer(start) > timeout)
				return -EOPNOTSUPP;
			udelay(100);
		}
	}

	if (mmc_host_is_spi(mmc)) { /* read OCR for spi */
		cmd.cmdidx = MMC_CMD_SPI_READ_OCR;
		cmd.resp_type = MMC_RSP_R3;
		cmd.cmdarg = 0;

		err = mmc_send_cmd(mmc, &cmd, NULL);

		if (err)
			return err;

		mmc->ocr = cmd.response[0];
	}

	mmc->version = MMC_VERSION_UNKNOWN;

	mmc->high_capacity = ((mmc->ocr & OCR_HCS) == OCR_HCS);
	mmc->rca = 1;

	return 0;
}

int mmc_send_ext_csd(struct mmc *mmc, u8 *ext_csd)
{
	struct mmc_cmd cmd;
	struct mmc_data data;
	int err;

	/* Get the extended CSD register */
	cmd.cmdidx = MMC_CMD_SEND_EXT_CSD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)ext_csd;
	data.blocks = 1;
	data.blocksize = MMC_MAX_BLOCK_LEN;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd(mmc, &cmd, &data);

	return err;
}

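/*
 * CMD6 SWITCH: write a single EXT_CSD byte, then wait for the card to leave
 * the busy state, preferring DAT0 polling and falling back to CMD13 status
 * polling or a plain delay.
 */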
static int __mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value,
			bool send_status)
{
	unsigned int status, start;
	struct mmc_cmd cmd;
	int timeout_ms = DEFAULT_CMD6_TIMEOUT_MS;
	bool is_part_switch = (set == EXT_CSD_CMD_SET_NORMAL) &&
			      (index == EXT_CSD_PART_CONF);
	int ret;

	if (mmc->gen_cmd6_time)
		timeout_ms = mmc->gen_cmd6_time * 10;

	if (is_part_switch && mmc->part_switch_time)
		timeout_ms = mmc->part_switch_time * 10;

	cmd.cmdidx = MMC_CMD_SWITCH;
	cmd.resp_type = MMC_RSP_R1b;
	cmd.cmdarg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
				 (index << 16) |
				 (value << 8);

	ret = mmc_send_cmd_retry(mmc, &cmd, NULL, 3);
	if (ret)
		return ret;

	start = get_timer(0);

	/* poll dat0 for rdy/busy status */
	ret = mmc_wait_dat0(mmc, 1, timeout_ms * 1000);
	if (ret && ret != -ENOSYS)
		return ret;

	/*
	 * If we are not allowed to poll with CMD13, or cannot poll DAT0
	 * via mmc_wait_dat0, rely on waiting the stated timeout to be
	 * sufficient.
	 */
	if (ret == -ENOSYS && !send_status) {
		mdelay(timeout_ms);
		return 0;
	}

	/* Finally wait until the card is ready or indicates a failure
	 * to switch. It doesn't hurt to use CMD13 here even if send_status
	 * is false, because by now (after 'timeout_ms' ms) the bus should be
	 * reliable.
	 */
	do {
		ret = mmc_send_status(mmc, &status);

		if (!ret && (status & MMC_STATUS_SWITCH_ERROR)) {
			pr_debug("switch failed %d/%d/0x%x !\n", set, index,
				 value);
			return -EIO;
		}
		if (!ret && (status & MMC_STATUS_RDY_FOR_DATA))
			return 0;
		udelay(100);
	} while (get_timer(start) < timeout_ms);

	return -ETIMEDOUT;
}

int mmc_switch(struct mmc *mmc, u8 set, u8 index, u8 value)
{
	return __mmc_switch(mmc, set, index, value, true);
}

int mmc_boot_wp(struct mmc *mmc)
{
	return mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, 1);
}

#if !CONFIG_IS_ENABLED(MMC_TINY)
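/*
 * Program the HS_TIMING byte in EXT_CSD for the requested bus mode and, for
 * HS/HS52, read back EXT_CSD to confirm the switch took effect.
 */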
static int mmc_set_card_speed(struct mmc *mmc, enum bus_mode mode,
			      bool hsdowngrade)
{
	int err;
	int speed_bits;

	ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);

	switch (mode) {
	case MMC_HS:
	case MMC_HS_52:
	case MMC_DDR_52:
		speed_bits = EXT_CSD_TIMING_HS;
		break;
#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	case MMC_HS_200:
		speed_bits = EXT_CSD_TIMING_HS200;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	case MMC_HS_400:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	case MMC_HS_400_ES:
		speed_bits = EXT_CSD_TIMING_HS400;
		break;
#endif
	case MMC_LEGACY:
		speed_bits = EXT_CSD_TIMING_LEGACY;
		break;
	default:
		return -EINVAL;
	}

	err = __mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
			   speed_bits, !hsdowngrade);
	if (err)
		return err;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
    CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
	/*
	 * In case the eMMC is in HS200/HS400 mode and we are downgrading
	 * to HS mode, the card clock is still running much faster than
	 * the supported HS mode clock, so we cannot reliably read out the
	 * Extended CSD. Reconfigure the controller to run at HS mode.
	 */
	if (hsdowngrade) {
		mmc_select_mode(mmc, MMC_HS);
		mmc_set_clock(mmc, mmc_mode2freq(mmc, MMC_HS), false);
	}
#endif

	if ((mode == MMC_HS) || (mode == MMC_HS_52)) {
		/* Now check to see that it worked */
		err = mmc_send_ext_csd(mmc, test_csd);
		if (err)
			return err;

		/* No high-speed support */
		if (!test_csd[EXT_CSD_HS_TIMING])
			return -ENOTSUPP;
	}

	return 0;
}

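/* Derive the card's supported bus modes from the EXT_CSD card type field */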
static int mmc_get_capabilities(struct mmc *mmc)
{
	u8 *ext_csd = mmc->ext_csd;
	char cardtype;

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Only version 4 supports high-speed */
	if (mmc->version < MMC_VERSION_4)
		return 0;

	if (!ext_csd) {
		pr_err("No ext_csd found!\n"); /* this should never happen */
		return -ENOTSUPP;
	}

	mmc->card_caps |= MMC_MODE_4BIT | MMC_MODE_8BIT;

	cardtype = ext_csd[EXT_CSD_CARD_TYPE];
	mmc->cardtype = cardtype;

#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
			EXT_CSD_CARD_TYPE_HS200_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS200;
	}
#endif
#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT) || \
	CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (cardtype & (EXT_CSD_CARD_TYPE_HS400_1_2V |
			EXT_CSD_CARD_TYPE_HS400_1_8V)) {
		mmc->card_caps |= MMC_MODE_HS400;
	}
#endif
	if (cardtype & EXT_CSD_CARD_TYPE_52) {
		if (cardtype & EXT_CSD_CARD_TYPE_DDR_52)
			mmc->card_caps |= MMC_MODE_DDR_52MHz;
		mmc->card_caps |= MMC_MODE_HS_52MHz;
	}
	if (cardtype & EXT_CSD_CARD_TYPE_26)
		mmc->card_caps |= MMC_MODE_HS;

#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
	if (ext_csd[EXT_CSD_STROBE_SUPPORT] &&
	    (mmc->card_caps & MMC_MODE_HS400)) {
		mmc->card_caps |= MMC_MODE_HS400_ES;
	}
#endif

	return 0;
}
#endif

static int mmc_set_capacity(struct mmc *mmc, int part_num)
{
	switch (part_num) {
	case 0:
		mmc->capacity = mmc->capacity_user;
		break;
	case 1:
	case 2:
		mmc->capacity = mmc->capacity_boot;
		break;
	case 3:
		mmc->capacity = mmc->capacity_rpmb;
		break;
	case 4:
	case 5:
	case 6:
	case 7:
		mmc->capacity = mmc->capacity_gp[part_num - 4];
		break;
	default:
		return -1;
	}

	mmc_get_blk_desc(mmc)->lba = lldiv(mmc->capacity, mmc->read_bl_len);

	return 0;
}

int mmc_switch_part(struct mmc *mmc, unsigned int part_num)
{
	int ret;
	int retry = 3;

	do {
		ret = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_PART_CONF,
				 (mmc->part_config & ~PART_ACCESS_MASK)
				 | (part_num & PART_ACCESS_MASK));
	} while (ret && retry--);

	/*
	 * Set the capacity if the switch succeeded or was intended
	 * to return to representing the raw device.
	 */
	if ((ret == 0) || ((ret == -ENODEV) && (part_num == 0))) {
		ret = mmc_set_capacity(mmc, part_num);
		mmc_get_blk_desc(mmc)->hwpart = part_num;
	}

	return ret;
}

#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
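/*
 * Configure eMMC hardware partitioning (enhanced user area, GP partitions,
 * write reliability). Depending on 'mode' this only checks the requested
 * layout, writes it, or also marks partitioning as completed, which becomes
 * effective after a power cycle.
 */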
int mmc_hwpart_config(struct mmc *mmc,
		      const struct mmc_hwpart_conf *conf,
		      enum mmc_hwpart_conf_mode mode)
{
	u8 part_attrs = 0;
	u32 enh_size_mult;
	u32 enh_start_addr;
	u32 gp_size_mult[4];
	u32 max_enh_size_mult;
	u32 tot_enh_size_mult = 0;
	u8 wr_rel_set;
	int i, pidx, err;
	ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);

	if (mode < MMC_HWPART_CONF_CHECK || mode > MMC_HWPART_CONF_COMPLETE)
		return -EINVAL;

	if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4_41)) {
		pr_err("eMMC >= 4.4 required for enhanced user data area\n");
		return -EMEDIUMTYPE;
	}

	if (!(mmc->part_support & PART_SUPPORT)) {
		pr_err("Card does not support partitioning\n");
		return -EMEDIUMTYPE;
	}

	if (!mmc->hc_wp_grp_size) {
		pr_err("Card does not define HC WP group size\n");
		return -EMEDIUMTYPE;
	}

	/* check partition alignment and total enhanced size */
	if (conf->user.enh_size) {
		if (conf->user.enh_size % mmc->hc_wp_grp_size ||
		    conf->user.enh_start % mmc->hc_wp_grp_size) {
			pr_err("User data enhanced area not HC WP group "
			       "size aligned\n");
			return -EINVAL;
		}
		part_attrs |= EXT_CSD_ENH_USR;
		enh_size_mult = conf->user.enh_size / mmc->hc_wp_grp_size;
		if (mmc->high_capacity) {
			enh_start_addr = conf->user.enh_start;
		} else {
			enh_start_addr = (conf->user.enh_start << 9);
		}
	} else {
		enh_size_mult = 0;
		enh_start_addr = 0;
	}
	tot_enh_size_mult += enh_size_mult;

	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].size % mmc->hc_wp_grp_size) {
			pr_err("GP%i partition not HC WP group size "
			       "aligned\n", pidx+1);
			return -EINVAL;
		}
		gp_size_mult[pidx] = conf->gp_part[pidx].size / mmc->hc_wp_grp_size;
		if (conf->gp_part[pidx].size && conf->gp_part[pidx].enhanced) {
			part_attrs |= EXT_CSD_ENH_GP(pidx);
			tot_enh_size_mult += gp_size_mult[pidx];
		}
	}

	if (part_attrs && ! (mmc->part_support & ENHNCD_SUPPORT)) {
		pr_err("Card does not support enhanced attribute\n");
		return -EMEDIUMTYPE;
	}

	err = mmc_send_ext_csd(mmc, ext_csd);
	if (err)
		return err;

	max_enh_size_mult =
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+2] << 16) +
		(ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT+1] << 8) +
		ext_csd[EXT_CSD_MAX_ENH_SIZE_MULT];
	if (tot_enh_size_mult > max_enh_size_mult) {
		pr_err("Total enhanced size exceeds maximum (%u > %u)\n",
		       tot_enh_size_mult, max_enh_size_mult);
		return -EMEDIUMTYPE;
	}

	/* The default value of EXT_CSD_WR_REL_SET is device
	 * dependent, the values can only be changed if the
	 * EXT_CSD_HS_CTRL_REL bit is set. The values can be
	 * changed only once and before partitioning is completed. */
	wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
	if (conf->user.wr_rel_change) {
		if (conf->user.wr_rel_set)
			wr_rel_set |= EXT_CSD_WR_DATA_REL_USR;
		else
			wr_rel_set &= ~EXT_CSD_WR_DATA_REL_USR;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		if (conf->gp_part[pidx].wr_rel_change) {
			if (conf->gp_part[pidx].wr_rel_set)
				wr_rel_set |= EXT_CSD_WR_DATA_REL_GP(pidx);
			else
				wr_rel_set &= ~EXT_CSD_WR_DATA_REL_GP(pidx);
		}
	}

	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET] &&
	    !(ext_csd[EXT_CSD_WR_REL_PARAM] & EXT_CSD_HS_CTRL_REL)) {
		puts("Card does not support host controlled partition write "
		     "reliability settings\n");
		return -EMEDIUMTYPE;
	}

	if (ext_csd[EXT_CSD_PARTITION_SETTING] &
	    EXT_CSD_PARTITION_SETTING_COMPLETED) {
		pr_err("Card already partitioned\n");
		return -EPERM;
	}

	if (mode == MMC_HWPART_CONF_CHECK)
		return 0;

	/* Partitioning requires high-capacity size definitions */
	if (!(ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01)) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1);

		if (err)
			return err;

		ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;

#if CONFIG_IS_ENABLED(MMC_WRITE)
		/* update erase group size to be high-capacity */
		mmc->erase_grp_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
#endif

	}

	/* all OK, write the configuration */
	for (i = 0; i < 4; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_START_ADDR+i,
				 (enh_start_addr >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (i = 0; i < 3; i++) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ENH_SIZE_MULT+i,
				 (enh_size_mult >> (i*8)) & 0xFF);
		if (err)
			return err;
	}
	for (pidx = 0; pidx < 4; pidx++) {
		for (i = 0; i < 3; i++) {
			err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
					 EXT_CSD_GP_SIZE_MULT+pidx*3+i,
					 (gp_size_mult[pidx] >> (i*8)) & 0xFF);
			if (err)
				return err;
		}
	}
	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITIONS_ATTRIBUTE, part_attrs);
	if (err)
		return err;

	if (mode == MMC_HWPART_CONF_SET)
		return 0;

	/* The WR_REL_SET is a write-once register but shall be
	 * written before setting PART_SETTING_COMPLETED. As it is
	 * write-once we can only write it when completing the
	 * partitioning. */
	if (wr_rel_set != ext_csd[EXT_CSD_WR_REL_SET]) {
		err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_WR_REL_SET, wr_rel_set);
		if (err)
			return err;
	}

	/* Setting PART_SETTING_COMPLETED confirms the partition
	 * configuration but it only becomes effective after power
	 * cycle, so we do not adjust the partition related settings
	 * in the mmc struct. */

	err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_PARTITION_SETTING,
			 EXT_CSD_PARTITION_SETTING_COMPLETED);
	if (err)
		return err;

	return 0;
}
#endif

#if !CONFIG_IS_ENABLED(DM_MMC)
int mmc_getcd(struct mmc *mmc)
{
	int cd;

	cd = board_mmc_getcd(mmc);

	if (cd < 0) {
		if (mmc->cfg->ops->getcd)
			cd = mmc->cfg->ops->getcd(mmc);
		else
			cd = 1;
	}

	return cd;
}
#endif

#if !CONFIG_IS_ENABLED(MMC_TINY)
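/*
 * CMD6 SWITCH_FUNC: check or set one function group and read back the
 * 64-byte switch status block.
 */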
static int sd_switch(struct mmc *mmc, int mode, int group, u8 value, u8 *resp)
{
	struct mmc_cmd cmd;
	struct mmc_data data;

	/* Switch the frequency */
	cmd.cmdidx = SD_CMD_SWITCH_FUNC;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = (mode << 31) | 0xffffff;
	cmd.cmdarg &= ~(0xf << (group * 4));
	cmd.cmdarg |= value << (group * 4);

	data.dest = (char *)resp;
	data.blocksize = 64;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	return mmc_send_cmd(mmc, &cmd, &data);
}

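/*
 * Read the SCR (ACMD51) and the switch function status (CMD6) to discover
 * the SD version, bus width and supported speed modes.
 */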
static int sd_get_capabilities(struct mmc *mmc)
{
	int err;
	struct mmc_cmd cmd;
	ALLOC_CACHE_ALIGN_BUFFER(__be32, scr, 2);
	ALLOC_CACHE_ALIGN_BUFFER(__be32, switch_status, 16);
	struct mmc_data data;
	int timeout;
#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
	u32 sd3_bus_mode;
#endif

	mmc->card_caps = MMC_MODE_1BIT | MMC_CAP(MMC_LEGACY);

	if (mmc_host_is_spi(mmc))
		return 0;

	/* Read the SCR to find out if this card supports higher speeds */
	cmd.cmdidx = MMC_CMD_APP_CMD;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = mmc->rca << 16;

	err = mmc_send_cmd(mmc, &cmd, NULL);

	if (err)
		return err;

	cmd.cmdidx = SD_CMD_APP_SEND_SCR;
	cmd.resp_type = MMC_RSP_R1;
	cmd.cmdarg = 0;

	data.dest = (char *)scr;
	data.blocksize = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;

	err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);

	if (err)
		return err;

	mmc->scr[0] = __be32_to_cpu(scr[0]);
	mmc->scr[1] = __be32_to_cpu(scr[1]);

	switch ((mmc->scr[0] >> 24) & 0xf) {
	case 0:
		mmc->version = SD_VERSION_1_0;
		break;
	case 1:
		mmc->version = SD_VERSION_1_10;
		break;
	case 2:
		mmc->version = SD_VERSION_2;
		if ((mmc->scr[0] >> 15) & 0x1)
			mmc->version = SD_VERSION_3;
		break;
	default:
		mmc->version = SD_VERSION_1_0;
		break;
	}

	if (mmc->scr[0] & SD_DATA_4BIT)
		mmc->card_caps |= MMC_MODE_4BIT;

	/* Version 1.0 doesn't support switching */
	if (mmc->version == SD_VERSION_1_0)
		return 0;

	timeout = 4;
	while (timeout--) {
		err = sd_switch(mmc, SD_SWITCH_CHECK, 0, 1,
				(u8 *)switch_status);

		if (err)
			return err;

		/* The high-speed function is busy. Try again */
		if (!(__be32_to_cpu(switch_status[7]) & SD_HIGHSPEED_BUSY))
			break;
	}

Andy Flemingad347bb2008-10-30 16:41:01 -05001358 /* If high-speed isn't supported, we return */
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001359 if (__be32_to_cpu(switch_status[3]) & SD_HIGHSPEED_SUPPORTED)
1360 mmc->card_caps |= MMC_CAP(SD_HS);
Andy Flemingad347bb2008-10-30 16:41:01 -05001361
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001362#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001363 /* Versions before 3.0 don't support UHS modes */
1364 if (mmc->version < SD_VERSION_3)
1365 return 0;
1366
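 /*
  * Switch-status bits 415:400 carry the group 1 (bus speed) support
  * mask; the low five bits map to SDR12/SDR25/SDR50/SDR104/DDR50.
  */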
1367 sd3_bus_mode = __be32_to_cpu(switch_status[3]) >> 16 & 0x1f;
1368 if (sd3_bus_mode & SD_MODE_UHS_SDR104)
1369 mmc->card_caps |= MMC_CAP(UHS_SDR104);
1370 if (sd3_bus_mode & SD_MODE_UHS_SDR50)
1371 mmc->card_caps |= MMC_CAP(UHS_SDR50);
1372 if (sd3_bus_mode & SD_MODE_UHS_SDR25)
1373 mmc->card_caps |= MMC_CAP(UHS_SDR25);
1374 if (sd3_bus_mode & SD_MODE_UHS_SDR12)
1375 mmc->card_caps |= MMC_CAP(UHS_SDR12);
1376 if (sd3_bus_mode & SD_MODE_UHS_DDR50)
1377 mmc->card_caps |= MMC_CAP(UHS_DDR50);
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001378#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001379
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001380 return 0;
1381}
1382
1383static int sd_set_card_speed(struct mmc *mmc, enum bus_mode mode)
1384{
1385 int err;
1386
1387 ALLOC_CACHE_ALIGN_BUFFER(uint, switch_status, 16);
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001388 int speed;
Macpaul Lin24e92ec2011-11-28 16:31:09 +00001389
Marek Vasut4105e972018-11-18 03:25:08 +01001390 /* SD versions 1.00 and 1.01 do not support CMD6 */
1391 if (mmc->version == SD_VERSION_1_0)
1392 return 0;
1393
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001394 switch (mode) {
Faiz Abbas01db77e2020-02-26 13:44:32 +05301395 case MMC_LEGACY:
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001396 speed = UHS_SDR12_BUS_SPEED;
1397 break;
1398 case SD_HS:
Jean-Jacques Hiblot74c98b22018-01-04 15:23:30 +01001399 speed = HIGH_SPEED_BUS_SPEED;
1400 break;
1401#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1402 case UHS_SDR12:
1403 speed = UHS_SDR12_BUS_SPEED;
1404 break;
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001405 case UHS_SDR25:
1406 speed = UHS_SDR25_BUS_SPEED;
1407 break;
1408 case UHS_SDR50:
1409 speed = UHS_SDR50_BUS_SPEED;
1410 break;
1411 case UHS_DDR50:
1412 speed = UHS_DDR50_BUS_SPEED;
1413 break;
1414 case UHS_SDR104:
1415 speed = UHS_SDR104_BUS_SPEED;
1416 break;
Jean-Jacques Hiblot74c98b22018-01-04 15:23:30 +01001417#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001418 default:
1419 return -EINVAL;
1420 }
1421
1422 err = sd_switch(mmc, SD_SWITCH_SWITCH, 0, speed, (u8 *)switch_status);
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001423 if (err)
1424 return err;
1425
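 /*
  * Bits 379:376 of the switch status echo the function actually
  * selected in group 1; a mismatch means the card rejected the switch.
  */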
Jean-Jacques Hiblote7f664e2018-02-09 12:09:27 +01001426 if (((__be32_to_cpu(switch_status[4]) >> 24) & 0xF) != speed)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001427 return -ENOTSUPP;
1428
1429 return 0;
1430}
Andy Flemingad347bb2008-10-30 16:41:01 -05001431
Marek Vasut8ff55fb2018-04-15 00:36:45 +02001432static int sd_select_bus_width(struct mmc *mmc, int w)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001433{
1434 int err;
1435 struct mmc_cmd cmd;
1436
1437 if ((w != 4) && (w != 1))
1438 return -EINVAL;
1439
1440 cmd.cmdidx = MMC_CMD_APP_CMD;
1441 cmd.resp_type = MMC_RSP_R1;
1442 cmd.cmdarg = mmc->rca << 16;
1443
1444 err = mmc_send_cmd(mmc, &cmd, NULL);
Andy Flemingad347bb2008-10-30 16:41:01 -05001445 if (err)
1446 return err;
1447
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001448 cmd.cmdidx = SD_CMD_APP_SET_BUS_WIDTH;
1449 cmd.resp_type = MMC_RSP_R1;
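 /* ACMD6 argument: 0b10 selects a 4-bit bus, 0b00 selects 1-bit */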
1450 if (w == 4)
1451 cmd.cmdarg = 2;
1452 else if (w == 1)
1453 cmd.cmdarg = 0;
1454 err = mmc_send_cmd(mmc, &cmd, NULL);
1455 if (err)
1456 return err;
Andy Flemingad347bb2008-10-30 16:41:01 -05001457
1458 return 0;
1459}
Marek Vasuta318a7a2018-04-15 00:37:11 +02001460#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05001461
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001462#if CONFIG_IS_ENABLED(MMC_WRITE)
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001463static int sd_read_ssr(struct mmc *mmc)
1464{
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001465 static const unsigned int sd_au_size[] = {
1466 0, SZ_16K / 512, SZ_32K / 512,
1467 SZ_64K / 512, SZ_128K / 512, SZ_256K / 512,
1468 SZ_512K / 512, SZ_1M / 512, SZ_2M / 512,
1469 SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512,
1470 SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512,
1471 SZ_64M / 512,
1472 };
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001473 int err, i;
1474 struct mmc_cmd cmd;
1475 ALLOC_CACHE_ALIGN_BUFFER(uint, ssr, 16);
1476 struct mmc_data data;
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001477 unsigned int au, eo, et, es;
1478
1479 cmd.cmdidx = MMC_CMD_APP_CMD;
1480 cmd.resp_type = MMC_RSP_R1;
1481 cmd.cmdarg = mmc->rca << 16;
1482
Sean Anderson86325092020-10-17 08:36:27 -04001483 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_APP_CMD, 4);
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001484 if (err)
1485 return err;
1486
1487 cmd.cmdidx = SD_CMD_APP_SD_STATUS;
1488 cmd.resp_type = MMC_RSP_R1;
1489 cmd.cmdarg = 0;
1490
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001491 data.dest = (char *)ssr;
1492 data.blocksize = 64;
1493 data.blocks = 1;
1494 data.flags = MMC_DATA_READ;
1495
Sean Anderson86325092020-10-17 08:36:27 -04001496 err = mmc_send_cmd_retry(mmc, &cmd, &data, 3);
1497 if (err)
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001498 return err;
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001499
1500 for (i = 0; i < 16; i++)
1501 ssr[i] = be32_to_cpu(ssr[i]);
1502
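 /*
  * AU_SIZE sits in SD status bits 431:428; encodings above 9 (> 4 MB)
  * are only defined from the SD 3.0 spec onwards.
  */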
1503 au = (ssr[2] >> 12) & 0xF;
1504 if ((au <= 9) || (mmc->version == SD_VERSION_3)) {
1505 mmc->ssr.au = sd_au_size[au];
1506 es = (ssr[3] >> 24) & 0xFF;
1507 es |= (ssr[2] & 0xFF) << 8;
1508 et = (ssr[3] >> 18) & 0x3F;
1509 if (es && et) {
1510 eo = (ssr[3] >> 16) & 0x3;
1511 mmc->ssr.erase_timeout = (et * 1000) / es;
1512 mmc->ssr.erase_offset = eo * 1000;
1513 }
1514 } else {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001515 pr_debug("Invalid Allocation Unit Size.\n");
Peng Fanb3fcf1e2016-09-01 11:13:38 +08001516 }
1517
1518 return 0;
1519}
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001520#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05001521/* frequency bases */
1522/* divided by 10 to be nice to platforms without floating point */
Mike Frysingerb588caf2010-10-20 01:15:53 +00001523static const int fbase[] = {
Andy Flemingad347bb2008-10-30 16:41:01 -05001524 10000,
1525 100000,
1526 1000000,
1527 10000000,
1528};
1529
1530/* Multiplier values for TRAN_SPEED. Multiplied by 10 to be nice
1531 * to platforms without floating point.
1532 */
Simon Glass03317cc2016-05-14 14:02:57 -06001533static const u8 multipliers[] = {
Andy Flemingad347bb2008-10-30 16:41:01 -05001534 0, /* reserved */
1535 10,
1536 12,
1537 13,
1538 15,
1539 20,
1540 25,
1541 30,
1542 35,
1543 40,
1544 45,
1545 50,
1546 55,
1547 60,
1548 70,
1549 80,
1550};
1551
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001552static inline int bus_width(uint cap)
1553{
1554 if (cap == MMC_MODE_8BIT)
1555 return 8;
1556 if (cap == MMC_MODE_4BIT)
1557 return 4;
1558 if (cap == MMC_MODE_1BIT)
1559 return 1;
Jean-Jacques Hiblot678b6082017-11-30 17:44:00 +01001560 pr_warn("invalid bus width capability 0x%x\n", cap);
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001561 return 0;
1562}
1563
Simon Glasseba48f92017-07-29 11:35:31 -06001564#if !CONFIG_IS_ENABLED(DM_MMC)
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001565#ifdef MMC_SUPPORTS_TUNING
Kishon Vijay Abraham Iae7174f2017-09-21 16:30:05 +02001566static int mmc_execute_tuning(struct mmc *mmc, uint opcode)
1567{
1568 return -ENOTSUPP;
1569}
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001570#endif
Kishon Vijay Abraham Iae7174f2017-09-21 16:30:05 +02001571
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001572static int mmc_set_ios(struct mmc *mmc)
Andy Flemingad347bb2008-10-30 16:41:01 -05001573{
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001574 int ret = 0;
1575
Pantelis Antoniou2c850462014-03-11 19:34:20 +02001576 if (mmc->cfg->ops->set_ios)
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001577 ret = mmc->cfg->ops->set_ios(mmc);
1578
1579 return ret;
Andy Flemingad347bb2008-10-30 16:41:01 -05001580}
Yann Gautier6f558332019-09-19 17:56:12 +02001581
1582static int mmc_host_power_cycle(struct mmc *mmc)
1583{
1584 int ret = 0;
1585
1586 if (mmc->cfg->ops->host_power_cycle)
1587 ret = mmc->cfg->ops->host_power_cycle(mmc);
1588
1589 return ret;
1590}
Simon Glass394dfc02016-06-12 23:30:22 -06001591#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05001592
Kishon Vijay Abraham Id6246bf2017-09-21 16:30:03 +02001593int mmc_set_clock(struct mmc *mmc, uint clock, bool disable)
Andy Flemingad347bb2008-10-30 16:41:01 -05001594{
Jaehoon Chungab4d4052018-01-23 14:04:30 +09001595 if (!disable) {
Jaehoon Chung8a933292018-01-17 19:36:58 +09001596 if (clock > mmc->cfg->f_max)
1597 clock = mmc->cfg->f_max;
Andy Flemingad347bb2008-10-30 16:41:01 -05001598
Jaehoon Chung8a933292018-01-17 19:36:58 +09001599 if (clock < mmc->cfg->f_min)
1600 clock = mmc->cfg->f_min;
1601 }
Andy Flemingad347bb2008-10-30 16:41:01 -05001602
1603 mmc->clock = clock;
Kishon Vijay Abraham Id6246bf2017-09-21 16:30:03 +02001604 mmc->clk_disable = disable;
Andy Flemingad347bb2008-10-30 16:41:01 -05001605
Jaehoon Chungc8477d62018-01-26 19:25:30 +09001606 debug("clock is %s (%dHz)\n", disable ? "disabled" : "enabled", clock);
1607
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001608 return mmc_set_ios(mmc);
Andy Flemingad347bb2008-10-30 16:41:01 -05001609}
1610
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001611static int mmc_set_bus_width(struct mmc *mmc, uint width)
Andy Flemingad347bb2008-10-30 16:41:01 -05001612{
1613 mmc->bus_width = width;
1614
Kishon Vijay Abraham Ie178c112017-09-21 16:29:59 +02001615 return mmc_set_ios(mmc);
Andy Flemingad347bb2008-10-30 16:41:01 -05001616}
1617
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001618#if CONFIG_IS_ENABLED(MMC_VERBOSE) || defined(DEBUG)
1619/*
1620 * helper function to display the capabilities in a human
1621 * friendly manner. The capabilities include bus width and
1622 * supported modes.
1623 */
1624void mmc_dump_capabilities(const char *text, uint caps)
1625{
1626 enum bus_mode mode;
1627
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001628 pr_debug("%s: widths [", text);
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001629 if (caps & MMC_MODE_8BIT)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001630 pr_debug("8, ");
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001631 if (caps & MMC_MODE_4BIT)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001632 pr_debug("4, ");
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001633 if (caps & MMC_MODE_1BIT)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001634 pr_debug("1, ");
1635 pr_debug("\b\b] modes [");
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001636 for (mode = MMC_LEGACY; mode < MMC_MODES_END; mode++)
1637 if (MMC_CAP(mode) & caps)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001638 pr_debug("%s, ", mmc_mode_name(mode));
1639 pr_debug("\b\b]\n");
Jean-Jacques Hiblot00de5042017-09-21 16:29:54 +02001640}
1641#endif
1642
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001643struct mode_width_tuning {
1644 enum bus_mode mode;
1645 uint widths;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001646#ifdef MMC_SUPPORTS_TUNING
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02001647 uint tuning;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001648#endif
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001649};
1650
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001651#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001652int mmc_voltage_to_mv(enum mmc_voltage voltage)
1653{
1654 switch (voltage) {
1655 case MMC_SIGNAL_VOLTAGE_000: return 0;
1656 case MMC_SIGNAL_VOLTAGE_330: return 3300;
1657 case MMC_SIGNAL_VOLTAGE_180: return 1800;
1658 case MMC_SIGNAL_VOLTAGE_120: return 1200;
1659 }
1660 return -EINVAL;
1661}
1662
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001663static int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1664{
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001665 int err;
1666
1667 if (mmc->signal_voltage == signal_voltage)
1668 return 0;
1669
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001670 mmc->signal_voltage = signal_voltage;
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001671 err = mmc_set_ios(mmc);
1672 if (err)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001673 pr_debug("unable to set voltage (err %d)\n", err);
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001674
1675 return err;
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001676}
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001677#else
1678static inline int mmc_set_signal_voltage(struct mmc *mmc, uint signal_voltage)
1679{
1680 return 0;
1681}
1682#endif
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02001683
Marek Vasuta318a7a2018-04-15 00:37:11 +02001684#if !CONFIG_IS_ENABLED(MMC_TINY)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001685static const struct mode_width_tuning sd_modes_by_pref[] = {
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001686#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
1687#ifdef MMC_SUPPORTS_TUNING
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001688 {
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001689 .mode = UHS_SDR104,
1690 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1691 .tuning = MMC_CMD_SEND_TUNING_BLOCK
1692 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001693#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001694 {
1695 .mode = UHS_SDR50,
1696 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1697 },
1698 {
1699 .mode = UHS_DDR50,
1700 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1701 },
1702 {
1703 .mode = UHS_SDR25,
1704 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1705 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001706#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001707 {
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001708 .mode = SD_HS,
1709 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1710 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001711#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001712 {
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001713 .mode = UHS_SDR12,
1714 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1715 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001716#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001717 {
Faiz Abbas01db77e2020-02-26 13:44:32 +05301718 .mode = MMC_LEGACY,
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001719 .widths = MMC_MODE_4BIT | MMC_MODE_1BIT,
1720 }
1721};
1722
1723#define for_each_sd_mode_by_pref(caps, mwt) \
1724 for (mwt = sd_modes_by_pref;\
1725 mwt < sd_modes_by_pref + ARRAY_SIZE(sd_modes_by_pref);\
1726 mwt++) \
1727 if (caps & MMC_CAP(mwt->mode))
1728
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02001729static int sd_select_mode_and_width(struct mmc *mmc, uint card_caps)
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001730{
1731 int err;
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001732 uint widths[] = {MMC_MODE_4BIT, MMC_MODE_1BIT};
1733 const struct mode_width_tuning *mwt;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001734#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT)
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001735 bool uhs_en = (mmc->ocr & OCR_S18R) ? true : false;
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001736#else
1737 bool uhs_en = false;
1738#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001739 uint caps;
1740
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01001741#ifdef DEBUG
1742 mmc_dump_capabilities("sd card", card_caps);
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01001743 mmc_dump_capabilities("host", mmc->host_caps);
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01001744#endif
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001745
Anup Pateld9c92c72019-07-08 04:10:43 +00001746 if (mmc_host_is_spi(mmc)) {
1747 mmc_set_bus_width(mmc, 1);
Faiz Abbas01db77e2020-02-26 13:44:32 +05301748 mmc_select_mode(mmc, MMC_LEGACY);
Anup Pateld9c92c72019-07-08 04:10:43 +00001749 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
Pragnesh Patela01f57e2020-06-29 15:17:26 +05301750#if CONFIG_IS_ENABLED(MMC_WRITE)
1751 err = sd_read_ssr(mmc);
1752 if (err)
1753 pr_warn("unable to read ssr\n");
1754#endif
Anup Pateld9c92c72019-07-08 04:10:43 +00001755 return 0;
1756 }
1757
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001758 /* Restrict card's capabilities by what the host can do */
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01001759 caps = card_caps & mmc->host_caps;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001760
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001761 if (!uhs_en)
1762 caps &= ~UHS_CAPS;
1763
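 /*
  * Try modes fastest-first and bus widths widest-first; the first
  * combination supported by both card and host wins.
  */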
1764 for_each_sd_mode_by_pref(caps, mwt) {
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001765 uint *w;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001766
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001767 for (w = widths; w < widths + ARRAY_SIZE(widths); w++) {
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001768 if (*w & caps & mwt->widths) {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001769 pr_debug("trying mode %s width %d (at %d MHz)\n",
1770 mmc_mode_name(mwt->mode),
1771 bus_width(*w),
1772 mmc_mode2freq(mmc, mwt->mode) / 1000000);
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001773
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001774 /* configure the bus width (card + host) */
1775 err = sd_select_bus_width(mmc, bus_width(*w));
1776 if (err)
1777 goto error;
1778 mmc_set_bus_width(mmc, bus_width(*w));
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001779
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001780 /* configure the bus mode (card) */
1781 err = sd_set_card_speed(mmc, mwt->mode);
1782 if (err)
1783 goto error;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001784
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001785 /* configure the bus mode (host) */
1786 mmc_select_mode(mmc, mwt->mode);
Jaehoon Chung239cb2f2018-01-26 19:25:29 +09001787 mmc_set_clock(mmc, mmc->tran_speed,
1788 MMC_CLK_ENABLE);
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001789
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001790#ifdef MMC_SUPPORTS_TUNING
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001791 /* execute tuning if needed */
1792 if (mwt->tuning && !mmc_host_is_spi(mmc)) {
1793 err = mmc_execute_tuning(mmc,
1794 mwt->tuning);
1795 if (err) {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001796 pr_debug("tuning failed\n");
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001797 goto error;
1798 }
1799 }
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001800#endif
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02001801
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001802#if CONFIG_IS_ENABLED(MMC_WRITE)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001803 err = sd_read_ssr(mmc);
Peng Fan2d2fe8e2018-03-05 16:20:40 +08001804 if (err)
Jean-Jacques Hiblotcb534f02018-01-04 15:23:33 +01001805 pr_warn("unable to read ssr\n");
1806#endif
1807 if (!err)
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001808 return 0;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001809
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001810error:
1811 /* revert to a safer bus speed */
Faiz Abbas01db77e2020-02-26 13:44:32 +05301812 mmc_select_mode(mmc, MMC_LEGACY);
Jaehoon Chung239cb2f2018-01-26 19:25:29 +09001813 mmc_set_clock(mmc, mmc->tran_speed,
1814 MMC_CLK_ENABLE);
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001815 }
1816 }
1817 }
1818
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09001819 pr_err("unable to select a mode\n");
Jean-Jacques Hiblot5b1a4d92017-09-21 16:29:57 +02001820 return -ENOTSUPP;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001821}
1822
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001823/*
1824 * Read and compare the part of the ext_csd that is constant.
1825 * This can be used to check that the transfer is working
1826 * as expected.
1827 */
1828static int mmc_read_and_compare_ext_csd(struct mmc *mmc)
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02001829{
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001830 int err;
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02001831 const u8 *ext_csd = mmc->ext_csd;
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001832 ALLOC_CACHE_ALIGN_BUFFER(u8, test_csd, MMC_MAX_BLOCK_LEN);
1833
Jean-Jacques Hiblot7ab1b622017-11-30 17:43:58 +01001834 if (mmc->version < MMC_VERSION_4)
1835 return 0;
1836
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02001837 err = mmc_send_ext_csd(mmc, test_csd);
1838 if (err)
1839 return err;
1840
1841 /* Only compare read only fields */
1842 if (ext_csd[EXT_CSD_PARTITIONING_SUPPORT]
1843 == test_csd[EXT_CSD_PARTITIONING_SUPPORT] &&
1844 ext_csd[EXT_CSD_HC_WP_GRP_SIZE]
1845 == test_csd[EXT_CSD_HC_WP_GRP_SIZE] &&
1846 ext_csd[EXT_CSD_REV]
1847 == test_csd[EXT_CSD_REV] &&
1848 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
1849 == test_csd[EXT_CSD_HC_ERASE_GRP_SIZE] &&
1850 memcmp(&ext_csd[EXT_CSD_SEC_CNT],
1851 &test_csd[EXT_CSD_SEC_CNT], 4) == 0)
1852 return 0;
1853
1854 return -EBADMSG;
1855}
1856
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001857#if CONFIG_IS_ENABLED(MMC_IO_VOLTAGE)
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001858static int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1859 uint32_t allowed_mask)
1860{
1861 u32 card_mask = 0;
1862
1863 switch (mode) {
Peng Faneede83b2019-07-10 14:43:07 +08001864 case MMC_HS_400_ES:
Peng Fan46801252018-08-10 14:07:54 +08001865 case MMC_HS_400:
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001866 case MMC_HS_200:
Peng Fan46801252018-08-10 14:07:54 +08001867 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_8V |
1868 EXT_CSD_CARD_TYPE_HS400_1_8V))
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001869 card_mask |= MMC_SIGNAL_VOLTAGE_180;
Peng Fan46801252018-08-10 14:07:54 +08001870 if (mmc->cardtype & (EXT_CSD_CARD_TYPE_HS200_1_2V |
1871 EXT_CSD_CARD_TYPE_HS400_1_2V))
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001872 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1873 break;
1874 case MMC_DDR_52:
1875 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_8V)
1876 card_mask |= MMC_SIGNAL_VOLTAGE_330 |
1877 MMC_SIGNAL_VOLTAGE_180;
1878 if (mmc->cardtype & EXT_CSD_CARD_TYPE_DDR_1_2V)
1879 card_mask |= MMC_SIGNAL_VOLTAGE_120;
1880 break;
1881 default:
1882 card_mask |= MMC_SIGNAL_VOLTAGE_330;
1883 break;
1884 }
1885
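 /*
  * The signal-voltage flags are ordered from low to high, so ffs()
  * selects the lowest voltage supported by both card and host first.
  */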
1886 while (card_mask & allowed_mask) {
1887 enum mmc_voltage best_match;
1888
1889 best_match = 1 << (ffs(card_mask & allowed_mask) - 1);
1890 if (!mmc_set_signal_voltage(mmc, best_match))
1891 return 0;
1892
1893 allowed_mask &= ~best_match;
1894 }
1895
1896 return -ENOTSUPP;
1897}
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001898#else
1899static inline int mmc_set_lowest_voltage(struct mmc *mmc, enum bus_mode mode,
1900 uint32_t allowed_mask)
1901{
1902 return 0;
1903}
1904#endif
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02001905
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001906static const struct mode_width_tuning mmc_modes_by_pref[] = {
Peng Faneede83b2019-07-10 14:43:07 +08001907#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
1908 {
1909 .mode = MMC_HS_400_ES,
1910 .widths = MMC_MODE_8BIT,
1911 },
1912#endif
Peng Fan46801252018-08-10 14:07:54 +08001913#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1914 {
1915 .mode = MMC_HS_400,
1916 .widths = MMC_MODE_8BIT,
1917 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
1918 },
1919#endif
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001920#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT)
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001921 {
1922 .mode = MMC_HS_200,
1923 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02001924 .tuning = MMC_CMD_SEND_TUNING_BLOCK_HS200
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001925 },
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01001926#endif
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02001927 {
1928 .mode = MMC_DDR_52,
1929 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT,
1930 },
1931 {
1932 .mode = MMC_HS_52,
1933 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1934 },
1935 {
1936 .mode = MMC_HS,
1937 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1938 },
1939 {
1940 .mode = MMC_LEGACY,
1941 .widths = MMC_MODE_8BIT | MMC_MODE_4BIT | MMC_MODE_1BIT,
1942 }
1943};
1944
1945#define for_each_mmc_mode_by_pref(caps, mwt) \
1946 for (mwt = mmc_modes_by_pref;\
1947 mwt < mmc_modes_by_pref + ARRAY_SIZE(mmc_modes_by_pref);\
1948 mwt++) \
1949 if (caps & MMC_CAP(mwt->mode))
1950
1951static const struct ext_csd_bus_width {
1952 uint cap;
1953 bool is_ddr;
1954 uint ext_csd_bits;
1955} ext_csd_bus_width[] = {
1956 {MMC_MODE_8BIT, true, EXT_CSD_DDR_BUS_WIDTH_8},
1957 {MMC_MODE_4BIT, true, EXT_CSD_DDR_BUS_WIDTH_4},
1958 {MMC_MODE_8BIT, false, EXT_CSD_BUS_WIDTH_8},
1959 {MMC_MODE_4BIT, false, EXT_CSD_BUS_WIDTH_4},
1960 {MMC_MODE_1BIT, false, EXT_CSD_BUS_WIDTH_1},
1961};
1962
Peng Fan46801252018-08-10 14:07:54 +08001963#if CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
1964static int mmc_select_hs400(struct mmc *mmc)
1965{
1966 int err;
1967
1968 /* Set timing to HS200 for tuning */
Marek Vasut111572f2019-01-03 21:19:24 +01001969 err = mmc_set_card_speed(mmc, MMC_HS_200, false);
Peng Fan46801252018-08-10 14:07:54 +08001970 if (err)
1971 return err;
1972
1973 /* configure the bus mode (host) */
1974 mmc_select_mode(mmc, MMC_HS_200);
1975 mmc_set_clock(mmc, mmc->tran_speed, false);
1976
1977 /* execute tuning if needed */
Yangbo Lu3ed53ac2020-09-01 16:58:03 +08001978 mmc->hs400_tuning = 1;
Peng Fan46801252018-08-10 14:07:54 +08001979 err = mmc_execute_tuning(mmc, MMC_CMD_SEND_TUNING_BLOCK_HS200);
Yangbo Lu3ed53ac2020-09-01 16:58:03 +08001980 mmc->hs400_tuning = 0;
Peng Fan46801252018-08-10 14:07:54 +08001981 if (err) {
1982 debug("tuning failed\n");
1983 return err;
1984 }
1985
1986 /* Set back to HS */
BOUGH CHEN8702bbc2019-03-26 06:24:17 +00001987 mmc_set_card_speed(mmc, MMC_HS, true);
Peng Fan46801252018-08-10 14:07:54 +08001988
Yangbo Lu5347aea2020-09-01 16:58:04 +08001989 err = mmc_hs400_prepare_ddr(mmc);
1990 if (err)
1991 return err;
1992
Peng Fan46801252018-08-10 14:07:54 +08001993 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
1994 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG);
1995 if (err)
1996 return err;
1997
Marek Vasut111572f2019-01-03 21:19:24 +01001998 err = mmc_set_card_speed(mmc, MMC_HS_400, false);
Peng Fan46801252018-08-10 14:07:54 +08001999 if (err)
2000 return err;
2001
2002 mmc_select_mode(mmc, MMC_HS_400);
2003 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2004 if (err)
2005 return err;
2006
2007 return 0;
2008}
2009#else
2010static int mmc_select_hs400(struct mmc *mmc)
2011{
2012 return -ENOTSUPP;
2013}
2014#endif
2015
Peng Faneede83b2019-07-10 14:43:07 +08002016#if CONFIG_IS_ENABLED(MMC_HS400_ES_SUPPORT)
2017#if !CONFIG_IS_ENABLED(DM_MMC)
2018static int mmc_set_enhanced_strobe(struct mmc *mmc)
2019{
2020 return -ENOTSUPP;
2021}
2022#endif
2023static int mmc_select_hs400es(struct mmc *mmc)
2024{
2025 int err;
2026
2027 err = mmc_set_card_speed(mmc, MMC_HS, true);
2028 if (err)
2029 return err;
2030
2031 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
2032 EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_FLAG |
2033 EXT_CSD_BUS_WIDTH_STROBE);
2034 if (err) {
2035 printf("switching to 8-bit bus width for hs400 failed\n");
2036 return err;
2037 }
2038 /* TODO: driver strength */
2039 err = mmc_set_card_speed(mmc, MMC_HS_400_ES, false);
2040 if (err)
2041 return err;
2042
2043 mmc_select_mode(mmc, MMC_HS_400_ES);
2044 err = mmc_set_clock(mmc, mmc->tran_speed, false);
2045 if (err)
2046 return err;
2047
2048 return mmc_set_enhanced_strobe(mmc);
2049}
2050#else
2051static int mmc_select_hs400es(struct mmc *mmc)
2052{
2053 return -ENOTSUPP;
2054}
2055#endif
2056
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002057#define for_each_supported_width(caps, ddr, ecbv) \
2058 for (ecbv = ext_csd_bus_width;\
2059 ecbv < ext_csd_bus_width + ARRAY_SIZE(ext_csd_bus_width);\
2060 ecbv++) \
2061 if ((ddr == ecbv->is_ddr) && (caps & ecbv->cap))
2062
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002063static int mmc_select_mode_and_width(struct mmc *mmc, uint card_caps)
Jean-Jacques Hiblot933d1262017-09-21 16:29:52 +02002064{
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002065 int err;
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002066 const struct mode_width_tuning *mwt;
2067 const struct ext_csd_bus_width *ecbw;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002068
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01002069#ifdef DEBUG
2070 mmc_dump_capabilities("mmc", card_caps);
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01002071 mmc_dump_capabilities("host", mmc->host_caps);
Jean-Jacques Hiblot93c31d12017-11-30 17:43:54 +01002072#endif
2073
Anup Pateld9c92c72019-07-08 04:10:43 +00002074 if (mmc_host_is_spi(mmc)) {
2075 mmc_set_bus_width(mmc, 1);
2076 mmc_select_mode(mmc, MMC_LEGACY);
2077 mmc_set_clock(mmc, mmc->tran_speed, MMC_CLK_ENABLE);
2078 return 0;
2079 }
2080
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002081 /* Restrict card's capabilities by what the host can do */
Jean-Jacques Hiblotd7e5e032017-11-30 17:43:57 +01002082 card_caps &= mmc->host_caps;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002083
2084 /* Only version 4 of MMC supports wider bus widths */
2085 if (mmc->version < MMC_VERSION_4)
2086 return 0;
2087
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002088 if (!mmc->ext_csd) {
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002089 pr_debug("No ext_csd found!\n"); /* this should never happen */
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002090 return -ENOTSUPP;
2091 }
2092
Marek Vasut111572f2019-01-03 21:19:24 +01002093#if CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2094 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2095 /*
2096 * In case the eMMC is in HS200/HS400 mode, downgrade to HS mode
2097 * before doing anything else, since a transition from either of
2098 * the HS200/HS400 modes directly to legacy mode is not supported.
2099 */
2100 if (mmc->selected_mode == MMC_HS_200 ||
2101 mmc->selected_mode == MMC_HS_400)
2102 mmc_set_card_speed(mmc, MMC_HS, true);
2103 else
2104#endif
2105 mmc_set_clock(mmc, mmc->legacy_speed, MMC_CLK_ENABLE);
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002106
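 /*
  * Walk the eMMC modes fastest-first and bus widths widest-first; each
  * candidate is verified by re-reading the ext_csd before it is kept.
  */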
2107 for_each_mmc_mode_by_pref(card_caps, mwt) {
2108 for_each_supported_width(card_caps & mwt->widths,
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002109 mmc_is_mode_ddr(mwt->mode), ecbw) {
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02002110 enum mmc_voltage old_voltage;
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002111 pr_debug("trying mode %s width %d (at %d MHz)\n",
2112 mmc_mode_name(mwt->mode),
2113 bus_width(ecbw->cap),
2114 mmc_mode2freq(mmc, mwt->mode) / 1000000);
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02002115 old_voltage = mmc->signal_voltage;
2116 err = mmc_set_lowest_voltage(mmc, mwt->mode,
2117 MMC_ALL_SIGNAL_VOLTAGE);
2118 if (err)
2119 continue;
2120
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002121 /* configure the bus width (card + host) */
2122 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2123 EXT_CSD_BUS_WIDTH,
2124 ecbw->ext_csd_bits & ~EXT_CSD_DDR_FLAG);
2125 if (err)
2126 goto error;
2127 mmc_set_bus_width(mmc, bus_width(ecbw->cap));
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002128
Peng Fan46801252018-08-10 14:07:54 +08002129 if (mwt->mode == MMC_HS_400) {
2130 err = mmc_select_hs400(mmc);
2131 if (err) {
2132 printf("Select HS400 failed %d\n", err);
2133 goto error;
2134 }
Peng Faneede83b2019-07-10 14:43:07 +08002135 } else if (mwt->mode == MMC_HS_400_ES) {
2136 err = mmc_select_hs400es(mmc);
2137 if (err) {
2138 printf("Select HS400ES failed %d\n",
2139 err);
2140 goto error;
2141 }
Peng Fan46801252018-08-10 14:07:54 +08002142 } else {
2143 /* configure the bus speed (card) */
Marek Vasut111572f2019-01-03 21:19:24 +01002144 err = mmc_set_card_speed(mmc, mwt->mode, false);
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002145 if (err)
2146 goto error;
Peng Fan46801252018-08-10 14:07:54 +08002147
2148 /*
2149 * configure the bus width AND the ddr mode
2150 * (card). The host side will be taken care
2151 * of in the next step
2152 */
2153 if (ecbw->ext_csd_bits & EXT_CSD_DDR_FLAG) {
2154 err = mmc_switch(mmc,
2155 EXT_CSD_CMD_SET_NORMAL,
2156 EXT_CSD_BUS_WIDTH,
2157 ecbw->ext_csd_bits);
2158 if (err)
2159 goto error;
2160 }
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002161
Peng Fan46801252018-08-10 14:07:54 +08002162 /* configure the bus mode (host) */
2163 mmc_select_mode(mmc, mwt->mode);
2164 mmc_set_clock(mmc, mmc->tran_speed,
2165 MMC_CLK_ENABLE);
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01002166#ifdef MMC_SUPPORTS_TUNING
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002167
Peng Fan46801252018-08-10 14:07:54 +08002168 /* execute tuning if needed */
2169 if (mwt->tuning) {
2170 err = mmc_execute_tuning(mmc,
2171 mwt->tuning);
2172 if (err) {
Jaehoon Chungad9f7ce2020-11-17 07:04:59 +09002173 pr_debug("tuning failed : %d\n", err);
Peng Fan46801252018-08-10 14:07:54 +08002174 goto error;
2175 }
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02002176 }
Jean-Jacques Hiblot6051e782017-11-30 17:44:01 +01002177#endif
Peng Fan46801252018-08-10 14:07:54 +08002178 }
Kishon Vijay Abraham I210369f2017-09-21 16:30:06 +02002179
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002180 /* do a transfer to check the configuration */
2181 err = mmc_read_and_compare_ext_csd(mmc);
2182 if (!err)
2183 return 0;
2184error:
Jean-Jacques Hiblotb6937d62017-09-21 16:30:11 +02002185 mmc_set_signal_voltage(mmc, old_voltage);
Naoki Hayama3110dcb2020-10-12 18:35:22 +09002186 /* if an error occurred, revert to a safer bus mode */
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002187 mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2188 EXT_CSD_BUS_WIDTH, EXT_CSD_BUS_WIDTH_1);
2189 mmc_select_mode(mmc, MMC_LEGACY);
2190 mmc_set_bus_width(mmc, 1);
2191 }
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002192 }
2193
Jaehoon Chungad9f7ce2020-11-17 07:04:59 +09002194 pr_err("unable to select a mode : %d\n", err);
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002195
Jean-Jacques Hiblotec346832017-09-21 16:29:58 +02002196 return -ENOTSUPP;
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002197}
Marek Vasuta318a7a2018-04-15 00:37:11 +02002198#endif
2199
2200#if CONFIG_IS_ENABLED(MMC_TINY)
2201DEFINE_CACHE_ALIGN_BUFFER(u8, ext_csd_bkup, MMC_MAX_BLOCK_LEN);
2202#endif
Jean-Jacques Hiblot31e7cf32017-09-21 16:29:49 +02002203
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002204static int mmc_startup_v4(struct mmc *mmc)
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002205{
2206 int err, i;
2207 u64 capacity;
2208 bool has_parts = false;
2209 bool part_completed;
Jean-Jacques Hiblotfa6c5772018-01-04 15:23:31 +01002210 static const u32 mmc_versions[] = {
2211 MMC_VERSION_4,
2212 MMC_VERSION_4_1,
2213 MMC_VERSION_4_2,
2214 MMC_VERSION_4_3,
Jean-Jacques Hiblotc64862b2018-02-09 12:09:28 +01002215 MMC_VERSION_4_4,
Jean-Jacques Hiblotfa6c5772018-01-04 15:23:31 +01002216 MMC_VERSION_4_41,
2217 MMC_VERSION_4_5,
2218 MMC_VERSION_5_0,
2219 MMC_VERSION_5_1
2220 };
2221
Marek Vasuta318a7a2018-04-15 00:37:11 +02002222#if CONFIG_IS_ENABLED(MMC_TINY)
2223 u8 *ext_csd = ext_csd_bkup;
2224
2225 if (IS_SD(mmc) || mmc->version < MMC_VERSION_4)
2226 return 0;
2227
2228 if (!mmc->ext_csd)
2229 memset(ext_csd_bkup, 0, sizeof(ext_csd_bkup));
2230
2231 err = mmc_send_ext_csd(mmc, ext_csd);
2232 if (err)
2233 goto error;
2234
2235 /* store the ext csd for future reference */
2236 if (!mmc->ext_csd)
2237 mmc->ext_csd = ext_csd;
2238#else
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002239 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002240
2241 if (IS_SD(mmc) || (mmc->version < MMC_VERSION_4))
2242 return 0;
2243
2244 /* check ext_csd version and capacity */
2245 err = mmc_send_ext_csd(mmc, ext_csd);
2246 if (err)
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002247 goto error;
2248
2249 /* store the ext csd for future reference */
2250 if (!mmc->ext_csd)
2251 mmc->ext_csd = malloc(MMC_MAX_BLOCK_LEN);
2252 if (!mmc->ext_csd)
2253 return -ENOMEM;
2254 memcpy(mmc->ext_csd, ext_csd, MMC_MAX_BLOCK_LEN);
Marek Vasuta318a7a2018-04-15 00:37:11 +02002255#endif
Alexander Kochetkovf1133c92018-02-20 14:35:55 +03002256 if (ext_csd[EXT_CSD_REV] >= ARRAY_SIZE(mmc_versions))
Jean-Jacques Hiblotfa6c5772018-01-04 15:23:31 +01002257 return -EINVAL;
2258
2259 mmc->version = mmc_versions[ext_csd[EXT_CSD_REV]];
2260
2261 if (mmc->version >= MMC_VERSION_4_2) {
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002262 /*
2263 * According to the JEDEC Standard, the value of
2264 * ext_csd's capacity is valid if the value is more
2265 * than 2GB
2266 */
2267 capacity = ext_csd[EXT_CSD_SEC_CNT] << 0
2268 | ext_csd[EXT_CSD_SEC_CNT + 1] << 8
2269 | ext_csd[EXT_CSD_SEC_CNT + 2] << 16
2270 | ext_csd[EXT_CSD_SEC_CNT + 3] << 24;
2271 capacity *= MMC_MAX_BLOCK_LEN;
2272 if ((capacity >> 20) > 2 * 1024)
2273 mmc->capacity_user = capacity;
2274 }
2275
Jean-Jacques Hiblot201559c2019-07-02 10:53:54 +02002276 if (mmc->version >= MMC_VERSION_4_5)
2277 mmc->gen_cmd6_time = ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
2278
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002279 /* The partition data may be non-zero but it is only
2280 * effective if PARTITION_SETTING_COMPLETED is set in
2281 * EXT_CSD, so ignore any data if this bit is not set,
2282 * except for enabling the high-capacity group size
2283 * definition (see below).
2284 */
2285 part_completed = !!(ext_csd[EXT_CSD_PARTITION_SETTING] &
2286 EXT_CSD_PARTITION_SETTING_COMPLETED);
2287
Jean-Jacques Hiblot7f5b1692019-07-02 10:53:55 +02002288 mmc->part_switch_time = ext_csd[EXT_CSD_PART_SWITCH_TIME];
2289 /* Some eMMC set the value too low so set a minimum */
2290 if (mmc->part_switch_time < MMC_MIN_PART_SWITCH_TIME && mmc->part_switch_time)
2291 mmc->part_switch_time = MMC_MIN_PART_SWITCH_TIME;
2292
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002293 /* store the partition info of emmc */
2294 mmc->part_support = ext_csd[EXT_CSD_PARTITIONING_SUPPORT];
2295 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) ||
2296 ext_csd[EXT_CSD_BOOT_MULT])
2297 mmc->part_config = ext_csd[EXT_CSD_PART_CONF];
2298 if (part_completed &&
2299 (ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & ENHNCD_SUPPORT))
2300 mmc->part_attr = ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE];
2301
2302 mmc->capacity_boot = ext_csd[EXT_CSD_BOOT_MULT] << 17;
2303
2304 mmc->capacity_rpmb = ext_csd[EXT_CSD_RPMB_MULT] << 17;
2305
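 /*
  * Each GP partition size is GP_SIZE_MULT * HC_WP_GRP_SIZE *
  * HC_ERASE_GRP_SIZE * 512 KiB (hence the << 19 below).
  */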
2306 for (i = 0; i < 4; i++) {
2307 int idx = EXT_CSD_GP_SIZE_MULT + i * 3;
2308 uint mult = (ext_csd[idx + 2] << 16) +
2309 (ext_csd[idx + 1] << 8) + ext_csd[idx];
2310 if (mult)
2311 has_parts = true;
2312 if (!part_completed)
2313 continue;
2314 mmc->capacity_gp[i] = mult;
2315 mmc->capacity_gp[i] *=
2316 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2317 mmc->capacity_gp[i] *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2318 mmc->capacity_gp[i] <<= 19;
2319 }
2320
Jean-Jacques Hiblotc94c5472018-01-04 15:23:35 +01002321#ifndef CONFIG_SPL_BUILD
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002322 if (part_completed) {
2323 mmc->enh_user_size =
2324 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 2] << 16) +
2325 (ext_csd[EXT_CSD_ENH_SIZE_MULT + 1] << 8) +
2326 ext_csd[EXT_CSD_ENH_SIZE_MULT];
2327 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
2328 mmc->enh_user_size *= ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
2329 mmc->enh_user_size <<= 19;
2330 mmc->enh_user_start =
2331 (ext_csd[EXT_CSD_ENH_START_ADDR + 3] << 24) +
2332 (ext_csd[EXT_CSD_ENH_START_ADDR + 2] << 16) +
2333 (ext_csd[EXT_CSD_ENH_START_ADDR + 1] << 8) +
2334 ext_csd[EXT_CSD_ENH_START_ADDR];
2335 if (mmc->high_capacity)
2336 mmc->enh_user_start <<= 9;
2337 }
Jean-Jacques Hiblotc94c5472018-01-04 15:23:35 +01002338#endif
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002339
2340 /*
2341 * Host needs to enable ERASE_GRP_DEF bit if device is
2342 * partitioned. This bit will be lost every time after a reset
2343 * or power off. This will affect erase size.
2344 */
2345 if (part_completed)
2346 has_parts = true;
2347 if ((ext_csd[EXT_CSD_PARTITIONING_SUPPORT] & PART_SUPPORT) &&
2348 (ext_csd[EXT_CSD_PARTITIONS_ATTRIBUTE] & PART_ENH_ATTRIB))
2349 has_parts = true;
2350 if (has_parts) {
2351 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL,
2352 EXT_CSD_ERASE_GROUP_DEF, 1);
2353
2354 if (err)
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002355 goto error;
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002356
2357 ext_csd[EXT_CSD_ERASE_GROUP_DEF] = 1;
2358 }
2359
2360 if (ext_csd[EXT_CSD_ERASE_GROUP_DEF] & 0x01) {
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002361#if CONFIG_IS_ENABLED(MMC_WRITE)
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002362 /* Read out group size from ext_csd */
2363 mmc->erase_grp_size =
2364 ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] * 1024;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002365#endif
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002366 /*
2367 * if high capacity and partition setting completed
2368 * SEC_COUNT is valid even if it is smaller than 2 GiB
2369 * JEDEC Standard JESD84-B45, 6.2.4
2370 */
2371 if (mmc->high_capacity && part_completed) {
2372 capacity = (ext_csd[EXT_CSD_SEC_CNT]) |
2373 (ext_csd[EXT_CSD_SEC_CNT + 1] << 8) |
2374 (ext_csd[EXT_CSD_SEC_CNT + 2] << 16) |
2375 (ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
2376 capacity *= MMC_MAX_BLOCK_LEN;
2377 mmc->capacity_user = capacity;
2378 }
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002379 }
2380#if CONFIG_IS_ENABLED(MMC_WRITE)
2381 else {
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002382 /* Calculate the group size from the csd value. */
2383 int erase_gsz, erase_gmul;
2384
2385 erase_gsz = (mmc->csd[2] & 0x00007c00) >> 10;
2386 erase_gmul = (mmc->csd[2] & 0x000003e0) >> 5;
2387 mmc->erase_grp_size = (erase_gsz + 1)
2388 * (erase_gmul + 1);
2389 }
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002390#endif
Jean-Jacques Hiblotba54ab82018-01-04 15:23:36 +01002391#if CONFIG_IS_ENABLED(MMC_HW_PARTITIONING)
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002392 mmc->hc_wp_grp_size = 1024
2393 * ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]
2394 * ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
Jean-Jacques Hiblotba54ab82018-01-04 15:23:36 +01002395#endif
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002396
2397 mmc->wr_rel_set = ext_csd[EXT_CSD_WR_REL_SET];
2398
2399 return 0;
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002400error:
2401 if (mmc->ext_csd) {
Marek Vasuta318a7a2018-04-15 00:37:11 +02002402#if !CONFIG_IS_ENABLED(MMC_TINY)
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002403 free(mmc->ext_csd);
Marek Vasuta318a7a2018-04-15 00:37:11 +02002404#endif
Jean-Jacques Hiblot06976eb2017-11-30 17:43:59 +01002405 mmc->ext_csd = NULL;
2406 }
2407 return err;
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002408}
2409
Kim Phillips87ea3892012-10-29 13:34:43 +00002410static int mmc_startup(struct mmc *mmc)
Andy Flemingad347bb2008-10-30 16:41:01 -05002411{
Stephen Warrene315ae82013-06-11 15:14:01 -06002412 int err, i;
Andy Flemingad347bb2008-10-30 16:41:01 -05002413 uint mult, freq;
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002414 u64 cmult, csize;
Andy Flemingad347bb2008-10-30 16:41:01 -05002415 struct mmc_cmd cmd;
Simon Glasse5db1152016-05-01 13:52:35 -06002416 struct blk_desc *bdesc;
Andy Flemingad347bb2008-10-30 16:41:01 -05002417
Thomas Chou1254c3d2010-12-24 13:12:21 +00002418#ifdef CONFIG_MMC_SPI_CRC_ON
2419 if (mmc_host_is_spi(mmc)) { /* enable CRC check for spi */
2420 cmd.cmdidx = MMC_CMD_SPI_CRC_ON_OFF;
2421 cmd.resp_type = MMC_RSP_R1;
2422 cmd.cmdarg = 1;
Thomas Chou1254c3d2010-12-24 13:12:21 +00002423 err = mmc_send_cmd(mmc, &cmd, NULL);
Thomas Chou1254c3d2010-12-24 13:12:21 +00002424 if (err)
2425 return err;
2426 }
2427#endif
2428
Andy Flemingad347bb2008-10-30 16:41:01 -05002429 /* Put the Card in Identify Mode */
Thomas Chou1254c3d2010-12-24 13:12:21 +00002430 cmd.cmdidx = mmc_host_is_spi(mmc) ? MMC_CMD_SEND_CID :
2431 MMC_CMD_ALL_SEND_CID; /* cmd not supported in spi */
Andy Flemingad347bb2008-10-30 16:41:01 -05002432 cmd.resp_type = MMC_RSP_R2;
2433 cmd.cmdarg = 0;
Andy Flemingad347bb2008-10-30 16:41:01 -05002434
Sean Anderson86325092020-10-17 08:36:27 -04002435 err = mmc_send_cmd_quirks(mmc, &cmd, NULL, MMC_QUIRK_RETRY_SEND_CID, 4);
Andy Flemingad347bb2008-10-30 16:41:01 -05002436 if (err)
2437 return err;
2438
2439 memcpy(mmc->cid, cmd.response, 16);
2440
2441 /*
2442 * For MMC cards, set the Relative Address.
2443 * For SD cards, get the Relative Address.
2444 * This also puts the cards into Standby State
2445 */
Thomas Chou1254c3d2010-12-24 13:12:21 +00002446 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2447 cmd.cmdidx = SD_CMD_SEND_RELATIVE_ADDR;
2448 cmd.cmdarg = mmc->rca << 16;
2449 cmd.resp_type = MMC_RSP_R6;
Andy Flemingad347bb2008-10-30 16:41:01 -05002450
Thomas Chou1254c3d2010-12-24 13:12:21 +00002451 err = mmc_send_cmd(mmc, &cmd, NULL);
Andy Flemingad347bb2008-10-30 16:41:01 -05002452
Thomas Chou1254c3d2010-12-24 13:12:21 +00002453 if (err)
2454 return err;
Andy Flemingad347bb2008-10-30 16:41:01 -05002455
Thomas Chou1254c3d2010-12-24 13:12:21 +00002456 if (IS_SD(mmc))
2457 mmc->rca = (cmd.response[0] >> 16) & 0xffff;
2458 }
Andy Flemingad347bb2008-10-30 16:41:01 -05002459
2460 /* Get the Card-Specific Data */
2461 cmd.cmdidx = MMC_CMD_SEND_CSD;
2462 cmd.resp_type = MMC_RSP_R2;
2463 cmd.cmdarg = mmc->rca << 16;
Andy Flemingad347bb2008-10-30 16:41:01 -05002464
2465 err = mmc_send_cmd(mmc, &cmd, NULL);
2466
2467 if (err)
2468 return err;
2469
Rabin Vincentb6eed942009-04-05 13:30:56 +05302470 mmc->csd[0] = cmd.response[0];
2471 mmc->csd[1] = cmd.response[1];
2472 mmc->csd[2] = cmd.response[2];
2473 mmc->csd[3] = cmd.response[3];
Andy Flemingad347bb2008-10-30 16:41:01 -05002474
2475 if (mmc->version == MMC_VERSION_UNKNOWN) {
Rabin Vincentbdf7a682009-04-05 13:30:55 +05302476 int version = (cmd.response[0] >> 26) & 0xf;
Andy Flemingad347bb2008-10-30 16:41:01 -05002477
2478 switch (version) {
Bin Meng4a4ef872016-03-17 21:53:13 -07002479 case 0:
2480 mmc->version = MMC_VERSION_1_2;
2481 break;
2482 case 1:
2483 mmc->version = MMC_VERSION_1_4;
2484 break;
2485 case 2:
2486 mmc->version = MMC_VERSION_2_2;
2487 break;
2488 case 3:
2489 mmc->version = MMC_VERSION_3;
2490 break;
2491 case 4:
2492 mmc->version = MMC_VERSION_4;
2493 break;
2494 default:
2495 mmc->version = MMC_VERSION_1_2;
2496 break;
Andy Flemingad347bb2008-10-30 16:41:01 -05002497 }
2498 }
2499
2500 /* divide frequency by 10, since the mults are 10x bigger */
Rabin Vincentbdf7a682009-04-05 13:30:55 +05302501 freq = fbase[(cmd.response[0] & 0x7)];
2502 mult = multipliers[((cmd.response[0] >> 3) & 0xf)];
Andy Flemingad347bb2008-10-30 16:41:01 -05002503
Jean-Jacques Hiblota94fb412017-09-21 16:29:53 +02002504 mmc->legacy_speed = freq * mult;
Jean-Jacques Hiblota94fb412017-09-21 16:29:53 +02002505 mmc_select_mode(mmc, MMC_LEGACY);
Andy Flemingad347bb2008-10-30 16:41:01 -05002506
Markus Niebel03951412013-12-16 13:40:46 +01002507 mmc->dsr_imp = ((cmd.response[1] >> 12) & 0x1);
Rabin Vincentb6eed942009-04-05 13:30:56 +05302508 mmc->read_bl_len = 1 << ((cmd.response[1] >> 16) & 0xf);
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002509#if CONFIG_IS_ENABLED(MMC_WRITE)
Andy Flemingad347bb2008-10-30 16:41:01 -05002510
2511 if (IS_SD(mmc))
2512 mmc->write_bl_len = mmc->read_bl_len;
2513 else
Rabin Vincentb6eed942009-04-05 13:30:56 +05302514 mmc->write_bl_len = 1 << ((cmd.response[3] >> 22) & 0xf);
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002515#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002516
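 /*
  * High-capacity cards use the 22-bit CSD v2 C_SIZE with a fixed
  * 2^10 multiplier; standard-capacity cards use C_SIZE/C_SIZE_MULT,
  * giving (C_SIZE + 1) * 2^(C_SIZE_MULT + 2) * READ_BL_LEN bytes.
  */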
2517 if (mmc->high_capacity) {
2518 csize = (mmc->csd[1] & 0x3f) << 16
2519 | (mmc->csd[2] & 0xffff0000) >> 16;
2520 cmult = 8;
2521 } else {
2522 csize = (mmc->csd[1] & 0x3ff) << 2
2523 | (mmc->csd[2] & 0xc0000000) >> 30;
2524 cmult = (mmc->csd[2] & 0x00038000) >> 15;
2525 }
2526
Stephen Warrene315ae82013-06-11 15:14:01 -06002527 mmc->capacity_user = (csize + 1) << (cmult + 2);
2528 mmc->capacity_user *= mmc->read_bl_len;
2529 mmc->capacity_boot = 0;
2530 mmc->capacity_rpmb = 0;
2531 for (i = 0; i < 4; i++)
2532 mmc->capacity_gp[i] = 0;
Andy Flemingad347bb2008-10-30 16:41:01 -05002533
Simon Glassa09c2b72013-04-03 08:54:30 +00002534 if (mmc->read_bl_len > MMC_MAX_BLOCK_LEN)
2535 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
Andy Flemingad347bb2008-10-30 16:41:01 -05002536
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002537#if CONFIG_IS_ENABLED(MMC_WRITE)
Simon Glassa09c2b72013-04-03 08:54:30 +00002538 if (mmc->write_bl_len > MMC_MAX_BLOCK_LEN)
2539 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002540#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002541
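 /*
  * Only program the DSR if the card implements it (DSR_IMP in the CSD)
  * and a value was actually configured (0xffffffff means unset).
  */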
Markus Niebel03951412013-12-16 13:40:46 +01002542 if ((mmc->dsr_imp) && (0xffffffff != mmc->dsr)) {
2543 cmd.cmdidx = MMC_CMD_SET_DSR;
2544 cmd.cmdarg = (mmc->dsr & 0xffff) << 16;
2545 cmd.resp_type = MMC_RSP_NONE;
2546 if (mmc_send_cmd(mmc, &cmd, NULL))
Jean-Jacques Hiblot678b6082017-11-30 17:44:00 +01002547 pr_warn("MMC: SET_DSR failed\n");
Markus Niebel03951412013-12-16 13:40:46 +01002548 }
2549
Andy Flemingad347bb2008-10-30 16:41:01 -05002550 /* Select the card, and put it into Transfer Mode */
Thomas Chou1254c3d2010-12-24 13:12:21 +00002551 if (!mmc_host_is_spi(mmc)) { /* cmd not supported in spi */
2552 cmd.cmdidx = MMC_CMD_SELECT_CARD;
Ajay Bhargav4a32fba2011-10-05 03:13:23 +00002553 cmd.resp_type = MMC_RSP_R1;
Thomas Chou1254c3d2010-12-24 13:12:21 +00002554 cmd.cmdarg = mmc->rca << 16;
Thomas Chou1254c3d2010-12-24 13:12:21 +00002555 err = mmc_send_cmd(mmc, &cmd, NULL);
Andy Flemingad347bb2008-10-30 16:41:01 -05002556
Thomas Chou1254c3d2010-12-24 13:12:21 +00002557 if (err)
2558 return err;
2559 }
Andy Flemingad347bb2008-10-30 16:41:01 -05002560
Lei Wenea526762011-06-22 17:03:31 +00002561 /*
2562 * For SD, the erase group is always one sector
2563 */
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002564#if CONFIG_IS_ENABLED(MMC_WRITE)
Lei Wenea526762011-06-22 17:03:31 +00002565 mmc->erase_grp_size = 1;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002566#endif
Lei Wen31b99802011-05-02 16:26:26 +00002567 mmc->part_config = MMCPART_NOAVAILABLE;
Diego Santa Cruza7a75992014-12-23 10:50:27 +01002568
Jean-Jacques Hibloted9506b2017-09-21 16:29:51 +02002569 err = mmc_startup_v4(mmc);
Jean-Jacques Hiblote84459c2017-09-21 16:29:50 +02002570 if (err)
2571 return err;
Sukumar Ghorai232293c2010-09-20 18:29:29 +05302572
Simon Glasse5db1152016-05-01 13:52:35 -06002573 err = mmc_set_capacity(mmc, mmc_get_blk_desc(mmc)->hwpart);
Stephen Warrene315ae82013-06-11 15:14:01 -06002574 if (err)
2575 return err;
2576
Marek Vasuta318a7a2018-04-15 00:37:11 +02002577#if CONFIG_IS_ENABLED(MMC_TINY)
2578 mmc_set_clock(mmc, mmc->legacy_speed, false);
Faiz Abbas01db77e2020-02-26 13:44:32 +05302579 mmc_select_mode(mmc, MMC_LEGACY);
Marek Vasuta318a7a2018-04-15 00:37:11 +02002580 mmc_set_bus_width(mmc, 1);
2581#else
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002582 if (IS_SD(mmc)) {
2583 err = sd_get_capabilities(mmc);
2584 if (err)
2585 return err;
2586 err = sd_select_mode_and_width(mmc, mmc->card_caps);
2587 } else {
2588 err = mmc_get_capabilities(mmc);
2589 if (err)
2590 return err;
Masahiro Yamadabf1f25c2020-01-23 14:31:12 +09002591 err = mmc_select_mode_and_width(mmc, mmc->card_caps);
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002592 }
Marek Vasuta318a7a2018-04-15 00:37:11 +02002593#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002594 if (err)
2595 return err;
2596
Jean-Jacques Hiblot3d30972b2017-09-21 16:30:09 +02002597 mmc->best_mode = mmc->selected_mode;
Jaehoon Chunge1d4c7b2012-03-26 21:16:03 +00002598
Andrew Gabbasov532663b2014-12-01 06:59:11 -06002599 /* Fix the block length for DDR mode */
2600 if (mmc->ddr_mode) {
2601 mmc->read_bl_len = MMC_MAX_BLOCK_LEN;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002602#if CONFIG_IS_ENABLED(MMC_WRITE)
Andrew Gabbasov532663b2014-12-01 06:59:11 -06002603 mmc->write_bl_len = MMC_MAX_BLOCK_LEN;
Jean-Jacques Hiblot27edffe2018-01-04 15:23:34 +01002604#endif
Andrew Gabbasov532663b2014-12-01 06:59:11 -06002605 }
2606
Andy Flemingad347bb2008-10-30 16:41:01 -05002607 /* fill in device description */
Simon Glasse5db1152016-05-01 13:52:35 -06002608 bdesc = mmc_get_blk_desc(mmc);
2609 bdesc->lun = 0;
2610 bdesc->hwpart = 0;
2611 bdesc->type = 0;
2612 bdesc->blksz = mmc->read_bl_len;
2613 bdesc->log2blksz = LOG2(bdesc->blksz);
2614 bdesc->lba = lldiv(mmc->capacity, mmc->read_bl_len);
Sjoerd Simonsd67754f2015-12-04 23:27:40 +01002615#if !defined(CONFIG_SPL_BUILD) || \
2616 (defined(CONFIG_SPL_LIBCOMMON_SUPPORT) && \
Simon Glass7611ac62019-09-25 08:56:27 -06002617 !CONFIG_IS_ENABLED(USE_TINY_PRINTF))
Simon Glasse5db1152016-05-01 13:52:35 -06002618 sprintf(bdesc->vendor, "Man %06x Snr %04x%04x",
Taylor Hutt7367ec22012-10-20 17:15:59 +00002619 mmc->cid[0] >> 24, (mmc->cid[2] & 0xffff),
2620 (mmc->cid[3] >> 16) & 0xffff);
Simon Glasse5db1152016-05-01 13:52:35 -06002621 sprintf(bdesc->product, "%c%c%c%c%c%c", mmc->cid[0] & 0xff,
Taylor Hutt7367ec22012-10-20 17:15:59 +00002622 (mmc->cid[1] >> 24), (mmc->cid[1] >> 16) & 0xff,
2623 (mmc->cid[1] >> 8) & 0xff, mmc->cid[1] & 0xff,
2624 (mmc->cid[2] >> 24) & 0xff);
Simon Glasse5db1152016-05-01 13:52:35 -06002625 sprintf(bdesc->revision, "%d.%d", (mmc->cid[2] >> 20) & 0xf,
Taylor Hutt7367ec22012-10-20 17:15:59 +00002626 (mmc->cid[2] >> 16) & 0xf);
Paul Burton6a7c5ba2013-09-04 16:12:25 +01002627#else
Simon Glasse5db1152016-05-01 13:52:35 -06002628 bdesc->vendor[0] = 0;
2629 bdesc->product[0] = 0;
2630 bdesc->revision[0] = 0;
Paul Burton6a7c5ba2013-09-04 16:12:25 +01002631#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002632
Andre Przywara17798042018-12-17 10:05:45 +00002633#if !defined(CONFIG_DM_MMC) && (!defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBDISK_SUPPORT))
2634 part_init(bdesc);
2635#endif
2636
Andy Flemingad347bb2008-10-30 16:41:01 -05002637 return 0;
2638}
2639
Kim Phillips87ea3892012-10-29 13:34:43 +00002640static int mmc_send_if_cond(struct mmc *mmc)
Andy Flemingad347bb2008-10-30 16:41:01 -05002641{
2642 struct mmc_cmd cmd;
2643 int err;
2644
2645 cmd.cmdidx = SD_CMD_SEND_IF_COND;
2646 /* We set the bit if the host supports voltages between 2.7 and 3.6 V */
Pantelis Antoniou2c850462014-03-11 19:34:20 +02002647 cmd.cmdarg = ((mmc->cfg->voltages & 0xff8000) != 0) << 8 | 0xaa;
Andy Flemingad347bb2008-10-30 16:41:01 -05002648 cmd.resp_type = MMC_RSP_R7;
Andy Flemingad347bb2008-10-30 16:41:01 -05002649
2650 err = mmc_send_cmd(mmc, &cmd, NULL);
2651
2652 if (err)
2653 return err;
2654
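 /*
  * A v2.0+ card echoes the 0xaa check pattern in R7; a missing or
  * wrong echo means an SD 1.x card (or no card), which ignores CMD8.
  */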
Rabin Vincentb6eed942009-04-05 13:30:56 +05302655 if ((cmd.response[0] & 0xff) != 0xaa)
Jaehoon Chung7825d202016-07-19 16:33:36 +09002656 return -EOPNOTSUPP;
Andy Flemingad347bb2008-10-30 16:41:01 -05002657 else
2658 mmc->version = SD_VERSION_2;
2659
2660 return 0;
2661}
2662
Simon Glass5f4bd8c2017-07-04 13:31:19 -06002663#if !CONFIG_IS_ENABLED(DM_MMC)
Paul Kocialkowski2439fe92014-11-08 20:55:45 +01002664/* board-specific MMC power initializations. */
2665__weak void board_mmc_power_init(void)
2666{
2667}
Simon Glass833b80d2017-04-22 19:10:56 -06002668#endif
Paul Kocialkowski2439fe92014-11-08 20:55:45 +01002669
Peng Fan15305962016-10-11 15:08:43 +08002670static int mmc_power_init(struct mmc *mmc)
2671{
Simon Glass5f4bd8c2017-07-04 13:31:19 -06002672#if CONFIG_IS_ENABLED(DM_MMC)
Jean-Jacques Hiblota49ffa12017-09-21 16:29:48 +02002673#if CONFIG_IS_ENABLED(DM_REGULATOR)
Peng Fan15305962016-10-11 15:08:43 +08002674 int ret;
2675
2676 ret = device_get_supply_regulator(mmc->dev, "vmmc-supply",
Jean-Jacques Hiblota49ffa12017-09-21 16:29:48 +02002677 &mmc->vmmc_supply);
2678 if (ret)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002679 pr_debug("%s: No vmmc supply\n", mmc->dev->name);
Jean-Jacques Hiblota49ffa12017-09-21 16:29:48 +02002680
2681 ret = device_get_supply_regulator(mmc->dev, "vqmmc-supply",
2682 &mmc->vqmmc_supply);
2683 if (ret)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002684 pr_debug("%s: No vqmmc supply\n", mmc->dev->name);
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002685#endif
2686#else /* !CONFIG_DM_MMC */
2687 /*
2688 * Driver model should use a regulator, as above, rather than calling
2689 * out to board code.
2690 */
2691 board_mmc_power_init();
2692#endif
2693 return 0;
2694}
2695
2696/*
2697 * put the host in the initial state:
2698 * - turn on Vdd (card power supply)
2699 * - configure the bus width and clock to minimal values
2700 */
2701static void mmc_set_initial_state(struct mmc *mmc)
2702{
2703 int err;
2704
2705 /* First try to set 3.3V. If it fails set to 1.8V */
2706 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_330);
2707 if (err != 0)
2708 err = mmc_set_signal_voltage(mmc, MMC_SIGNAL_VOLTAGE_180);
2709 if (err != 0)
Jean-Jacques Hiblot678b6082017-11-30 17:44:00 +01002710 pr_warn("mmc: failed to set signal voltage\n");
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002711
2712 mmc_select_mode(mmc, MMC_LEGACY);
2713 mmc_set_bus_width(mmc, 1);
Jaehoon Chung239cb2f2018-01-26 19:25:29 +09002714 mmc_set_clock(mmc, 0, MMC_CLK_ENABLE);
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002715}
Peng Fan15305962016-10-11 15:08:43 +08002716
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002717static int mmc_power_on(struct mmc *mmc)
2718{
2719#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
Jean-Jacques Hiblota49ffa12017-09-21 16:29:48 +02002720 if (mmc->vmmc_supply) {
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002721 int ret = regulator_set_enable(mmc->vmmc_supply, true);
2722
Jaehoon Chungc71c95c2020-11-06 20:30:41 +09002723 if (ret && ret != -EACCES) {
Jaehoon Chungad9f7ce2020-11-17 07:04:59 +09002724 printf("Error enabling VMMC supply : %d\n", ret);
Jean-Jacques Hiblota49ffa12017-09-21 16:29:48 +02002725 return ret;
2726 }
Peng Fan15305962016-10-11 15:08:43 +08002727 }
2728#endif
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002729 return 0;
2730}
2731
2732static int mmc_power_off(struct mmc *mmc)
2733{
Jaehoon Chung239cb2f2018-01-26 19:25:29 +09002734 mmc_set_clock(mmc, 0, MMC_CLK_DISABLE);
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002735#if CONFIG_IS_ENABLED(DM_MMC) && CONFIG_IS_ENABLED(DM_REGULATOR)
2736 if (mmc->vmmc_supply) {
2737 int ret = regulator_set_enable(mmc->vmmc_supply, false);
2738
Jaehoon Chungc71c95c2020-11-06 20:30:41 +09002739 if (ret && ret != -EACCES) {
Jaehoon Chungad9f7ce2020-11-17 07:04:59 +09002740 pr_debug("Error disabling VMMC supply : %d\n", ret);
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002741 return ret;
2742 }
2743 }
Simon Glass833b80d2017-04-22 19:10:56 -06002744#endif
Peng Fan15305962016-10-11 15:08:43 +08002745 return 0;
2746}
2747
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002748static int mmc_power_cycle(struct mmc *mmc)
2749{
2750 int ret;
2751
2752 ret = mmc_power_off(mmc);
2753 if (ret)
2754 return ret;
Yann Gautier6f558332019-09-19 17:56:12 +02002755
2756 ret = mmc_host_power_cycle(mmc);
2757 if (ret)
2758 return ret;
2759
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002760 /*
2761 * SD spec recommends at least 1ms of delay. Let's wait for 2ms
 2762	 * to be on the safe side.
2763 */
2764 udelay(2000);
2765 return mmc_power_on(mmc);
2766}
2767
Jon Nettleton2663fe42018-06-11 15:26:19 +03002768int mmc_get_op_cond(struct mmc *mmc)
Andy Flemingad347bb2008-10-30 16:41:01 -05002769{
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02002770 bool uhs_en = supports_uhs(mmc->cfg->host_caps);
Macpaul Lin028bde12011-11-14 23:35:39 +00002771 int err;
Andy Flemingad347bb2008-10-30 16:41:01 -05002772
Lei Wen31b99802011-05-02 16:26:26 +00002773 if (mmc->has_init)
2774 return 0;
2775
Peng Fan15305962016-10-11 15:08:43 +08002776 err = mmc_power_init(mmc);
2777 if (err)
2778 return err;
Paul Kocialkowski2439fe92014-11-08 20:55:45 +01002779
Kishon Vijay Abraham I07baaa62017-09-21 16:30:10 +02002780#ifdef CONFIG_MMC_QUIRKS
2781 mmc->quirks = MMC_QUIRK_RETRY_SET_BLOCKLEN |
Joel Johnson5ea041b2020-01-11 09:08:14 -07002782 MMC_QUIRK_RETRY_SEND_CID |
2783 MMC_QUIRK_RETRY_APP_CMD;
Kishon Vijay Abraham I07baaa62017-09-21 16:30:10 +02002784#endif
2785
Jean-Jacques Hiblotdc030fb2017-09-21 16:30:08 +02002786 err = mmc_power_cycle(mmc);
2787 if (err) {
2788 /*
2789 * if power cycling is not supported, we should not try
2790 * to use the UHS modes, because we wouldn't be able to
2791 * recover from an error during the UHS initialization.
2792 */
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002793 pr_debug("Unable to do a full power cycle. Disabling the UHS modes for safety\n");
Jean-Jacques Hiblotdc030fb2017-09-21 16:30:08 +02002794 uhs_en = false;
2795 mmc->host_caps &= ~UHS_CAPS;
2796 err = mmc_power_on(mmc);
2797 }
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002798 if (err)
2799 return err;
2800
Simon Glasseba48f92017-07-29 11:35:31 -06002801#if CONFIG_IS_ENABLED(DM_MMC)
Yangbo Luc46f5d72020-09-01 16:57:59 +08002802 /*
2803 * Re-initialization is needed to clear old configuration for
2804 * mmc rescan.
2805 */
2806 err = mmc_reinit(mmc);
Simon Glass394dfc02016-06-12 23:30:22 -06002807#else
Pantelis Antoniouc9e75912014-02-26 19:28:45 +02002808 /* made sure it's not NULL earlier */
Pantelis Antoniou2c850462014-03-11 19:34:20 +02002809 err = mmc->cfg->ops->init(mmc);
Yangbo Luc46f5d72020-09-01 16:57:59 +08002810#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05002811 if (err)
2812 return err;
Andrew Gabbasov9fc2a412014-12-01 06:59:09 -06002813 mmc->ddr_mode = 0;
Kishon Vijay Abraham I4afb12b2017-09-21 16:30:00 +02002814
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02002815retry:
Kishon Vijay Abraham I80b87e12017-09-21 16:30:02 +02002816 mmc_set_initial_state(mmc);
Jean-Jacques Hiblot5f23d872017-09-21 16:30:01 +02002817
Andy Flemingad347bb2008-10-30 16:41:01 -05002818 /* Reset the Card */
2819 err = mmc_go_idle(mmc);
2820
2821 if (err)
2822 return err;
2823
Marcel Ziswilerb2b7fc82019-05-20 02:44:53 +02002824	/* The internal partition is reset to the user partition (0) on every CMD0 */
Simon Glasse5db1152016-05-01 13:52:35 -06002825 mmc_get_blk_desc(mmc)->hwpart = 0;
Lei Wen31b99802011-05-02 16:26:26 +00002826
Andy Flemingad347bb2008-10-30 16:41:01 -05002827 /* Test for SD version 2 */
Macpaul Lin028bde12011-11-14 23:35:39 +00002828 err = mmc_send_if_cond(mmc);
Andy Flemingad347bb2008-10-30 16:41:01 -05002829
Andy Flemingad347bb2008-10-30 16:41:01 -05002830 /* Now try to get the SD card's operating condition */
Jean-Jacques Hiblotf4d5b3e2017-09-21 16:30:07 +02002831 err = sd_send_op_cond(mmc, uhs_en);
2832 if (err && uhs_en) {
2833 uhs_en = false;
2834 mmc_power_cycle(mmc);
2835 goto retry;
2836 }
Andy Flemingad347bb2008-10-30 16:41:01 -05002837
2838 /* If the command timed out, we check for an MMC card */
Jaehoon Chung7825d202016-07-19 16:33:36 +09002839 if (err == -ETIMEDOUT) {
Andy Flemingad347bb2008-10-30 16:41:01 -05002840 err = mmc_send_op_cond(mmc);
2841
Andrew Gabbasov3a669bc2015-03-19 07:44:07 -05002842 if (err) {
Paul Burton6a7c5ba2013-09-04 16:12:25 +01002843#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
Jaehoon Chungad9f7ce2020-11-17 07:04:59 +09002844 pr_err("Card did not respond to voltage select! : %d\n", err);
Paul Burton6a7c5ba2013-09-04 16:12:25 +01002845#endif
Jaehoon Chung7825d202016-07-19 16:33:36 +09002846 return -EOPNOTSUPP;
Andy Flemingad347bb2008-10-30 16:41:01 -05002847 }
2848 }
2849
Jon Nettleton2663fe42018-06-11 15:26:19 +03002850 return err;
2851}
2852
2853int mmc_start_init(struct mmc *mmc)
2854{
2855 bool no_card;
2856 int err = 0;
2857
2858 /*
 2859	 * All hosts are capable of 1-bit bus width and can use the legacy
2860 * timings.
2861 */
Faiz Abbas01db77e2020-02-26 13:44:32 +05302862	mmc->host_caps = mmc->cfg->host_caps | MMC_CAP(MMC_LEGACY) |
Jon Nettleton2663fe42018-06-11 15:26:19 +03002863			 MMC_MODE_1BIT;
Faiz Abbasf6fd4ec2020-02-26 13:44:30 +05302864#if CONFIG_IS_ENABLED(DM_MMC)
2865 mmc_deferred_probe(mmc);
2866#endif
Jon Nettleton2663fe42018-06-11 15:26:19 +03002867#if !defined(CONFIG_MMC_BROKEN_CD)
Jon Nettleton2663fe42018-06-11 15:26:19 +03002868 no_card = mmc_getcd(mmc) == 0;
2869#else
2870 no_card = 0;
2871#endif
2872#if !CONFIG_IS_ENABLED(DM_MMC)
Baruch Siach0448ce62019-07-22 15:52:12 +03002873 /* we pretend there's no card when init is NULL */
Jon Nettleton2663fe42018-06-11 15:26:19 +03002874 no_card = no_card || (mmc->cfg->ops->init == NULL);
2875#endif
2876 if (no_card) {
2877 mmc->has_init = 0;
2878#if !defined(CONFIG_SPL_BUILD) || defined(CONFIG_SPL_LIBCOMMON_SUPPORT)
2879 pr_err("MMC: no card present\n");
2880#endif
2881 return -ENOMEDIUM;
2882 }
2883
2884 err = mmc_get_op_cond(mmc);
2885
Andrew Gabbasov3a669bc2015-03-19 07:44:07 -05002886 if (!err)
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002887 mmc->init_in_progress = 1;
2888
2889 return err;
2890}
2891
2892static int mmc_complete_init(struct mmc *mmc)
2893{
2894 int err = 0;
2895
Andrew Gabbasov3a669bc2015-03-19 07:44:07 -05002896 mmc->init_in_progress = 0;
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002897 if (mmc->op_cond_pending)
2898 err = mmc_complete_op_cond(mmc);
2899
2900 if (!err)
2901 err = mmc_startup(mmc);
Lei Wen31b99802011-05-02 16:26:26 +00002902 if (err)
2903 mmc->has_init = 0;
2904 else
2905 mmc->has_init = 1;
2906 return err;
Andy Flemingad347bb2008-10-30 16:41:01 -05002907}
2908
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002909int mmc_init(struct mmc *mmc)
2910{
Andrew Gabbasov3a669bc2015-03-19 07:44:07 -05002911 int err = 0;
Vipul Kumardbad7b42018-05-03 12:20:54 +05302912 __maybe_unused ulong start;
Simon Glass5f4bd8c2017-07-04 13:31:19 -06002913#if CONFIG_IS_ENABLED(DM_MMC)
Simon Glass59bc6f22016-05-01 13:52:41 -06002914 struct mmc_uclass_priv *upriv = dev_get_uclass_priv(mmc->dev);
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002915
Simon Glass59bc6f22016-05-01 13:52:41 -06002916 upriv->mmc = mmc;
2917#endif
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002918 if (mmc->has_init)
2919 return 0;
Mateusz Zalegada351782014-04-29 20:15:30 +02002920
2921 start = get_timer(0);
2922
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002923 if (!mmc->init_in_progress)
2924 err = mmc_start_init(mmc);
2925
Andrew Gabbasov3a669bc2015-03-19 07:44:07 -05002926 if (!err)
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002927 err = mmc_complete_init(mmc);
Jagan Teki9bee2b52017-01-10 11:18:43 +01002928 if (err)
Masahiro Yamadaf97b1482018-01-28 19:11:42 +09002929 pr_info("%s: %d, time %lu\n", __func__, err, get_timer(start));
Jagan Teki9bee2b52017-01-10 11:18:43 +01002930
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002931 return err;
2932}
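
/*
 * A minimal usage sketch for the functions above: look a device up by
 * number and initialize it before the first access. find_mmc_device() and
 * the capacity field are part of the existing MMC API; device number 0 is
 * only an example.
 *
 *	struct mmc *mmc = find_mmc_device(0);
 *
 *	if (mmc && !mmc_init(mmc))
 *		printf("MMC capacity: %llu bytes\n", mmc->capacity);
 */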
2933
Marek Vasuta4773fc2019-01-29 04:45:51 +01002934#if CONFIG_IS_ENABLED(MMC_UHS_SUPPORT) || \
2935 CONFIG_IS_ENABLED(MMC_HS200_SUPPORT) || \
2936 CONFIG_IS_ENABLED(MMC_HS400_SUPPORT)
2937int mmc_deinit(struct mmc *mmc)
2938{
2939 u32 caps_filtered;
2940
2941 if (!mmc->has_init)
2942 return 0;
2943
2944 if (IS_SD(mmc)) {
2945 caps_filtered = mmc->card_caps &
2946 ~(MMC_CAP(UHS_SDR12) | MMC_CAP(UHS_SDR25) |
2947 MMC_CAP(UHS_SDR50) | MMC_CAP(UHS_DDR50) |
2948 MMC_CAP(UHS_SDR104));
2949
2950 return sd_select_mode_and_width(mmc, caps_filtered);
2951 } else {
2952 caps_filtered = mmc->card_caps &
2953 ~(MMC_CAP(MMC_HS_200) | MMC_CAP(MMC_HS_400));
2954
2955 return mmc_select_mode_and_width(mmc, caps_filtered);
2956 }
2957}
2958#endif
2959
Markus Niebel03951412013-12-16 13:40:46 +01002960int mmc_set_dsr(struct mmc *mmc, u16 val)
2961{
2962 mmc->dsr = val;
2963 return 0;
2964}
2965
Jeroen Hofstee47726302014-07-10 22:46:28 +02002966/* CPU-specific MMC initializations */
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +09002967__weak int cpu_mmc_init(struct bd_info *bis)
Andy Flemingad347bb2008-10-30 16:41:01 -05002968{
2969 return -1;
2970}
2971
Jeroen Hofstee47726302014-07-10 22:46:28 +02002972/* board-specific MMC initializations. */
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +09002973__weak int board_mmc_init(struct bd_info *bis)
Jeroen Hofstee47726302014-07-10 22:46:28 +02002974{
2975 return -1;
2976}
Andy Flemingad347bb2008-10-30 16:41:01 -05002977
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00002978void mmc_set_preinit(struct mmc *mmc, int preinit)
2979{
2980 mmc->preinit = preinit;
2981}
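
/*
 * Usage note: a board or driver that wants a card brought up before its
 * first use can mark it with mmc_set_preinit(mmc, 1); mmc_do_preinit(),
 * called from mmc_initialize() below, then starts the initialization early
 * via mmc_start_init().
 */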
2982
Faiz Abbasb3857fd2018-02-12 19:35:24 +05302983#if CONFIG_IS_ENABLED(DM_MMC)
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +09002984static int mmc_probe(struct bd_info *bis)
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06002985{
Simon Glass547cb342015-12-29 05:22:49 -07002986 int ret, i;
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06002987 struct uclass *uc;
Simon Glass547cb342015-12-29 05:22:49 -07002988 struct udevice *dev;
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06002989
2990 ret = uclass_get(UCLASS_MMC, &uc);
2991 if (ret)
2992 return ret;
2993
Simon Glass547cb342015-12-29 05:22:49 -07002994 /*
2995 * Try to add them in sequence order. Really with driver model we
2996 * should allow holes, but the current MMC list does not allow that.
2997 * So if we request 0, 1, 3 we will get 0, 1, 2.
2998 */
2999 for (i = 0; ; i++) {
3000 ret = uclass_get_device_by_seq(UCLASS_MMC, i, &dev);
3001 if (ret == -ENODEV)
3002 break;
3003 }
3004 uclass_foreach_dev(dev, uc) {
3005 ret = device_probe(dev);
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06003006 if (ret)
Jean-Jacques Hiblot678b6082017-11-30 17:44:00 +01003007 pr_err("%s - probe failed: %d\n", dev->name, ret);
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06003008 }
3009
3010 return 0;
3011}
3012#else
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +09003013static int mmc_probe(struct bd_info *bis)
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06003014{
3015 if (board_mmc_init(bis) < 0)
3016 cpu_mmc_init(bis);
3017
3018 return 0;
3019}
3020#endif
Che-Liang Chiou4a2c7d72012-11-28 15:21:13 +00003021
Masahiro Yamadaf7ed78b2020-06-26 15:13:33 +09003022int mmc_initialize(struct bd_info *bis)
Andy Flemingad347bb2008-10-30 16:41:01 -05003023{
Daniel Kochmański13df57b2015-05-29 16:55:43 +02003024 static int initialized = 0;
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06003025 int ret;
Daniel Kochmański13df57b2015-05-29 16:55:43 +02003026 if (initialized) /* Avoid initializing mmc multiple times */
3027 return 0;
3028 initialized = 1;
3029
Simon Glass5f4bd8c2017-07-04 13:31:19 -06003030#if !CONFIG_IS_ENABLED(BLK)
Marek Vasutf537e392016-12-01 02:06:33 +01003031#if !CONFIG_IS_ENABLED(MMC_TINY)
Simon Glasse5db1152016-05-01 13:52:35 -06003032 mmc_list_init();
3033#endif
Marek Vasutf537e392016-12-01 02:06:33 +01003034#endif
Sjoerd Simonsdf8aa522015-08-30 16:55:45 -06003035 ret = mmc_probe(bis);
3036 if (ret)
3037 return ret;
Andy Flemingad347bb2008-10-30 16:41:01 -05003038
Ying Zhang9ff70262013-08-16 15:16:11 +08003039#ifndef CONFIG_SPL_BUILD
Andy Flemingad347bb2008-10-30 16:41:01 -05003040 print_mmc_devices(',');
Ying Zhang9ff70262013-08-16 15:16:11 +08003041#endif
Andy Flemingad347bb2008-10-30 16:41:01 -05003042
Simon Glasse5db1152016-05-01 13:52:35 -06003043 mmc_do_preinit();
Andy Flemingad347bb2008-10-30 16:41:01 -05003044 return 0;
3045}
Tomas Melinc17dae52016-11-25 11:01:03 +02003046
Lokesh Vutlac59b41c2019-09-09 14:40:36 +05303047#if CONFIG_IS_ENABLED(DM_MMC)
3048int mmc_init_device(int num)
3049{
3050 struct udevice *dev;
3051 struct mmc *m;
3052 int ret;
3053
3054 ret = uclass_get_device(UCLASS_MMC, num, &dev);
3055 if (ret)
3056 return ret;
3057
3058 m = mmc_get_mmc_dev(dev);
3059 if (!m)
3060 return 0;
Lokesh Vutlac59b41c2019-09-09 14:40:36 +05303061 if (m->preinit)
3062 mmc_start_init(m);
3063
3064 return 0;
3065}
3066#endif
3067
Tomas Melinc17dae52016-11-25 11:01:03 +02003068#ifdef CONFIG_CMD_BKOPS_ENABLE
3069int mmc_set_bkops_enable(struct mmc *mmc)
3070{
3071 int err;
3072 ALLOC_CACHE_ALIGN_BUFFER(u8, ext_csd, MMC_MAX_BLOCK_LEN);
3073
3074 err = mmc_send_ext_csd(mmc, ext_csd);
3075 if (err) {
3076 puts("Could not get ext_csd register values\n");
3077 return err;
3078 }
3079
3080 if (!(ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1)) {
3081 puts("Background operations not supported on device\n");
3082 return -EMEDIUMTYPE;
3083 }
3084
3085 if (ext_csd[EXT_CSD_BKOPS_EN] & 0x1) {
3086 puts("Background operations already enabled\n");
3087 return 0;
3088 }
3089
3090 err = mmc_switch(mmc, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN, 1);
3091 if (err) {
3092 puts("Failed to enable manual background operations\n");
3093 return err;
3094 }
3095
3096 puts("Enabled manual background operations\n");
3097
3098 return 0;
3099}
3100#endif
David Woodhouse49fee032020-08-04 10:05:46 +01003101
3102__weak int mmc_get_env_dev(void)
3103{
3104#ifdef CONFIG_SYS_MMC_ENV_DEV
3105 return CONFIG_SYS_MMC_ENV_DEV;
3106#else
3107 return 0;
3108#endif
3109}
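
/*
 * Boards that choose the environment device at run time can override the
 * weak function above; a minimal sketch, with the device number purely
 * illustrative:
 *
 *	int mmc_get_env_dev(void)
 *	{
 *		// e.g. a board that keeps its environment on eMMC (device 1)
 *		return 1;
 *	}
 */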