blob: 72cc24f40376eb95349c3984f0c32bb12ebb5a97 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roesed351b2b2006-10-10 12:36:02 +02002/*
Miquel Raynal1f1ae152018-08-16 17:30:07 +02003 * drivers/mtd/nand/raw/nand_util.c
Stefan Roesed351b2b2006-10-10 12:36:02 +02004 *
5 * Copyright (C) 2006 by Weiss-Electronic GmbH.
6 * All rights reserved.
7 *
8 * @author: Guido Classen <clagix@gmail.com>
9 * @descr: NAND Flash support
10 * @references: borrowed heavily from Linux mtd-utils code:
11 * flash_eraseall.c by Arcom Control System Ltd
12 * nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com)
13 * and Thomas Gleixner (tglx@linutronix.de)
14 *
Ben Gardiner34dd5672011-06-14 16:35:06 -040015 * Copyright (C) 2008 Nokia Corporation: drop_ffs() function by
16 * Artem Bityutskiy <dedekind1@gmail.com> from mtd-utils
17 *
Tom Rinib7bef6a2013-10-31 09:24:00 -040018 * Copyright 2010 Freescale Semiconductor
Stefan Roesed351b2b2006-10-10 12:36:02 +020019 */
20
21#include <common.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020022#include <command.h>
Simon Glass0f2af882020-05-10 11:40:05 -060023#include <log.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020024#include <watchdog.h>
25#include <malloc.h>
Simon Glassa87fc0a2015-09-02 17:24:57 -060026#include <memalign.h>
Dirk Behme32d1f762007-08-02 17:42:08 +020027#include <div64.h>
Simon Glass274e0b02020-05-10 11:39:56 -060028#include <asm/cache.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070029#include <dm/devres.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020030
Masahiro Yamada56a931c2016-09-21 11:28:55 +090031#include <linux/errno.h>
William Juul52c07962007-10-31 13:53:06 +010032#include <linux/mtd/mtd.h>
Tom Rini3bde7e22021-09-22 14:50:35 -040033#include <linux/mtd/rawnand.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020034#include <nand.h>
35#include <jffs2/jffs2.h>
36
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +000037typedef struct erase_info erase_info_t;
38typedef struct mtd_info mtd_info_t;
Stefan Roesed351b2b2006-10-10 12:36:02 +020039
40/* support only for native endian JFFS2 */
41#define cpu_to_je16(x) (x)
42#define cpu_to_je32(x) (x)
43
Stefan Roesed351b2b2006-10-10 12:36:02 +020044/**
45 * nand_erase_opts: - erase NAND flash with support for various options
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +000046 * (jffs2 formatting)
Stefan Roesed351b2b2006-10-10 12:36:02 +020047 *
Scott Wood08364d92016-05-30 13:57:54 -050048 * @param mtd nand mtd instance to erase
Stefan Roesed351b2b2006-10-10 12:36:02 +020049 * @param opts options, @see struct nand_erase_options
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +010050 * Return: 0 in case of success
Stefan Roesed351b2b2006-10-10 12:36:02 +020051 *
52 * This code is ported from flash_eraseall.c from Linux mtd utils by
53 * Arcom Control System Ltd.
54 */
int nand_erase_opts(struct mtd_info *mtd,
		    const nand_erase_options_t *opts)
{
	struct jffs2_unknown_node cleanmarker;
	erase_info_t erase;
	unsigned long erase_length, erased_length; /* in blocks */
	int result;
	int percent_complete = -1;
	const char *mtd_device = mtd->name;
	struct mtd_oob_ops oob_opts;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Erase must start on an erase-block boundary */
	if ((opts->offset & (mtd->erasesize - 1)) != 0) {
		printf("Attempt to erase non block-aligned data\n");
		return -1;
	}

	memset(&erase, 0, sizeof(erase));
	memset(&oob_opts, 0, sizeof(oob_opts));

	erase.mtd = mtd;
	erase.len = mtd->erasesize;
	erase.addr = opts->offset;
	/* round the requested length up to whole erase blocks */
	erase_length = lldiv(opts->length + mtd->erasesize - 1,
			     mtd->erasesize);

	/* JFFS2 cleanmarker node, written to OOB when opts->jffs2 is set */
	cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
	cleanmarker.totlen = cpu_to_je32(8);

	/* scrub option allows to erase badblock. To prevent internal
	 * check from erase() method, set block check method to dummy
	 * and disable bad block table while erasing.
	 */
	if (opts->scrub) {
		erase.scrub = opts->scrub;
		/*
		 * We don't need the bad block table anymore...
		 * after scrub, there are no bad blocks left!
		 */
		if (chip->bbt) {
			kfree(chip->bbt);
		}
		chip->bbt = NULL;
		/* force the BBT to be rebuilt on next access */
		chip->options &= ~NAND_BBT_SCANNED;
	}

	for (erased_length = 0;
	     erased_length < erase_length;
	     erase.addr += mtd->erasesize) {

		/* keep the watchdog alive during long erases */
		schedule();

		if (opts->lim && (erase.addr >= (opts->offset + opts->lim))) {
			puts("Size of erase exceeds limit\n");
			return -EFBIG;
		}
		if (!opts->scrub) {
			int ret = mtd_block_isbad(mtd, erase.addr);
			if (ret > 0) {
				/* bad or reserved block: skip it */
				if (!opts->quiet)
					printf("\rSkipping %s at "
					       "0x%08llx "
					       "                 \n",
					       ret == 1 ? "bad block" : "bbt reserved",
					       erase.addr);

				/*
				 * Unless "spread" mode is requested, a
				 * skipped block still counts towards the
				 * requested erase length.
				 */
				if (!opts->spread)
					erased_length++;

				continue;

			} else if (ret < 0) {
				printf("\n%s: MTD get bad block failed: %d\n",
				       mtd_device,
				       ret);
				return -1;
			}
		}

		erased_length++;

		result = mtd_erase(mtd, &erase);
		if (result != 0) {
			/* report the failure but keep erasing the rest */
			printf("\n%s: MTD Erase failure: %d\n",
			       mtd_device, result);
			continue;
		}

		/* format for JFFS2 ? */
		if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) {
			struct mtd_oob_ops ops;
			ops.ooblen = 8;
			ops.datbuf = NULL;
			ops.oobbuf = (uint8_t *)&cleanmarker;
			ops.ooboffs = 0;
			ops.mode = MTD_OPS_AUTO_OOB;

			result = mtd_write_oob(mtd, erase.addr, &ops);
			if (result != 0) {
				printf("\n%s: MTD writeoob failure: %d\n",
				       mtd_device, result);
				continue;
			}
		}

		if (!opts->quiet) {
			unsigned long long n = erased_length * 100ULL;
			int percent;

			do_div(n, erase_length);
			percent = (int)n;

			/* output progress message only at whole percent
			 * steps to reduce the number of messages printed
			 * on (slow) serial consoles
			 */
			if (percent != percent_complete) {
				percent_complete = percent;

				printf("\rErasing at 0x%llx -- %3d%% complete.",
				       erase.addr, percent);

				if (opts->jffs2 && result == 0)
					printf(" Cleanmarker written at 0x%llx.",
					       erase.addr);
			}
		}
	}
	if (!opts->quiet)
		printf("\n");

	return 0;
}
189
Nishanth Menonb20f8402008-12-13 09:43:06 -0600190#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK
191
Heiko Schocherf5895d12014-06-24 10:10:04 +0200192#define NAND_CMD_LOCK_TIGHT 0x2c
193#define NAND_CMD_LOCK_STATUS 0x7a
Wolfgang Denk9d328a62021-09-27 17:42:38 +0200194
Stefan Roesed351b2b2006-10-10 12:36:02 +0200195/******************************************************************************
196 * Support for locking / unlocking operations of some NAND devices
197 *****************************************************************************/
198
Stefan Roesed351b2b2006-10-10 12:36:02 +0200199/**
200 * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT
201 * state
202 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600203 * @param mtd nand mtd instance
Stefan Roesed351b2b2006-10-10 12:36:02 +0200204 * @param tight bring device in lock tight mode
205 *
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100206 * Return: 0 on success, -1 in case of error
Stefan Roesed351b2b2006-10-10 12:36:02 +0200207 *
208 * The lock / lock-tight command only applies to the whole chip. To get some
209 * parts of the chip lock and others unlocked use the following sequence:
210 *
211 * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin)
212 * - Call nand_unlock() once for each consecutive area to be unlocked
213 * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1)
214 *
215 * If the device is in lock-tight state software can't change the
216 * current active lock/unlock state of all pages. nand_lock() / nand_unlock()
217 * calls will fail. It is only posible to leave lock-tight state by
218 * an hardware signal (low pulse on _WP pin) or by power down.
219 */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600220int nand_lock(struct mtd_info *mtd, int tight)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200221{
222 int ret = 0;
223 int status;
Scott Wood17fed142016-05-30 13:57:56 -0500224 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200225
226 /* select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600227 chip->select_chip(mtd, 0);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200228
Joe Hershberger8b177d42013-02-08 09:27:19 +0000229 /* check the Lock Tight Status */
230 chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, 0);
231 if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
232 printf("nand_lock: Device is locked tight!\n");
233 ret = -1;
234 goto out;
235 }
236
Nishanth Menonb20f8402008-12-13 09:43:06 -0600237 chip->cmdfunc(mtd,
Stefan Roesed351b2b2006-10-10 12:36:02 +0200238 (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK),
239 -1, -1);
240
241 /* call wait ready function */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600242 status = chip->waitfunc(mtd, chip);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200243
244 /* see if device thinks it succeeded */
245 if (status & 0x01) {
246 ret = -1;
247 }
248
Joe Hershberger8b177d42013-02-08 09:27:19 +0000249 out:
Stefan Roesed351b2b2006-10-10 12:36:02 +0200250 /* de-select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600251 chip->select_chip(mtd, -1);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200252 return ret;
253}
254
255/**
256 * nand_get_lock_status: - query current lock state from one page of NAND
257 * flash
258 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600259 * @param mtd nand mtd instance
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000260 * @param offset page address to query (must be page-aligned!)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200261 *
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100262 * Return: -1 in case of error
Stefan Roesed351b2b2006-10-10 12:36:02 +0200263 * >0 lock status:
264 * bitfield with the following combinations:
265 * NAND_LOCK_STATUS_TIGHT: page in tight state
Stefan Roesed351b2b2006-10-10 12:36:02 +0200266 * NAND_LOCK_STATUS_UNLOCK: page unlocked
267 *
268 */
Jean-Christophe PLAGNIOL-VILLARD2511ba02009-05-16 14:27:40 +0200269int nand_get_lock_status(struct mtd_info *mtd, loff_t offset)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200270{
271 int ret = 0;
272 int chipnr;
273 int page;
Scott Wood17fed142016-05-30 13:57:56 -0500274 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200275
276 /* select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600277 chipnr = (int)(offset >> chip->chip_shift);
278 chip->select_chip(mtd, chipnr);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200279
280
Nishanth Menonb20f8402008-12-13 09:43:06 -0600281 if ((offset & (mtd->writesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000282 printf("nand_get_lock_status: "
Stefan Roesed351b2b2006-10-10 12:36:02 +0200283 "Start address must be beginning of "
284 "nand page!\n");
285 ret = -1;
286 goto out;
287 }
288
289 /* check the Lock Status */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600290 page = (int)(offset >> chip->page_shift);
291 chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200292
Nishanth Menonb20f8402008-12-13 09:43:06 -0600293 ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT
Stefan Roesed351b2b2006-10-10 12:36:02 +0200294 | NAND_LOCK_STATUS_UNLOCK);
295
296 out:
297 /* de-select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600298 chip->select_chip(mtd, -1);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200299 return ret;
300}
301
302/**
303 * nand_unlock: - Unlock area of NAND pages
304 * only one consecutive area can be unlocked at one time!
305 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600306 * @param mtd nand mtd instance
Stefan Roesed351b2b2006-10-10 12:36:02 +0200307 * @param start start byte address
308 * @param length number of bytes to unlock (must be a multiple of
Scott Wood08364d92016-05-30 13:57:54 -0500309 * page size mtd->writesize)
Joe Hershbergercccf5952012-08-22 16:49:42 -0500310 * @param allexcept if set, unlock everything not selected
Stefan Roesed351b2b2006-10-10 12:36:02 +0200311 *
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100312 * Return: 0 on success, -1 in case of error
Stefan Roesed351b2b2006-10-10 12:36:02 +0200313 */
int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length,
		int allexcept)
{
	int ret = 0;
	int chipnr;
	int status;
	int page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	debug("nand_unlock%s: start: %08llx, length: %zd!\n",
	      allexcept ? " (allexcept)" : "", start, length);

	/* select the NAND device containing the start address */
	chipnr = (int)(start >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* check the WP bit: unlocking is impossible while write protected */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) {
		printf("nand_unlock: Device is write protected!\n");
		ret = -1;
		goto out;
	}

	/* check the Lock Tight Status: lock-tight blocks all changes */
	page = (int)(start >> chip->page_shift);
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
		printf("nand_unlock: Device is locked tight!\n");
		ret = -1;
		goto out;
	}

	/* start must sit on an erase-block boundary */
	if ((start & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Start address must be beginning of "
		       "nand block!\n");
		ret = -1;
		goto out;
	}

	/* length must be a non-zero multiple of the erase-block size */
	if (length == 0 || (length & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Length must be a multiple of nand block "
		       "size %08x!\n", mtd->erasesize);
		ret = -1;
		goto out;
	}

	/*
	 * Set length so that the last address is set to the
	 * starting address of the last block
	 */
	length -= mtd->erasesize;

	/* submit address of first page to unlock */
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);

	/* submit ADDRESS of LAST page to unlock */
	page += (int)(length >> chip->page_shift);

	/*
	 * Page addresses for unlocking are supposed to be block-aligned.
	 * At least some NAND chips use the low bit to indicate that the
	 * page range should be inverted.
	 */
	if (allexcept)
		page |= 1;

	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask);

	/* call wait ready function */
	status = chip->waitfunc(mtd, chip);
	/* see if device thinks it succeeded (bit 0 set on failure) */
	if (status & 0x01) {
		/* there was an error */
		ret = -1;
		goto out;
	}

 out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
William Juul52c07962007-10-31 13:53:06 +0100397#endif
Stefan Roesed351b2b2006-10-10 12:36:02 +0200398
Scott Woodcc5f3392008-06-12 13:20:16 -0500399/**
Scott Woodfe3b5e12010-07-30 16:11:41 -0500400 * check_skip_len
Scott Woodcc5f3392008-06-12 13:20:16 -0500401 *
Scott Woodfe3b5e12010-07-30 16:11:41 -0500402 * Check if there are any bad blocks, and whether length including bad
403 * blocks fits into device
Scott Woodcc5f3392008-06-12 13:20:16 -0500404 *
Scott Wood08364d92016-05-30 13:57:54 -0500405 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500406 * @param offset offset in flash
407 * @param length image length
Tom Rini32d96182013-03-14 05:32:50 +0000408 * @param used length of flash needed for the requested length
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100409 * Return: 0 if the image fits and there are no bad blocks
Scott Woodfe3b5e12010-07-30 16:11:41 -0500410 * 1 if the image fits, but there are bad blocks
411 * -1 if the image does not fit
Scott Woodcc5f3392008-06-12 13:20:16 -0500412 */
Scott Wood08364d92016-05-30 13:57:54 -0500413static int check_skip_len(struct mtd_info *mtd, loff_t offset, size_t length,
414 size_t *used)
Scott Woodcc5f3392008-06-12 13:20:16 -0500415{
Scott Woodcc5f3392008-06-12 13:20:16 -0500416 size_t len_excl_bad = 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500417 int ret = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500418
419 while (len_excl_bad < length) {
Scott Woodfe3b5e12010-07-30 16:11:41 -0500420 size_t block_len, block_off;
421 loff_t block_start;
Scott Woodcc5f3392008-06-12 13:20:16 -0500422
Scott Wood08364d92016-05-30 13:57:54 -0500423 if (offset >= mtd->size)
Scott Woodfe3b5e12010-07-30 16:11:41 -0500424 return -1;
Scott Woodcc5f3392008-06-12 13:20:16 -0500425
Scott Wood08364d92016-05-30 13:57:54 -0500426 block_start = offset & ~(loff_t)(mtd->erasesize - 1);
427 block_off = offset & (mtd->erasesize - 1);
428 block_len = mtd->erasesize - block_off;
Scott Woodcc5f3392008-06-12 13:20:16 -0500429
Scott Wood08364d92016-05-30 13:57:54 -0500430 if (!nand_block_isbad(mtd, block_start))
Scott Woodfe3b5e12010-07-30 16:11:41 -0500431 len_excl_bad += block_len;
432 else
433 ret = 1;
434
435 offset += block_len;
Tom Rini32d96182013-03-14 05:32:50 +0000436 *used += block_len;
Scott Woodcc5f3392008-06-12 13:20:16 -0500437 }
438
Tom Rini32d96182013-03-14 05:32:50 +0000439 /* If the length is not a multiple of block_len, adjust. */
440 if (len_excl_bad > length)
441 *used -= (len_excl_bad - length);
442
Scott Woodfe3b5e12010-07-30 16:11:41 -0500443 return ret;
Scott Woodcc5f3392008-06-12 13:20:16 -0500444}
Ben Gardiner34dd5672011-06-14 16:35:06 -0400445
#ifdef CONFIG_CMD_NAND_TRIMFFS
static size_t drop_ffs(const struct mtd_info *mtd, const u_char *buf,
		       const size_t *len)
{
	size_t trimmed = *len;

	/* strip trailing 0xFF bytes (erased flash content) */
	while (trimmed > 0 && buf[trimmed - 1] == 0xFF)
		trimmed--;

	/* The resulting length must be aligned to the minimum flash I/O size */
	trimmed = (trimmed + mtd->writesize - 1) / mtd->writesize;
	trimmed *= mtd->writesize;

	/*
	 * since the input length may be unaligned, prevent access past the end
	 * of the buffer
	 */
	return min(trimmed, *len);
}
#endif
Scott Woodcc5f3392008-06-12 13:20:16 -0500469
470/**
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600471 * nand_verify_page_oob:
472 *
473 * Verify a page of NAND flash, including the OOB.
474 * Reads page of NAND and verifies the contents and OOB against the
475 * values in ops.
476 *
Scott Wood08364d92016-05-30 13:57:54 -0500477 * @param mtd nand mtd instance
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600478 * @param ops MTD operations, including data to verify
479 * @param ofs offset in flash
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100480 * Return: 0 in case of success
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600481 */
Scott Wood08364d92016-05-30 13:57:54 -0500482int nand_verify_page_oob(struct mtd_info *mtd, struct mtd_oob_ops *ops,
483 loff_t ofs)
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600484{
485 int rval;
486 struct mtd_oob_ops vops;
Scott Wood08364d92016-05-30 13:57:54 -0500487 size_t verlen = mtd->writesize + mtd->oobsize;
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600488
489 memcpy(&vops, ops, sizeof(vops));
490
Stephen Warren12db8182015-04-14 08:59:00 -0600491 vops.datbuf = memalign(ARCH_DMA_MINALIGN, verlen);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600492
493 if (!vops.datbuf)
494 return -ENOMEM;
495
Scott Wood08364d92016-05-30 13:57:54 -0500496 vops.oobbuf = vops.datbuf + mtd->writesize;
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600497
Scott Wood08364d92016-05-30 13:57:54 -0500498 rval = mtd_read_oob(mtd, ofs, &vops);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600499 if (!rval)
500 rval = memcmp(ops->datbuf, vops.datbuf, vops.len);
501 if (!rval)
502 rval = memcmp(ops->oobbuf, vops.oobbuf, vops.ooblen);
503
504 free(vops.datbuf);
505
506 return rval ? -EIO : 0;
507}
508
509/**
510 * nand_verify:
511 *
512 * Verify a region of NAND flash.
513 * Reads NAND in page-sized chunks and verifies the contents against
514 * the contents of a buffer. The offset into the NAND must be
515 * page-aligned, and the function doesn't handle skipping bad blocks.
516 *
Scott Wood08364d92016-05-30 13:57:54 -0500517 * @param mtd nand mtd instance
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600518 * @param ofs offset in flash
519 * @param len buffer length
520 * @param buf buffer to read from
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100521 * Return: 0 in case of success
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600522 */
Scott Wood08364d92016-05-30 13:57:54 -0500523int nand_verify(struct mtd_info *mtd, loff_t ofs, size_t len, u_char *buf)
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600524{
525 int rval = 0;
526 size_t verofs;
Scott Wood08364d92016-05-30 13:57:54 -0500527 size_t verlen = mtd->writesize;
Stephen Warren12db8182015-04-14 08:59:00 -0600528 uint8_t *verbuf = memalign(ARCH_DMA_MINALIGN, verlen);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600529
530 if (!verbuf)
531 return -ENOMEM;
532
533 /* Read the NAND back in page-size groups to limit malloc size */
534 for (verofs = ofs; verofs < ofs + len;
535 verofs += verlen, buf += verlen) {
Scott Wood08364d92016-05-30 13:57:54 -0500536 verlen = min(mtd->writesize, (uint32_t)(ofs + len - verofs));
537 rval = nand_read(mtd, verofs, &verlen, verbuf);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600538 if (!rval || (rval == -EUCLEAN))
539 rval = memcmp(buf, verbuf, verlen);
540
541 if (rval)
542 break;
543 }
544
545 free(verbuf);
546
547 return rval ? -EIO : 0;
548}
549
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600550/**
Scott Woodcc5f3392008-06-12 13:20:16 -0500551 * nand_write_skip_bad:
552 *
553 * Write image to NAND flash.
554 * Blocks that are marked bad are skipped and the is written to the next
555 * block instead as long as the image is short enough to fit even after
Tom Rini32d96182013-03-14 05:32:50 +0000556 * skipping the bad blocks. Due to bad blocks we may not be able to
557 * perform the requested write. In the case where the write would
558 * extend beyond the end of the NAND device, both length and actual (if
559 * not NULL) are set to 0. In the case where the write would extend
560 * beyond the limit we are passed, length is set to 0 and actual is set
561 * to the required length.
Scott Woodcc5f3392008-06-12 13:20:16 -0500562 *
Scott Wood08364d92016-05-30 13:57:54 -0500563 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500564 * @param offset offset in flash
565 * @param length buffer length
Tom Rini32d96182013-03-14 05:32:50 +0000566 * @param actual set to size required to write length worth of
567 * buffer or 0 on error, if not NULL
568 * @param lim maximum size that actual may be in order to not
569 * exceed the buffer
Lei Wen4b5deaa2011-01-06 11:11:58 +0800570 * @param buffer buffer to read from
Ben Gardiner1caafbb2011-05-24 10:18:35 -0400571 * @param flags flags modifying the behaviour of the write to NAND
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100572 * Return: 0 in case of success
Scott Woodcc5f3392008-06-12 13:20:16 -0500573 */
int nand_write_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
			size_t *actual, loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	size_t used_for_write = 0;
	u_char *p_buffer = buffer;
	int need_skip;

	if (actual)
		*actual = 0;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	/* find out how much flash (including bad blocks) the write needs */
	need_skip = check_skip_len(mtd, offset, *length, &used_for_write);

	if (actual)
		*actual = used_for_write;

	if (need_skip < 0) {
		printf("Attempt to write outside the flash area\n");
		*length = 0;
		return -EINVAL;
	}

	if (used_for_write > lim) {
		puts("Size of write exceeds partition or device limit\n");
		*length = 0;
		return -EFBIG;
	}

	/* fast path: no bad blocks and no trailing-0xFF trimming needed */
	if (!need_skip && !(flags & WITH_DROP_FFS)) {
		rval = nand_write(mtd, offset, length, buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset, *length, buffer);

		if (rval == 0)
			return 0;

		*length = 0;
		printf("NAND write to offset %llx failed %d\n",
		       offset, rval);
		return rval;
	}

	/* slow path: write block by block, skipping bad blocks */
	while (left_to_write > 0) {
		loff_t block_start = offset & ~(loff_t)(mtd->erasesize - 1);
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;

		/* keep the watchdog alive during long writes */
		schedule();

		if (nand_block_isbad(mtd, block_start)) {
			printf("Skip bad block 0x%08llx\n", block_start);
			/* advance to the start of the next block */
			offset += mtd->erasesize - block_offset;
			continue;
		}

		/* write at most up to the end of the current block */
		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
#ifdef CONFIG_CMD_NAND_TRIMFFS
		/* optionally skip writing trailing 0xFF (erased) data */
		if (flags & WITH_DROP_FFS)
			truncated_write_size = drop_ffs(mtd, p_buffer,
							&write_size);
#endif

		rval = nand_write(mtd, offset, &truncated_write_size,
				  p_buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset,
					   truncated_write_size, p_buffer);

		/* advance by the untruncated size to keep buffer/flash in step */
		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}
684
685/**
686 * nand_read_skip_bad:
687 *
688 * Read image from NAND flash.
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000689 * Blocks that are marked bad are skipped and the next block is read
Tom Rini32d96182013-03-14 05:32:50 +0000690 * instead as long as the image is short enough to fit even after
691 * skipping the bad blocks. Due to bad blocks we may not be able to
692 * perform the requested read. In the case where the read would extend
693 * beyond the end of the NAND device, both length and actual (if not
694 * NULL) are set to 0. In the case where the read would extend beyond
695 * the limit we are passed, length is set to 0 and actual is set to the
696 * required length.
Scott Woodcc5f3392008-06-12 13:20:16 -0500697 *
Scott Wood08364d92016-05-30 13:57:54 -0500698 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500699 * @param offset offset in flash
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000700 * @param length buffer length, on return holds number of read bytes
Tom Rini32d96182013-03-14 05:32:50 +0000701 * @param actual set to size required to read length worth of buffer or 0
702 * on error, if not NULL
703 * @param lim maximum size that actual may be in order to not exceed the
704 * buffer
Scott Woodcc5f3392008-06-12 13:20:16 -0500705 * @param buffer buffer to write to
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100706 * Return: 0 in case of success
Scott Woodcc5f3392008-06-12 13:20:16 -0500707 */
Scott Wood08364d92016-05-30 13:57:54 -0500708int nand_read_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
709 size_t *actual, loff_t lim, u_char *buffer)
Scott Woodcc5f3392008-06-12 13:20:16 -0500710{
711 int rval;
712 size_t left_to_read = *length;
Tom Rini32d96182013-03-14 05:32:50 +0000713 size_t used_for_read = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500714 u_char *p_buffer = buffer;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500715 int need_skip;
Scott Woodcc5f3392008-06-12 13:20:16 -0500716
Scott Wood08364d92016-05-30 13:57:54 -0500717 if ((offset & (mtd->writesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000718 printf("Attempt to read non page-aligned data\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500719 *length = 0;
Tom Rini32d96182013-03-14 05:32:50 +0000720 if (actual)
721 *actual = 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500722 return -EINVAL;
723 }
Scott Woodcc5f3392008-06-12 13:20:16 -0500724
Scott Wood08364d92016-05-30 13:57:54 -0500725 need_skip = check_skip_len(mtd, offset, *length, &used_for_read);
Tom Rini32d96182013-03-14 05:32:50 +0000726
727 if (actual)
728 *actual = used_for_read;
729
Scott Woodfe3b5e12010-07-30 16:11:41 -0500730 if (need_skip < 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000731 printf("Attempt to read outside the flash area\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500732 *length = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500733 return -EINVAL;
734 }
735
Tom Rini32d96182013-03-14 05:32:50 +0000736 if (used_for_read > lim) {
737 puts("Size of read exceeds partition or device limit\n");
738 *length = 0;
739 return -EFBIG;
740 }
741
Scott Woodfe3b5e12010-07-30 16:11:41 -0500742 if (!need_skip) {
Scott Wood08364d92016-05-30 13:57:54 -0500743 rval = nand_read(mtd, offset, length, buffer);
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300744 if (!rval || rval == -EUCLEAN)
745 return 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500746
747 *length = 0;
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000748 printf("NAND read from offset %llx failed %d\n",
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300749 offset, rval);
Scott Wood90e0a6b2008-11-25 10:47:02 -0600750 return rval;
Scott Woodcc5f3392008-06-12 13:20:16 -0500751 }
752
753 while (left_to_read > 0) {
Scott Wood08364d92016-05-30 13:57:54 -0500754 size_t block_offset = offset & (mtd->erasesize - 1);
Scott Woodcc5f3392008-06-12 13:20:16 -0500755 size_t read_length;
756
Stefan Roese80877fa2022-09-02 14:10:46 +0200757 schedule();
Giulio Benetti749bd662009-07-31 17:30:34 -0500758
Scott Wood08364d92016-05-30 13:57:54 -0500759 if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000760 printf("Skipping bad block 0x%08llx\n",
Scott Wood08364d92016-05-30 13:57:54 -0500761 offset & ~(mtd->erasesize - 1));
762 offset += mtd->erasesize - block_offset;
Scott Woodcc5f3392008-06-12 13:20:16 -0500763 continue;
764 }
765
Scott Wood08364d92016-05-30 13:57:54 -0500766 if (left_to_read < (mtd->erasesize - block_offset))
Scott Woodcc5f3392008-06-12 13:20:16 -0500767 read_length = left_to_read;
768 else
Scott Wood08364d92016-05-30 13:57:54 -0500769 read_length = mtd->erasesize - block_offset;
Scott Woodcc5f3392008-06-12 13:20:16 -0500770
Scott Wood08364d92016-05-30 13:57:54 -0500771 rval = nand_read(mtd, offset, &read_length, p_buffer);
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300772 if (rval && rval != -EUCLEAN) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000773 printf("NAND read from offset %llx failed %d\n",
Wolfgang Denk74e0dde2008-08-14 14:41:06 +0200774 offset, rval);
Scott Woodcc5f3392008-06-12 13:20:16 -0500775 *length -= left_to_read;
776 return rval;
777 }
778
779 left_to_read -= read_length;
780 offset += read_length;
781 p_buffer += read_length;
782 }
783
784 return 0;
785}
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100786
787#ifdef CONFIG_CMD_NAND_TORTURE
788
789/**
790 * check_pattern:
791 *
792 * Check if buffer contains only a certain byte pattern.
793 *
794 * @param buf buffer to check
795 * @param patt the pattern to check
796 * @param size buffer size in bytes
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100797 * Return: 1 if there are only patt bytes in buf
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100798 * 0 if something else was found
799 */
800static int check_pattern(const u_char *buf, u_char patt, int size)
801{
802 int i;
803
804 for (i = 0; i < size; i++)
805 if (buf[i] != patt)
806 return 0;
807 return 1;
808}
809
810/**
811 * nand_torture:
812 *
813 * Torture a block of NAND flash.
814 * This is useful to determine if a block that caused a write error is still
815 * good or should be marked as bad.
816 *
Scott Wood08364d92016-05-30 13:57:54 -0500817 * @param mtd nand mtd instance
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100818 * @param offset offset in flash
Heinrich Schuchardt47b4c022022-01-19 18:05:50 +0100819 * Return: 0 if the block is still good
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100820 */
Scott Wood08364d92016-05-30 13:57:54 -0500821int nand_torture(struct mtd_info *mtd, loff_t offset)
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100822{
823 u_char patterns[] = {0xa5, 0x5a, 0x00};
824 struct erase_info instr = {
Max Krummenacherf3178d72016-06-13 10:15:47 +0200825 .mtd = mtd,
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100826 .addr = offset,
Scott Wood08364d92016-05-30 13:57:54 -0500827 .len = mtd->erasesize,
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100828 };
829 size_t retlen;
830 int err, ret = -1, i, patt_count;
831 u_char *buf;
832
Scott Wood08364d92016-05-30 13:57:54 -0500833 if ((offset & (mtd->erasesize - 1)) != 0) {
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100834 puts("Attempt to torture a block at a non block-aligned offset\n");
835 return -EINVAL;
836 }
837
Scott Wood08364d92016-05-30 13:57:54 -0500838 if (offset + mtd->erasesize > mtd->size) {
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100839 puts("Attempt to torture a block outside the flash area\n");
840 return -EINVAL;
841 }
842
843 patt_count = ARRAY_SIZE(patterns);
844
Scott Wood08364d92016-05-30 13:57:54 -0500845 buf = malloc_cache_aligned(mtd->erasesize);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100846 if (buf == NULL) {
847 puts("Out of memory for erase block buffer\n");
848 return -ENOMEM;
849 }
850
851 for (i = 0; i < patt_count; i++) {
Max Krummenachera21c4482016-05-30 16:28:28 +0200852 err = mtd_erase(mtd, &instr);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100853 if (err) {
854 printf("%s: erase() failed for block at 0x%llx: %d\n",
Scott Wood08364d92016-05-30 13:57:54 -0500855 mtd->name, instr.addr, err);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100856 goto out;
857 }
858
859 /* Make sure the block contains only 0xff bytes */
Max Krummenachera21c4482016-05-30 16:28:28 +0200860 err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
Scott Wood08364d92016-05-30 13:57:54 -0500861 if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100862 printf("%s: read() failed for block at 0x%llx: %d\n",
Scott Wood08364d92016-05-30 13:57:54 -0500863 mtd->name, instr.addr, err);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100864 goto out;
865 }
866
Scott Wood08364d92016-05-30 13:57:54 -0500867 err = check_pattern(buf, 0xff, mtd->erasesize);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100868 if (!err) {
869 printf("Erased block at 0x%llx, but a non-0xff byte was found\n",
870 offset);
871 ret = -EIO;
872 goto out;
873 }
874
875 /* Write a pattern and check it */
Scott Wood08364d92016-05-30 13:57:54 -0500876 memset(buf, patterns[i], mtd->erasesize);
Max Krummenachera21c4482016-05-30 16:28:28 +0200877 err = mtd_write(mtd, offset, mtd->erasesize, &retlen, buf);
Scott Wood08364d92016-05-30 13:57:54 -0500878 if (err || retlen != mtd->erasesize) {
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100879 printf("%s: write() failed for block at 0x%llx: %d\n",
Scott Wood08364d92016-05-30 13:57:54 -0500880 mtd->name, instr.addr, err);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100881 goto out;
882 }
883
Max Krummenachera21c4482016-05-30 16:28:28 +0200884 err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
Scott Wood08364d92016-05-30 13:57:54 -0500885 if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100886 printf("%s: read() failed for block at 0x%llx: %d\n",
Scott Wood08364d92016-05-30 13:57:54 -0500887 mtd->name, instr.addr, err);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100888 goto out;
889 }
890
Scott Wood08364d92016-05-30 13:57:54 -0500891 err = check_pattern(buf, patterns[i], mtd->erasesize);
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100892 if (!err) {
893 printf("Pattern 0x%.2x checking failed for block at "
894 "0x%llx\n", patterns[i], offset);
895 ret = -EIO;
896 goto out;
897 }
898 }
899
900 ret = 0;
901
902out:
903 free(buf);
904 return ret;
905}
906
907#endif