blob: 9c8a37397013d7e3cd482c9deeed612788f82dc6 [file] [log] [blame]
/*
 * drivers/mtd/nand/nand_util.c
 *
 * Copyright (C) 2006 by Weiss-Electronic GmbH.
 * All rights reserved.
 *
 * @author:	Guido Classen <clagix@gmail.com>
 * @descr:	NAND Flash support
 * @references: borrowed heavily from Linux mtd-utils code:
 *		flash_eraseall.c by Arcom Control System Ltd
 *		nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com)
 *		and Thomas Gleixner (tglx@linutronix.de)
 *
 * Copyright (C) 2008 Nokia Corporation: drop_ffs() function by
 * Artem Bityutskiy <dedekind1@gmail.com> from mtd-utils
 *
 * Copyright 2010 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0
 */
21
22#include <common.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020023#include <command.h>
24#include <watchdog.h>
25#include <malloc.h>
Simon Glassa87fc0a2015-09-02 17:24:57 -060026#include <memalign.h>
Dirk Behme32d1f762007-08-02 17:42:08 +020027#include <div64.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020028
Masahiro Yamada56a931c2016-09-21 11:28:55 +090029#include <linux/errno.h>
William Juul52c07962007-10-31 13:53:06 +010030#include <linux/mtd/mtd.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020031#include <nand.h>
32#include <jffs2/jffs2.h>
33
/* Short local aliases for the MTD core structures used throughout. */
typedef struct erase_info	erase_info_t;
typedef struct mtd_info		mtd_info_t;

/* support only for native endian JFFS2 */
#define cpu_to_je16(x) (x)
#define cpu_to_je32(x) (x)
Stefan Roesed351b2b2006-10-10 12:36:02 +020041/**
42 * nand_erase_opts: - erase NAND flash with support for various options
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +000043 * (jffs2 formatting)
Stefan Roesed351b2b2006-10-10 12:36:02 +020044 *
Scott Wood08364d92016-05-30 13:57:54 -050045 * @param mtd nand mtd instance to erase
Stefan Roesed351b2b2006-10-10 12:36:02 +020046 * @param opts options, @see struct nand_erase_options
47 * @return 0 in case of success
48 *
49 * This code is ported from flash_eraseall.c from Linux mtd utils by
50 * Arcom Control System Ltd.
51 */
Scott Wood08364d92016-05-30 13:57:54 -050052int nand_erase_opts(struct mtd_info *mtd,
53 const nand_erase_options_t *opts)
Stefan Roesed351b2b2006-10-10 12:36:02 +020054{
55 struct jffs2_unknown_node cleanmarker;
Stefan Roesed351b2b2006-10-10 12:36:02 +020056 erase_info_t erase;
Scott Wood1b5cd512010-08-25 14:43:29 -050057 unsigned long erase_length, erased_length; /* in blocks */
Stefan Roesed351b2b2006-10-10 12:36:02 +020058 int result;
59 int percent_complete = -1;
Scott Wood08364d92016-05-30 13:57:54 -050060 const char *mtd_device = mtd->name;
William Juul52c07962007-10-31 13:53:06 +010061 struct mtd_oob_ops oob_opts;
Scott Wood17fed142016-05-30 13:57:56 -050062 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roesed351b2b2006-10-10 12:36:02 +020063
Scott Wood08364d92016-05-30 13:57:54 -050064 if ((opts->offset & (mtd->erasesize - 1)) != 0) {
Benoît Thébaudeauad5d6ee2012-11-05 10:16:15 +000065 printf("Attempt to erase non block-aligned data\n");
Scott Wood1b5cd512010-08-25 14:43:29 -050066 return -1;
67 }
68
Stefan Roesed351b2b2006-10-10 12:36:02 +020069 memset(&erase, 0, sizeof(erase));
William Juul52c07962007-10-31 13:53:06 +010070 memset(&oob_opts, 0, sizeof(oob_opts));
Stefan Roesed351b2b2006-10-10 12:36:02 +020071
Scott Wood08364d92016-05-30 13:57:54 -050072 erase.mtd = mtd;
73 erase.len = mtd->erasesize;
Stefan Roese198b23e2006-10-28 15:55:52 +020074 erase.addr = opts->offset;
Scott Wood08364d92016-05-30 13:57:54 -050075 erase_length = lldiv(opts->length + mtd->erasesize - 1,
76 mtd->erasesize);
Stefan Roesed351b2b2006-10-10 12:36:02 +020077
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +000078 cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
79 cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
William Juul52c07962007-10-31 13:53:06 +010080 cleanmarker.totlen = cpu_to_je32(8);
Stefan Roesed351b2b2006-10-10 12:36:02 +020081
82 /* scrub option allows to erase badblock. To prevent internal
83 * check from erase() method, set block check method to dummy
84 * and disable bad block table while erasing.
85 */
86 if (opts->scrub) {
Marek Vasut971d9a12011-09-12 06:04:06 +020087 erase.scrub = opts->scrub;
88 /*
89 * We don't need the bad block table anymore...
Stefan Roesed351b2b2006-10-10 12:36:02 +020090 * after scrub, there are no bad blocks left!
91 */
Marek Vasut971d9a12011-09-12 06:04:06 +020092 if (chip->bbt) {
93 kfree(chip->bbt);
Stefan Roesed351b2b2006-10-10 12:36:02 +020094 }
Marek Vasut971d9a12011-09-12 06:04:06 +020095 chip->bbt = NULL;
Masahiro Yamada8d100542014-12-26 22:20:58 +090096 chip->options &= ~NAND_BBT_SCANNED;
Stefan Roesed351b2b2006-10-10 12:36:02 +020097 }
98
Scott Wood1b5cd512010-08-25 14:43:29 -050099 for (erased_length = 0;
100 erased_length < erase_length;
Scott Wood08364d92016-05-30 13:57:54 -0500101 erase.addr += mtd->erasesize) {
William Juulb76ec382007-11-08 10:39:53 +0100102
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000103 WATCHDOG_RESET();
Stefan Roesed351b2b2006-10-10 12:36:02 +0200104
Heiko Schocher0ae98b42013-06-24 18:50:40 +0200105 if (opts->lim && (erase.addr >= (opts->offset + opts->lim))) {
106 puts("Size of erase exceeds limit\n");
107 return -EFBIG;
108 }
Masahiro Yamada7580d582013-07-12 10:53:37 +0900109 if (!opts->scrub) {
Scott Wood08364d92016-05-30 13:57:54 -0500110 int ret = mtd_block_isbad(mtd, erase.addr);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200111 if (ret > 0) {
112 if (!opts->quiet)
113 printf("\rSkipping bad block at "
Stefan Roese586b3a62009-05-11 16:03:55 +0200114 "0x%08llx "
Wolfgang Denkd5cf1a42006-10-12 11:43:47 +0200115 " \n",
116 erase.addr);
Scott Wood1b5cd512010-08-25 14:43:29 -0500117
118 if (!opts->spread)
119 erased_length++;
120
Stefan Roesed351b2b2006-10-10 12:36:02 +0200121 continue;
122
123 } else if (ret < 0) {
124 printf("\n%s: MTD get bad block failed: %d\n",
125 mtd_device,
126 ret);
127 return -1;
128 }
129 }
130
Scott Wood1b5cd512010-08-25 14:43:29 -0500131 erased_length++;
132
Scott Wood08364d92016-05-30 13:57:54 -0500133 result = mtd_erase(mtd, &erase);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200134 if (result != 0) {
135 printf("\n%s: MTD Erase failure: %d\n",
136 mtd_device, result);
137 continue;
138 }
139
140 /* format for JFFS2 ? */
Scott Woodd50ad352008-10-29 14:20:26 -0500141 if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) {
Sergey Lapin3a38a552013-01-14 03:46:50 +0000142 struct mtd_oob_ops ops;
143 ops.ooblen = 8;
144 ops.datbuf = NULL;
145 ops.oobbuf = (uint8_t *)&cleanmarker;
146 ops.ooboffs = 0;
147 ops.mode = MTD_OPS_AUTO_OOB;
William Juulb76ec382007-11-08 10:39:53 +0100148
Scott Wood08364d92016-05-30 13:57:54 -0500149 result = mtd_write_oob(mtd, erase.addr, &ops);
William Juul52c07962007-10-31 13:53:06 +0100150 if (result != 0) {
151 printf("\n%s: MTD writeoob failure: %d\n",
Scott Woodd50ad352008-10-29 14:20:26 -0500152 mtd_device, result);
William Juul52c07962007-10-31 13:53:06 +0100153 continue;
Stefan Roesed351b2b2006-10-10 12:36:02 +0200154 }
155 }
156
157 if (!opts->quiet) {
Scott Wood1b5cd512010-08-25 14:43:29 -0500158 unsigned long long n = erased_length * 100ULL;
Matthias Fuchs82714b92007-09-11 17:04:00 +0200159 int percent;
160
161 do_div(n, erase_length);
162 percent = (int)n;
Stefan Roesed351b2b2006-10-10 12:36:02 +0200163
164 /* output progress message only at whole percent
165 * steps to reduce the number of messages printed
166 * on (slow) serial consoles
167 */
168 if (percent != percent_complete) {
169 percent_complete = percent;
170
Stefan Roese586b3a62009-05-11 16:03:55 +0200171 printf("\rErasing at 0x%llx -- %3d%% complete.",
Scott Woodd50ad352008-10-29 14:20:26 -0500172 erase.addr, percent);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200173
174 if (opts->jffs2 && result == 0)
Stefan Roese586b3a62009-05-11 16:03:55 +0200175 printf(" Cleanmarker written at 0x%llx.",
Scott Woodd50ad352008-10-29 14:20:26 -0500176 erase.addr);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200177 }
178 }
179 }
180 if (!opts->quiet)
181 printf("\n");
182
Stefan Roesed351b2b2006-10-10 12:36:02 +0200183 return 0;
184}
185
Nishanth Menonb20f8402008-12-13 09:43:06 -0600186#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK
187
Heiko Schocherf5895d12014-06-24 10:10:04 +0200188#define NAND_CMD_LOCK_TIGHT 0x2c
189#define NAND_CMD_LOCK_STATUS 0x7a
190
Stefan Roesed351b2b2006-10-10 12:36:02 +0200191/******************************************************************************
192 * Support for locking / unlocking operations of some NAND devices
193 *****************************************************************************/
194
Stefan Roesed351b2b2006-10-10 12:36:02 +0200195/**
196 * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT
197 * state
198 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600199 * @param mtd nand mtd instance
Stefan Roesed351b2b2006-10-10 12:36:02 +0200200 * @param tight bring device in lock tight mode
201 *
202 * @return 0 on success, -1 in case of error
203 *
204 * The lock / lock-tight command only applies to the whole chip. To get some
205 * parts of the chip lock and others unlocked use the following sequence:
206 *
207 * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin)
208 * - Call nand_unlock() once for each consecutive area to be unlocked
209 * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1)
210 *
211 * If the device is in lock-tight state software can't change the
212 * current active lock/unlock state of all pages. nand_lock() / nand_unlock()
213 * calls will fail. It is only posible to leave lock-tight state by
214 * an hardware signal (low pulse on _WP pin) or by power down.
215 */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600216int nand_lock(struct mtd_info *mtd, int tight)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200217{
218 int ret = 0;
219 int status;
Scott Wood17fed142016-05-30 13:57:56 -0500220 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200221
222 /* select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600223 chip->select_chip(mtd, 0);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200224
Joe Hershberger8b177d42013-02-08 09:27:19 +0000225 /* check the Lock Tight Status */
226 chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, 0);
227 if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
228 printf("nand_lock: Device is locked tight!\n");
229 ret = -1;
230 goto out;
231 }
232
Nishanth Menonb20f8402008-12-13 09:43:06 -0600233 chip->cmdfunc(mtd,
Stefan Roesed351b2b2006-10-10 12:36:02 +0200234 (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK),
235 -1, -1);
236
237 /* call wait ready function */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600238 status = chip->waitfunc(mtd, chip);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200239
240 /* see if device thinks it succeeded */
241 if (status & 0x01) {
242 ret = -1;
243 }
244
Joe Hershberger8b177d42013-02-08 09:27:19 +0000245 out:
Stefan Roesed351b2b2006-10-10 12:36:02 +0200246 /* de-select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600247 chip->select_chip(mtd, -1);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200248 return ret;
249}
250
251/**
252 * nand_get_lock_status: - query current lock state from one page of NAND
253 * flash
254 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600255 * @param mtd nand mtd instance
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000256 * @param offset page address to query (must be page-aligned!)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200257 *
258 * @return -1 in case of error
259 * >0 lock status:
260 * bitfield with the following combinations:
261 * NAND_LOCK_STATUS_TIGHT: page in tight state
Stefan Roesed351b2b2006-10-10 12:36:02 +0200262 * NAND_LOCK_STATUS_UNLOCK: page unlocked
263 *
264 */
Jean-Christophe PLAGNIOL-VILLARD2511ba02009-05-16 14:27:40 +0200265int nand_get_lock_status(struct mtd_info *mtd, loff_t offset)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200266{
267 int ret = 0;
268 int chipnr;
269 int page;
Scott Wood17fed142016-05-30 13:57:56 -0500270 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200271
272 /* select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600273 chipnr = (int)(offset >> chip->chip_shift);
274 chip->select_chip(mtd, chipnr);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200275
276
Nishanth Menonb20f8402008-12-13 09:43:06 -0600277 if ((offset & (mtd->writesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000278 printf("nand_get_lock_status: "
Stefan Roesed351b2b2006-10-10 12:36:02 +0200279 "Start address must be beginning of "
280 "nand page!\n");
281 ret = -1;
282 goto out;
283 }
284
285 /* check the Lock Status */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600286 page = (int)(offset >> chip->page_shift);
287 chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200288
Nishanth Menonb20f8402008-12-13 09:43:06 -0600289 ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT
Stefan Roesed351b2b2006-10-10 12:36:02 +0200290 | NAND_LOCK_STATUS_UNLOCK);
291
292 out:
293 /* de-select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600294 chip->select_chip(mtd, -1);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200295 return ret;
296}
297
298/**
299 * nand_unlock: - Unlock area of NAND pages
300 * only one consecutive area can be unlocked at one time!
301 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600302 * @param mtd nand mtd instance
Stefan Roesed351b2b2006-10-10 12:36:02 +0200303 * @param start start byte address
304 * @param length number of bytes to unlock (must be a multiple of
Scott Wood08364d92016-05-30 13:57:54 -0500305 * page size mtd->writesize)
Joe Hershbergercccf5952012-08-22 16:49:42 -0500306 * @param allexcept if set, unlock everything not selected
Stefan Roesed351b2b2006-10-10 12:36:02 +0200307 *
308 * @return 0 on success, -1 in case of error
309 */
Joe Hershbergerdfa8ba42012-08-22 16:49:43 -0500310int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length,
311 int allexcept)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200312{
313 int ret = 0;
314 int chipnr;
315 int status;
316 int page;
Scott Wood17fed142016-05-30 13:57:56 -0500317 struct nand_chip *chip = mtd_to_nand(mtd);
Joe Hershbergercccf5952012-08-22 16:49:42 -0500318
Tom Rini8e40bf62013-12-16 09:59:34 -0500319 debug("nand_unlock%s: start: %08llx, length: %zd!\n",
Joe Hershbergercccf5952012-08-22 16:49:42 -0500320 allexcept ? " (allexcept)" : "", start, length);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200321
322 /* select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600323 chipnr = (int)(start >> chip->chip_shift);
324 chip->select_chip(mtd, chipnr);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200325
326 /* check the WP bit */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600327 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
328 if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000329 printf("nand_unlock: Device is write protected!\n");
Stefan Roesed351b2b2006-10-10 12:36:02 +0200330 ret = -1;
331 goto out;
332 }
333
Joe Hershberger8b177d42013-02-08 09:27:19 +0000334 /* check the Lock Tight Status */
335 page = (int)(start >> chip->page_shift);
336 chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
337 if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
338 printf("nand_unlock: Device is locked tight!\n");
339 ret = -1;
340 goto out;
341 }
342
Nishanth Menonb20f8402008-12-13 09:43:06 -0600343 if ((start & (mtd->erasesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000344 printf("nand_unlock: Start address must be beginning of "
Nishanth Menonb20f8402008-12-13 09:43:06 -0600345 "nand block!\n");
Stefan Roesed351b2b2006-10-10 12:36:02 +0200346 ret = -1;
347 goto out;
348 }
349
Nishanth Menonb20f8402008-12-13 09:43:06 -0600350 if (length == 0 || (length & (mtd->erasesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000351 printf("nand_unlock: Length must be a multiple of nand block "
Nishanth Menonb20f8402008-12-13 09:43:06 -0600352 "size %08x!\n", mtd->erasesize);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200353 ret = -1;
354 goto out;
355 }
356
Nishanth Menonb20f8402008-12-13 09:43:06 -0600357 /*
358 * Set length so that the last address is set to the
359 * starting address of the last block
360 */
361 length -= mtd->erasesize;
362
Stefan Roesed351b2b2006-10-10 12:36:02 +0200363 /* submit address of first page to unlock */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600364 chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200365
366 /* submit ADDRESS of LAST page to unlock */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600367 page += (int)(length >> chip->page_shift);
Joe Hershbergercccf5952012-08-22 16:49:42 -0500368
369 /*
370 * Page addresses for unlocking are supposed to be block-aligned.
371 * At least some NAND chips use the low bit to indicate that the
372 * page range should be inverted.
373 */
374 if (allexcept)
375 page |= 1;
376
Nishanth Menonb20f8402008-12-13 09:43:06 -0600377 chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200378
379 /* call wait ready function */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600380 status = chip->waitfunc(mtd, chip);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200381 /* see if device thinks it succeeded */
382 if (status & 0x01) {
383 /* there was an error */
384 ret = -1;
385 goto out;
386 }
387
388 out:
389 /* de-select the NAND device */
Nishanth Menonb20f8402008-12-13 09:43:06 -0600390 chip->select_chip(mtd, -1);
Stefan Roesed351b2b2006-10-10 12:36:02 +0200391 return ret;
392}
William Juul52c07962007-10-31 13:53:06 +0100393#endif
Stefan Roesed351b2b2006-10-10 12:36:02 +0200394
Scott Woodcc5f3392008-06-12 13:20:16 -0500395/**
Scott Woodfe3b5e12010-07-30 16:11:41 -0500396 * check_skip_len
Scott Woodcc5f3392008-06-12 13:20:16 -0500397 *
Scott Woodfe3b5e12010-07-30 16:11:41 -0500398 * Check if there are any bad blocks, and whether length including bad
399 * blocks fits into device
Scott Woodcc5f3392008-06-12 13:20:16 -0500400 *
Scott Wood08364d92016-05-30 13:57:54 -0500401 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500402 * @param offset offset in flash
403 * @param length image length
Tom Rini32d96182013-03-14 05:32:50 +0000404 * @param used length of flash needed for the requested length
Scott Woodfe3b5e12010-07-30 16:11:41 -0500405 * @return 0 if the image fits and there are no bad blocks
406 * 1 if the image fits, but there are bad blocks
407 * -1 if the image does not fit
Scott Woodcc5f3392008-06-12 13:20:16 -0500408 */
Scott Wood08364d92016-05-30 13:57:54 -0500409static int check_skip_len(struct mtd_info *mtd, loff_t offset, size_t length,
410 size_t *used)
Scott Woodcc5f3392008-06-12 13:20:16 -0500411{
Scott Woodcc5f3392008-06-12 13:20:16 -0500412 size_t len_excl_bad = 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500413 int ret = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500414
415 while (len_excl_bad < length) {
Scott Woodfe3b5e12010-07-30 16:11:41 -0500416 size_t block_len, block_off;
417 loff_t block_start;
Scott Woodcc5f3392008-06-12 13:20:16 -0500418
Scott Wood08364d92016-05-30 13:57:54 -0500419 if (offset >= mtd->size)
Scott Woodfe3b5e12010-07-30 16:11:41 -0500420 return -1;
Scott Woodcc5f3392008-06-12 13:20:16 -0500421
Scott Wood08364d92016-05-30 13:57:54 -0500422 block_start = offset & ~(loff_t)(mtd->erasesize - 1);
423 block_off = offset & (mtd->erasesize - 1);
424 block_len = mtd->erasesize - block_off;
Scott Woodcc5f3392008-06-12 13:20:16 -0500425
Scott Wood08364d92016-05-30 13:57:54 -0500426 if (!nand_block_isbad(mtd, block_start))
Scott Woodfe3b5e12010-07-30 16:11:41 -0500427 len_excl_bad += block_len;
428 else
429 ret = 1;
430
431 offset += block_len;
Tom Rini32d96182013-03-14 05:32:50 +0000432 *used += block_len;
Scott Woodcc5f3392008-06-12 13:20:16 -0500433 }
434
Tom Rini32d96182013-03-14 05:32:50 +0000435 /* If the length is not a multiple of block_len, adjust. */
436 if (len_excl_bad > length)
437 *used -= (len_excl_bad - length);
438
Scott Woodfe3b5e12010-07-30 16:11:41 -0500439 return ret;
Scott Woodcc5f3392008-06-12 13:20:16 -0500440}
Ben Gardiner34dd5672011-06-14 16:35:06 -0400441
442#ifdef CONFIG_CMD_NAND_TRIMFFS
Scott Wood08364d92016-05-30 13:57:54 -0500443static size_t drop_ffs(const struct mtd_info *mtd, const u_char *buf,
Ben Gardiner34dd5672011-06-14 16:35:06 -0400444 const size_t *len)
445{
htbegin610e8552013-03-01 23:00:34 +0000446 size_t l = *len;
447 ssize_t i;
Ben Gardiner34dd5672011-06-14 16:35:06 -0400448
449 for (i = l - 1; i >= 0; i--)
450 if (buf[i] != 0xFF)
451 break;
452
453 /* The resulting length must be aligned to the minimum flash I/O size */
454 l = i + 1;
Scott Wood08364d92016-05-30 13:57:54 -0500455 l = (l + mtd->writesize - 1) / mtd->writesize;
456 l *= mtd->writesize;
Ben Gardiner34dd5672011-06-14 16:35:06 -0400457
458 /*
459 * since the input length may be unaligned, prevent access past the end
460 * of the buffer
461 */
462 return min(l, *len);
463}
464#endif
Scott Woodcc5f3392008-06-12 13:20:16 -0500465
466/**
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600467 * nand_verify_page_oob:
468 *
469 * Verify a page of NAND flash, including the OOB.
470 * Reads page of NAND and verifies the contents and OOB against the
471 * values in ops.
472 *
Scott Wood08364d92016-05-30 13:57:54 -0500473 * @param mtd nand mtd instance
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600474 * @param ops MTD operations, including data to verify
475 * @param ofs offset in flash
476 * @return 0 in case of success
477 */
Scott Wood08364d92016-05-30 13:57:54 -0500478int nand_verify_page_oob(struct mtd_info *mtd, struct mtd_oob_ops *ops,
479 loff_t ofs)
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600480{
481 int rval;
482 struct mtd_oob_ops vops;
Scott Wood08364d92016-05-30 13:57:54 -0500483 size_t verlen = mtd->writesize + mtd->oobsize;
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600484
485 memcpy(&vops, ops, sizeof(vops));
486
Stephen Warren12db8182015-04-14 08:59:00 -0600487 vops.datbuf = memalign(ARCH_DMA_MINALIGN, verlen);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600488
489 if (!vops.datbuf)
490 return -ENOMEM;
491
Scott Wood08364d92016-05-30 13:57:54 -0500492 vops.oobbuf = vops.datbuf + mtd->writesize;
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600493
Scott Wood08364d92016-05-30 13:57:54 -0500494 rval = mtd_read_oob(mtd, ofs, &vops);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600495 if (!rval)
496 rval = memcmp(ops->datbuf, vops.datbuf, vops.len);
497 if (!rval)
498 rval = memcmp(ops->oobbuf, vops.oobbuf, vops.ooblen);
499
500 free(vops.datbuf);
501
502 return rval ? -EIO : 0;
503}
504
505/**
506 * nand_verify:
507 *
508 * Verify a region of NAND flash.
509 * Reads NAND in page-sized chunks and verifies the contents against
510 * the contents of a buffer. The offset into the NAND must be
511 * page-aligned, and the function doesn't handle skipping bad blocks.
512 *
Scott Wood08364d92016-05-30 13:57:54 -0500513 * @param mtd nand mtd instance
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600514 * @param ofs offset in flash
515 * @param len buffer length
516 * @param buf buffer to read from
517 * @return 0 in case of success
518 */
Scott Wood08364d92016-05-30 13:57:54 -0500519int nand_verify(struct mtd_info *mtd, loff_t ofs, size_t len, u_char *buf)
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600520{
521 int rval = 0;
522 size_t verofs;
Scott Wood08364d92016-05-30 13:57:54 -0500523 size_t verlen = mtd->writesize;
Stephen Warren12db8182015-04-14 08:59:00 -0600524 uint8_t *verbuf = memalign(ARCH_DMA_MINALIGN, verlen);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600525
526 if (!verbuf)
527 return -ENOMEM;
528
529 /* Read the NAND back in page-size groups to limit malloc size */
530 for (verofs = ofs; verofs < ofs + len;
531 verofs += verlen, buf += verlen) {
Scott Wood08364d92016-05-30 13:57:54 -0500532 verlen = min(mtd->writesize, (uint32_t)(ofs + len - verofs));
533 rval = nand_read(mtd, verofs, &verlen, verbuf);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600534 if (!rval || (rval == -EUCLEAN))
535 rval = memcmp(buf, verbuf, verlen);
536
537 if (rval)
538 break;
539 }
540
541 free(verbuf);
542
543 return rval ? -EIO : 0;
544}
545
546
547
548/**
Scott Woodcc5f3392008-06-12 13:20:16 -0500549 * nand_write_skip_bad:
550 *
551 * Write image to NAND flash.
552 * Blocks that are marked bad are skipped and the is written to the next
553 * block instead as long as the image is short enough to fit even after
Tom Rini32d96182013-03-14 05:32:50 +0000554 * skipping the bad blocks. Due to bad blocks we may not be able to
555 * perform the requested write. In the case where the write would
556 * extend beyond the end of the NAND device, both length and actual (if
557 * not NULL) are set to 0. In the case where the write would extend
558 * beyond the limit we are passed, length is set to 0 and actual is set
559 * to the required length.
Scott Woodcc5f3392008-06-12 13:20:16 -0500560 *
Scott Wood08364d92016-05-30 13:57:54 -0500561 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500562 * @param offset offset in flash
563 * @param length buffer length
Tom Rini32d96182013-03-14 05:32:50 +0000564 * @param actual set to size required to write length worth of
565 * buffer or 0 on error, if not NULL
566 * @param lim maximum size that actual may be in order to not
567 * exceed the buffer
Lei Wen4b5deaa2011-01-06 11:11:58 +0800568 * @param buffer buffer to read from
Ben Gardiner1caafbb2011-05-24 10:18:35 -0400569 * @param flags flags modifying the behaviour of the write to NAND
Scott Woodcc5f3392008-06-12 13:20:16 -0500570 * @return 0 in case of success
571 */
Scott Wood08364d92016-05-30 13:57:54 -0500572int nand_write_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
573 size_t *actual, loff_t lim, u_char *buffer, int flags)
Scott Woodcc5f3392008-06-12 13:20:16 -0500574{
Lei Wen4b5deaa2011-01-06 11:11:58 +0800575 int rval = 0, blocksize;
Scott Woodcc5f3392008-06-12 13:20:16 -0500576 size_t left_to_write = *length;
Tom Rini32d96182013-03-14 05:32:50 +0000577 size_t used_for_write = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500578 u_char *p_buffer = buffer;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500579 int need_skip;
Scott Woodcc5f3392008-06-12 13:20:16 -0500580
Tom Rini32d96182013-03-14 05:32:50 +0000581 if (actual)
582 *actual = 0;
583
Scott Wood08364d92016-05-30 13:57:54 -0500584 blocksize = mtd->erasesize;
Lei Wen4b5deaa2011-01-06 11:11:58 +0800585
Scott Woodfe3b5e12010-07-30 16:11:41 -0500586 /*
587 * nand_write() handles unaligned, partial page writes.
588 *
589 * We allow length to be unaligned, for convenience in
590 * using the $filesize variable.
591 *
592 * However, starting at an unaligned offset makes the
593 * semantics of bad block skipping ambiguous (really,
594 * you should only start a block skipping access at a
595 * partition boundary). So don't try to handle that.
596 */
Scott Wood08364d92016-05-30 13:57:54 -0500597 if ((offset & (mtd->writesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000598 printf("Attempt to write non page-aligned data\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500599 *length = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500600 return -EINVAL;
601 }
602
Scott Wood08364d92016-05-30 13:57:54 -0500603 need_skip = check_skip_len(mtd, offset, *length, &used_for_write);
Tom Rini32d96182013-03-14 05:32:50 +0000604
605 if (actual)
606 *actual = used_for_write;
607
Scott Woodfe3b5e12010-07-30 16:11:41 -0500608 if (need_skip < 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000609 printf("Attempt to write outside the flash area\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500610 *length = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500611 return -EINVAL;
612 }
613
Tom Rini32d96182013-03-14 05:32:50 +0000614 if (used_for_write > lim) {
615 puts("Size of write exceeds partition or device limit\n");
616 *length = 0;
617 return -EFBIG;
618 }
619
Ben Gardiner34dd5672011-06-14 16:35:06 -0400620 if (!need_skip && !(flags & WITH_DROP_FFS)) {
Scott Wood08364d92016-05-30 13:57:54 -0500621 rval = nand_write(mtd, offset, length, buffer);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600622
623 if ((flags & WITH_WR_VERIFY) && !rval)
Scott Wood08364d92016-05-30 13:57:54 -0500624 rval = nand_verify(mtd, offset, *length, buffer);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600625
Scott Woodfe3b5e12010-07-30 16:11:41 -0500626 if (rval == 0)
627 return 0;
Scott Wood90e0a6b2008-11-25 10:47:02 -0600628
Scott Woodfe3b5e12010-07-30 16:11:41 -0500629 *length = 0;
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000630 printf("NAND write to offset %llx failed %d\n",
Scott Woodfe3b5e12010-07-30 16:11:41 -0500631 offset, rval);
Scott Wood90e0a6b2008-11-25 10:47:02 -0600632 return rval;
Scott Woodcc5f3392008-06-12 13:20:16 -0500633 }
634
635 while (left_to_write > 0) {
Scott Wood08364d92016-05-30 13:57:54 -0500636 size_t block_offset = offset & (mtd->erasesize - 1);
Ben Gardiner34dd5672011-06-14 16:35:06 -0400637 size_t write_size, truncated_write_size;
Scott Woodcc5f3392008-06-12 13:20:16 -0500638
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000639 WATCHDOG_RESET();
Giulio Benetti749bd662009-07-31 17:30:34 -0500640
Scott Wood08364d92016-05-30 13:57:54 -0500641 if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000642 printf("Skip bad block 0x%08llx\n",
Scott Wood08364d92016-05-30 13:57:54 -0500643 offset & ~(mtd->erasesize - 1));
644 offset += mtd->erasesize - block_offset;
Scott Woodcc5f3392008-06-12 13:20:16 -0500645 continue;
646 }
647
Lei Wen4b5deaa2011-01-06 11:11:58 +0800648 if (left_to_write < (blocksize - block_offset))
Scott Woodcc5f3392008-06-12 13:20:16 -0500649 write_size = left_to_write;
650 else
Lei Wen4b5deaa2011-01-06 11:11:58 +0800651 write_size = blocksize - block_offset;
652
Peter Tyserae6ad782015-02-03 11:58:16 -0600653 truncated_write_size = write_size;
Ben Gardiner34dd5672011-06-14 16:35:06 -0400654#ifdef CONFIG_CMD_NAND_TRIMFFS
Peter Tyserae6ad782015-02-03 11:58:16 -0600655 if (flags & WITH_DROP_FFS)
Scott Wood08364d92016-05-30 13:57:54 -0500656 truncated_write_size = drop_ffs(mtd, p_buffer,
Peter Tyserae6ad782015-02-03 11:58:16 -0600657 &write_size);
Ben Gardiner34dd5672011-06-14 16:35:06 -0400658#endif
659
Scott Wood08364d92016-05-30 13:57:54 -0500660 rval = nand_write(mtd, offset, &truncated_write_size,
Peter Tyserae6ad782015-02-03 11:58:16 -0600661 p_buffer);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600662
Peter Tyserae6ad782015-02-03 11:58:16 -0600663 if ((flags & WITH_WR_VERIFY) && !rval)
Scott Wood08364d92016-05-30 13:57:54 -0500664 rval = nand_verify(mtd, offset,
Peter Tyserae6ad782015-02-03 11:58:16 -0600665 truncated_write_size, p_buffer);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600666
Peter Tyserae6ad782015-02-03 11:58:16 -0600667 offset += write_size;
668 p_buffer += write_size;
Lei Wen4b5deaa2011-01-06 11:11:58 +0800669
Scott Woodcc5f3392008-06-12 13:20:16 -0500670 if (rval != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000671 printf("NAND write to offset %llx failed %d\n",
Wolfgang Denk74e0dde2008-08-14 14:41:06 +0200672 offset, rval);
Scott Woodcc5f3392008-06-12 13:20:16 -0500673 *length -= left_to_write;
674 return rval;
675 }
676
677 left_to_write -= write_size;
Scott Woodcc5f3392008-06-12 13:20:16 -0500678 }
679
680 return 0;
681}
682
/**
 * nand_read_skip_bad:
 *
 * Read image from NAND flash.
 * Blocks that are marked bad are skipped and the next block is read
 * instead as long as the image is short enough to fit even after
 * skipping the bad blocks. Due to bad blocks we may not be able to
 * perform the requested read. In the case where the read would extend
 * beyond the end of the NAND device, both length and actual (if not
 * NULL) are set to 0. In the case where the read would extend beyond
 * the limit we are passed, length is set to 0 and actual is set to the
 * required length.
 *
 * @param mtd nand mtd instance
 * @param offset offset in flash (must be page-aligned)
 * @param length buffer length; on return holds number of read bytes
 * @param actual set to size required to read length worth of buffer or 0
 *		on error, if not NULL
 * @param lim maximum size that actual may be in order to not exceed the
 *		buffer
 * @param buffer buffer to write to
 * @return 0 in case of success, negative errno otherwise
 */
int nand_read_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
		       size_t *actual, loff_t lim, u_char *buffer)
{
	int rval;
	size_t left_to_read = *length;
	size_t used_for_read = 0;
	u_char *p_buffer = buffer;
	int need_skip;

	/* Reads must start on a page boundary */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to read non page-aligned data\n");
		*length = 0;
		if (actual)
			*actual = 0;
		return -EINVAL;
	}

	/*
	 * Determine whether any bad blocks lie in the requested span and
	 * how much flash (used_for_read) is actually consumed once bad
	 * blocks are skipped. need_skip < 0 means the span runs off the
	 * end of the device.
	 */
	need_skip = check_skip_len(mtd, offset, *length, &used_for_read);

	/* Report the required flash size even if we bail out below */
	if (actual)
		*actual = used_for_read;

	if (need_skip < 0) {
		printf("Attempt to read outside the flash area\n");
		*length = 0;
		return -EINVAL;
	}

	/* The skip-adjusted span must still fit within the caller's limit */
	if (used_for_read > lim) {
		puts("Size of read exceeds partition or device limit\n");
		*length = 0;
		return -EFBIG;
	}

	/* Fast path: no bad blocks in range, read everything in one go */
	if (!need_skip) {
		rval = nand_read(mtd, offset, length, buffer);
		/* -EUCLEAN only signals corrected bitflips, not data loss */
		if (!rval || rval == -EUCLEAN)
			return 0;

		*length = 0;
		printf("NAND read from offset %llx failed %d\n",
		       offset, rval);
		return rval;
	}

	/* Slow path: walk block by block so bad blocks can be skipped */
	while (left_to_read > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t read_length;

		WATCHDOG_RESET();

		/* Bad block: advance offset to the next erase block */
		if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
			printf("Skipping bad block 0x%08llx\n",
			       offset & ~(mtd->erasesize - 1));
			offset += mtd->erasesize - block_offset;
			continue;
		}

		/* Read up to the end of the current erase block */
		if (left_to_read < (mtd->erasesize - block_offset))
			read_length = left_to_read;
		else
			read_length = mtd->erasesize - block_offset;

		rval = nand_read(mtd, offset, &read_length, p_buffer);
		if (rval && rval != -EUCLEAN) {
			printf("NAND read from offset %llx failed %d\n",
			       offset, rval);
			/* Report only the bytes successfully read so far */
			*length -= left_to_read;
			return rval;
		}

		left_to_read -= read_length;
		offset += read_length;
		p_buffer += read_length;
	}

	return 0;
}
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100784
785#ifdef CONFIG_CMD_NAND_TORTURE
786
787/**
788 * check_pattern:
789 *
790 * Check if buffer contains only a certain byte pattern.
791 *
792 * @param buf buffer to check
793 * @param patt the pattern to check
794 * @param size buffer size in bytes
795 * @return 1 if there are only patt bytes in buf
796 * 0 if something else was found
797 */
798static int check_pattern(const u_char *buf, u_char patt, int size)
799{
800 int i;
801
802 for (i = 0; i < size; i++)
803 if (buf[i] != patt)
804 return 0;
805 return 1;
806}
807
/**
 * nand_torture:
 *
 * Torture a block of NAND flash.
 * This is useful to determine if a block that caused a write error is still
 * good or should be marked as bad.
 *
 * For each test pattern (0xa5, 0x5a, 0x00) the block is erased, verified
 * to be all 0xff, written with the pattern, read back, and verified.
 *
 * @param mtd nand mtd instance
 * @param offset offset in flash (must be erase-block aligned)
 * @return 0 if the block is still good, -EINVAL/-ENOMEM/-EIO on specific
 *	   failures, -1 if erase/read/write themselves failed
 */
int nand_torture(struct mtd_info *mtd, loff_t offset)
{
	u_char patterns[] = {0xa5, 0x5a, 0x00};
	struct erase_info instr = {
		.mtd = mtd,
		.addr = offset,
		.len = mtd->erasesize,
	};
	size_t retlen;
	int err, ret = -1, i, patt_count;
	u_char *buf;

	/* Only whole erase blocks can be tortured */
	if ((offset & (mtd->erasesize - 1)) != 0) {
		puts("Attempt to torture a block at a non block-aligned offset\n");
		return -EINVAL;
	}

	if (offset + mtd->erasesize > mtd->size) {
		puts("Attempt to torture a block outside the flash area\n");
		return -EINVAL;
	}

	patt_count = ARRAY_SIZE(patterns);

	/* One erase block worth of scratch space, cache-aligned for DMA */
	buf = malloc_cache_aligned(mtd->erasesize);
	if (buf == NULL) {
		puts("Out of memory for erase block buffer\n");
		return -ENOMEM;
	}

	for (i = 0; i < patt_count; i++) {
		err = mtd_erase(mtd, &instr);
		if (err) {
			/* NOTE(review): returns -1 here, not err */
			printf("%s: erase() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		/* Make sure the block contains only 0xff bytes */
		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		/* -EUCLEAN means corrected bitflips; data is still valid */
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		err = check_pattern(buf, 0xff, mtd->erasesize);
		if (!err) {
			printf("Erased block at 0x%llx, but a non-0xff byte was found\n",
			       offset);
			ret = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(buf, patterns[i], mtd->erasesize);
		err = mtd_write(mtd, offset, mtd->erasesize, &retlen, buf);
		if (err || retlen != mtd->erasesize) {
			printf("%s: write() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		/* Read back and verify the written pattern survived */
		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		err = check_pattern(buf, patterns[i], mtd->erasesize);
		if (!err) {
			printf("Pattern 0x%.2x checking failed for block at "
			       "0x%llx\n", patterns[i], offset);
			ret = -EIO;
			goto out;
		}
	}

	/* All patterns survived: the block is considered good */
	ret = 0;

out:
	free(buf);
	return ret;
}
904
905#endif