blob: fc2235c1a0ec5d82d25797e02e979ebbc3d11b7b [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roesed351b2b2006-10-10 12:36:02 +02002/*
Miquel Raynal1f1ae152018-08-16 17:30:07 +02003 * drivers/mtd/nand/raw/nand_util.c
Stefan Roesed351b2b2006-10-10 12:36:02 +02004 *
5 * Copyright (C) 2006 by Weiss-Electronic GmbH.
6 * All rights reserved.
7 *
8 * @author: Guido Classen <clagix@gmail.com>
9 * @descr: NAND Flash support
10 * @references: borrowed heavily from Linux mtd-utils code:
11 * flash_eraseall.c by Arcom Control System Ltd
12 * nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com)
13 * and Thomas Gleixner (tglx@linutronix.de)
14 *
Ben Gardiner34dd5672011-06-14 16:35:06 -040015 * Copyright (C) 2008 Nokia Corporation: drop_ffs() function by
16 * Artem Bityutskiy <dedekind1@gmail.com> from mtd-utils
17 *
Tom Rinib7bef6a2013-10-31 09:24:00 -040018 * Copyright 2010 Freescale Semiconductor
Stefan Roesed351b2b2006-10-10 12:36:02 +020019 */
20
21#include <common.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020022#include <command.h>
23#include <watchdog.h>
24#include <malloc.h>
Simon Glassa87fc0a2015-09-02 17:24:57 -060025#include <memalign.h>
Dirk Behme32d1f762007-08-02 17:42:08 +020026#include <div64.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020027
Masahiro Yamada56a931c2016-09-21 11:28:55 +090028#include <linux/errno.h>
William Juul52c07962007-10-31 13:53:06 +010029#include <linux/mtd/mtd.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020030#include <nand.h>
31#include <jffs2/jffs2.h>
32
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +000033typedef struct erase_info erase_info_t;
34typedef struct mtd_info mtd_info_t;
Stefan Roesed351b2b2006-10-10 12:36:02 +020035
36/* support only for native endian JFFS2 */
37#define cpu_to_je16(x) (x)
38#define cpu_to_je32(x) (x)
39
/**
 * nand_erase_opts: - erase NAND flash with support for various options
 *		      (jffs2 formatting)
 *
 * @param mtd		nand mtd instance to erase
 * @param opts		options,  @see struct nand_erase_options
 * @return		0 in case of success
 *
 * This code is ported from flash_eraseall.c from Linux mtd utils by
 * Arcom Control System Ltd.
 */
int nand_erase_opts(struct mtd_info *mtd,
		    const nand_erase_options_t *opts)
{
	struct jffs2_unknown_node cleanmarker;
	erase_info_t erase;
	unsigned long erase_length, erased_length; /* in blocks */
	int result;
	int percent_complete = -1;
	const char *mtd_device = mtd->name;
	struct mtd_oob_ops oob_opts;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* Erase must start on an erase-block boundary */
	if ((opts->offset & (mtd->erasesize - 1)) != 0) {
		printf("Attempt to erase non block-aligned data\n");
		return -1;
	}

	memset(&erase, 0, sizeof(erase));
	memset(&oob_opts, 0, sizeof(oob_opts));

	erase.mtd = mtd;
	erase.len = mtd->erasesize;
	erase.addr = opts->offset;
	/* Round the requested byte length up to whole erase blocks */
	erase_length = lldiv(opts->length + mtd->erasesize - 1,
			     mtd->erasesize);

	/* JFFS2 cleanmarker node, written to OOB when opts->jffs2 is set */
	cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
	cleanmarker.totlen = cpu_to_je32(8);

	/* scrub option allows to erase badblock. To prevent internal
	 * check from erase() method, set block check method to dummy
	 * and disable bad block table while erasing.
	 */
	if (opts->scrub) {
		erase.scrub = opts->scrub;
		/*
		 * We don't need the bad block table anymore...
		 * after scrub, there are no bad blocks left!
		 */
		if (chip->bbt) {
			kfree(chip->bbt);
		}
		chip->bbt = NULL;
		/* Force a BBT rescan the next time it is needed */
		chip->options &= ~NAND_BBT_SCANNED;
	}

	for (erased_length = 0;
	     erased_length < erase_length;
	     erase.addr += mtd->erasesize) {

		WATCHDOG_RESET();

		/* Stop before running past the caller-supplied limit */
		if (opts->lim && (erase.addr >= (opts->offset + opts->lim))) {
			puts("Size of erase exceeds limit\n");
			return -EFBIG;
		}
		if (!opts->scrub) {
			int ret = mtd_block_isbad(mtd, erase.addr);
			if (ret > 0) {
				if (!opts->quiet)
					printf("\rSkipping bad block at "
					       "0x%08llx "
					       "                 \n",
					       erase.addr);

				/*
				 * Without the spread option a skipped bad
				 * block still counts against the requested
				 * length; with it, erasing continues further
				 * to cover the full length in good blocks.
				 */
				if (!opts->spread)
					erased_length++;

				continue;

			} else if (ret < 0) {
				printf("\n%s: MTD get bad block failed: %d\n",
				       mtd_device,
				       ret);
				return -1;
			}
		}

		erased_length++;

		result = mtd_erase(mtd, &erase);
		if (result != 0) {
			printf("\n%s: MTD Erase failure: %d\n",
			       mtd_device, result);
			continue;
		}

		/* format for JFFS2 ? */
		if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) {
			struct mtd_oob_ops ops;
			ops.ooblen = 8;
			ops.datbuf = NULL;
			ops.oobbuf = (uint8_t *)&cleanmarker;
			ops.ooboffs = 0;
			ops.mode = MTD_OPS_AUTO_OOB;

			result = mtd_write_oob(mtd, erase.addr, &ops);
			if (result != 0) {
				printf("\n%s: MTD writeoob failure: %d\n",
				       mtd_device, result);
				continue;
			}
		}

		if (!opts->quiet) {
			unsigned long long n = erased_length * 100ULL;
			int percent;

			do_div(n, erase_length);
			percent = (int)n;

			/* output progress message only at whole percent
			 * steps to reduce the number of messages printed
			 * on (slow) serial consoles
			 */
			if (percent != percent_complete) {
				percent_complete = percent;

				printf("\rErasing at 0x%llx -- %3d%% complete.",
				       erase.addr, percent);

				if (opts->jffs2 && result == 0)
					printf(" Cleanmarker written at 0x%llx.",
					       erase.addr);
			}
		}
	}
	if (!opts->quiet)
		printf("\n");

	return 0;
}
184
Nishanth Menonb20f8402008-12-13 09:43:06 -0600185#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK
186
Heiko Schocherf5895d12014-06-24 10:10:04 +0200187#define NAND_CMD_LOCK_TIGHT 0x2c
188#define NAND_CMD_LOCK_STATUS 0x7a
189
Stefan Roesed351b2b2006-10-10 12:36:02 +0200190/******************************************************************************
191 * Support for locking / unlocking operations of some NAND devices
192 *****************************************************************************/
193
/**
 * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT
 *	      state
 *
 * @param mtd		nand mtd instance
 * @param tight		bring device in lock tight mode
 *
 * @return		0 on success, -1 in case of error
 *
 * The lock / lock-tight command only applies to the whole chip. To get some
 * parts of the chip lock and others unlocked use the following sequence:
 *
 * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin)
 * - Call nand_unlock() once for each consecutive area to be unlocked
 * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1)
 *
 * If the device is in lock-tight state software can't change the
 * current active lock/unlock state of all pages. nand_lock() / nand_unlock()
 * calls will fail. It is only posible to leave lock-tight state by
 * an hardware signal (low pulse on _WP pin) or by power down.
 */
int nand_lock(struct mtd_info *mtd, int tight)
{
	int ret = 0;
	int status;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* select the NAND device */
	chip->select_chip(mtd, 0);

	/* check the Lock Tight Status; a lock-tight device rejects
	 * further lock/unlock commands, so bail out early */
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, 0);
	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
		printf("nand_lock: Device is locked tight!\n");
		ret = -1;
		goto out;
	}

	/* issue the (chip-wide) lock or lock-tight command */
	chip->cmdfunc(mtd,
		      (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK),
		      -1, -1);

	/* call wait ready function */
	status = chip->waitfunc(mtd, chip);

	/* see if device thinks it succeeded; bit 0 of the status byte
	 * signals a failed operation */
	if (status & 0x01) {
		ret = -1;
	}

 out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
249
/**
 * nand_get_lock_status: - query current lock state from one page of NAND
 *			   flash
 *
 * @param mtd		nand mtd instance
 * @param offset	page address to query (must be page-aligned!)
 *
 * @return		-1 in case of error
 *			>0 lock status:
 *			  bitfield with the following combinations:
 *			  NAND_LOCK_STATUS_TIGHT: page in tight state
 *			  NAND_LOCK_STATUS_UNLOCK: page unlocked
 *
 */
int nand_get_lock_status(struct mtd_info *mtd, loff_t offset)
{
	int ret = 0;
	int chipnr;
	int page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* select the NAND device holding this offset */
	chipnr = (int)(offset >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);


	/* the status query addresses a single page, so the offset
	 * must sit on a page boundary */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("nand_get_lock_status: "
			"Start address must be beginning of "
			"nand page!\n");
		ret = -1;
		goto out;
	}

	/* check the Lock Status */
	page = (int)(offset >> chip->page_shift);
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);

	/* mask the status byte down to the two lock-state bits */
	ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT
					  | NAND_LOCK_STATUS_UNLOCK);

 out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
296
/**
 * nand_unlock: - Unlock area of NAND pages
 *		  only one consecutive area can be unlocked at one time!
 *
 * @param mtd		nand mtd instance
 * @param start		start byte address
 * @param length	number of bytes to unlock (must be a multiple of
 *			page size mtd->writesize)
 * @param allexcept	if set, unlock everything not selected
 *
 * @return		0 on success, -1 in case of error
 */
int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length,
	int allexcept)
{
	int ret = 0;
	int chipnr;
	int status;
	int page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	debug("nand_unlock%s: start: %08llx, length: %zd!\n",
	      allexcept ? " (allexcept)" : "", start, length);

	/* select the NAND device holding the start address */
	chipnr = (int)(start >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* check the WP bit: unlocking is pointless while the chip is
	 * hardware write protected */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) {
		printf("nand_unlock: Device is write protected!\n");
		ret = -1;
		goto out;
	}

	/* check the Lock Tight Status; lock-tight devices ignore
	 * unlock commands entirely */
	page = (int)(start >> chip->page_shift);
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
		printf("nand_unlock: Device is locked tight!\n");
		ret = -1;
		goto out;
	}

	/* the unlock range must start on an erase-block boundary */
	if ((start & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Start address must be beginning of "
			"nand block!\n");
		ret = -1;
		goto out;
	}

	/* and cover a non-zero whole number of erase blocks */
	if (length == 0 || (length & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Length must be a multiple of nand block "
			"size %08x!\n", mtd->erasesize);
		ret = -1;
		goto out;
	}

	/*
	 * Set length so that the last address is set to the
	 * starting address of the last block
	 */
	length -= mtd->erasesize;

	/* submit address of first page to unlock */
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);

	/* submit ADDRESS of LAST page to unlock */
	page += (int)(length >> chip->page_shift);

	/*
	 * Page addresses for unlocking are supposed to be block-aligned.
	 * At least some NAND chips use the low bit to indicate that the
	 * page range should be inverted.
	 */
	if (allexcept)
		page |= 1;

	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask);

	/* call wait ready function */
	status = chip->waitfunc(mtd, chip);
	/* see if device thinks it succeeded */
	if (status & 0x01) {
		/* there was an error */
		ret = -1;
		goto out;
	}

 out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
William Juul52c07962007-10-31 13:53:06 +0100392#endif
Stefan Roesed351b2b2006-10-10 12:36:02 +0200393
Scott Woodcc5f3392008-06-12 13:20:16 -0500394/**
Scott Woodfe3b5e12010-07-30 16:11:41 -0500395 * check_skip_len
Scott Woodcc5f3392008-06-12 13:20:16 -0500396 *
Scott Woodfe3b5e12010-07-30 16:11:41 -0500397 * Check if there are any bad blocks, and whether length including bad
398 * blocks fits into device
Scott Woodcc5f3392008-06-12 13:20:16 -0500399 *
Scott Wood08364d92016-05-30 13:57:54 -0500400 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500401 * @param offset offset in flash
402 * @param length image length
Tom Rini32d96182013-03-14 05:32:50 +0000403 * @param used length of flash needed for the requested length
Scott Woodfe3b5e12010-07-30 16:11:41 -0500404 * @return 0 if the image fits and there are no bad blocks
405 * 1 if the image fits, but there are bad blocks
406 * -1 if the image does not fit
Scott Woodcc5f3392008-06-12 13:20:16 -0500407 */
Scott Wood08364d92016-05-30 13:57:54 -0500408static int check_skip_len(struct mtd_info *mtd, loff_t offset, size_t length,
409 size_t *used)
Scott Woodcc5f3392008-06-12 13:20:16 -0500410{
Scott Woodcc5f3392008-06-12 13:20:16 -0500411 size_t len_excl_bad = 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500412 int ret = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500413
414 while (len_excl_bad < length) {
Scott Woodfe3b5e12010-07-30 16:11:41 -0500415 size_t block_len, block_off;
416 loff_t block_start;
Scott Woodcc5f3392008-06-12 13:20:16 -0500417
Scott Wood08364d92016-05-30 13:57:54 -0500418 if (offset >= mtd->size)
Scott Woodfe3b5e12010-07-30 16:11:41 -0500419 return -1;
Scott Woodcc5f3392008-06-12 13:20:16 -0500420
Scott Wood08364d92016-05-30 13:57:54 -0500421 block_start = offset & ~(loff_t)(mtd->erasesize - 1);
422 block_off = offset & (mtd->erasesize - 1);
423 block_len = mtd->erasesize - block_off;
Scott Woodcc5f3392008-06-12 13:20:16 -0500424
Scott Wood08364d92016-05-30 13:57:54 -0500425 if (!nand_block_isbad(mtd, block_start))
Scott Woodfe3b5e12010-07-30 16:11:41 -0500426 len_excl_bad += block_len;
427 else
428 ret = 1;
429
430 offset += block_len;
Tom Rini32d96182013-03-14 05:32:50 +0000431 *used += block_len;
Scott Woodcc5f3392008-06-12 13:20:16 -0500432 }
433
Tom Rini32d96182013-03-14 05:32:50 +0000434 /* If the length is not a multiple of block_len, adjust. */
435 if (len_excl_bad > length)
436 *used -= (len_excl_bad - length);
437
Scott Woodfe3b5e12010-07-30 16:11:41 -0500438 return ret;
Scott Woodcc5f3392008-06-12 13:20:16 -0500439}
Ben Gardiner34dd5672011-06-14 16:35:06 -0400440
#ifdef CONFIG_CMD_NAND_TRIMFFS
/**
 * drop_ffs() - compute how much of a write buffer actually needs writing
 *
 * Trailing 0xFF bytes need not be programmed (erased NAND already reads
 * as 0xFF), so find the last non-0xFF byte and round the resulting length
 * up to the minimum flash I/O size.
 *
 * @param mtd	nand mtd instance
 * @param buf	data that is about to be written
 * @param len	pointer to the full buffer length
 * @return	number of bytes to write, never more than *len
 */
static size_t drop_ffs(const struct mtd_info *mtd, const u_char *buf,
		       const size_t *len)
{
	ssize_t pos = (ssize_t)*len - 1;
	size_t aligned;

	/* scan backwards over the run of trailing 0xFF bytes */
	while (pos >= 0 && buf[pos] == 0xFF)
		pos--;

	/* The resulting length must be aligned to the minimum flash I/O size */
	aligned = (size_t)(pos + 1);
	aligned = (aligned + mtd->writesize - 1) / mtd->writesize;
	aligned *= mtd->writesize;

	/*
	 * since the input length may be unaligned, prevent access past the end
	 * of the buffer
	 */
	return min(aligned, *len);
}
#endif
Scott Woodcc5f3392008-06-12 13:20:16 -0500464
/**
 * nand_verify_page_oob:
 *
 * Verify a page of NAND flash, including the OOB.
 * Reads page of NAND and verifies the contents and OOB against the
 * values in ops.
 *
 * @param mtd		nand mtd instance
 * @param ops		MTD operations, including data to verify
 * @param ofs		offset in flash
 * @return		0 in case of success
 */
int nand_verify_page_oob(struct mtd_info *mtd, struct mtd_oob_ops *ops,
			 loff_t ofs)
{
	int rval;
	struct mtd_oob_ops vops;
	/* one bounce buffer holds the page data followed by its OOB */
	size_t verlen = mtd->writesize + mtd->oobsize;

	/* clone the caller's op descriptor, then repoint its buffers */
	memcpy(&vops, ops, sizeof(vops));

	/* DMA-capable alignment; presumably required by some NAND
	 * drivers' read path -- NOTE(review): confirm */
	vops.datbuf = memalign(ARCH_DMA_MINALIGN, verlen);

	if (!vops.datbuf)
		return -ENOMEM;

	/* OOB portion lives directly after the page data */
	vops.oobbuf = vops.datbuf + mtd->writesize;

	rval = mtd_read_oob(mtd, ofs, &vops);
	if (!rval)
		rval = memcmp(ops->datbuf, vops.datbuf, vops.len);
	if (!rval)
		rval = memcmp(ops->oobbuf, vops.oobbuf, vops.ooblen);

	free(vops.datbuf);

	/* collapse read errors and mismatches into a single -EIO */
	return rval ? -EIO : 0;
}
503
504/**
505 * nand_verify:
506 *
507 * Verify a region of NAND flash.
508 * Reads NAND in page-sized chunks and verifies the contents against
509 * the contents of a buffer. The offset into the NAND must be
510 * page-aligned, and the function doesn't handle skipping bad blocks.
511 *
Scott Wood08364d92016-05-30 13:57:54 -0500512 * @param mtd nand mtd instance
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600513 * @param ofs offset in flash
514 * @param len buffer length
515 * @param buf buffer to read from
516 * @return 0 in case of success
517 */
Scott Wood08364d92016-05-30 13:57:54 -0500518int nand_verify(struct mtd_info *mtd, loff_t ofs, size_t len, u_char *buf)
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600519{
520 int rval = 0;
521 size_t verofs;
Scott Wood08364d92016-05-30 13:57:54 -0500522 size_t verlen = mtd->writesize;
Stephen Warren12db8182015-04-14 08:59:00 -0600523 uint8_t *verbuf = memalign(ARCH_DMA_MINALIGN, verlen);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600524
525 if (!verbuf)
526 return -ENOMEM;
527
528 /* Read the NAND back in page-size groups to limit malloc size */
529 for (verofs = ofs; verofs < ofs + len;
530 verofs += verlen, buf += verlen) {
Scott Wood08364d92016-05-30 13:57:54 -0500531 verlen = min(mtd->writesize, (uint32_t)(ofs + len - verofs));
532 rval = nand_read(mtd, verofs, &verlen, verbuf);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600533 if (!rval || (rval == -EUCLEAN))
534 rval = memcmp(buf, verbuf, verlen);
535
536 if (rval)
537 break;
538 }
539
540 free(verbuf);
541
542 return rval ? -EIO : 0;
543}
544
545
546
/**
 * nand_write_skip_bad:
 *
 * Write image to NAND flash.
 * Blocks that are marked bad are skipped and the is written to the next
 * block instead as long as the image is short enough to fit even after
 * skipping the bad blocks. Due to bad blocks we may not be able to
 * perform the requested write. In the case where the write would
 * extend beyond the end of the NAND device, both length and actual (if
 * not NULL) are set to 0. In the case where the write would extend
 * beyond the limit we are passed, length is set to 0 and actual is set
 * to the required length.
 *
 * @param mtd		nand mtd instance
 * @param offset	offset in flash
 * @param length	buffer length
 * @param actual	set to size required to write length worth of
 *			buffer or 0 on error, if not NULL
 * @param lim		maximum size that actual may be in order to not
 *			exceed the buffer
 * @param buffer	buffer to read from
 * @param flags		flags modifying the behaviour of the write to NAND
 * @return		0 in case of success
 */
int nand_write_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
			size_t *actual, loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	size_t used_for_write = 0;
	u_char *p_buffer = buffer;
	int need_skip;

	if (actual)
		*actual = 0;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	/* need_skip: -1 = does not fit, 1 = fits but has bad blocks,
	 * 0 = fits with no bad blocks; also fills used_for_write */
	need_skip = check_skip_len(mtd, offset, *length, &used_for_write);

	if (actual)
		*actual = used_for_write;

	if (need_skip < 0) {
		printf("Attempt to write outside the flash area\n");
		*length = 0;
		return -EINVAL;
	}

	if (used_for_write > lim) {
		puts("Size of write exceeds partition or device limit\n");
		*length = 0;
		return -EFBIG;
	}

	/* Fast path: no bad blocks to skip and no trailing-0xFF trimming
	 * requested, so a single nand_write() covers the whole range */
	if (!need_skip && !(flags & WITH_DROP_FFS)) {
		rval = nand_write(mtd, offset, length, buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset, *length, buffer);

		if (rval == 0)
			return 0;

		*length = 0;
		printf("NAND write to offset %llx failed %d\n",
		       offset, rval);
		return rval;
	}

	/* Slow path: write block by block, skipping bad blocks */
	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;

		WATCHDOG_RESET();

		if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
			printf("Skip bad block 0x%08llx\n",
				offset & ~(mtd->erasesize - 1));
			/* advance to the start of the next block */
			offset += mtd->erasesize - block_offset;
			continue;
		}

		/* write at most up to the end of the current block */
		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
#ifdef CONFIG_CMD_NAND_TRIMFFS
		/* optionally skip programming trailing all-0xFF bytes */
		if (flags & WITH_DROP_FFS)
			truncated_write_size = drop_ffs(mtd, p_buffer,
					&write_size);
#endif

		rval = nand_write(mtd, offset, &truncated_write_size,
				  p_buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset,
					   truncated_write_size, p_buffer);

		/* advance by the untruncated size: the dropped 0xFF tail
		 * still occupies flash address space */
		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			/* report how much was actually consumed */
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}
681
682/**
683 * nand_read_skip_bad:
684 *
685 * Read image from NAND flash.
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000686 * Blocks that are marked bad are skipped and the next block is read
Tom Rini32d96182013-03-14 05:32:50 +0000687 * instead as long as the image is short enough to fit even after
688 * skipping the bad blocks. Due to bad blocks we may not be able to
689 * perform the requested read. In the case where the read would extend
690 * beyond the end of the NAND device, both length and actual (if not
691 * NULL) are set to 0. In the case where the read would extend beyond
692 * the limit we are passed, length is set to 0 and actual is set to the
693 * required length.
Scott Woodcc5f3392008-06-12 13:20:16 -0500694 *
Scott Wood08364d92016-05-30 13:57:54 -0500695 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500696 * @param offset offset in flash
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000697 * @param length buffer length, on return holds number of read bytes
Tom Rini32d96182013-03-14 05:32:50 +0000698 * @param actual set to size required to read length worth of buffer or 0
699 * on error, if not NULL
700 * @param lim maximum size that actual may be in order to not exceed the
701 * buffer
Scott Woodcc5f3392008-06-12 13:20:16 -0500702 * @param buffer buffer to write to
703 * @return 0 in case of success
704 */
Scott Wood08364d92016-05-30 13:57:54 -0500705int nand_read_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
706 size_t *actual, loff_t lim, u_char *buffer)
Scott Woodcc5f3392008-06-12 13:20:16 -0500707{
708 int rval;
709 size_t left_to_read = *length;
Tom Rini32d96182013-03-14 05:32:50 +0000710 size_t used_for_read = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500711 u_char *p_buffer = buffer;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500712 int need_skip;
Scott Woodcc5f3392008-06-12 13:20:16 -0500713
Scott Wood08364d92016-05-30 13:57:54 -0500714 if ((offset & (mtd->writesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000715 printf("Attempt to read non page-aligned data\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500716 *length = 0;
Tom Rini32d96182013-03-14 05:32:50 +0000717 if (actual)
718 *actual = 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500719 return -EINVAL;
720 }
Scott Woodcc5f3392008-06-12 13:20:16 -0500721
Scott Wood08364d92016-05-30 13:57:54 -0500722 need_skip = check_skip_len(mtd, offset, *length, &used_for_read);
Tom Rini32d96182013-03-14 05:32:50 +0000723
724 if (actual)
725 *actual = used_for_read;
726
Scott Woodfe3b5e12010-07-30 16:11:41 -0500727 if (need_skip < 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000728 printf("Attempt to read outside the flash area\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500729 *length = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500730 return -EINVAL;
731 }
732
Tom Rini32d96182013-03-14 05:32:50 +0000733 if (used_for_read > lim) {
734 puts("Size of read exceeds partition or device limit\n");
735 *length = 0;
736 return -EFBIG;
737 }
738
Scott Woodfe3b5e12010-07-30 16:11:41 -0500739 if (!need_skip) {
Scott Wood08364d92016-05-30 13:57:54 -0500740 rval = nand_read(mtd, offset, length, buffer);
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300741 if (!rval || rval == -EUCLEAN)
742 return 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500743
744 *length = 0;
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000745 printf("NAND read from offset %llx failed %d\n",
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300746 offset, rval);
Scott Wood90e0a6b2008-11-25 10:47:02 -0600747 return rval;
Scott Woodcc5f3392008-06-12 13:20:16 -0500748 }
749
750 while (left_to_read > 0) {
Scott Wood08364d92016-05-30 13:57:54 -0500751 size_t block_offset = offset & (mtd->erasesize - 1);
Scott Woodcc5f3392008-06-12 13:20:16 -0500752 size_t read_length;
753
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000754 WATCHDOG_RESET();
Giulio Benetti749bd662009-07-31 17:30:34 -0500755
Scott Wood08364d92016-05-30 13:57:54 -0500756 if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000757 printf("Skipping bad block 0x%08llx\n",
Scott Wood08364d92016-05-30 13:57:54 -0500758 offset & ~(mtd->erasesize - 1));
759 offset += mtd->erasesize - block_offset;
Scott Woodcc5f3392008-06-12 13:20:16 -0500760 continue;
761 }
762
Scott Wood08364d92016-05-30 13:57:54 -0500763 if (left_to_read < (mtd->erasesize - block_offset))
Scott Woodcc5f3392008-06-12 13:20:16 -0500764 read_length = left_to_read;
765 else
Scott Wood08364d92016-05-30 13:57:54 -0500766 read_length = mtd->erasesize - block_offset;
Scott Woodcc5f3392008-06-12 13:20:16 -0500767
Scott Wood08364d92016-05-30 13:57:54 -0500768 rval = nand_read(mtd, offset, &read_length, p_buffer);
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300769 if (rval && rval != -EUCLEAN) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000770 printf("NAND read from offset %llx failed %d\n",
Wolfgang Denk74e0dde2008-08-14 14:41:06 +0200771 offset, rval);
Scott Woodcc5f3392008-06-12 13:20:16 -0500772 *length -= left_to_read;
773 return rval;
774 }
775
776 left_to_read -= read_length;
777 offset += read_length;
778 p_buffer += read_length;
779 }
780
781 return 0;
782}
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100783
784#ifdef CONFIG_CMD_NAND_TORTURE
785
/**
 * check_pattern:
 *
 * Check if a buffer is filled entirely with one repeated byte value.
 *
 * @param buf buffer to check
 * @param patt the pattern to check
 * @param size buffer size in bytes
 * @return 1 if there are only patt bytes in buf
 * 0 if something else was found
 */
static int check_pattern(const u_char *buf, u_char patt, int size)
{
	const u_char *end = buf + size;

	/* Bail out at the first byte that deviates from the pattern */
	while (buf < end) {
		if (*buf++ != patt)
			return 0;
	}

	return 1;
}
806
/**
 * nand_torture:
 *
 * Torture a block of NAND flash.
 * This is useful to determine if a block that caused a write error is still
 * good or should be marked as bad.
 *
 * The block is erased, then fully written and read back with each of the
 * test patterns 0xa5, 0x5a and 0x00 in turn. NOTE: this test is
 * destructive - any previous content of the block is lost.
 *
 * @param mtd nand mtd instance
 * @param offset offset in flash, must be erase-block aligned
 * @return 0 if the block is still good; -EINVAL on bad alignment or
 *	out-of-range offset, -ENOMEM if the bounce buffer cannot be
 *	allocated, -EIO on a pattern mismatch, or the negative error
 *	code returned by the failing mtd_* call
 */
int nand_torture(struct mtd_info *mtd, loff_t offset)
{
	u_char patterns[] = {0xa5, 0x5a, 0x00};
	/* one erase request, reused unchanged for every pattern pass */
	struct erase_info instr = {
		.mtd = mtd,
		.addr = offset,
		.len = mtd->erasesize,
	};
	size_t retlen;
	int err, ret = -1, i, patt_count;
	u_char *buf;

	/* Only whole erase blocks can be tortured */
	if ((offset & (mtd->erasesize - 1)) != 0) {
		puts("Attempt to torture a block at a non block-aligned offset\n");
		return -EINVAL;
	}

	if (offset + mtd->erasesize > mtd->size) {
		puts("Attempt to torture a block outside the flash area\n");
		return -EINVAL;
	}

	patt_count = ARRAY_SIZE(patterns);

	/* Erase-block-sized bounce buffer for both write and read-back */
	buf = malloc_cache_aligned(mtd->erasesize);
	if (buf == NULL) {
		puts("Out of memory for erase block buffer\n");
		return -ENOMEM;
	}

	for (i = 0; i < patt_count; i++) {
		err = mtd_erase(mtd, &instr);
		if (err) {
			printf("%s: erase() failed for block at 0x%llx: %d\n",
				mtd->name, instr.addr, err);
			goto out;
		}

		/* Make sure the block contains only 0xff bytes */
		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		/* correctable bitflips (-EUCLEAN) are tolerated on reads */
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
				mtd->name, instr.addr, err);
			goto out;
		}

		/* check_pattern() returns 1 on a full match, 0 on mismatch */
		err = check_pattern(buf, 0xff, mtd->erasesize);
		if (!err) {
			printf("Erased block at 0x%llx, but a non-0xff byte was found\n",
				offset);
			ret = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(buf, patterns[i], mtd->erasesize);
		err = mtd_write(mtd, offset, mtd->erasesize, &retlen, buf);
		if (err || retlen != mtd->erasesize) {
			printf("%s: write() failed for block at 0x%llx: %d\n",
				mtd->name, instr.addr, err);
			goto out;
		}

		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
				mtd->name, instr.addr, err);
			goto out;
		}

		err = check_pattern(buf, patterns[i], mtd->erasesize);
		if (!err) {
			printf("Pattern 0x%.2x checking failed for block at "
					"0x%llx\n", patterns[i], offset);
			ret = -EIO;
			goto out;
		}
	}

	/* all three patterns survived erase/write/read-back: block is good */
	ret = 0;

out:
	free(buf);
	return ret;
}
903
904#endif