blob: 9c4af8a5d63d31784b9a1bcff21d4d8a5dff6214 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roesed351b2b2006-10-10 12:36:02 +02002/*
Miquel Raynal1f1ae152018-08-16 17:30:07 +02003 * drivers/mtd/nand/raw/nand_util.c
Stefan Roesed351b2b2006-10-10 12:36:02 +02004 *
5 * Copyright (C) 2006 by Weiss-Electronic GmbH.
6 * All rights reserved.
7 *
8 * @author: Guido Classen <clagix@gmail.com>
9 * @descr: NAND Flash support
10 * @references: borrowed heavily from Linux mtd-utils code:
11 * flash_eraseall.c by Arcom Control System Ltd
12 * nandwrite.c by Steven J. Hill (sjhill@realitydiluted.com)
13 * and Thomas Gleixner (tglx@linutronix.de)
14 *
Ben Gardiner34dd5672011-06-14 16:35:06 -040015 * Copyright (C) 2008 Nokia Corporation: drop_ffs() function by
16 * Artem Bityutskiy <dedekind1@gmail.com> from mtd-utils
17 *
Tom Rinib7bef6a2013-10-31 09:24:00 -040018 * Copyright 2010 Freescale Semiconductor
Stefan Roesed351b2b2006-10-10 12:36:02 +020019 */
20
21#include <common.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020022#include <command.h>
23#include <watchdog.h>
24#include <malloc.h>
Simon Glassa87fc0a2015-09-02 17:24:57 -060025#include <memalign.h>
Dirk Behme32d1f762007-08-02 17:42:08 +020026#include <div64.h>
Simon Glass274e0b02020-05-10 11:39:56 -060027#include <asm/cache.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070028#include <dm/devres.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020029
Masahiro Yamada56a931c2016-09-21 11:28:55 +090030#include <linux/errno.h>
William Juul52c07962007-10-31 13:53:06 +010031#include <linux/mtd/mtd.h>
Stefan Roesed351b2b2006-10-10 12:36:02 +020032#include <nand.h>
33#include <jffs2/jffs2.h>
34
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +000035typedef struct erase_info erase_info_t;
36typedef struct mtd_info mtd_info_t;
Stefan Roesed351b2b2006-10-10 12:36:02 +020037
38/* support only for native endian JFFS2 */
39#define cpu_to_je16(x) (x)
40#define cpu_to_je32(x) (x)
41
Stefan Roesed351b2b2006-10-10 12:36:02 +020042/**
43 * nand_erase_opts: - erase NAND flash with support for various options
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +000044 * (jffs2 formatting)
Stefan Roesed351b2b2006-10-10 12:36:02 +020045 *
Scott Wood08364d92016-05-30 13:57:54 -050046 * @param mtd nand mtd instance to erase
Stefan Roesed351b2b2006-10-10 12:36:02 +020047 * @param opts options, @see struct nand_erase_options
48 * @return 0 in case of success
49 *
50 * This code is ported from flash_eraseall.c from Linux mtd utils by
51 * Arcom Control System Ltd.
52 */
int nand_erase_opts(struct mtd_info *mtd,
		    const nand_erase_options_t *opts)
{
	struct jffs2_unknown_node cleanmarker;
	erase_info_t erase;
	unsigned long erase_length, erased_length; /* in blocks */
	int result;
	int percent_complete = -1;
	const char *mtd_device = mtd->name;
	struct mtd_oob_ops oob_opts;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* The erase region must start on an erase-block boundary */
	if ((opts->offset & (mtd->erasesize - 1)) != 0) {
		printf("Attempt to erase non block-aligned data\n");
		return -1;
	}

	memset(&erase, 0, sizeof(erase));
	memset(&oob_opts, 0, sizeof(oob_opts));

	erase.mtd = mtd;
	erase.len = mtd->erasesize;
	erase.addr = opts->offset;
	/* Round the requested byte length up to whole erase blocks */
	erase_length = lldiv(opts->length + mtd->erasesize - 1,
			     mtd->erasesize);

	/* Prepare a JFFS2 cleanmarker node, written to OOB after erase
	 * when opts->jffs2 is set */
	cleanmarker.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
	cleanmarker.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER);
	cleanmarker.totlen = cpu_to_je32(8);

	/* scrub option allows to erase badblock. To prevent internal
	 * check from erase() method, set block check method to dummy
	 * and disable bad block table while erasing.
	 */
	if (opts->scrub) {
		erase.scrub = opts->scrub;
		/*
		 * We don't need the bad block table anymore...
		 * after scrub, there are no bad blocks left!
		 */
		if (chip->bbt) {
			kfree(chip->bbt);
		}
		chip->bbt = NULL;
		/* Force the BBT to be re-scanned on next access */
		chip->options &= ~NAND_BBT_SCANNED;
	}

	for (erased_length = 0;
	     erased_length < erase_length;
	     erase.addr += mtd->erasesize) {

		WATCHDOG_RESET();

		/* Abort if the erase would run past the caller's limit */
		if (opts->lim && (erase.addr >= (opts->offset + opts->lim))) {
			puts("Size of erase exceeds limit\n");
			return -EFBIG;
		}
		if (!opts->scrub) {
			int ret = mtd_block_isbad(mtd, erase.addr);
			if (ret > 0) {
				if (!opts->quiet)
					printf("\rSkipping bad block at  "
					       "0x%08llx                 "
					       "                         \n",
					       erase.addr);

				/* In non-spread mode a skipped bad block
				 * still counts against the erase length */
				if (!opts->spread)
					erased_length++;

				continue;

			} else if (ret < 0) {
				printf("\n%s: MTD get bad block failed: %d\n",
				       mtd_device,
				       ret);
				return -1;
			}
		}

		erased_length++;

		result = mtd_erase(mtd, &erase);
		if (result != 0) {
			printf("\n%s: MTD Erase failure: %d\n",
			       mtd_device, result);
			continue;
		}

		/* format for JFFS2 ? */
		if (opts->jffs2 && chip->ecc.layout->oobavail >= 8) {
			struct mtd_oob_ops ops;
			ops.ooblen = 8;
			ops.datbuf = NULL;
			ops.oobbuf = (uint8_t *)&cleanmarker;
			ops.ooboffs = 0;
			ops.mode = MTD_OPS_AUTO_OOB;

			result = mtd_write_oob(mtd, erase.addr, &ops);
			if (result != 0) {
				printf("\n%s: MTD writeoob failure: %d\n",
				       mtd_device, result);
				continue;
			}
		}

		if (!opts->quiet) {
			unsigned long long n = erased_length * 100ULL;
			int percent;

			do_div(n, erase_length);
			percent = (int)n;

			/* output progress message only at whole percent
			 * steps to reduce the number of messages printed
			 * on (slow) serial consoles
			 */
			if (percent != percent_complete) {
				percent_complete = percent;

				printf("\rErasing at 0x%llx -- %3d%% complete.",
				       erase.addr, percent);

				if (opts->jffs2 && result == 0)
					printf(" Cleanmarker written at 0x%llx.",
					       erase.addr);
			}
		}
	}
	if (!opts->quiet)
		printf("\n");

	return 0;
}
186
Nishanth Menonb20f8402008-12-13 09:43:06 -0600187#ifdef CONFIG_CMD_NAND_LOCK_UNLOCK
188
Heiko Schocherf5895d12014-06-24 10:10:04 +0200189#define NAND_CMD_LOCK_TIGHT 0x2c
190#define NAND_CMD_LOCK_STATUS 0x7a
191
Stefan Roesed351b2b2006-10-10 12:36:02 +0200192/******************************************************************************
193 * Support for locking / unlocking operations of some NAND devices
194 *****************************************************************************/
195
Stefan Roesed351b2b2006-10-10 12:36:02 +0200196/**
197 * nand_lock: Set all pages of NAND flash chip to the LOCK or LOCK-TIGHT
198 * state
199 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600200 * @param mtd nand mtd instance
Stefan Roesed351b2b2006-10-10 12:36:02 +0200201 * @param tight bring device in lock tight mode
202 *
203 * @return 0 on success, -1 in case of error
204 *
205 * The lock / lock-tight command only applies to the whole chip. To get some
206 * parts of the chip lock and others unlocked use the following sequence:
207 *
208 * - Lock all pages of the chip using nand_lock(mtd, 0) (or the lockpre pin)
209 * - Call nand_unlock() once for each consecutive area to be unlocked
210 * - If desired: Bring the chip to the lock-tight state using nand_lock(mtd, 1)
211 *
212 * If the device is in lock-tight state software can't change the
213 * current active lock/unlock state of all pages. nand_lock() / nand_unlock()
214 * calls will fail. It is only posible to leave lock-tight state by
215 * an hardware signal (low pulse on _WP pin) or by power down.
216 */
int nand_lock(struct mtd_info *mtd, int tight)
{
	int ret = 0;
	int status;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* select the NAND device */
	chip->select_chip(mtd, 0);

	/* check the Lock Tight Status; a lock-tight device can only be
	 * released by hardware (_WP pulse) or power cycle, so bail out */
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, 0);
	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
		printf("nand_lock: Device is locked tight!\n");
		ret = -1;
		goto out;
	}

	/* issue the whole-chip LOCK or LOCK-TIGHT command */
	chip->cmdfunc(mtd,
		      (tight ? NAND_CMD_LOCK_TIGHT : NAND_CMD_LOCK),
		      -1, -1);

	/* call wait ready function */
	status = chip->waitfunc(mtd, chip);

	/* see if device thinks it succeeded (status bit 0 set = fail) */
	if (status & 0x01) {
		ret = -1;
	}

 out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
251
252/**
253 * nand_get_lock_status: - query current lock state from one page of NAND
254 * flash
255 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600256 * @param mtd nand mtd instance
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000257 * @param offset page address to query (must be page-aligned!)
Stefan Roesed351b2b2006-10-10 12:36:02 +0200258 *
259 * @return -1 in case of error
260 * >0 lock status:
261 * bitfield with the following combinations:
262 * NAND_LOCK_STATUS_TIGHT: page in tight state
Stefan Roesed351b2b2006-10-10 12:36:02 +0200263 * NAND_LOCK_STATUS_UNLOCK: page unlocked
264 *
265 */
int nand_get_lock_status(struct mtd_info *mtd, loff_t offset)
{
	int ret = 0;
	int chipnr;
	int page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* select the NAND device holding this offset */
	chipnr = (int)(offset >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);


	/* lock state is queried per page, so the offset must be
	 * page-aligned */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("nand_get_lock_status: "
		       "Start address must be beginning of "
		       "nand page!\n");
		ret = -1;
		goto out;
	}

	/* check the Lock Status */
	page = (int)(offset >> chip->page_shift);
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);

	/* mask the returned status byte down to the lock bits */
	ret = chip->read_byte(mtd) & (NAND_LOCK_STATUS_TIGHT
				      | NAND_LOCK_STATUS_UNLOCK);

 out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
298
299/**
300 * nand_unlock: - Unlock area of NAND pages
301 * only one consecutive area can be unlocked at one time!
302 *
Nishanth Menonb20f8402008-12-13 09:43:06 -0600303 * @param mtd nand mtd instance
Stefan Roesed351b2b2006-10-10 12:36:02 +0200304 * @param start start byte address
305 * @param length number of bytes to unlock (must be a multiple of
Scott Wood08364d92016-05-30 13:57:54 -0500306 * page size mtd->writesize)
Joe Hershbergercccf5952012-08-22 16:49:42 -0500307 * @param allexcept if set, unlock everything not selected
Stefan Roesed351b2b2006-10-10 12:36:02 +0200308 *
309 * @return 0 on success, -1 in case of error
310 */
int nand_unlock(struct mtd_info *mtd, loff_t start, size_t length,
		int allexcept)
{
	int ret = 0;
	int chipnr;
	int status;
	int page;
	struct nand_chip *chip = mtd_to_nand(mtd);

	debug("nand_unlock%s: start: %08llx, length: %zd!\n",
	      allexcept ? " (allexcept)" : "", start, length);

	/* select the NAND device */
	chipnr = (int)(start >> chip->chip_shift);
	chip->select_chip(mtd, chipnr);

	/* check the WP bit; with write protect asserted the unlock
	 * commands cannot take effect */
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	if (!(chip->read_byte(mtd) & NAND_STATUS_WP)) {
		printf("nand_unlock: Device is write protected!\n");
		ret = -1;
		goto out;
	}

	/* check the Lock Tight Status; lock-tight can only be left via
	 * hardware (_WP pulse) or power down */
	page = (int)(start >> chip->page_shift);
	chip->cmdfunc(mtd, NAND_CMD_LOCK_STATUS, -1, page & chip->pagemask);
	if (chip->read_byte(mtd) & NAND_LOCK_STATUS_TIGHT) {
		printf("nand_unlock: Device is locked tight!\n");
		ret = -1;
		goto out;
	}

	/* the unlock range is specified in whole erase blocks */
	if ((start & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Start address must be beginning of "
		       "nand block!\n");
		ret = -1;
		goto out;
	}

	if (length == 0 || (length & (mtd->erasesize - 1)) != 0) {
		printf("nand_unlock: Length must be a multiple of nand block "
		       "size %08x!\n", mtd->erasesize);
		ret = -1;
		goto out;
	}

	/*
	 * Set length so that the last address is set to the
	 * starting address of the last block
	 */
	length -= mtd->erasesize;

	/* submit address of first page to unlock */
	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);

	/* submit ADDRESS of LAST page to unlock */
	page += (int)(length >> chip->page_shift);

	/*
	 * Page addresses for unlocking are supposed to be block-aligned.
	 * At least some NAND chips use the low bit to indicate that the
	 * page range should be inverted.
	 */
	if (allexcept)
		page |= 1;

	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1, page & chip->pagemask);

	/* call wait ready function */
	status = chip->waitfunc(mtd, chip);
	/* see if device thinks it succeeded (status bit 0 set = fail) */
	if (status & 0x01) {
		/* there was an error */
		ret = -1;
		goto out;
	}

 out:
	/* de-select the NAND device */
	chip->select_chip(mtd, -1);
	return ret;
}
William Juul52c07962007-10-31 13:53:06 +0100394#endif
Stefan Roesed351b2b2006-10-10 12:36:02 +0200395
Scott Woodcc5f3392008-06-12 13:20:16 -0500396/**
Scott Woodfe3b5e12010-07-30 16:11:41 -0500397 * check_skip_len
Scott Woodcc5f3392008-06-12 13:20:16 -0500398 *
Scott Woodfe3b5e12010-07-30 16:11:41 -0500399 * Check if there are any bad blocks, and whether length including bad
400 * blocks fits into device
Scott Woodcc5f3392008-06-12 13:20:16 -0500401 *
Scott Wood08364d92016-05-30 13:57:54 -0500402 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500403 * @param offset offset in flash
404 * @param length image length
Tom Rini32d96182013-03-14 05:32:50 +0000405 * @param used length of flash needed for the requested length
Scott Woodfe3b5e12010-07-30 16:11:41 -0500406 * @return 0 if the image fits and there are no bad blocks
407 * 1 if the image fits, but there are bad blocks
408 * -1 if the image does not fit
Scott Woodcc5f3392008-06-12 13:20:16 -0500409 */
Scott Wood08364d92016-05-30 13:57:54 -0500410static int check_skip_len(struct mtd_info *mtd, loff_t offset, size_t length,
411 size_t *used)
Scott Woodcc5f3392008-06-12 13:20:16 -0500412{
Scott Woodcc5f3392008-06-12 13:20:16 -0500413 size_t len_excl_bad = 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500414 int ret = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500415
416 while (len_excl_bad < length) {
Scott Woodfe3b5e12010-07-30 16:11:41 -0500417 size_t block_len, block_off;
418 loff_t block_start;
Scott Woodcc5f3392008-06-12 13:20:16 -0500419
Scott Wood08364d92016-05-30 13:57:54 -0500420 if (offset >= mtd->size)
Scott Woodfe3b5e12010-07-30 16:11:41 -0500421 return -1;
Scott Woodcc5f3392008-06-12 13:20:16 -0500422
Scott Wood08364d92016-05-30 13:57:54 -0500423 block_start = offset & ~(loff_t)(mtd->erasesize - 1);
424 block_off = offset & (mtd->erasesize - 1);
425 block_len = mtd->erasesize - block_off;
Scott Woodcc5f3392008-06-12 13:20:16 -0500426
Scott Wood08364d92016-05-30 13:57:54 -0500427 if (!nand_block_isbad(mtd, block_start))
Scott Woodfe3b5e12010-07-30 16:11:41 -0500428 len_excl_bad += block_len;
429 else
430 ret = 1;
431
432 offset += block_len;
Tom Rini32d96182013-03-14 05:32:50 +0000433 *used += block_len;
Scott Woodcc5f3392008-06-12 13:20:16 -0500434 }
435
Tom Rini32d96182013-03-14 05:32:50 +0000436 /* If the length is not a multiple of block_len, adjust. */
437 if (len_excl_bad > length)
438 *used -= (len_excl_bad - length);
439
Scott Woodfe3b5e12010-07-30 16:11:41 -0500440 return ret;
Scott Woodcc5f3392008-06-12 13:20:16 -0500441}
Ben Gardiner34dd5672011-06-14 16:35:06 -0400442
#ifdef CONFIG_CMD_NAND_TRIMFFS
/*
 * Compute how much of 'buf' actually needs writing, dropping the trailing
 * run of 0xFF bytes (erased-state filler). The result is rounded up to a
 * whole number of flash pages but never exceeds the caller's length.
 */
static size_t drop_ffs(const struct mtd_info *mtd, const u_char *buf,
			const size_t *len)
{
	ssize_t last;
	size_t trimmed;

	/* scan backwards for the last byte that is not 0xFF */
	for (last = (ssize_t)*len - 1; last >= 0; last--)
		if (buf[last] != 0xFF)
			break;

	/* round the kept length up to the minimum flash I/O size */
	trimmed = last + 1;
	trimmed = (trimmed + mtd->writesize - 1) / mtd->writesize;
	trimmed *= mtd->writesize;

	/*
	 * since the input length may be unaligned, prevent access past the
	 * end of the buffer
	 */
	return min(trimmed, *len);
}
#endif
Scott Woodcc5f3392008-06-12 13:20:16 -0500466
467/**
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600468 * nand_verify_page_oob:
469 *
470 * Verify a page of NAND flash, including the OOB.
471 * Reads page of NAND and verifies the contents and OOB against the
472 * values in ops.
473 *
Scott Wood08364d92016-05-30 13:57:54 -0500474 * @param mtd nand mtd instance
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600475 * @param ops MTD operations, including data to verify
476 * @param ofs offset in flash
477 * @return 0 in case of success
478 */
Scott Wood08364d92016-05-30 13:57:54 -0500479int nand_verify_page_oob(struct mtd_info *mtd, struct mtd_oob_ops *ops,
480 loff_t ofs)
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600481{
482 int rval;
483 struct mtd_oob_ops vops;
Scott Wood08364d92016-05-30 13:57:54 -0500484 size_t verlen = mtd->writesize + mtd->oobsize;
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600485
486 memcpy(&vops, ops, sizeof(vops));
487
Stephen Warren12db8182015-04-14 08:59:00 -0600488 vops.datbuf = memalign(ARCH_DMA_MINALIGN, verlen);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600489
490 if (!vops.datbuf)
491 return -ENOMEM;
492
Scott Wood08364d92016-05-30 13:57:54 -0500493 vops.oobbuf = vops.datbuf + mtd->writesize;
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600494
Scott Wood08364d92016-05-30 13:57:54 -0500495 rval = mtd_read_oob(mtd, ofs, &vops);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600496 if (!rval)
497 rval = memcmp(ops->datbuf, vops.datbuf, vops.len);
498 if (!rval)
499 rval = memcmp(ops->oobbuf, vops.oobbuf, vops.ooblen);
500
501 free(vops.datbuf);
502
503 return rval ? -EIO : 0;
504}
505
506/**
507 * nand_verify:
508 *
509 * Verify a region of NAND flash.
510 * Reads NAND in page-sized chunks and verifies the contents against
511 * the contents of a buffer. The offset into the NAND must be
512 * page-aligned, and the function doesn't handle skipping bad blocks.
513 *
Scott Wood08364d92016-05-30 13:57:54 -0500514 * @param mtd nand mtd instance
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600515 * @param ofs offset in flash
516 * @param len buffer length
517 * @param buf buffer to read from
518 * @return 0 in case of success
519 */
Scott Wood08364d92016-05-30 13:57:54 -0500520int nand_verify(struct mtd_info *mtd, loff_t ofs, size_t len, u_char *buf)
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600521{
522 int rval = 0;
523 size_t verofs;
Scott Wood08364d92016-05-30 13:57:54 -0500524 size_t verlen = mtd->writesize;
Stephen Warren12db8182015-04-14 08:59:00 -0600525 uint8_t *verbuf = memalign(ARCH_DMA_MINALIGN, verlen);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600526
527 if (!verbuf)
528 return -ENOMEM;
529
530 /* Read the NAND back in page-size groups to limit malloc size */
531 for (verofs = ofs; verofs < ofs + len;
532 verofs += verlen, buf += verlen) {
Scott Wood08364d92016-05-30 13:57:54 -0500533 verlen = min(mtd->writesize, (uint32_t)(ofs + len - verofs));
534 rval = nand_read(mtd, verofs, &verlen, verbuf);
Peter Tyserc9d92cf2015-02-03 11:58:12 -0600535 if (!rval || (rval == -EUCLEAN))
536 rval = memcmp(buf, verbuf, verlen);
537
538 if (rval)
539 break;
540 }
541
542 free(verbuf);
543
544 return rval ? -EIO : 0;
545}
546
547
548
549/**
Scott Woodcc5f3392008-06-12 13:20:16 -0500550 * nand_write_skip_bad:
551 *
552 * Write image to NAND flash.
553 * Blocks that are marked bad are skipped and the is written to the next
554 * block instead as long as the image is short enough to fit even after
Tom Rini32d96182013-03-14 05:32:50 +0000555 * skipping the bad blocks. Due to bad blocks we may not be able to
556 * perform the requested write. In the case where the write would
557 * extend beyond the end of the NAND device, both length and actual (if
558 * not NULL) are set to 0. In the case where the write would extend
559 * beyond the limit we are passed, length is set to 0 and actual is set
560 * to the required length.
Scott Woodcc5f3392008-06-12 13:20:16 -0500561 *
Scott Wood08364d92016-05-30 13:57:54 -0500562 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500563 * @param offset offset in flash
564 * @param length buffer length
Tom Rini32d96182013-03-14 05:32:50 +0000565 * @param actual set to size required to write length worth of
566 * buffer or 0 on error, if not NULL
567 * @param lim maximum size that actual may be in order to not
568 * exceed the buffer
Lei Wen4b5deaa2011-01-06 11:11:58 +0800569 * @param buffer buffer to read from
Ben Gardiner1caafbb2011-05-24 10:18:35 -0400570 * @param flags flags modifying the behaviour of the write to NAND
Scott Woodcc5f3392008-06-12 13:20:16 -0500571 * @return 0 in case of success
572 */
int nand_write_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
			size_t *actual, loff_t lim, u_char *buffer, int flags)
{
	int rval = 0, blocksize;
	size_t left_to_write = *length;
	size_t used_for_write = 0;
	u_char *p_buffer = buffer;
	int need_skip;

	if (actual)
		*actual = 0;

	blocksize = mtd->erasesize;

	/*
	 * nand_write() handles unaligned, partial page writes.
	 *
	 * We allow length to be unaligned, for convenience in
	 * using the $filesize variable.
	 *
	 * However, starting at an unaligned offset makes the
	 * semantics of bad block skipping ambiguous (really,
	 * you should only start a block skipping access at a
	 * partition boundary). So don't try to handle that.
	 */
	if ((offset & (mtd->writesize - 1)) != 0) {
		printf("Attempt to write non page-aligned data\n");
		*length = 0;
		return -EINVAL;
	}

	/* Determine whether bad blocks force block-by-block writing and
	 * how much device space the write will actually consume. */
	need_skip = check_skip_len(mtd, offset, *length, &used_for_write);

	if (actual)
		*actual = used_for_write;

	if (need_skip < 0) {
		printf("Attempt to write outside the flash area\n");
		*length = 0;
		return -EINVAL;
	}

	if (used_for_write > lim) {
		puts("Size of write exceeds partition or device limit\n");
		*length = 0;
		return -EFBIG;
	}

	/* Fast path: no bad blocks in range and no 0xFF-trimming wanted,
	 * so the whole image can go down in a single nand_write() call. */
	if (!need_skip && !(flags & WITH_DROP_FFS)) {
		rval = nand_write(mtd, offset, length, buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset, *length, buffer);

		if (rval == 0)
			return 0;

		*length = 0;
		printf("NAND write to offset %llx failed %d\n",
		       offset, rval);
		return rval;
	}

	/* Slow path: write block by block, skipping bad blocks */
	while (left_to_write > 0) {
		size_t block_offset = offset & (mtd->erasesize - 1);
		size_t write_size, truncated_write_size;

		WATCHDOG_RESET();

		if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
			printf("Skip bad block 0x%08llx\n",
			       offset & ~(mtd->erasesize - 1));
			/* advance to the start of the next block */
			offset += mtd->erasesize - block_offset;
			continue;
		}

		/* write at most up to the end of the current block */
		if (left_to_write < (blocksize - block_offset))
			write_size = left_to_write;
		else
			write_size = blocksize - block_offset;

		truncated_write_size = write_size;
#ifdef CONFIG_CMD_NAND_TRIMFFS
		/* optionally drop the trailing all-0xFF portion of the
		 * chunk; buffer/offset still advance by write_size */
		if (flags & WITH_DROP_FFS)
			truncated_write_size = drop_ffs(mtd, p_buffer,
							&write_size);
#endif

		rval = nand_write(mtd, offset, &truncated_write_size,
				  p_buffer);

		if ((flags & WITH_WR_VERIFY) && !rval)
			rval = nand_verify(mtd, offset,
					   truncated_write_size, p_buffer);

		offset += write_size;
		p_buffer += write_size;

		if (rval != 0) {
			printf("NAND write to offset %llx failed %d\n",
			       offset, rval);
			/* report how much was written before the failure */
			*length -= left_to_write;
			return rval;
		}

		left_to_write -= write_size;
	}

	return 0;
}
683
684/**
685 * nand_read_skip_bad:
686 *
687 * Read image from NAND flash.
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000688 * Blocks that are marked bad are skipped and the next block is read
Tom Rini32d96182013-03-14 05:32:50 +0000689 * instead as long as the image is short enough to fit even after
690 * skipping the bad blocks. Due to bad blocks we may not be able to
691 * perform the requested read. In the case where the read would extend
692 * beyond the end of the NAND device, both length and actual (if not
693 * NULL) are set to 0. In the case where the read would extend beyond
694 * the limit we are passed, length is set to 0 and actual is set to the
695 * required length.
Scott Woodcc5f3392008-06-12 13:20:16 -0500696 *
Scott Wood08364d92016-05-30 13:57:54 -0500697 * @param mtd nand mtd instance
Scott Woodcc5f3392008-06-12 13:20:16 -0500698 * @param offset offset in flash
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000699 * @param length buffer length, on return holds number of read bytes
Tom Rini32d96182013-03-14 05:32:50 +0000700 * @param actual set to size required to read length worth of buffer or 0
701 * on error, if not NULL
702 * @param lim maximum size that actual may be in order to not exceed the
703 * buffer
Scott Woodcc5f3392008-06-12 13:20:16 -0500704 * @param buffer buffer to write to
705 * @return 0 in case of success
706 */
Scott Wood08364d92016-05-30 13:57:54 -0500707int nand_read_skip_bad(struct mtd_info *mtd, loff_t offset, size_t *length,
708 size_t *actual, loff_t lim, u_char *buffer)
Scott Woodcc5f3392008-06-12 13:20:16 -0500709{
710 int rval;
711 size_t left_to_read = *length;
Tom Rini32d96182013-03-14 05:32:50 +0000712 size_t used_for_read = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500713 u_char *p_buffer = buffer;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500714 int need_skip;
Scott Woodcc5f3392008-06-12 13:20:16 -0500715
Scott Wood08364d92016-05-30 13:57:54 -0500716 if ((offset & (mtd->writesize - 1)) != 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000717 printf("Attempt to read non page-aligned data\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500718 *length = 0;
Tom Rini32d96182013-03-14 05:32:50 +0000719 if (actual)
720 *actual = 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500721 return -EINVAL;
722 }
Scott Woodcc5f3392008-06-12 13:20:16 -0500723
Scott Wood08364d92016-05-30 13:57:54 -0500724 need_skip = check_skip_len(mtd, offset, *length, &used_for_read);
Tom Rini32d96182013-03-14 05:32:50 +0000725
726 if (actual)
727 *actual = used_for_read;
728
Scott Woodfe3b5e12010-07-30 16:11:41 -0500729 if (need_skip < 0) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000730 printf("Attempt to read outside the flash area\n");
Scott Woodfe3b5e12010-07-30 16:11:41 -0500731 *length = 0;
Scott Woodcc5f3392008-06-12 13:20:16 -0500732 return -EINVAL;
733 }
734
Tom Rini32d96182013-03-14 05:32:50 +0000735 if (used_for_read > lim) {
736 puts("Size of read exceeds partition or device limit\n");
737 *length = 0;
738 return -EFBIG;
739 }
740
Scott Woodfe3b5e12010-07-30 16:11:41 -0500741 if (!need_skip) {
Scott Wood08364d92016-05-30 13:57:54 -0500742 rval = nand_read(mtd, offset, length, buffer);
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300743 if (!rval || rval == -EUCLEAN)
744 return 0;
Scott Woodfe3b5e12010-07-30 16:11:41 -0500745
746 *length = 0;
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000747 printf("NAND read from offset %llx failed %d\n",
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300748 offset, rval);
Scott Wood90e0a6b2008-11-25 10:47:02 -0600749 return rval;
Scott Woodcc5f3392008-06-12 13:20:16 -0500750 }
751
752 while (left_to_read > 0) {
Scott Wood08364d92016-05-30 13:57:54 -0500753 size_t block_offset = offset & (mtd->erasesize - 1);
Scott Woodcc5f3392008-06-12 13:20:16 -0500754 size_t read_length;
755
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000756 WATCHDOG_RESET();
Giulio Benetti749bd662009-07-31 17:30:34 -0500757
Scott Wood08364d92016-05-30 13:57:54 -0500758 if (nand_block_isbad(mtd, offset & ~(mtd->erasesize - 1))) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000759 printf("Skipping bad block 0x%08llx\n",
Scott Wood08364d92016-05-30 13:57:54 -0500760 offset & ~(mtd->erasesize - 1));
761 offset += mtd->erasesize - block_offset;
Scott Woodcc5f3392008-06-12 13:20:16 -0500762 continue;
763 }
764
Scott Wood08364d92016-05-30 13:57:54 -0500765 if (left_to_read < (mtd->erasesize - block_offset))
Scott Woodcc5f3392008-06-12 13:20:16 -0500766 read_length = left_to_read;
767 else
Scott Wood08364d92016-05-30 13:57:54 -0500768 read_length = mtd->erasesize - block_offset;
Scott Woodcc5f3392008-06-12 13:20:16 -0500769
Scott Wood08364d92016-05-30 13:57:54 -0500770 rval = nand_read(mtd, offset, &read_length, p_buffer);
Valeriy Glushkovf2aead82009-07-14 13:51:10 +0300771 if (rval && rval != -EUCLEAN) {
Benoît Thébaudeaue26708e2012-11-05 10:15:46 +0000772 printf("NAND read from offset %llx failed %d\n",
Wolfgang Denk74e0dde2008-08-14 14:41:06 +0200773 offset, rval);
Scott Woodcc5f3392008-06-12 13:20:16 -0500774 *length -= left_to_read;
775 return rval;
776 }
777
778 left_to_read -= read_length;
779 offset += read_length;
780 p_buffer += read_length;
781 }
782
783 return 0;
784}
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100785
786#ifdef CONFIG_CMD_NAND_TORTURE
787
788/**
789 * check_pattern:
790 *
791 * Check if buffer contains only a certain byte pattern.
792 *
793 * @param buf buffer to check
794 * @param patt the pattern to check
795 * @param size buffer size in bytes
796 * @return 1 if there are only patt bytes in buf
797 * 0 if something else was found
798 */
799static int check_pattern(const u_char *buf, u_char patt, int size)
800{
801 int i;
802
803 for (i = 0; i < size; i++)
804 if (buf[i] != patt)
805 return 0;
806 return 1;
807}
808
809/**
810 * nand_torture:
811 *
812 * Torture a block of NAND flash.
813 * This is useful to determine if a block that caused a write error is still
814 * good or should be marked as bad.
815 *
Scott Wood08364d92016-05-30 13:57:54 -0500816 * @param mtd nand mtd instance
Benoît Thébaudeau5661f702012-11-16 20:20:54 +0100817 * @param offset offset in flash
818 * @return 0 if the block is still good
819 */
int nand_torture(struct mtd_info *mtd, loff_t offset)
{
	/* Patterns written to the block in turn; 0x00 comes last so the
	 * block ends the test in a deterministic all-zero state. */
	u_char patterns[] = {0xa5, 0x5a, 0x00};
	struct erase_info instr = {
		.mtd = mtd,
		.addr = offset,
		.len = mtd->erasesize,
	};
	size_t retlen;
	int err, ret = -1, i, patt_count;
	u_char *buf;

	/* Torture operates on whole erase blocks only. */
	if ((offset & (mtd->erasesize - 1)) != 0) {
		puts("Attempt to torture a block at a non block-aligned offset\n");
		return -EINVAL;
	}

	/* The block must lie entirely within the device. */
	if (offset + mtd->erasesize > mtd->size) {
		puts("Attempt to torture a block outside the flash area\n");
		return -EINVAL;
	}

	patt_count = ARRAY_SIZE(patterns);

	/* Scratch buffer covering one full erase block. */
	buf = malloc_cache_aligned(mtd->erasesize);
	if (buf == NULL) {
		puts("Out of memory for erase block buffer\n");
		return -ENOMEM;
	}

	/*
	 * For each pattern: erase the block, verify it reads back as all
	 * 0xff, fill it with the pattern, and verify the pattern reads
	 * back intact.  Any I/O failure or mismatch aborts the test.
	 */
	for (i = 0; i < patt_count; i++) {
		err = mtd_erase(mtd, &instr);
		if (err) {
			printf("%s: erase() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		/* Make sure the block contains only 0xff bytes */
		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		/* -EUCLEAN only reports corrected bitflips, not a failure */
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		err = check_pattern(buf, 0xff, mtd->erasesize);
		if (!err) {
			printf("Erased block at 0x%llx, but a non-0xff byte was found\n",
			       offset);
			ret = -EIO;
			goto out;
		}

		/* Write a pattern and check it */
		memset(buf, patterns[i], mtd->erasesize);
		err = mtd_write(mtd, offset, mtd->erasesize, &retlen, buf);
		if (err || retlen != mtd->erasesize) {
			printf("%s: write() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		/* Read the pattern back through the same block device path */
		err = mtd_read(mtd, offset, mtd->erasesize, &retlen, buf);
		if ((err && err != -EUCLEAN) || retlen != mtd->erasesize) {
			printf("%s: read() failed for block at 0x%llx: %d\n",
			       mtd->name, instr.addr, err);
			goto out;
		}

		err = check_pattern(buf, patterns[i], mtd->erasesize);
		if (!err) {
			printf("Pattern 0x%.2x checking failed for block at "
			       "0x%llx\n", patterns[i], offset);
			ret = -EIO;
			goto out;
		}
	}

	/* All patterns survived: the block still looks good. */
	ret = 0;

out:
	/* ret stays -1 (generic failure) when an erase/read/write op
	 * failed, is -EIO on a verify mismatch, and 0 on success. */
	free(buf);
	return ret;
}
905
906#endif