blob: b8d24d203beb097b52bbb97f45a90528691be903 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +00002/*
3 * dfu_nand.c -- DFU for NAND routines.
4 *
5 * Copyright (C) 2012-2013 Texas Instruments, Inc.
6 *
7 * Based on dfu_mmc.c which is:
8 * Copyright (C) 2012 Samsung Electronics
9 * author: Lukasz Majewski <l.majewski@samsung.com>
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +000010 */
11
12#include <common.h>
Simon Glass0f2af882020-05-10 11:40:05 -060013#include <log.h>
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +000014#include <malloc.h>
15#include <errno.h>
16#include <div64.h>
17#include <dfu.h>
18#include <linux/mtd/mtd.h>
19#include <jffs2/load_kernel.h>
20#include <nand.h>
21
/*
 * Perform a raw read or write of *len bytes at @offset within the DFU
 * entity's NAND area, transparently skipping bad blocks.
 *
 * @op:     DFU_OP_READ or DFU_OP_WRITE
 * @dfu:    DFU entity describing the NAND region (start/size/bad_skip)
 * @offset: byte offset into the entity for this transfer chunk
 * @buf:    data buffer; if NULL, *len is set to the total area size
 *          and no I/O is performed (size-query convention)
 * @len:    in: requested byte count; out (query mode): area size
 *
 * Returns 0 on success, a negative/non-zero error otherwise.
 */
static int nand_block_op(enum dfu_op op, struct dfu_entity *dfu,
			 u64 offset, void *buf, long *len)
{
	loff_t start, lim;
	size_t count, actual;
	int ret;
	struct mtd_info *mtd;

	/* if buf == NULL return total size of the area */
	if (buf == NULL) {
		*len = dfu->data.nand.size;
		return 0;
	}

	/*
	 * bad_skip accumulates how far previous chunks over-ran their
	 * nominal offset due to skipped bad blocks; fold it in so this
	 * chunk lands directly after the previously written data.
	 */
	start = dfu->data.nand.start + offset + dfu->bad_skip;
	/* lim = bytes remaining in the area from 'start' to its end */
	lim = dfu->data.nand.start + dfu->data.nand.size - start;
	count = *len;

	/* NOTE: get_nand_dev_by_index() tolerates a bad index (returns
	 * NULL), so fetching before the validity check below is safe. */
	mtd = get_nand_dev_by_index(nand_curr_device);

	if (nand_curr_device < 0 ||
	    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
	    !mtd) {
		printf("%s: invalid nand device\n", __func__);
		return -1;
	}

	if (op == DFU_OP_READ) {
		ret = nand_read_skip_bad(mtd, start, &count, &actual,
					 lim, buf);
	} else {
		nand_erase_options_t opts;
		int write_flags = WITH_WR_VERIFY;

		memset(&opts, 0, sizeof(opts));
		opts.offset = start;
		opts.length = count;
		opts.spread = 1;
		opts.quiet = 1;
		opts.lim = lim;
		/* first erase */
		ret = nand_erase_opts(mtd, &opts);
		if (ret)
			return ret;
		/* then write */
#ifdef CONFIG_DFU_NAND_TRIMFFS
		/* For UBI images, drop trailing 0xFF pages so UBI can
		 * detect erased space (see nand_write_skip_bad docs). */
		if (dfu->data.nand.ubi)
			write_flags |= WITH_DROP_FFS;
#endif
		ret = nand_write_skip_bad(mtd, start, &count, &actual,
					  lim, buf, write_flags);
	}

	if (ret != 0) {
		printf("%s: nand_%s_skip_bad call failed at %llx!\n",
		       __func__, op == DFU_OP_READ ? "read" : "write",
		       start);
		return ret;
	}

	/*
	 * Find out where we stopped writing data. This can be deeper into
	 * the NAND than we expected due to having to skip bad blocks. So
	 * we must take this into account for the next write, if any.
	 */
	if (actual > count)
		dfu->bad_skip += actual - count;

	return ret;
}
92
/* Thin wrapper: raw NAND write of a DFU chunk (see nand_block_op()). */
static inline int nand_block_write(struct dfu_entity *dfu,
				   u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
}
98
/* Thin wrapper: raw NAND read of a DFU chunk (see nand_block_op()). */
static inline int nand_block_read(struct dfu_entity *dfu,
				  u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_READ, dfu, offset, buf, len);
}
104
105static int dfu_write_medium_nand(struct dfu_entity *dfu,
106 u64 offset, void *buf, long *len)
107{
108 int ret = -1;
109
110 switch (dfu->layout) {
111 case DFU_RAW_ADDR:
112 ret = nand_block_write(dfu, offset, buf, len);
113 break;
114 default:
115 printf("%s: Layout (%s) not (yet) supported!\n", __func__,
116 dfu_get_layout(dfu->layout));
117 }
118
119 return ret;
120}
121
/*
 * dfu->get_medium_size callback: report the total size of the backing
 * NAND area (set up in dfu_fill_entity_nand()). Always succeeds.
 */
int dfu_get_medium_size_nand(struct dfu_entity *dfu, u64 *size)
{
	*size = dfu->data.nand.size;

	return 0;
}
128
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000129static int dfu_read_medium_nand(struct dfu_entity *dfu, u64 offset, void *buf,
130 long *len)
131{
132 int ret = -1;
133
134 switch (dfu->layout) {
135 case DFU_RAW_ADDR:
136 ret = nand_block_read(dfu, offset, buf, len);
137 break;
138 default:
139 printf("%s: Layout (%s) not (yet) supported!\n", __func__,
140 dfu_get_layout(dfu->layout));
141 }
142
143 return ret;
144}
145
Heiko Schocherad401392013-07-25 06:43:11 +0200146static int dfu_flush_medium_nand(struct dfu_entity *dfu)
147{
148 int ret = 0;
Heiko Schocher0ed9da52016-06-07 08:55:44 +0200149 u64 off;
Heiko Schocherad401392013-07-25 06:43:11 +0200150
151 /* in case of ubi partition, erase rest of the partition */
152 if (dfu->data.nand.ubi) {
Grygorii Strashkoae1e9c72017-06-26 19:12:53 -0500153 struct mtd_info *mtd = get_nand_dev_by_index(nand_curr_device);
Heiko Schocherad401392013-07-25 06:43:11 +0200154 nand_erase_options_t opts;
155
156 if (nand_curr_device < 0 ||
157 nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
Grygorii Strashkoae1e9c72017-06-26 19:12:53 -0500158 !mtd) {
Heiko Schocherad401392013-07-25 06:43:11 +0200159 printf("%s: invalid nand device\n", __func__);
160 return -1;
161 }
162
Heiko Schocherad401392013-07-25 06:43:11 +0200163 memset(&opts, 0, sizeof(opts));
Heiko Schocher0ed9da52016-06-07 08:55:44 +0200164 off = dfu->offset;
165 if ((off & (mtd->erasesize - 1)) != 0) {
166 /*
167 * last write ended with unaligned length
168 * sector is erased, jump to next
169 */
170 off = off & ~((mtd->erasesize - 1));
171 off += mtd->erasesize;
172 }
173 opts.offset = dfu->data.nand.start + off +
Heiko Schocherad401392013-07-25 06:43:11 +0200174 dfu->bad_skip;
175 opts.length = dfu->data.nand.start +
176 dfu->data.nand.size - opts.offset;
Scott Wood08364d92016-05-30 13:57:54 -0500177 ret = nand_erase_opts(mtd, &opts);
Heiko Schocherad401392013-07-25 06:43:11 +0200178 if (ret != 0)
179 printf("Failure erase: %d\n", ret);
180 }
181
182 return ret;
183}
184
Heiko Schocherb9488102014-04-11 07:59:47 +0200185unsigned int dfu_polltimeout_nand(struct dfu_entity *dfu)
186{
187 /*
188 * Currently, Poll Timeout != 0 is only needed on nand
189 * ubi partition, as the not used sectors need an erase
190 */
191 if (dfu->data.nand.ubi)
192 return DFU_MANIFEST_POLL_TIMEOUT;
193
194 return DFU_DEFAULT_POLL_TIMEOUT;
195}
196
Stephen Warren4afe50f2014-06-11 16:03:33 -0600197int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr, char *s)
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000198{
199 char *st;
200 int ret, dev, part;
201
Heiko Schocherad401392013-07-25 06:43:11 +0200202 dfu->data.nand.ubi = 0;
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000203 dfu->dev_type = DFU_DEV_NAND;
204 st = strsep(&s, " ");
205 if (!strcmp(st, "raw")) {
206 dfu->layout = DFU_RAW_ADDR;
207 dfu->data.nand.start = simple_strtoul(s, &s, 16);
208 s++;
209 dfu->data.nand.size = simple_strtoul(s, &s, 16);
Heiko Schocherad401392013-07-25 06:43:11 +0200210 } else if ((!strcmp(st, "part")) || (!strcmp(st, "partubi"))) {
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000211 char mtd_id[32];
212 struct mtd_device *mtd_dev;
213 u8 part_num;
214 struct part_info *pi;
215
216 dfu->layout = DFU_RAW_ADDR;
217
218 dev = simple_strtoul(s, &s, 10);
219 s++;
220 part = simple_strtoul(s, &s, 10);
221
222 sprintf(mtd_id, "%s%d,%d", "nand", dev, part - 1);
Ralph Siemsen756d9e12019-08-27 14:28:19 -0400223 debug("using id '%s'\n", mtd_id);
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000224
225 mtdparts_init();
226
227 ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
228 if (ret != 0) {
229 printf("Could not locate '%s'\n", mtd_id);
230 return -1;
231 }
232
233 dfu->data.nand.start = pi->offset;
234 dfu->data.nand.size = pi->size;
Heiko Schocherad401392013-07-25 06:43:11 +0200235 if (!strcmp(st, "partubi"))
236 dfu->data.nand.ubi = 1;
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000237 } else {
238 printf("%s: Memory layout (%s) not supported!\n", __func__, st);
239 return -1;
240 }
241
Stephen Warren020e6f32014-06-11 12:47:27 -0600242 dfu->get_medium_size = dfu_get_medium_size_nand;
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000243 dfu->read_medium = dfu_read_medium_nand;
244 dfu->write_medium = dfu_write_medium_nand;
Heiko Schocherad401392013-07-25 06:43:11 +0200245 dfu->flush_medium = dfu_flush_medium_nand;
Heiko Schocherb9488102014-04-11 07:59:47 +0200246 dfu->poll_timeout = dfu_polltimeout_nand;
Pantelis Antonioucf14d0d2013-03-14 05:32:52 +0000247
248 /* initial state */
249 dfu->inited = 0;
250
251 return 0;
252}