// SPDX-License-Identifier: GPL-2.0+
/*
 * dfu_mtd.c -- DFU for MTD device.
 *
 * Copyright (C) 2019,STMicroelectronics - All Rights Reserved
 *
 * Based on dfu_nand.c
 */

#include <common.h>
#include <dfu.h>
#include <mtd.h>
#include <jffs2/load_kernel.h>
#include <linux/err.h>
#include <linux/ctype.h>

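/* Return true when @size (an offset or a length) is a multiple of the erase block size */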
static bool mtd_is_aligned_with_block_size(struct mtd_info *mtd, u64 size)
{
	return !do_div(size, mtd->erasesize);
}

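/*
 * Return true when the data buffer of @op contains only 0xff bytes,
 * i.e. the page holds no payload and matches freshly erased flash.
 */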
/* Logic taken from cmd/mtd.c:mtd_oob_write_is_empty() */
static bool mtd_page_is_empty(struct mtd_oob_ops *op)
{
	int i;

	for (i = 0; i < op->len; i++)
		if (op->datbuf[i] != 0xff)
			return false;

	/* OOB is not used, as the caller sets MTD_OPS_AUTO_OOB and ooblen = 0 */

	return true;
}

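/*
 * Raw read/write handler shared by the DFU read and write paths.
 * A NULL @buf is a size query: *len is set to the size of the area.
 * For writes, the range is unlocked and erased block by block first,
 * then data is transferred page by page on NAND (skipping bad blocks
 * and recording them in dfu->bad_skip) and the range is locked again.
 */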
static int mtd_block_op(enum dfu_op op, struct dfu_entity *dfu,
			u64 offset, void *buf, long *len)
{
	u64 off, lim, remaining, lock_ofs, lock_len;
	struct mtd_info *mtd = dfu->data.mtd.info;
	struct mtd_oob_ops io_op = {};
	int ret = 0;
	bool has_pages = mtd->type == MTD_NANDFLASH ||
			 mtd->type == MTD_MLCNANDFLASH;

	/* if buf == NULL, return the total size of the area */
	if (!buf) {
		*len = dfu->data.mtd.size;
		return 0;
	}

	off = lock_ofs = dfu->data.mtd.start + offset + dfu->bad_skip;
	lim = dfu->data.mtd.start + dfu->data.mtd.size;

	if (off >= lim) {
		printf("Limit reached 0x%llx\n", lim);
		*len = 0;
		return op == DFU_OP_READ ? 0 : -EIO;
	}
	/* limit the request to the available size */
	if (off + *len >= lim)
		*len = lim - off;

	if (!mtd_is_aligned_with_block_size(mtd, off)) {
		printf("Offset not aligned with a block (0x%x)\n",
		       mtd->erasesize);
		return 0;
	}

	/* erase first */
	if (op == DFU_OP_WRITE) {
		struct erase_info erase_op = {};

		remaining = lock_len = round_up(*len, mtd->erasesize);
		erase_op.mtd = mtd;
		erase_op.addr = off;
		erase_op.len = mtd->erasesize;
		erase_op.scrub = 0;

		debug("Unlocking the mtd device\n");
		ret = mtd_unlock(mtd, lock_ofs, lock_len);
		if (ret && ret != -EOPNOTSUPP) {
			printf("MTD device unlock failed\n");
			return 0;
		}

		while (remaining) {
			if (erase_op.addr + remaining > lim) {
				printf("Limit reached 0x%llx while erasing at offset 0x%llx\n",
				       lim, off);
				return -EIO;
			}

			ret = mtd_erase(mtd, &erase_op);

			if (ret) {
				/* Abort if it's not a bad block error */
				if (ret != -EIO) {
					printf("Failure while erasing at offset 0x%llx\n",
					       erase_op.fail_addr);
					return 0;
				}
				printf("Skipping bad block at 0x%08llx\n",
				       erase_op.addr);
			} else {
				remaining -= mtd->erasesize;
			}

			/* Continue erasing behind the bad block */
			erase_op.addr += mtd->erasesize;
		}
	}

	io_op.mode = MTD_OPS_AUTO_OOB;
	io_op.len = *len;
	if (has_pages && io_op.len > mtd->writesize)
		io_op.len = mtd->writesize;
	io_op.ooblen = 0;
	io_op.datbuf = buf;
	io_op.oobbuf = NULL;

	/* Loop over to do the actual read/write */
	remaining = *len;
	while (remaining) {
		if (off + remaining > lim) {
			printf("Limit reached 0x%llx while %s at offset 0x%llx\n",
			       lim, op == DFU_OP_READ ? "reading" : "writing",
			       off);
			if (op == DFU_OP_READ) {
				*len -= remaining;
				return 0;
			} else {
				return -EIO;
			}
		}

		/* Skip the block if it is bad */
		if (mtd_is_aligned_with_block_size(mtd, off) &&
		    mtd_block_isbad(mtd, off)) {
			off += mtd->erasesize;
			dfu->bad_skip += mtd->erasesize;
			continue;
		}

		if (op == DFU_OP_READ) {
			ret = mtd_read_oob(mtd, off, &io_op);
		} else if (has_pages && dfu->data.mtd.ubi && mtd_page_is_empty(&io_op)) {
			/* in case of a ubi partition, do not write an empty page, only skip it */
			ret = 0;
			io_op.retlen = mtd->writesize;
			io_op.oobretlen = mtd->oobsize;
		} else {
			ret = mtd_write_oob(mtd, off, &io_op);
		}

		if (ret) {
			printf("Failure while %s at offset 0x%llx\n",
			       op == DFU_OP_READ ? "reading" : "writing", off);
			return -EIO;
		}

		off += io_op.retlen;
		remaining -= io_op.retlen;
		io_op.datbuf += io_op.retlen;
		io_op.len = remaining;
		if (has_pages && io_op.len > mtd->writesize)
			io_op.len = mtd->writesize;
	}

	if (op == DFU_OP_WRITE) {
		/* Write done, lock again */
		debug("Locking the mtd device\n");
		ret = mtd_lock(mtd, lock_ofs, lock_len);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		else if (ret)
			printf("MTD device lock failed\n");
	}
	return ret;
}

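/* Report the size of the whole MTD device backing this DFU entity */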
static int dfu_get_medium_size_mtd(struct dfu_entity *dfu, u64 *size)
{
	*size = dfu->data.mtd.info->size;

	return 0;
}

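/* DFU upload (read) callback: only the DFU_RAW_ADDR layout is handled */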
static int dfu_read_medium_mtd(struct dfu_entity *dfu, u64 offset, void *buf,
			       long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = mtd_block_op(DFU_OP_READ, dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

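/* DFU download (write) callback: only the DFU_RAW_ADDR layout is handled */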
static int dfu_write_medium_mtd(struct dfu_entity *dfu,
				u64 offset, void *buf, long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = mtd_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

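/*
 * Flush callback, invoked once the download is complete. For a UBI
 * partition the remainder of the partition is erased, since sectors
 * which are not used by the new image need to be erased (see
 * dfu_polltimeout_mtd() below).
 */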
static int dfu_flush_medium_mtd(struct dfu_entity *dfu)
{
	struct mtd_info *mtd = dfu->data.mtd.info;
	u64 remaining;
	int ret;

	/* in case of a ubi partition, erase the rest of the partition */
	if (dfu->data.mtd.ubi) {
		struct erase_info erase_op = {};

		erase_op.mtd = dfu->data.mtd.info;
		erase_op.addr = round_up(dfu->data.mtd.start + dfu->offset +
					 dfu->bad_skip, mtd->erasesize);
		erase_op.len = mtd->erasesize;
		erase_op.scrub = 0;

		remaining = dfu->data.mtd.start + dfu->data.mtd.size -
			    erase_op.addr;

		while (remaining) {
			ret = mtd_erase(mtd, &erase_op);

			if (ret) {
				/* Abort if it's not a bad block error */
				if (ret != -EIO)
					break;
				printf("Skipping bad block at 0x%08llx\n",
				       erase_op.addr);
			}

			/* Skip the bad block and continue behind it */
			erase_op.addr += mtd->erasesize;
			remaining -= mtd->erasesize;
		}
	}
	return 0;
}

static unsigned int dfu_polltimeout_mtd(struct dfu_entity *dfu)
{
	/*
	 * Currently, a Poll Timeout != 0 is only needed for NAND UBI
	 * partitions, as the sectors which are not used still need
	 * to be erased during manifestation.
	 */
	if (dfu->data.mtd.ubi)
		return DFU_MANIFEST_POLL_TIMEOUT;

	return DFU_DEFAULT_POLL_TIMEOUT;
}

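/*
 * Parse the dfu_alt_info entry for an MTD device and fill the DFU entity.
 * Supported argument forms:
 *   raw <hex offset> <hex size>   raw access to a region of the device
 *   part <num>                    raw access to mtdparts partition <num>
 *   partubi <num>                 as "part", but the remainder of the
 *                                 partition is erased on flush (UBI content)
 */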
int dfu_fill_entity_mtd(struct dfu_entity *dfu, char *devstr, char **argv, int argc)
{
	char *s;
	struct mtd_info *mtd;
	int ret, part;

	mtd = get_mtd_device_nm(devstr);
	if (IS_ERR_OR_NULL(mtd))
		return -ENODEV;
	put_mtd_device(mtd);

	dfu->dev_type = DFU_DEV_MTD;
	dfu->data.mtd.info = mtd;
	dfu->max_buf_size = mtd->erasesize;
	if (argc < 1)
		return -EINVAL;

	if (!strcmp(argv[0], "raw")) {
		if (argc != 3)
			return -EINVAL;
		dfu->layout = DFU_RAW_ADDR;
		dfu->data.mtd.start = hextoul(argv[1], &s);
		if (*s)
			return -EINVAL;
		dfu->data.mtd.size = hextoul(argv[2], &s);
		if (*s)
			return -EINVAL;
	} else if ((!strcmp(argv[0], "part")) || (!strcmp(argv[0], "partubi"))) {
		char mtd_id[32];
		struct mtd_device *mtd_dev;
		u8 part_num;
		struct part_info *pi;

		if (argc != 2)
			return -EINVAL;

		dfu->layout = DFU_RAW_ADDR;

		part = dectoul(argv[1], &s);
		if (*s)
			return -EINVAL;

		sprintf(mtd_id, "%s,%d", devstr, part - 1);
		printf("using id '%s'\n", mtd_id);

		mtdparts_init();

		ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
		if (ret != 0) {
			printf("Could not locate '%s'\n", mtd_id);
			return -1;
		}

		dfu->data.mtd.start = pi->offset;
		dfu->data.mtd.size = pi->size;
		if (!strcmp(argv[0], "partubi"))
			dfu->data.mtd.ubi = 1;
	} else {
		printf("%s: Memory layout (%s) not supported!\n", __func__, argv[0]);
		return -1;
	}

	if (!mtd_is_aligned_with_block_size(mtd, dfu->data.mtd.start)) {
		printf("Offset not aligned with a block (0x%x)\n",
		       mtd->erasesize);
		return -EINVAL;
	}
	if (!mtd_is_aligned_with_block_size(mtd, dfu->data.mtd.size)) {
		printf("Size not aligned with a block (0x%x)\n",
		       mtd->erasesize);
		return -EINVAL;
	}

	dfu->get_medium_size = dfu_get_medium_size_mtd;
	dfu->read_medium = dfu_read_medium_mtd;
	dfu->write_medium = dfu_write_medium_mtd;
	dfu->flush_medium = dfu_flush_medium_mtd;
	dfu->poll_timeout = dfu_polltimeout_mtd;

	/* initial state */
	dfu->inited = 0;

	return 0;
}