// SPDX-License-Identifier: GPL-2.0+
/*
 * dfu_mtd.c -- DFU for MTD device.
 *
 * Copyright (C) 2019, STMicroelectronics - All Rights Reserved
 *
 * Based on dfu_nand.c
 */

#include <common.h>
#include <dfu.h>
#include <mtd.h>
#include <jffs2/load_kernel.h>
#include <linux/err.h>

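/*
 * Check that @size is a multiple of the erase block size. Note that
 * do_div() divides its first argument in place and returns the
 * remainder, which is why @size is taken by value.
 */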
static bool mtd_is_aligned_with_block_size(struct mtd_info *mtd, u64 size)
{
	return !do_div(size, mtd->erasesize);
}

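/*
 * Read from or write to the MTD area backing a DFU entity. A NULL @buf
 * only queries the size of the area. Writes erase the target blocks
 * first; bad blocks are skipped and accounted for in dfu->bad_skip so
 * that the data stays contiguous on flash.
 */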
static int mtd_block_op(enum dfu_op op, struct dfu_entity *dfu,
			u64 offset, void *buf, long *len)
{
	u64 off, lim, remaining, lock_ofs, lock_len;
	struct mtd_info *mtd = dfu->data.mtd.info;
	struct mtd_oob_ops io_op = {};
	int ret = 0;
	bool has_pages = mtd->type == MTD_NANDFLASH ||
			 mtd->type == MTD_MLCNANDFLASH;

	/* if buf == NULL return total size of the area */
	if (!buf) {
		*len = dfu->data.mtd.size;
		return 0;
	}

	off = lock_ofs = dfu->data.mtd.start + offset + dfu->bad_skip;
	lim = dfu->data.mtd.start + dfu->data.mtd.size;

	if (off >= lim) {
		printf("Limit reached 0x%llx\n", lim);
		*len = 0;
		return op == DFU_OP_READ ? 0 : -EIO;
	}
	/* limit request with the available size */
	if (off + *len >= lim)
		*len = lim - off;

	if (!mtd_is_aligned_with_block_size(mtd, off)) {
		printf("Offset not aligned with a block (0x%x)\n",
		       mtd->erasesize);
		return 0;
	}

	/* first erase */
	if (op == DFU_OP_WRITE) {
		struct erase_info erase_op = {};

		remaining = lock_len = round_up(*len, mtd->erasesize);
		erase_op.mtd = mtd;
		erase_op.addr = off;
		erase_op.len = mtd->erasesize;
		erase_op.scrub = 0;

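		/*
		 * Unlock the region before erasing; drivers without lock
		 * support return -EOPNOTSUPP, which is not an error here.
		 */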
		debug("Unlocking the mtd device\n");
		ret = mtd_unlock(mtd, lock_ofs, lock_len);
		if (ret && ret != -EOPNOTSUPP) {
			printf("MTD device unlock failed\n");
			return 0;
		}

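		/*
		 * Erase one block per iteration so that a bad block only
		 * costs one pass and the loop can step over it.
		 */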
		while (remaining) {
			if (erase_op.addr + remaining > lim) {
				printf("Limit reached 0x%llx while erasing at offset 0x%llx\n",
				       lim, off);
				return -EIO;
			}

			ret = mtd_erase(mtd, &erase_op);

			if (ret) {
				/* Abort if it's not a bad block error */
				if (ret != -EIO) {
					printf("Failure while erasing at offset 0x%llx\n",
					       erase_op.fail_addr);
					return 0;
				}
				printf("Skipping bad block at 0x%08llx\n",
				       erase_op.addr);
			} else {
				remaining -= mtd->erasesize;
			}

			/* Continue erase behind bad block */
			erase_op.addr += mtd->erasesize;
		}
	}

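	/*
	 * MTD_OPS_AUTO_OOB lets the MTD layer handle OOB placement
	 * automatically; on NAND the transfer is chunked to one page
	 * per call.
	 */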
	io_op.mode = MTD_OPS_AUTO_OOB;
	io_op.len = *len;
	if (has_pages && io_op.len > mtd->writesize)
		io_op.len = mtd->writesize;
	io_op.ooblen = 0;
	io_op.datbuf = buf;
	io_op.oobbuf = NULL;

	/* Loop over to do the actual read/write */
	remaining = *len;
	while (remaining) {
		if (off + remaining > lim) {
			printf("Limit reached 0x%llx while %s at offset 0x%llx\n",
			       lim, op == DFU_OP_READ ? "reading" : "writing",
			       off);
			if (op == DFU_OP_READ) {
				*len -= remaining;
				return 0;
			} else {
				return -EIO;
			}
		}

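		/*
		 * dfu->bad_skip persists across calls, so later chunks of
		 * the same transfer keep skipping this block as well.
		 */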
		/* Skip the block if it is bad */
		if (mtd_is_aligned_with_block_size(mtd, off) &&
		    mtd_block_isbad(mtd, off)) {
			off += mtd->erasesize;
			dfu->bad_skip += mtd->erasesize;
			continue;
		}

		if (op == DFU_OP_READ)
			ret = mtd_read_oob(mtd, off, &io_op);
		else
			ret = mtd_write_oob(mtd, off, &io_op);

		if (ret) {
			printf("Failure while %s at offset 0x%llx\n",
			       op == DFU_OP_READ ? "reading" : "writing", off);
			return -EIO;
		}

		off += io_op.retlen;
		remaining -= io_op.retlen;
		io_op.datbuf += io_op.retlen;
		io_op.len = remaining;
		if (has_pages && io_op.len > mtd->writesize)
			io_op.len = mtd->writesize;
	}

	if (op == DFU_OP_WRITE) {
		/* Write done, lock again */
		debug("Locking the mtd device\n");
		ret = mtd_lock(mtd, lock_ofs, lock_len);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		else if (ret)
			printf("MTD device lock failed\n");
	}
	return ret;
}

static int dfu_get_medium_size_mtd(struct dfu_entity *dfu, u64 *size)
{
	*size = dfu->data.mtd.info->size;

	return 0;
}

static int dfu_read_medium_mtd(struct dfu_entity *dfu, u64 offset, void *buf,
			       long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = mtd_block_op(DFU_OP_READ, dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

static int dfu_write_medium_mtd(struct dfu_entity *dfu,
				u64 offset, void *buf, long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = mtd_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

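/*
 * Called once the whole image has been received. For a UBI partition
 * the rest of the partition is erased, since UBI expects unused erase
 * blocks to be empty.
 */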
static int dfu_flush_medium_mtd(struct dfu_entity *dfu)
{
	struct mtd_info *mtd = dfu->data.mtd.info;
	u64 remaining;
	int ret;

	/* in case of ubi partition, erase rest of the partition */
	if (dfu->data.mtd.ubi) {
		struct erase_info erase_op = {};

		erase_op.mtd = dfu->data.mtd.info;
		erase_op.addr = round_up(dfu->data.mtd.start + dfu->offset +
					 dfu->bad_skip, mtd->erasesize);
		erase_op.len = mtd->erasesize;
		erase_op.scrub = 0;

		remaining = dfu->data.mtd.start + dfu->data.mtd.size -
			    erase_op.addr;

		while (remaining) {
			ret = mtd_erase(mtd, &erase_op);

			if (ret) {
				/* Abort if it's not a bad block error */
				if (ret != -EIO)
					break;
				printf("Skipping bad block at 0x%08llx\n",
				       erase_op.addr);
			}

			/* Skip bad block and continue behind it */
			erase_op.addr += mtd->erasesize;
			remaining -= mtd->erasesize;
		}
	}
	return 0;
}

static unsigned int dfu_polltimeout_mtd(struct dfu_entity *dfu)
{
	/*
	 * Currently, a Poll Timeout != 0 is only needed on NAND UBI
	 * partitions, as sectors which are not used need to be erased.
	 */
	if (dfu->data.mtd.ubi)
		return DFU_MANIFEST_POLL_TIMEOUT;

	return DFU_DEFAULT_POLL_TIMEOUT;
}

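/*
 * Parse the MTD part of a dfu_alt_info entity description. The entity
 * name has already been consumed by the DFU core, so @s is one of:
 *   "raw <hex offset> <hex size>"
 *   "part <mtdparts partition number>"
 *   "partubi <mtdparts partition number>"
 */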
int dfu_fill_entity_mtd(struct dfu_entity *dfu, char *devstr, char *s)
{
	char *st;
	struct mtd_info *mtd;
	int ret, part;

	mtd = get_mtd_device_nm(devstr);
	if (IS_ERR_OR_NULL(mtd))
		return -ENODEV;
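	/*
	 * The lookup above only validates devstr; drop the reference
	 * again, the mtd_info pointer itself remains usable.
	 */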
	put_mtd_device(mtd);

	dfu->dev_type = DFU_DEV_MTD;
	dfu->data.mtd.info = mtd;
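	/* Buffer at most one erase block so writes stay block-aligned */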
	dfu->max_buf_size = mtd->erasesize;

	st = strsep(&s, " ");
	if (!strcmp(st, "raw")) {
		dfu->layout = DFU_RAW_ADDR;
		dfu->data.mtd.start = hextoul(s, &s);
		s++;
		dfu->data.mtd.size = hextoul(s, &s);
	} else if ((!strcmp(st, "part")) || (!strcmp(st, "partubi"))) {
		char mtd_id[32];
		struct mtd_device *mtd_dev;
		u8 part_num;
		struct part_info *pi;

		dfu->layout = DFU_RAW_ADDR;

		part = dectoul(s, &s);

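		/* mtdparts partition numbers are 1-based, mtd_id is 0-based */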
		sprintf(mtd_id, "%s,%d", devstr, part - 1);
		printf("using id '%s'\n", mtd_id);

		mtdparts_init();

		ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
		if (ret != 0) {
			printf("Could not locate '%s'\n", mtd_id);
			return -1;
		}

		dfu->data.mtd.start = pi->offset;
		dfu->data.mtd.size = pi->size;
		if (!strcmp(st, "partubi"))
			dfu->data.mtd.ubi = 1;
	} else {
		printf("%s: Memory layout (%s) not supported!\n", __func__, st);
		return -1;
	}

	if (!mtd_is_aligned_with_block_size(mtd, dfu->data.mtd.start)) {
		printf("Offset not aligned with a block (0x%x)\n",
		       mtd->erasesize);
		return -EINVAL;
	}
	if (!mtd_is_aligned_with_block_size(mtd, dfu->data.mtd.size)) {
		printf("Size not aligned with a block (0x%x)\n",
		       mtd->erasesize);
		return -EINVAL;
	}

	dfu->get_medium_size = dfu_get_medium_size_mtd;
	dfu->read_medium = dfu_read_medium_mtd;
	dfu->write_medium = dfu_write_medium_mtd;
	dfu->flush_medium = dfu_flush_medium_mtd;
	dfu->poll_timeout = dfu_polltimeout_mtd;

	/* initial state */
	dfu->inited = 0;

	return 0;
}