// SPDX-License-Identifier: GPL-2.0+
/*
 * dfu_nand.c -- DFU for NAND routines.
 *
 * Copyright (C) 2012-2013 Texas Instruments, Inc.
 *
 * Based on dfu_mmc.c which is:
 * Copyright (C) 2012 Samsung Electronics
 * author: Lukasz Majewski <l.majewski@samsung.com>
 */

#include <log.h>
#include <malloc.h>
#include <errno.h>
#include <div64.h>
#include <dfu.h>
#include <linux/mtd/mtd.h>
#include <jffs2/load_kernel.h>
#include <nand.h>

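/*
 * nand_block_op() - common helper for raw NAND reads and writes
 *
 * A NULL @buf means "report the total size of the area" through @len.
 * Otherwise the offset is adjusted by dfu->bad_skip to account for bad
 * blocks skipped in earlier transfers; writes erase the target range
 * first and then program it, skipping bad blocks. Any extra blocks
 * skipped by this operation are added to dfu->bad_skip for the next call.
 */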
static int nand_block_op(enum dfu_op op, struct dfu_entity *dfu,
                         u64 offset, void *buf, long *len)
{
        loff_t start, lim;
        size_t count, actual;
        int ret;
        struct mtd_info *mtd;

        /* if buf == NULL return total size of the area */
        if (buf == NULL) {
                *len = dfu->data.nand.size;
                return 0;
        }

        start = dfu->data.nand.start + offset + dfu->bad_skip;
        lim = dfu->data.nand.start + dfu->data.nand.size - start;
        count = *len;

        mtd = get_nand_dev_by_index(nand_curr_device);

        if (nand_curr_device < 0 ||
            nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
            !mtd) {
                printf("%s: invalid nand device\n", __func__);
                return -1;
        }

        if (op == DFU_OP_READ) {
                ret = nand_read_skip_bad(mtd, start, &count, &actual,
                                         lim, buf);
        } else {
                nand_erase_options_t opts;
                int write_flags = WITH_WR_VERIFY;

                memset(&opts, 0, sizeof(opts));
                opts.offset = start;
                opts.length = count;
                opts.spread = 1;
                opts.quiet = 1;
                opts.lim = lim;
                /* first erase */
                ret = nand_erase_opts(mtd, &opts);
                if (ret)
                        return ret;
                /* then write */
#ifdef CONFIG_DFU_NAND_TRIMFFS
                if (dfu->data.nand.ubi)
                        write_flags |= WITH_DROP_FFS;
#endif
                ret = nand_write_skip_bad(mtd, start, &count, &actual,
                                          lim, buf, write_flags);
        }

        if (ret != 0) {
                printf("%s: nand_%s_skip_bad call failed at %llx!\n",
                       __func__, op == DFU_OP_READ ? "read" : "write",
                       start);
                return ret;
        }

        /*
         * Find out where we stopped writing data. This can be deeper into
         * the NAND than we expected due to having to skip bad blocks. So
         * we must take this into account for the next write, if any.
         */
        if (actual > count)
                dfu->bad_skip += actual - count;

        return ret;
}

static inline int nand_block_write(struct dfu_entity *dfu,
                                   u64 offset, void *buf, long *len)
{
        return nand_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
}

static inline int nand_block_read(struct dfu_entity *dfu,
                                  u64 offset, void *buf, long *len)
{
        return nand_block_op(DFU_OP_READ, dfu, offset, buf, len);
}

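/* Download (write) path: only the raw address layout is supported. */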
static int dfu_write_medium_nand(struct dfu_entity *dfu,
                                 u64 offset, void *buf, long *len)
{
        int ret = -1;

        switch (dfu->layout) {
        case DFU_RAW_ADDR:
                ret = nand_block_write(dfu, offset, buf, len);
                break;
        default:
                printf("%s: Layout (%s) not (yet) supported!\n", __func__,
                       dfu_get_layout(dfu->layout));
        }

        return ret;
}

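/* Report the size of the configured raw area or partition. */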
int dfu_get_medium_size_nand(struct dfu_entity *dfu, u64 *size)
{
        *size = dfu->data.nand.size;

        return 0;
}

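/* Upload (read) path, mirroring dfu_write_medium_nand(). */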
static int dfu_read_medium_nand(struct dfu_entity *dfu, u64 offset, void *buf,
                                long *len)
{
        int ret = -1;

        switch (dfu->layout) {
        case DFU_RAW_ADDR:
                ret = nand_block_read(dfu, offset, buf, len);
                break;
        default:
                printf("%s: Layout (%s) not (yet) supported!\n", __func__,
                       dfu_get_layout(dfu->layout));
        }

        return ret;
}

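/*
 * For "partubi" entities, erase whatever is left of the partition after
 * the downloaded image (rounded up to the next erase block and adjusted
 * by bad_skip), so no stale data remains behind the new image.
 */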
static int dfu_flush_medium_nand(struct dfu_entity *dfu)
{
        int ret = 0;
        u64 off;

        /* in case of a UBI partition, erase the rest of the partition */
        if (dfu->data.nand.ubi) {
                struct mtd_info *mtd = get_nand_dev_by_index(nand_curr_device);
                nand_erase_options_t opts;

                if (nand_curr_device < 0 ||
                    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
                    !mtd) {
                        printf("%s: invalid nand device\n", __func__);
                        return -1;
                }

                memset(&opts, 0, sizeof(opts));
                off = dfu->offset;
                if ((off & (mtd->erasesize - 1)) != 0) {
                        /*
                         * The last write ended with an unaligned length;
                         * its erase block is already erased, so jump to
                         * the next one.
                         */
                        off = off & ~((mtd->erasesize - 1));
                        off += mtd->erasesize;
                }
                opts.offset = dfu->data.nand.start + off +
                                dfu->bad_skip;
                opts.length = dfu->data.nand.start +
                        dfu->data.nand.size - opts.offset;
                ret = nand_erase_opts(mtd, &opts);
                if (ret != 0)
                        printf("Failure erase: %d\n", ret);
        }

        return ret;
}

unsigned int dfu_polltimeout_nand(struct dfu_entity *dfu)
{
        /*
         * Currently, a Poll Timeout != 0 is only needed for NAND UBI
         * partitions, as the unused sectors still need to be erased.
         */
        if (dfu->data.nand.ubi)
                return DFU_MANIFEST_POLL_TIMEOUT;

        return DFU_DEFAULT_POLL_TIMEOUT;
}

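/*
 * Parse one NAND entry of dfu_alt_info. The three arguments are either
 * "raw <hex start> <hex size>", "part <dev> <part_id>" or
 * "partubi <dev> <part_id>". The partition forms look up offset and size
 * via mtdparts; "partubi" additionally marks the entity as UBI so the
 * remaining space is erased on flush.
 */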
int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr, char **argv, int argc)
{
        char *s;
        int ret, dev, part;

        dfu->data.nand.ubi = 0;
        dfu->dev_type = DFU_DEV_NAND;
        if (argc != 3)
                return -EINVAL;

        if (!strcmp(argv[0], "raw")) {
                dfu->layout = DFU_RAW_ADDR;
                dfu->data.nand.start = hextoul(argv[1], &s);
                if (*s)
                        return -EINVAL;
                dfu->data.nand.size = hextoul(argv[2], &s);
                if (*s)
                        return -EINVAL;
        } else if ((!strcmp(argv[0], "part")) || (!strcmp(argv[0], "partubi"))) {
                char mtd_id[32];
                struct mtd_device *mtd_dev;
                u8 part_num;
                struct part_info *pi;

                dfu->layout = DFU_RAW_ADDR;

                dev = dectoul(argv[1], &s);
                if (*s)
                        return -EINVAL;
                part = dectoul(argv[2], &s);
                if (*s)
                        return -EINVAL;

                sprintf(mtd_id, "%s%d,%d", "nand", dev, part - 1);
                debug("using id '%s'\n", mtd_id);

                mtdparts_init();

                ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
                if (ret != 0) {
                        printf("Could not locate '%s'\n", mtd_id);
                        return -1;
                }

                dfu->data.nand.start = pi->offset;
                dfu->data.nand.size = pi->size;
                if (!strcmp(argv[0], "partubi"))
                        dfu->data.nand.ubi = 1;
        } else {
                printf("%s: Memory layout (%s) not supported!\n", __func__, argv[0]);
                return -1;
        }

        dfu->get_medium_size = dfu_get_medium_size_nand;
        dfu->read_medium = dfu_read_medium_nand;
        dfu->write_medium = dfu_write_medium_nand;
        dfu->flush_medium = dfu_flush_medium_nand;
        dfu->poll_timeout = dfu_polltimeout_nand;

        /* initial state */
        dfu->inited = 0;

        return 0;
}