// SPDX-License-Identifier: GPL-2.0+
/*
 * dfu_nand.c -- DFU for NAND routines.
 *
 * Copyright (C) 2012-2013 Texas Instruments, Inc.
 *
 * Based on dfu_mmc.c which is:
 * Copyright (C) 2012 Samsung Electronics
 * author: Lukasz Majewski <l.majewski@samsung.com>
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <div64.h>
#include <dfu.h>
#include <linux/mtd/mtd.h>
#include <jffs2/load_kernel.h>
#include <nand.h>

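/*
 * Read from or write to the raw NAND area backing a DFU entity.
 *
 * A NULL @buf is a size query: *len is set to the entity size and no flash
 * access happens. Otherwise the request is shifted by the bad blocks already
 * skipped (dfu->bad_skip), limited to the end of the entity, and handed to
 * nand_read_skip_bad() or, for writes, to an erase followed by
 * nand_write_skip_bad(). When the skip-bad helpers had to reach further than
 * requested, the extra span is accumulated in dfu->bad_skip so the next
 * chunk lands behind the skipped blocks.
 */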
static int nand_block_op(enum dfu_op op, struct dfu_entity *dfu,
			 u64 offset, void *buf, long *len)
{
	loff_t start, lim;
	size_t count, actual;
	int ret;
	struct mtd_info *mtd;

	/* if buf == NULL return total size of the area */
	if (buf == NULL) {
		*len = dfu->data.nand.size;
		return 0;
	}

	start = dfu->data.nand.start + offset + dfu->bad_skip;
	lim = dfu->data.nand.start + dfu->data.nand.size - start;
	count = *len;

	mtd = get_nand_dev_by_index(nand_curr_device);

	if (nand_curr_device < 0 ||
	    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
	    !mtd) {
		printf("%s: invalid nand device\n", __func__);
		return -1;
	}

	if (op == DFU_OP_READ) {
		ret = nand_read_skip_bad(mtd, start, &count, &actual,
					 lim, buf);
	} else {
		nand_erase_options_t opts;

		memset(&opts, 0, sizeof(opts));
		opts.offset = start;
		opts.length = count;
		opts.spread = 1;
		opts.quiet = 1;
		opts.lim = lim;
		/* first erase */
		ret = nand_erase_opts(mtd, &opts);
		if (ret)
			return ret;
		/* then write */
		ret = nand_write_skip_bad(mtd, start, &count, &actual,
					  lim, buf, WITH_WR_VERIFY);
	}

	if (ret != 0) {
		printf("%s: nand_%s_skip_bad call failed at %llx!\n",
		       __func__, op == DFU_OP_READ ? "read" : "write",
		       start);
		return ret;
	}

	/*
	 * Find out where we stopped writing data. This can be deeper into
	 * the NAND than we expected due to having to skip bad blocks. So
	 * we must take this into account for the next write, if any.
	 */
	if (actual > count)
		dfu->bad_skip += actual - count;

	return ret;
}

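/* Thin wrappers so the read and write callbacks share nand_block_op(). */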
static inline int nand_block_write(struct dfu_entity *dfu,
				   u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
}

static inline int nand_block_read(struct dfu_entity *dfu,
				  u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_READ, dfu, offset, buf, len);
}

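/*
 * DFU write callback: only the DFU_RAW_ADDR layout is implemented for NAND;
 * any other layout is rejected with a message.
 */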
static int dfu_write_medium_nand(struct dfu_entity *dfu,
				 u64 offset, void *buf, long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_write(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

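/* Report the total size of the raw NAND area backing this entity. */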
int dfu_get_medium_size_nand(struct dfu_entity *dfu, u64 *size)
{
	*size = dfu->data.nand.size;

	return 0;
}

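/* DFU read callback: as with writes, only DFU_RAW_ADDR is handled. */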
static int dfu_read_medium_nand(struct dfu_entity *dfu, u64 offset, void *buf,
				long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_read(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

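/*
 * Called once the download is complete. For "partubi" entities the remainder
 * of the partition, starting at the first erase block not covered by the
 * download (rounded up past a partially written block and past the bad blocks
 * skipped so far), is erased so no stale data is left behind the new image.
 */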
static int dfu_flush_medium_nand(struct dfu_entity *dfu)
{
	int ret = 0;
	u64 off;

	/* in case of ubi partition, erase rest of the partition */
	if (dfu->data.nand.ubi) {
		struct mtd_info *mtd = get_nand_dev_by_index(nand_curr_device);
		nand_erase_options_t opts;

		if (nand_curr_device < 0 ||
		    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
		    !mtd) {
			printf("%s: invalid nand device\n", __func__);
			return -1;
		}

		memset(&opts, 0, sizeof(opts));
		off = dfu->offset;
		if ((off & (mtd->erasesize - 1)) != 0) {
			/*
			 * last write ended with unaligned length
			 * sector is erased, jump to next
			 */
			off = off & ~((mtd->erasesize - 1));
			off += mtd->erasesize;
		}
		opts.offset = dfu->data.nand.start + off +
				dfu->bad_skip;
		opts.length = dfu->data.nand.start +
				dfu->data.nand.size - opts.offset;
		ret = nand_erase_opts(mtd, &opts);
		if (ret != 0)
			printf("Failure erase: %d\n", ret);
	}

	return ret;
}

unsigned int dfu_polltimeout_nand(struct dfu_entity *dfu)
{
	/*
	 * Currently, Poll Timeout != 0 is only needed on nand
	 * ubi partition, as the not used sectors need an erase
	 */
	if (dfu->data.nand.ubi)
		return DFU_MANIFEST_POLL_TIMEOUT;

	return DFU_DEFAULT_POLL_TIMEOUT;
}

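/*
 * Parse the NAND part of a dfu_alt_info entry and fill in the entity
 * callbacks. Three forms are accepted, as implemented by the parser below:
 *
 *   raw <start> <size>        hex offset and length into the raw NAND
 *   part <dev> <part_id>      decimal mtd device and partition (1-based),
 *                             resolved through mtdparts
 *   partubi <dev> <part_id>   like "part", but the unused tail of the
 *                             partition is erased on flush
 *
 * A hypothetical environment setting (the alt names and numbers are
 * illustrative only) could look like:
 *   dfu_alt_info="spl raw 0x0 0x20000;kernel part 0 3;rootfs partubi 0 5"
 */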
int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr, char *s)
{
	char *st;
	int ret, dev, part;

	dfu->data.nand.ubi = 0;
	dfu->dev_type = DFU_DEV_NAND;
	st = strsep(&s, " ");
	if (!strcmp(st, "raw")) {
		dfu->layout = DFU_RAW_ADDR;
		dfu->data.nand.start = simple_strtoul(s, &s, 16);
		s++;
		dfu->data.nand.size = simple_strtoul(s, &s, 16);
	} else if ((!strcmp(st, "part")) || (!strcmp(st, "partubi"))) {
		char mtd_id[32];
		struct mtd_device *mtd_dev;
		u8 part_num;
		struct part_info *pi;

		dfu->layout = DFU_RAW_ADDR;

		dev = simple_strtoul(s, &s, 10);
		s++;
		part = simple_strtoul(s, &s, 10);

		sprintf(mtd_id, "%s%d,%d", "nand", dev, part - 1);
		debug("using id '%s'\n", mtd_id);

		mtdparts_init();

		ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
		if (ret != 0) {
			printf("Could not locate '%s'\n", mtd_id);
			return -1;
		}

		dfu->data.nand.start = pi->offset;
		dfu->data.nand.size = pi->size;
		if (!strcmp(st, "partubi"))
			dfu->data.nand.ubi = 1;
	} else {
		printf("%s: Memory layout (%s) not supported!\n", __func__, st);
		return -1;
	}

	dfu->get_medium_size = dfu_get_medium_size_nand;
	dfu->read_medium = dfu_read_medium_nand;
	dfu->write_medium = dfu_write_medium_nand;
	dfu->flush_medium = dfu_flush_medium_nand;
	dfu->poll_timeout = dfu_polltimeout_nand;

	/* initial state */
	dfu->inited = 0;

	return 0;
}