/*
 * dfu_nand.c -- DFU for NAND routines.
 *
 * Copyright (C) 2012-2013 Texas Instruments, Inc.
 *
 * Based on dfu_mmc.c which is:
 * Copyright (C) 2012 Samsung Electronics
 * author: Lukasz Majewski <l.majewski@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <malloc.h>
#include <errno.h>
#include <div64.h>
#include <dfu.h>
#include <linux/mtd/mtd.h>
#include <jffs2/load_kernel.h>
#include <nand.h>

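/*
 * Common helper for the read and write paths. When buf is NULL, only the
 * total size of the configured NAND area is reported through *len.
 * Otherwise the transfer starts at dfu->data.nand.start + offset, shifted
 * by dfu->bad_skip to compensate for bad blocks skipped in earlier chunks.
 * A write erases the range first and then programs it with bad-block
 * skipping and write verification.
 */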
static int nand_block_op(enum dfu_op op, struct dfu_entity *dfu,
			 u64 offset, void *buf, long *len)
{
	loff_t start, lim;
	size_t count, actual;
	int ret;
	struct mtd_info *mtd;

	/* if buf == NULL return total size of the area */
	if (buf == NULL) {
		*len = dfu->data.nand.size;
		return 0;
	}

	start = dfu->data.nand.start + offset + dfu->bad_skip;
	lim = dfu->data.nand.start + dfu->data.nand.size - start;
	count = *len;

	if (nand_curr_device < 0 ||
	    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
	    !nand_info[nand_curr_device].name) {
		printf("%s: invalid nand device\n", __func__);
		return -1;
	}

	mtd = &nand_info[nand_curr_device];

	if (op == DFU_OP_READ) {
		ret = nand_read_skip_bad(mtd, start, &count, &actual,
					 lim, buf);
	} else {
		nand_erase_options_t opts;

		memset(&opts, 0, sizeof(opts));
		opts.offset = start;
		opts.length = count;
		opts.spread = 1;
		opts.quiet = 1;
		opts.lim = lim;
		/* first erase */
		ret = nand_erase_opts(mtd, &opts);
		if (ret)
			return ret;
		/* then write */
		ret = nand_write_skip_bad(mtd, start, &count, &actual,
					  lim, buf, WITH_WR_VERIFY);
	}

	if (ret != 0) {
		printf("%s: nand_%s_skip_bad call failed at %llx!\n",
		       __func__, op == DFU_OP_READ ? "read" : "write",
		       start);
		return ret;
	}

	/*
	 * Find out where we stopped writing data. This can be deeper into
	 * the NAND than we expected due to having to skip bad blocks. So
	 * we must take this into account for the next write, if any.
	 */
	if (actual > count)
		dfu->bad_skip += actual - count;

	return ret;
}

static inline int nand_block_write(struct dfu_entity *dfu,
		u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_WRITE, dfu, offset, buf, len);
}

static inline int nand_block_read(struct dfu_entity *dfu,
		u64 offset, void *buf, long *len)
{
	return nand_block_op(DFU_OP_READ, dfu, offset, buf, len);
}

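/*
 * Only the DFU_RAW_ADDR layout is handled here; both the "raw" and the
 * "part"/"partubi" variants of dfu_fill_entity_nand() resolve to raw
 * NAND offsets, so any other layout is rejected with an error message.
 */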
static int dfu_write_medium_nand(struct dfu_entity *dfu,
		u64 offset, void *buf, long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_write(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

long dfu_get_medium_size_nand(struct dfu_entity *dfu)
{
	return dfu->data.nand.size;
}

static int dfu_read_medium_nand(struct dfu_entity *dfu, u64 offset, void *buf,
		long *len)
{
	int ret = -1;

	switch (dfu->layout) {
	case DFU_RAW_ADDR:
		ret = nand_block_read(dfu, offset, buf, len);
		break;
	default:
		printf("%s: Layout (%s) not (yet) supported!\n", __func__,
		       dfu_get_layout(dfu->layout));
	}

	return ret;
}

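/*
 * For "partubi" entities the remainder of the partition is erased after
 * the payload has been written, since UBI expects the unwritten part of
 * the partition to contain only erased blocks.
 */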
static int dfu_flush_medium_nand(struct dfu_entity *dfu)
{
	int ret = 0;

	/* in case of ubi partition, erase rest of the partition */
	if (dfu->data.nand.ubi) {
		struct mtd_info *mtd;
		nand_erase_options_t opts;

		if (nand_curr_device < 0 ||
		    nand_curr_device >= CONFIG_SYS_MAX_NAND_DEVICE ||
		    !nand_info[nand_curr_device].name) {
			printf("%s: invalid nand device\n", __func__);
			return -1;
		}

		mtd = &nand_info[nand_curr_device];

		memset(&opts, 0, sizeof(opts));
		opts.offset = dfu->data.nand.start + dfu->offset +
				dfu->bad_skip;
		opts.length = dfu->data.nand.start +
				dfu->data.nand.size - opts.offset;
		ret = nand_erase_opts(mtd, &opts);
		if (ret != 0)
			printf("Failure erase: %d\n", ret);
	}

	return ret;
}

unsigned int dfu_polltimeout_nand(struct dfu_entity *dfu)
{
	/*
	 * Currently, a Poll Timeout != 0 is only needed for NAND ubi
	 * partitions, as the unused sectors still need to be erased.
	 */
	if (dfu->data.nand.ubi)
		return DFU_MANIFEST_POLL_TIMEOUT;

	return DFU_DEFAULT_POLL_TIMEOUT;
}

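/*
 * Parse the entity description string "s". Three forms are accepted:
 *   raw <start> <size>        hex NAND offset and length
 *   part <dev> <part_id>      mtdparts partition, counted from 1
 *   partubi <dev> <part_id>   as "part", but the rest of the partition
 *                             is erased on flush (UBI payload)
 * An illustrative dfu_alt_info entry (names are only an example) could be
 * "rootfs partubi 0 4".
 */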
int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr, char *s)
{
	char *st;
	int ret, dev, part;

	dfu->data.nand.ubi = 0;
	dfu->dev_type = DFU_DEV_NAND;
	st = strsep(&s, " ");
	if (!strcmp(st, "raw")) {
		dfu->layout = DFU_RAW_ADDR;
		dfu->data.nand.start = simple_strtoul(s, &s, 16);
		s++;
		dfu->data.nand.size = simple_strtoul(s, &s, 16);
	} else if ((!strcmp(st, "part")) || (!strcmp(st, "partubi"))) {
		char mtd_id[32];
		struct mtd_device *mtd_dev;
		u8 part_num;
		struct part_info *pi;

		dfu->layout = DFU_RAW_ADDR;

		dev = simple_strtoul(s, &s, 10);
		s++;
		part = simple_strtoul(s, &s, 10);

		sprintf(mtd_id, "%s%d,%d", "nand", dev, part - 1);
		printf("using id '%s'\n", mtd_id);

		mtdparts_init();

		ret = find_dev_and_part(mtd_id, &mtd_dev, &part_num, &pi);
		if (ret != 0) {
			printf("Could not locate '%s'\n", mtd_id);
			return -1;
		}

		dfu->data.nand.start = pi->offset;
		dfu->data.nand.size = pi->size;
		if (!strcmp(st, "partubi"))
			dfu->data.nand.ubi = 1;
	} else {
		printf("%s: Memory layout (%s) not supported!\n", __func__, st);
		return -1;
	}

	dfu->get_medium_size = dfu_get_medium_size_nand;
	dfu->read_medium = dfu_read_medium_nand;
	dfu->write_medium = dfu_write_medium_nand;
	dfu->flush_medium = dfu_flush_medium_nand;
	dfu->poll_timeout = dfu_polltimeout_nand;

	/* initial state */
	dfu->inited = 0;

	return 0;
}