// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 */

#include <common.h>
#include <malloc.h>
#include <fdtdec.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/types.h>

#include "pxa3xx_nand.h"

DECLARE_GLOBAL_DATA_PTR;

#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00)	/* Control register */
#define NDTR0CS0	(0x04)	/* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C)	/* Timing Parameter 1 for CS0 */
#define NDSR		(0x14)	/* Status Register */
#define NDPCR		(0x18)	/* Page Count Register */
#define NDBDR0		(0x1C)	/* Bad Block Register 0 */
#define NDBDR1		(0x20)	/* Bad Block Register 1 */
#define NDECCCTRL	(0x28)	/* ECC control */
#define NDDB		(0x40)	/* Data Buffer */
#define NDCB0		(0x48)	/* Command Buffer0 */
#define NDCB1		(0x4C)	/* Command Buffer1 */
#define NDCB2		(0x50)	/* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	int			cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e. chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;
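	/*
	 * Example geometry (taken from pxa_ecc_init() below): a 4 KiB page
	 * with 8-bit BCH is laid out as nfullchunks = 4 chunks of 1024 bytes
	 * of data with no spare, plus one last chunk of 0 bytes of data and
	 * 64 bytes of spare, i.e. ntotalchunks = 5.
	 */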

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size are the
	 * amounts of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static struct pxa3xx_nand_timing timing[] = {
	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	.oobfree = { {1, 4}, {6, 26} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
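/*
 * Worked example (assuming, for illustration only, a 250 MHz controller
 * clock as returned by mvebu_get_nand_clock()): ns2cycle(25, 250000000)
 * evaluates to (25 * 250) / 1000 = 6 cycles; the (int) cast truncates.
 */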

static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}

/**
 * NOTE: ND_RUN must be set first, then the command buffer written;
 * otherwise the controller does not work.
 * We enable all the interrupts at the same time and let
 * pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
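		/* Note: 'len' counts 32-bit words, so 8 reads = 32 bytes per burst. */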
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (info->step_chunk_size)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(info->step_chunk_size, 4));

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += info->step_chunk_size;
	info->oob_buff_pos += info->step_spare_size;
}

static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt of its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < info->chunk_size) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column pointers to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->step_chunk_size = 0;
	info->step_spare_size = 0;
	info->cur_chunk = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed; if so, reset
	 * the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed; if so, reset
	 * the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff), the HW will calculate its ECC
		 * as 0, which differs from the ECC information within the
		 * OOB; ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

1187static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1188{
Scott Wood17fed142016-05-30 13:57:56 -05001189 struct nand_chip *chip = mtd_to_nand(mtd);
1190 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001191 struct pxa3xx_nand_info *info = host->info_data;
1192 char retval = 0xFF;
1193
1194 if (info->buf_start < info->buf_count)
1195 /* Has just send a new command? */
1196 retval = info->data_buff[info->buf_start++];
1197
1198 return retval;
1199}
1200
1201static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1202{
Scott Wood17fed142016-05-30 13:57:56 -05001203 struct nand_chip *chip = mtd_to_nand(mtd);
1204 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001205 struct pxa3xx_nand_info *info = host->info_data;
1206 u16 retval = 0xFFFF;
1207
1208 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1209 retval = *((u16 *)(info->data_buff+info->buf_start));
1210 info->buf_start += 2;
1211 }
1212 return retval;
1213}
1214
1215static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1216{
Scott Wood17fed142016-05-30 13:57:56 -05001217 struct nand_chip *chip = mtd_to_nand(mtd);
1218 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001219 struct pxa3xx_nand_info *info = host->info_data;
1220 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1221
1222 memcpy(buf, info->data_buff + info->buf_start, real_len);
1223 info->buf_start += real_len;
1224}
1225
1226static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1227 const uint8_t *buf, int len)
1228{
Scott Wood17fed142016-05-30 13:57:56 -05001229 struct nand_chip *chip = mtd_to_nand(mtd);
1230 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001231 struct pxa3xx_nand_info *info = host->info_data;
1232 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1233
1234 memcpy(info->data_buff + info->buf_start, buf, real_len);
1235 info->buf_start += real_len;
1236}
1237
1238static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1239{
1240 return;
1241}
1242
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;

	/* Configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}

static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}

1454static int pxa3xx_nand_scan(struct mtd_info *mtd)
1455{
Scott Wood17fed142016-05-30 13:57:56 -05001456 struct nand_chip *chip = mtd_to_nand(mtd);
1457 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001458 struct pxa3xx_nand_info *info = host->info_data;
1459 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001460 int ret;
1461 uint16_t ecc_strength, ecc_step;
1462
Ofer Heifetz268979f2018-08-29 11:56:08 +03001463 if (pdata->keep_config) {
1464 pxa3xx_nand_detect_config(info);
1465 } else {
1466 ret = pxa3xx_nand_config_ident(info);
1467 if (ret)
1468 return ret;
1469 ret = pxa3xx_nand_sensing(host);
1470 if (ret) {
1471 dev_info(&info->pdev->dev,
1472 "There is no chip on cs %d!\n",
1473 info->cs);
1474 return ret;
1475 }
Stefan Roese75659da2015-07-23 10:26:16 +02001476 }
1477
Stefan Roese75659da2015-07-23 10:26:16 +02001478 /* Device detection must be done with ECC disabled */
1479 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1480 nand_writel(info, NDECCCTRL, 0x0);
1481
1482 if (nand_scan_ident(mtd, 1, NULL))
1483 return -ENODEV;
1484
1485 if (!pdata->keep_config) {
1486 ret = pxa3xx_nand_init_timings(host);
1487 if (ret) {
1488 dev_err(&info->pdev->dev,
1489 "Failed to set timings: %d\n", ret);
1490 return ret;
1491 }
1492 }
1493
Stefan Roese75659da2015-07-23 10:26:16 +02001494#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1495 /*
1496 * We'll use a bad block table stored in-flash and don't
1497 * allow writing the bad block marker to the flash.
1498 */
1499 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1500 chip->bbt_td = &bbt_main_descr;
1501 chip->bbt_md = &bbt_mirror_descr;
1502#endif
1503
Stefan Roese75659da2015-07-23 10:26:16 +02001504 if (pdata->ecc_strength && pdata->ecc_step_size) {
1505 ecc_strength = pdata->ecc_strength;
1506 ecc_step = pdata->ecc_step_size;
1507 } else {
1508 ecc_strength = chip->ecc_strength_ds;
1509 ecc_step = chip->ecc_step_ds;
1510 }
1511
1512 /* Set default ECC strength requirements on non-ONFI devices */
1513 if (ecc_strength < 1 && ecc_step < 1) {
1514 ecc_strength = 1;
1515 ecc_step = 512;
1516 }
1517
1518 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1519 ecc_step, mtd->writesize);
1520 if (ret)
1521 return ret;
1522
	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	int ret, cs;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)
		return -ENODEV;

	info->variant = pxa3xx_nand_get_variant();
	for (cs = 0; cs < pdata->num_cs; cs++) {
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->owner = THIS_MODULE;

		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	return 0;

	kfree(info->data_buff);
fail_disable_clk:
	return ret;
}

static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	const void *blob = gd->fdt_blob;
	int node = -1;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Get address decoding nodes from the FDT blob */
	do {
		node = fdt_node_offset_by_compatible(blob, node,
						     "marvell,mvebu-pxa3xx-nand");
		if (node < 0)
			break;

		/* Bypass disabled nodes */
		if (!fdtdec_get_is_enabled(blob, node))
			continue;

		/* Get the first enabled NAND controller base address */
		info->mmio_base =
			(void __iomem *)fdtdec_get_addr_size_auto_noparent(
					blob, node, "reg", 0, NULL, true);

		pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
		if (pdata->num_cs != 1) {
			pr_err("pxa3xx driver supports single CS only\n");
			break;
		}

		if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
			pdata->enable_arbiter = 1;

		if (fdtdec_get_bool(blob, node, "nand-keep-config"))
			pdata->keep_config = 1;

		/*
		 * ECC parameters.
		 * If these are not set, they will be selected according
		 * to the detected flash type.
		 */
		/* ECC strength */
		pdata->ecc_strength = fdtdec_get_int(blob, node,
						     "nand-ecc-strength", 0);

		/* ECC step size */
		pdata->ecc_step_size = fdtdec_get_int(blob, node,
						      "nand-ecc-step-size", 0);

		info->pdata = pdata;

		/* Currently support only a single NAND controller */
		return 0;

	} while (node >= 0);

	return -EINVAL;
}

static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;

	ret = pxa3xx_nand_probe_dt(info);
	if (ret)
		return ret;

	pdata = info->pdata;

	ret = alloc_nand_resource(info);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used in the 'mtdparts' kernel
		 * parameter. This name cannot be changed, otherwise the
		 * user's mtd partitions configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		if (nand_register(cs, mtd))
			continue;

		probe_success = 1;
	}

	if (!probe_success)
		return -ENODEV;

	return 0;
}

/*
 * Main initialization routine
 */
void board_nand_init(void)
{
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	int ret;

	info = kzalloc(sizeof(*info) +
		       sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
		       GFP_KERNEL);
	if (!info)
		return;

	ret = pxa3xx_nand_probe(info);
	if (ret)
		return;
}