/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <common.h>
#include <malloc.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/types.h>

#include "pxa3xx_nand.h"

#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM. The largest of these is the PARAM command,
 * needing 256 bytes.
 */
#define INIT_BUFFER_SIZE	256

/* registers and bit definitions */
#define NDCR		(0x00)	/* Control register */
#define NDTR0CS0	(0x04)	/* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C)	/* Timing Parameter 1 for CS0 */
#define NDSR		(0x14)	/* Status Register */
#define NDPCR		(0x18)	/* Page Count Register */
#define NDBDR0		(0x1C)	/* Bad Block Register 0 */
#define NDBDR1		(0x20)	/* Bad Block Register 1 */
#define NDECCCTRL	(0x28)	/* ECC control */
#define NDDB		(0x40)	/* Data Buffer */
#define NDCB0		(0x48)	/* Command Buffer0 */
#define NDCB1		(0x4C)	/* Command Buffer1 */
#define NDCB2		(0x50)	/* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip chip;
	struct mtd_info *mtd;
	void *info_data;

	/* page size of attached chip */
	int use_ecc;
	int cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int col_addr_cycles;
	unsigned int row_addr_cycles;
	size_t read_id_bytes;

};

struct pxa3xx_nand_info {
	struct nand_hw_control controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk *clk;
	void __iomem *mmio_base;
	unsigned long mmio_phys;
	int cmd_complete, dev_ready;

	unsigned int buf_start;
	unsigned int buf_count;
	unsigned int buf_size;
	unsigned int data_buff_pos;
	unsigned int oob_buff_pos;

	unsigned char *data_buff;
	unsigned char *oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int cs;
	int use_ecc;	/* use HW ECC ? */
	int ecc_bch;	/* using BCH ECC? */
	int use_spare;	/* use spare ? */
	int need_wait;

	unsigned int data_size;		/* data to be read from FIFO */
	unsigned int chunk_size;	/* split commands chunk size */
	unsigned int oob_size;
	unsigned int spare_size;
	unsigned int ecc_size;
	unsigned int ecc_err_cnt;
	unsigned int max_bitflips;
	int retcode;

	/* cached register value */
	uint32_t reg_ndcr;
	uint32_t ndtr0cs0;
	uint32_t ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t ndcb0;
	uint32_t ndcb1;
	uint32_t ndcb2;
	uint32_t ndcb3;
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
	{ 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
	{ 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec, 8, 8, &timing[1] },
	{ 0xd7ec, 8, 8, &timing[1] },
	{ 0xa12c, 8, 8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c, 8, 8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
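/*
 * Worked example (clock value assumed for illustration): with a 200 MHz
 * NAND controller clock, clk / 1000000 = 200, so ns2cycle(40, clk) =
 * 40 * 200 / 1000 = 8 clock cycles (truncated, not rounded up). The
 * NDTR0_tXX()/NDTR1_tXX() macros above then clamp the cycle count to the
 * width of the corresponding register field.
 */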

static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}

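/*
 * Program the NDTR0CS0/NDTR1CS0 timing registers from a pxa3xx_nand_timing
 * entry: each timing value (in ns) is converted to controller clock cycles
 * with ns2cycle(). The result is also cached in info->ndtr0cs0/ndtr1cs0 so
 * it can be restored when the chip select changes.
 */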
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

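/*
 * Pick timings for the attached chip: if the chip does not report an ONFI
 * async timing mode, fall back to the builtin_flash_types[] table (matched
 * against the two READID bytes); otherwise use the fastest advertised ONFI
 * mode and program it via the SDR timings helper.
 */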
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(host->mtd);
		id |= chip->read_byte(host->mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}

/**
 * NOTE: ND_RUN must be set first, and only then the command
 * buffer written; otherwise the command does not execute.
 * We enable all the interrupts at the same time, and
 * let pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}

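/*
 * The low NDCR bits (NDCR_INT_MASK) act as interrupt mask bits: OR-ing a
 * bit of int_mask into NDCR masks (disables) that interrupt source, which
 * is how this helper disables interrupts.
 */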
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

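/*
 * Read 'len' 32-bit words from the controller data FIFO (NDDB) into 'data'.
 * With BCH enabled the FIFO is drained in bursts of eight words, polling
 * NDSR.RDDREQ between bursts as the controller requires.
 */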
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read, we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

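/*
 * Transfer one chunk (at most chunk_size bytes of data plus the OOB area)
 * between the internal buffers and the controller FIFO by PIO, in 32-bit
 * words, then advance the data/OOB positions for split multi-chunk
 * operations.
 */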
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}

static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

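/*
 * In this U-Boot driver there is no real interrupt: the callers poll NDSR
 * and invoke this handler whenever status bits are set. It dispatches data
 * requests to pxa3xx_nand_irq_thread(), records ECC results, loads the
 * NDCB0..NDCB3 command buffer on WRCMDREQ and flags command completion and
 * device ready.
 */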
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

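/*
 * Encode the column and page address into NDCB1/NDCB2. Small-page devices
 * (< 2 KiB) use a one-byte column and up to three row-address bytes in
 * NDCB1; large-page devices use a two-byte column plus two row bytes in
 * NDCB1, with any remaining row bits going into NDCB2.
 */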
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column pointers to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
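		/* fall through - READ0/PAGEPROG also need the data size set */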
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}

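/*
 * Translate an mtd/nand command into the NDCB0..NDCB3 values for the
 * controller. Returns 1 if a command cycle must actually be started on the
 * controller, 0 if nothing needs to be executed (e.g. SEQIN on small pages,
 * ERASE2, or programming a completely blank page).
 */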
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed; if so, restore
	 * the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}

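/*
 * Command handler for pages larger than the 2 KiB controller FIFO: the page
 * is transferred as a sequence of naked read/write chunks (see the
 * EXT_CMD_TYPE_* definitions above), and this function loops issuing one
 * chunk per controller command until the whole page has been handled.
 */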
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * Different NAND chips may be hooked to different chip selects,
	 * so check whether the chip select has changed; if so, restore
	 * the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" flags before starting a command
	 * transaction sequence. If the command is not executed, the
	 * flags are set right away, see below.
	 *
	 * This can be done once, outside the loop, because the command
	 * variable is invariant and thus so is exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * for a blank page (all 0xff), HW will calculate its ECC as
		 * 0, which is different from the ECC information within
		 * the OOB, so ignore such uncorrectable errors
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has a new command just been sent? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
				  const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;

	return 0;
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	/*
	 * Hard-code chip select 0 here, since keep_config is not supported
	 * when there is more than one chip attached to the controller.
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		/* Controller's FIFO size */
		info->chunk_size = 2048;
		host->read_id_bytes = 4;
	} else {
		info->chunk_size = 512;
		host->read_id_bytes = 2;
	}

	/* Set an initial chunk size */
	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common (mode 0) timings for a first try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

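/*
 * Map the required ECC strength/step size and the page size onto one of the
 * controller configurations supported by this driver (the controller's
 * 1-bit mode for 1-bit requirements, BCH for 4-bit and 8-bit requirements),
 * filling in the chunk/spare/ECC sizes and the corresponding OOB layout.
 */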
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_sensing(host);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

KEEP_CONFIG:
	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

	ret = pxa3xx_nand_config_flash(info);
	if (ret)
		return ret;

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	int ret, cs;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)
		return -ENODEV;

	info->variant = pxa3xx_nand_get_variant();
	for (cs = 0; cs < pdata->num_cs; cs++) {
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->mtd = mtd;
		host->cs = cs;
		host->info_data = info;
		host->read_id_bytes = 4;
		mtd->owner = THIS_MODULE;

		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}

	info->mmio_base = (void __iomem *)MVEBU_NAND_BASE;

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	return 0;

	kfree(info->data_buff);
fail_disable_clk:
	return ret;
}

static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	pdata->enable_arbiter = 1;
	pdata->num_cs = 1;

	info->pdata = pdata;

	return 0;
}

static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;

	ret = pxa3xx_nand_probe_dt(info);
	if (ret)
		return ret;

	pdata = info->pdata;

	ret = alloc_nand_resource(info);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in the 'mtdparts' kernel
		 * parameter. This name cannot be changed, or otherwise the
		 * user's mtd partition configuration would get broken.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		if (nand_register(cs, mtd))
			continue;

		probe_success = 1;
	}

	if (!probe_success)
		return -ENODEV;

	return 0;
}

/*
 * Main initialization routine
 */
void board_nand_init(void)
{
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	int ret;

	info = kzalloc(sizeof(*info) +
		       sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
		       GFP_KERNEL);
	if (!info)
		return;

	ret = pxa3xx_nand_probe(info);
	if (ret)
		return;
}