/*
 * drivers/mtd/nand/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 *
 * SPDX-License-Identifier:	GPL-2.0
 */

#include <common.h>
#include <malloc.h>
#include <fdtdec.h>
#include <nand.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/types.h>

#include "pxa3xx_nand.h"

DECLARE_GLOBAL_DATA_PTR;

#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40
#define PAGE_CHUNK_SIZE		(2048)

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM. The largest of these is the PARAM command,
 * needing 256 bytes.
 */
#define INIT_BUFFER_SIZE	256

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NDCR_STOP_ON_UNCOR	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	struct mtd_info		*mtd;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
	size_t			read_id_bytes;

};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	int			cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	unsigned int		data_size;	/* data to be read from FIFO */
	unsigned int		chunk_size;	/* split commands chunk size */
	unsigned int		oob_size;
	unsigned int		spare_size;
	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static struct pxa3xx_nand_timing timing[] = {
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
};

static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
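
/*
 * Illustrative example (not from the original source): with a 250 MHz
 * controller clock, a 25 ns parameter converts as
 *   ns2cycle(25, 250000000) = 25 * (250000000 / 1000000) / 1000
 *                           = 25 * 250 / 1000 = 6 cycles
 * (integer division), and the NDTRx field macros above then clamp the
 * result to the field width (e.g. NDTR0_* fields saturate at 7).
 */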

static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(host->mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(host->mtd);
		id |= chip->read_byte(host->mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}

/*
 * Set the data and OOB size, depending on the selected
 * spare and ECC configuration.
 * Only applicable to READ0, READOOB and PAGEPROG commands.
 */
static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info,
				struct mtd_info *mtd)
{
	int oob_enable = info->reg_ndcr & NDCR_SPARE_EN;

	info->data_size = mtd->writesize;
	if (!oob_enable)
		return;

	info->oob_size = info->spare_size;
	if (!info->use_ecc)
		info->oob_size += info->ecc_size;
}
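
/*
 * Illustrative example (values taken from pxa_ecc_init() below): for a
 * 2048-byte page configured for 4-bit BCH, spare_size = 32 and
 * ecc_size = 32, so a READ0 with hardware ECC enabled transfers
 * data_size = 2048 and oob_size = 32, while the same access with ECC
 * disabled (raw mode) transfers oob_size = 32 + 32 = 64 OOB bytes.
 */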

/**
 * NOTE: ND_RUN must be set first and the command buffer written
 * afterwards, otherwise the controller does not start.
 * We enable all the interrupt sources at the same time and let
 * pxa3xx_nand_irq() handle the rest of the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, ndcr);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte burst we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

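/*
 * handle_data_pio() moves one chunk between data_buff/oob_buff and the
 * controller data FIFO (NDDB) by programmed I/O, in 32-bit words. The
 * direction is taken from info->state (STATE_PIO_READING or
 * STATE_PIO_WRITING), and the buffer positions are advanced so that a
 * multi-chunk (split) page access can call it repeatedly.
 */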
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	unsigned int do_bytes = min(info->data_size, info->chunk_size);

	switch (info->state) {
	case STATE_PIO_WRITING:
		writesl(info->mmio_base + NDDB,
			info->data_buff + info->data_buff_pos,
			DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->oob_size, 4));
		break;
	case STATE_PIO_READING:
		drain_fifo(info,
			   info->data_buff + info->data_buff_pos,
			   DIV_ROUND_UP(do_bytes, 4));

		if (info->oob_size > 0)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->oob_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += do_bytes;
	info->oob_buff_pos += info->oob_size;
	info->data_size -= do_bytes;
}

static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

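/*
 * In U-Boot there is no real interrupt context: the command loops below
 * poll NDSR and call pxa3xx_nand_irq() whenever a status bit is set, so
 * this "IRQ handler" runs synchronously. It records ECC results, services
 * read/write data requests via pxa3xx_nand_irq_thread(), loads the NDCBx
 * command buffer when the controller asks for it, and finally acknowledges
 * the handled status bits.
 */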
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	if (status & NDSR_WRCMDREQ) {
		nand_writel(info, NDSR, NDSR_WRCMDREQ);
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	/* clear NDSR to let the controller exit the IRQ */
	nand_writel(info, NDSR, status);
	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

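/*
 * Build the NDCB1/NDCB2 address cycles for the given page and column.
 * Small-page devices (< 2048 bytes) use a 1-byte column plus up to 3 row
 * address bytes in NDCB1; large-page devices use a 2-byte column plus
 * 2 row bytes in NDCB1, with a third row byte going into NDCB2 when the
 * page address needs it.
 */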
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < PAGE_CHUNK_SIZE) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;

	/* reset data and oob column pointers to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->oob_size = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_PAGEPROG:
		info->use_ecc = 1;
	case NAND_CMD_READOOB:
		pxa3xx_set_datasize(info, mtd);
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

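/*
 * Translate an MTD-level command into the NDCB0..NDCB3 command-buffer
 * words. Returns 1 if the command must actually be issued to the
 * controller and 0 if nothing needs to be executed (for example SEQIN,
 * which only records the address, or a page program of an all-0xFF
 * buffer). For page sizes larger than the 2 KiB FIFO, the extended
 * command type selects the naked/dispatch phases of a split transfer.
 */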
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = host->mtd;
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (mtd->writesize == PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			/* No data transfer in this case */
			info->data_size = 0;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > PAGE_CHUNK_SIZE) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->chunk_size +
				      info->oob_size;

			/*
			 * This is the command dispatch that completes a
			 * chunked page program operation.
			 */
			if (info->data_size == 0) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = 256;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = 256;
		info->data_size = 256;
		break;

	case NAND_CMD_READID:
		info->buf_count = host->read_id_bytes;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->data_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->data_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "unsupported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

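/*
 * Legacy cmdfunc used for page sizes that fit the 2 KiB controller FIFO:
 * each MTD command maps to a single controller command sequence. The
 * status register is polled and pxa3xx_nand_irq() is called by hand until
 * the command-done bit is seen or CHIP_DELAY_TIMEOUT expires.
 */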
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if yes, reload the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}

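/*
 * Extended cmdfunc used on NFCv2 (Armada 370/XP) when the page is larger
 * than the 2 KiB FIFO: a single MTD command is broken into a sequence of
 * chunked controller commands (monolithic/naked/last-naked reads, naked
 * writes plus a final command dispatch), selected through ext_cmd_type on
 * every iteration of the loop below.
 */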
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if yes, reload the timing settings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion is marked as done right away, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Check if the sequence is complete */
		if (info->data_size == 0 && command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->data_size == 0 &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->data_size == info->chunk_size)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->data_size == 0) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR) {
		/*
		 * For a blank page (all 0xff) the hardware calculates its
		 * ECC as 0, which differs from the ECC information stored
		 * in the OOB area, so ignore such uncorrectable errors.
		 */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff+info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
		const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = host->mtd;
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;

	return 0;
}

static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	/*
	 * Chip select 0 is hard-coded here because keep_config is not
	 * supported when more than one chip is attached to the controller.
	 */
	struct pxa3xx_nand_host *host = info->host[0];
	uint32_t ndcr = nand_readl(info, NDCR);

	if (ndcr & NDCR_PAGE_SZ) {
		/* Controller's FIFO size */
		info->chunk_size = 2048;
		host->read_id_bytes = 4;
	} else {
		info->chunk_size = 512;
		host->read_id_bytes = 2;
	}

	/* Set an initial chunk size */
	info->reg_ndcr = ndcr & ~NDCR_INT_MASK;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
	return 0;
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = info->host[info->cs]->mtd;
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(host->read_id_bytes);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* try the common (ONFI mode 0) timings first */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

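/*
 * Map the requested ECC strength/step and the page size onto one of the
 * controller configurations supported by this driver (see the branches
 * below):
 *
 *   strength 1, step 512, page 2048 -> 1-bit HW ECC, 2048-byte chunks
 *   strength 1, step 512, page  512 -> 1-bit HW ECC,  512-byte chunks
 *   strength 4, step 512, page 2048 -> BCH, 16 bits per 2048-byte chunk
 *   strength 4, step 512, page 4096 -> BCH, 16 bits per 2048-byte chunk
 *   strength 8, step 512, page 4096 -> BCH, 16 bits per 1024-byte chunk
 *
 * Any other combination is rejected with -ENODEV.
 */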
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;
	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}

static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config && !pxa3xx_nand_detect_config(info))
		goto KEEP_CONFIG;

	/* Set a default chunk size */
	info->chunk_size = 512;

	ret = pxa3xx_nand_sensing(host);
	if (ret) {
		dev_info(&info->pdev->dev, "There is no chip on cs %d!\n",
			 info->cs);

		return ret;
	}

KEEP_CONFIG:
	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(&info->pdev->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

	ret = pxa3xx_nand_config_flash(info);
	if (ret)
		return ret;

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/*
	 * If the page size is bigger than the FIFO size, check that we are
	 * given the right variant and then switch to the extended
	 * (aka split) command handling.
	 */
	if (mtd->writesize > PAGE_CHUNK_SIZE) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(&info->pdev->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;
	return nand_scan_tail(mtd);
}

static int alloc_nand_resource(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	int ret, cs;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)
		return -ENODEV;

	info->variant = pxa3xx_nand_get_variant();
	for (cs = 0; cs < pdata->num_cs; cs++) {
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->mtd = mtd;
		host->cs = cs;
		host->info_data = info;
		host->read_id_bytes = 4;
		mtd->owner = THIS_MODULE;

		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL) {
		ret = -ENOMEM;
		goto fail_disable_clk;
	}

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	return 0;

	kfree(info->data_buff);
fail_disable_clk:
	return ret;
}

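/*
 * Parse the first enabled "marvell,mvebu-pxa3xx-nand" node from the FDT
 * blob: controller base address, number of chip selects (only one is
 * supported), the arbiter/keep-config flags and the optional ECC strength
 * and step size, which override auto-detection when present.
 */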
static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	const void *blob = gd->fdt_blob;
	int node = -1;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Get address decoding nodes from the FDT blob */
	do {
		node = fdt_node_offset_by_compatible(blob, node,
						     "marvell,mvebu-pxa3xx-nand");
		if (node < 0)
			break;

		/* Bypass disabled nodes */
		if (!fdtdec_get_is_enabled(blob, node))
			continue;

		/* Get the first enabled NAND controller base address */
		info->mmio_base =
			(void __iomem *)fdtdec_get_addr_size_auto_noparent(
					blob, node, "reg", 0, NULL, true);

		pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
		if (pdata->num_cs != 1) {
			pr_err("pxa3xx driver supports single CS only\n");
			break;
		}

		if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
			pdata->enable_arbiter = 1;

		if (fdtdec_get_bool(blob, node, "nand-keep-config"))
			pdata->keep_config = 1;

		/*
		 * ECC parameters.
		 * If these are not set, they will be selected according
		 * to the detected flash type.
		 */
		/* ECC strength */
		pdata->ecc_strength = fdtdec_get_int(blob, node,
						     "nand-ecc-strength", 0);

		/* ECC step size */
		pdata->ecc_step_size = fdtdec_get_int(blob, node,
						      "nand-ecc-step-size", 0);

		info->pdata = pdata;

		/* Currently support only a single NAND controller */
		return 0;

	} while (node >= 0);

	return -EINVAL;
}

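/*
 * Probe the controller: parse the device tree, allocate per-chip-select
 * resources and scan/register an MTD device for every chip select that
 * actually has a chip attached. Succeeds if at least one chip is found.
 */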
static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;

	ret = pxa3xx_nand_probe_dt(info);
	if (ret)
		return ret;

	pdata = info->pdata;

	ret = alloc_nand_resource(info);
	if (ret) {
		dev_err(&pdev->dev, "alloc nand resource failed\n");
		return ret;
	}

	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = info->host[cs]->mtd;

		/*
		 * The mtd name matches the one used in the 'mtdparts' kernel
		 * parameter. This name cannot be changed, otherwise the
		 * user's mtd partition configuration would break.
		 */
		mtd->name = "pxa3xx_nand-0";
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		if (nand_register(cs, mtd))
			continue;

		probe_success = 1;
	}

	if (!probe_success)
		return -ENODEV;

	return 0;
}

/*
 * Main initialization routine
 */
void board_nand_init(void)
{
	struct pxa3xx_nand_info *info;
	struct pxa3xx_nand_host *host;
	int ret;

	info = kzalloc(sizeof(*info) +
		       sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
		       GFP_KERNEL);
	if (!info)
		return;

	ret = pxa3xx_nand_probe(info);
	if (ret)
		return;
}