Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roese75659da2015-07-23 10:26:16 +02002/*
3 * drivers/mtd/nand/pxa3xx_nand.c
4 *
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
Stefan Roese75659da2015-07-23 10:26:16 +02007 */
8
9#include <common.h>
10#include <malloc.h>
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030011#include <fdtdec.h>
Stefan Roese75659da2015-07-23 10:26:16 +020012#include <nand.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090013#include <linux/errno.h>
Stefan Roese75659da2015-07-23 10:26:16 +020014#include <asm/io.h>
15#include <asm/arch/cpu.h>
16#include <linux/mtd/mtd.h>
Masahiro Yamada2b7a8732017-11-30 13:45:24 +090017#include <linux/mtd/rawnand.h>
Stefan Roese75659da2015-07-23 10:26:16 +020018#include <linux/types.h>
19
20#include "pxa3xx_nand.h"
21
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030022DECLARE_GLOBAL_DATA_PTR;
23
Stefan Roese75659da2015-07-23 10:26:16 +020024#define TIMEOUT_DRAIN_FIFO 5 /* in ms */
25#define CHIP_DELAY_TIMEOUT 200
26#define NAND_STOP_DELAY 40
Stefan Roese75659da2015-07-23 10:26:16 +020027
28/*
29 * Define a buffer size for the initial command that detects the flash device:
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030030 * STATUS, READID and PARAM.
31 * ONFI param page is 256 bytes, and there are three redundant copies
32 * to be read. JEDEC param page is 512 bytes, and there are also three
33 * redundant copies to be read.
34 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
Stefan Roese75659da2015-07-23 10:26:16 +020035 */
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030036#define INIT_BUFFER_SIZE 2048
Stefan Roese75659da2015-07-23 10:26:16 +020037
38/* registers and bit definitions */
39#define NDCR (0x00) /* Control register */
40#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
41#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
42#define NDSR (0x14) /* Status Register */
43#define NDPCR (0x18) /* Page Count Register */
44#define NDBDR0 (0x1C) /* Bad Block Register 0 */
45#define NDBDR1 (0x20) /* Bad Block Register 1 */
46#define NDECCCTRL (0x28) /* ECC control */
47#define NDDB (0x40) /* Data Buffer */
48#define NDCB0 (0x48) /* Command Buffer0 */
49#define NDCB1 (0x4C) /* Command Buffer1 */
50#define NDCB2 (0x50) /* Command Buffer2 */
51
52#define NDCR_SPARE_EN (0x1 << 31)
53#define NDCR_ECC_EN (0x1 << 30)
54#define NDCR_DMA_EN (0x1 << 29)
55#define NDCR_ND_RUN (0x1 << 28)
56#define NDCR_DWIDTH_C (0x1 << 27)
57#define NDCR_DWIDTH_M (0x1 << 26)
58#define NDCR_PAGE_SZ (0x1 << 24)
59#define NDCR_NCSX (0x1 << 23)
60#define NDCR_ND_MODE (0x3 << 21)
61#define NDCR_NAND_MODE (0x0)
62#define NDCR_CLR_PG_CNT (0x1 << 20)
Ofer Heifetz531816e2018-08-29 11:56:07 +030063#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
Stefan Roese75659da2015-07-23 10:26:16 +020064#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
65#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
66
67#define NDCR_RA_START (0x1 << 15)
68#define NDCR_PG_PER_BLK (0x1 << 14)
69#define NDCR_ND_ARB_EN (0x1 << 12)
70#define NDCR_INT_MASK (0xFFF)
71
72#define NDSR_MASK (0xfff)
73#define NDSR_ERR_CNT_OFF (16)
74#define NDSR_ERR_CNT_MASK (0x1f)
75#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
76#define NDSR_RDY (0x1 << 12)
77#define NDSR_FLASH_RDY (0x1 << 11)
78#define NDSR_CS0_PAGED (0x1 << 10)
79#define NDSR_CS1_PAGED (0x1 << 9)
80#define NDSR_CS0_CMDD (0x1 << 8)
81#define NDSR_CS1_CMDD (0x1 << 7)
82#define NDSR_CS0_BBD (0x1 << 6)
83#define NDSR_CS1_BBD (0x1 << 5)
84#define NDSR_UNCORERR (0x1 << 4)
85#define NDSR_CORERR (0x1 << 3)
86#define NDSR_WRDREQ (0x1 << 2)
87#define NDSR_RDDREQ (0x1 << 1)
88#define NDSR_WRCMDREQ (0x1)
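
/*
 * Note on NDSR_ERR_CNT above: it reports how many bitflips were
 * corrected in the chunk just processed. The IRQ handler below only
 * trusts this field on the NFCv2 variant (Armada 370/XP) with BCH
 * enabled; otherwise a single corrected error is assumed
 * (see pxa3xx_nand_irq()).
 */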
89
90#define NDCB0_LEN_OVRD (0x1 << 28)
91#define NDCB0_ST_ROW_EN (0x1 << 26)
92#define NDCB0_AUTO_RS (0x1 << 25)
93#define NDCB0_CSEL (0x1 << 24)
94#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
95#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
96#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
97#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
98#define NDCB0_NC (0x1 << 20)
99#define NDCB0_DBC (0x1 << 19)
100#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
101#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
102#define NDCB0_CMD2_MASK (0xff << 8)
103#define NDCB0_CMD1_MASK (0xff)
104#define NDCB0_ADDR_CYC_SHIFT (16)
105
106#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
107#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
108#define EXT_CMD_TYPE_READ 4 /* Read */
109#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
110#define EXT_CMD_TYPE_FINAL 3 /* Final command */
111#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
112#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
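
/*
 * How the chunked (split) transfer path in nand_cmdfunc_extended() uses
 * these types: a chunked read starts as EXT_CMD_TYPE_MONO, continues
 * with naked reads (EXT_CMD_TYPE_NAKED_RW) and ends with a last naked
 * read (EXT_CMD_TYPE_LAST_RW); a chunked program starts with a SEQIN
 * dispatch (EXT_CMD_TYPE_DISPATCH), writes chunks as naked writes and
 * is closed by a final command dispatch.
 */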
113
Ofer Heifetz4a574aa2018-08-29 11:56:05 +0300114/*
115 * This should be large enough to read 'ONFI' and 'JEDEC'.
116 * Let's use 7 bytes, which is the maximum ID count supported
117 * by the controller (see NDCR_RD_ID_CNT_MASK).
118 */
119#define READ_ID_BYTES 7
120
Stefan Roese75659da2015-07-23 10:26:16 +0200121/* macros for registers read/write */
122#define nand_writel(info, off, val) \
123 writel((val), (info)->mmio_base + (off))
124
125#define nand_readl(info, off) \
126 readl((info)->mmio_base + (off))
127
128/* error code and state */
129enum {
130 ERR_NONE = 0,
131 ERR_DMABUSERR = -1,
132 ERR_SENDCMD = -2,
133 ERR_UNCORERR = -3,
134 ERR_BBERR = -4,
135 ERR_CORERR = -5,
136};
137
138enum {
139 STATE_IDLE = 0,
140 STATE_PREPARED,
141 STATE_CMD_HANDLE,
142 STATE_DMA_READING,
143 STATE_DMA_WRITING,
144 STATE_DMA_DONE,
145 STATE_PIO_READING,
146 STATE_PIO_WRITING,
147 STATE_CMD_DONE,
148 STATE_READY,
149};
150
151enum pxa3xx_nand_variant {
152 PXA3XX_NAND_VARIANT_PXA,
153 PXA3XX_NAND_VARIANT_ARMADA370,
154};
155
156struct pxa3xx_nand_host {
157 struct nand_chip chip;
Stefan Roese75659da2015-07-23 10:26:16 +0200158 void *info_data;
159
160 /* page size of attached chip */
161 int use_ecc;
162 int cs;
163
164 /* calculated from pxa3xx_nand_flash data */
165 unsigned int col_addr_cycles;
166 unsigned int row_addr_cycles;
Stefan Roese75659da2015-07-23 10:26:16 +0200167};
168
169struct pxa3xx_nand_info {
170 struct nand_hw_control controller;
171 struct pxa3xx_nand_platform_data *pdata;
172
173 struct clk *clk;
174 void __iomem *mmio_base;
175 unsigned long mmio_phys;
176 int cmd_complete, dev_ready;
177
178 unsigned int buf_start;
179 unsigned int buf_count;
180 unsigned int buf_size;
181 unsigned int data_buff_pos;
182 unsigned int oob_buff_pos;
183
184 unsigned char *data_buff;
185 unsigned char *oob_buff;
186
187 struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
188 unsigned int state;
189
190 /*
191 * This driver supports NFCv1 (as found in PXA SoC)
192 * and NFCv2 (as found in Armada 370/XP SoC).
193 */
194 enum pxa3xx_nand_variant variant;
195
196 int cs;
197 int use_ecc; /* use HW ECC ? */
198 int ecc_bch; /* using BCH ECC? */
199 int use_spare; /* use spare ? */
200 int need_wait;
201
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300202 /* Amount of real data per full chunk */
203 unsigned int chunk_size;
204
205 /* Amount of spare data per full chunk */
Stefan Roese75659da2015-07-23 10:26:16 +0200206 unsigned int spare_size;
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300207
 208	/* Number of full chunks (i.e. chunks of size chunk_size + spare_size) */
209 unsigned int nfullchunks;
210
211 /*
212 * Total number of chunks. If equal to nfullchunks, then there
213 * are only full chunks. Otherwise, there is one last chunk of
214 * size (last_chunk_size + last_spare_size)
215 */
216 unsigned int ntotalchunks;
217
218 /* Amount of real data in the last chunk */
219 unsigned int last_chunk_size;
220
221 /* Amount of spare data in the last chunk */
222 unsigned int last_spare_size;
223
Stefan Roese75659da2015-07-23 10:26:16 +0200224 unsigned int ecc_size;
225 unsigned int ecc_err_cnt;
226 unsigned int max_bitflips;
227 int retcode;
228
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300229 /*
230 * Variables only valid during command
 231	 * execution. step_chunk_size and step_spare_size are the
 232	 * amounts of real data and spare data in the current
233 * chunk. cur_chunk is the current chunk being
234 * read/programmed.
235 */
236 unsigned int step_chunk_size;
237 unsigned int step_spare_size;
238 unsigned int cur_chunk;
239
Stefan Roese75659da2015-07-23 10:26:16 +0200240 /* cached register value */
241 uint32_t reg_ndcr;
242 uint32_t ndtr0cs0;
243 uint32_t ndtr1cs0;
244
245 /* generated NDCBx register values */
246 uint32_t ndcb0;
247 uint32_t ndcb1;
248 uint32_t ndcb2;
249 uint32_t ndcb3;
250};
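
/*
 * Example of the chunk geometry above, taken from pxa_ecc_init() below
 * for 8-bit ECC strength on a 4 KiB page: nfullchunks = 4 chunks of
 * chunk_size = 1024 data bytes (spare_size = 0), plus one last chunk of
 * last_chunk_size = 0 data and last_spare_size = 64 spare bytes, i.e.
 * ntotalchunks = 5; the controller handles ecc_size = 32 ECC bytes per
 * chunk in hardware.
 */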
251
252static struct pxa3xx_nand_timing timing[] = {
Konstantin Porotchkina692cde2018-08-29 11:56:16 +0300253 /*
254 * tCH Enable signal hold time
255 * tCS Enable signal setup time
256 * tWH ND_nWE high duration
257 * tWP ND_nWE pulse time
258 * tRH ND_nRE high duration
259 * tRP ND_nRE pulse width
260 * tR ND_nWE high to ND_nRE low for read
261 * tWHR ND_nWE high to ND_nRE low for status read
262 * tAR ND_ALE low to ND_nRE low delay
263 */
Konstantin Porotchkin029be942018-08-29 11:56:14 +0300264 /*ch cs wh wp rh rp r whr ar */
Stefan Roese75659da2015-07-23 10:26:16 +0200265 { 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
266 { 10, 0, 20, 40, 30, 40, 11123, 110, 10, },
267 { 10, 25, 15, 25, 15, 30, 25000, 60, 10, },
268 { 10, 35, 15, 25, 15, 25, 25000, 60, 10, },
Konstantin Porotchkin029be942018-08-29 11:56:14 +0300269 { 5, 20, 10, 12, 10, 12, 25000, 60, 10, },
Stefan Roese75659da2015-07-23 10:26:16 +0200270};
271
272static struct pxa3xx_nand_flash builtin_flash_types[] = {
Konstantin Porotchkina692cde2018-08-29 11:56:16 +0300273 /*
274 * chip_id
275 * flash_width Width of Flash memory (DWIDTH_M)
276 * dfc_width Width of flash controller(DWIDTH_C)
277 * *timing
278 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
279 */
Stefan Roese75659da2015-07-23 10:26:16 +0200280 { 0x46ec, 16, 16, &timing[1] },
281 { 0xdaec, 8, 8, &timing[1] },
282 { 0xd7ec, 8, 8, &timing[1] },
283 { 0xa12c, 8, 8, &timing[2] },
284 { 0xb12c, 16, 16, &timing[2] },
285 { 0xdc2c, 8, 8, &timing[2] },
286 { 0xcc2c, 16, 16, &timing[2] },
287 { 0xba20, 16, 16, &timing[3] },
Konstantin Porotchkin029be942018-08-29 11:56:14 +0300288 { 0xda98, 8, 8, &timing[4] },
Stefan Roese75659da2015-07-23 10:26:16 +0200289};
290
Sean Nyekjaera12a8e82017-11-22 13:39:08 +0100291#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
Stefan Roese75659da2015-07-23 10:26:16 +0200292static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
293static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
294
295static struct nand_bbt_descr bbt_main_descr = {
296 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
297 | NAND_BBT_2BIT | NAND_BBT_VERSION,
298 .offs = 8,
299 .len = 6,
300 .veroffs = 14,
301 .maxblocks = 8, /* Last 8 blocks in each chip */
302 .pattern = bbt_pattern
303};
304
305static struct nand_bbt_descr bbt_mirror_descr = {
306 .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
307 | NAND_BBT_2BIT | NAND_BBT_VERSION,
308 .offs = 8,
309 .len = 6,
310 .veroffs = 14,
311 .maxblocks = 8, /* Last 8 blocks in each chip */
312 .pattern = bbt_mirror_pattern
313};
Sean Nyekjaera12a8e82017-11-22 13:39:08 +0100314#endif
Stefan Roese75659da2015-07-23 10:26:16 +0200315
316static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
317 .eccbytes = 32,
318 .eccpos = {
319 32, 33, 34, 35, 36, 37, 38, 39,
320 40, 41, 42, 43, 44, 45, 46, 47,
321 48, 49, 50, 51, 52, 53, 54, 55,
322 56, 57, 58, 59, 60, 61, 62, 63},
323 .oobfree = { {2, 30} }
324};
325
Victor Axelrodfdf9dfb2018-08-29 11:56:13 +0300326static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
327 .eccbytes = 64,
328 .eccpos = {
329 64, 65, 66, 67, 68, 69, 70, 71,
330 72, 73, 74, 75, 76, 77, 78, 79,
331 80, 81, 82, 83, 84, 85, 86, 87,
332 88, 89, 90, 91, 92, 93, 94, 95,
333 96, 97, 98, 99, 100, 101, 102, 103,
334 104, 105, 106, 107, 108, 109, 110, 111,
335 112, 113, 114, 115, 116, 117, 118, 119,
336 120, 121, 122, 123, 124, 125, 126, 127},
337 .oobfree = { {1, 4}, {6, 26} }
338};
339
Stefan Roese75659da2015-07-23 10:26:16 +0200340static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
341 .eccbytes = 64,
342 .eccpos = {
343 32, 33, 34, 35, 36, 37, 38, 39,
344 40, 41, 42, 43, 44, 45, 46, 47,
345 48, 49, 50, 51, 52, 53, 54, 55,
346 56, 57, 58, 59, 60, 61, 62, 63,
347 96, 97, 98, 99, 100, 101, 102, 103,
348 104, 105, 106, 107, 108, 109, 110, 111,
349 112, 113, 114, 115, 116, 117, 118, 119,
350 120, 121, 122, 123, 124, 125, 126, 127},
351 /* Bootrom looks in bytes 0 & 5 for bad blocks */
352 .oobfree = { {6, 26}, { 64, 32} }
353};
354
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +0300355static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
356 .eccbytes = 128,
357 .eccpos = {
358 32, 33, 34, 35, 36, 37, 38, 39,
359 40, 41, 42, 43, 44, 45, 46, 47,
360 48, 49, 50, 51, 52, 53, 54, 55,
361 56, 57, 58, 59, 60, 61, 62, 63,
362
363 96, 97, 98, 99, 100, 101, 102, 103,
364 104, 105, 106, 107, 108, 109, 110, 111,
365 112, 113, 114, 115, 116, 117, 118, 119,
366 120, 121, 122, 123, 124, 125, 126, 127,
367
368 160, 161, 162, 163, 164, 165, 166, 167,
369 168, 169, 170, 171, 172, 173, 174, 175,
370 176, 177, 178, 179, 180, 181, 182, 183,
371 184, 185, 186, 187, 188, 189, 190, 191,
372
373 224, 225, 226, 227, 228, 229, 230, 231,
374 232, 233, 234, 235, 236, 237, 238, 239,
375 240, 241, 242, 243, 244, 245, 246, 247,
376 248, 249, 250, 251, 252, 253, 254, 255},
377
378 /* Bootrom looks in bytes 0 & 5 for bad blocks */
379 .oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
380};
381
Stefan Roese75659da2015-07-23 10:26:16 +0200382static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
383 .eccbytes = 128,
384 .eccpos = {
385 32, 33, 34, 35, 36, 37, 38, 39,
386 40, 41, 42, 43, 44, 45, 46, 47,
387 48, 49, 50, 51, 52, 53, 54, 55,
388 56, 57, 58, 59, 60, 61, 62, 63},
389 .oobfree = { }
390};
391
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +0300392static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
393 .eccbytes = 256,
394 .eccpos = {},
395 /* HW ECC handles all ECC data and all spare area is free for OOB */
396 .oobfree = {{0, 160} }
397};
398
Stefan Roese75659da2015-07-23 10:26:16 +0200399#define NDTR0_tCH(c) (min((c), 7) << 19)
400#define NDTR0_tCS(c) (min((c), 7) << 16)
401#define NDTR0_tWH(c) (min((c), 7) << 11)
402#define NDTR0_tWP(c) (min((c), 7) << 8)
403#define NDTR0_tRH(c) (min((c), 7) << 3)
404#define NDTR0_tRP(c) (min((c), 7) << 0)
405
406#define NDTR1_tR(c) (min((c), 65535) << 16)
407#define NDTR1_tWHR(c) (min((c), 15) << 4)
408#define NDTR1_tAR(c) (min((c), 15) << 0)
409
 410/* convert nanoseconds to NAND flash controller clock cycles */
411#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
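
/*
 * Worked example (the clock value is illustrative only): with a 250 MHz
 * NAND controller clock, ns2cycle(25, 250000000) evaluates to
 * 25 * (250000000 / 1000000) / 1000 = 25 * 250 / 1000 = 6 cycles,
 * which the NDTR0_*()/NDTR1_*() macros above then clamp to their
 * respective field widths.
 */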
412
413static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
414{
415 /* We only support the Armada 370/XP/38x for now */
416 return PXA3XX_NAND_VARIANT_ARMADA370;
417}
418
419static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
420 const struct pxa3xx_nand_timing *t)
421{
422 struct pxa3xx_nand_info *info = host->info_data;
423 unsigned long nand_clk = mvebu_get_nand_clock();
424 uint32_t ndtr0, ndtr1;
425
426 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
427 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
428 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
429 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
430 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
431 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
432
433 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
434 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
435 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
436
437 info->ndtr0cs0 = ndtr0;
438 info->ndtr1cs0 = ndtr1;
439 nand_writel(info, NDTR0CS0, ndtr0);
440 nand_writel(info, NDTR1CS0, ndtr1);
441}
442
443static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
444 const struct nand_sdr_timings *t)
445{
446 struct pxa3xx_nand_info *info = host->info_data;
447 struct nand_chip *chip = &host->chip;
448 unsigned long nand_clk = mvebu_get_nand_clock();
449 uint32_t ndtr0, ndtr1;
450
451 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
452 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
453 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300454 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200455 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300456 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200457 u32 tR = chip->chip_delay * 1000;
458 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
459 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
460
461 /* fallback to a default value if tR = 0 */
462 if (!tR)
463 tR = 20000;
464
465 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
466 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
467 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
468 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
469 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
470 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
471
472 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
473 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
474 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
475
476 info->ndtr0cs0 = ndtr0;
477 info->ndtr1cs0 = ndtr1;
478 nand_writel(info, NDTR0CS0, ndtr0);
479 nand_writel(info, NDTR1CS0, ndtr1);
480}
481
482static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
483{
484 const struct nand_sdr_timings *timings;
485 struct nand_chip *chip = &host->chip;
486 struct pxa3xx_nand_info *info = host->info_data;
487 const struct pxa3xx_nand_flash *f = NULL;
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300488 struct mtd_info *mtd = nand_to_mtd(&host->chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200489 int mode, id, ntypes, i;
490
491 mode = onfi_get_async_timing_mode(chip);
492 if (mode == ONFI_TIMING_MODE_UNKNOWN) {
493 ntypes = ARRAY_SIZE(builtin_flash_types);
494
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300495 chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
Stefan Roese75659da2015-07-23 10:26:16 +0200496
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300497 id = chip->read_byte(mtd);
498 id |= chip->read_byte(mtd) << 0x8;
Stefan Roese75659da2015-07-23 10:26:16 +0200499
500 for (i = 0; i < ntypes; i++) {
501 f = &builtin_flash_types[i];
502
503 if (f->chip_id == id)
504 break;
505 }
506
507 if (i == ntypes) {
508 dev_err(&info->pdev->dev, "Error: timings not found\n");
509 return -EINVAL;
510 }
511
512 pxa3xx_nand_set_timing(host, f->timing);
513
514 if (f->flash_width == 16) {
515 info->reg_ndcr |= NDCR_DWIDTH_M;
516 chip->options |= NAND_BUSWIDTH_16;
517 }
518
519 info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
520 } else {
521 mode = fls(mode) - 1;
522 if (mode < 0)
523 mode = 0;
524
525 timings = onfi_async_timing_mode_to_sdr_timings(mode);
526 if (IS_ERR(timings))
527 return PTR_ERR(timings);
528
529 pxa3xx_nand_set_sdr_timing(host, timings);
530 }
531
532 return 0;
533}
534
Stefan Roese75659da2015-07-23 10:26:16 +0200535/**
Vagrant Cascadianbeb288b2015-11-24 14:46:24 -0800536 * NOTE: ND_RUN must be set first and the command buffer
Stefan Roese75659da2015-07-23 10:26:16 +0200537 * written afterwards, otherwise it does not work.
 538 * We enable all the interrupts at the same time and
 539 * let pxa3xx_nand_irq() handle all the logic.
540 */
541static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
542{
543 uint32_t ndcr;
544
545 ndcr = info->reg_ndcr;
546
547 if (info->use_ecc) {
548 ndcr |= NDCR_ECC_EN;
549 if (info->ecc_bch)
550 nand_writel(info, NDECCCTRL, 0x1);
551 } else {
552 ndcr &= ~NDCR_ECC_EN;
553 if (info->ecc_bch)
554 nand_writel(info, NDECCCTRL, 0x0);
555 }
556
557 ndcr &= ~NDCR_DMA_EN;
558
559 if (info->use_spare)
560 ndcr |= NDCR_SPARE_EN;
561 else
562 ndcr &= ~NDCR_SPARE_EN;
563
564 ndcr |= NDCR_ND_RUN;
565
566 /* clear status bits and run */
Stefan Roese75659da2015-07-23 10:26:16 +0200567 nand_writel(info, NDSR, NDSR_MASK);
Ofer Heifetzd92d8992018-08-29 11:56:03 +0300568 nand_writel(info, NDCR, 0);
Stefan Roese75659da2015-07-23 10:26:16 +0200569 nand_writel(info, NDCR, ndcr);
570}
571
572static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
573{
574 uint32_t ndcr;
575
576 ndcr = nand_readl(info, NDCR);
577 nand_writel(info, NDCR, ndcr | int_mask);
578}
579
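/*
 * Example of the draining pattern implemented below (chunk size taken
 * from the PIO path in handle_data_pio()): a 2048-byte chunk is read as
 * DIV_ROUND_UP(2048, 4) = 512 32-bit words; with BCH enabled that is
 * 63 bursts of 8 words, each followed by an RDDREQ poll, plus one final
 * unpolled burst of 8 words.
 */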
580static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
581{
582 if (info->ecc_bch) {
583 u32 ts;
584
585 /*
586 * According to the datasheet, when reading from NDDB
 587	 * with BCH enabled, after every 32 bytes read we
 588	 * have to make sure that the NDSR.RDDREQ bit is set.
 589	 *
 590	 * Drain the FIFO eight 32-bit reads at a time, and skip
591 * the polling on the last read.
592 */
593 while (len > 8) {
594 readsl(info->mmio_base + NDDB, data, 8);
595
596 ts = get_timer(0);
597 while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
598 if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
599 dev_err(&info->pdev->dev,
600 "Timeout on RDDREQ while draining the FIFO\n");
601 return;
602 }
603 }
604
605 data += 32;
606 len -= 8;
607 }
608 }
609
610 readsl(info->mmio_base + NDDB, data, len);
611}
612
613static void handle_data_pio(struct pxa3xx_nand_info *info)
614{
Stefan Roese75659da2015-07-23 10:26:16 +0200615 switch (info->state) {
616 case STATE_PIO_WRITING:
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300617 if (info->step_chunk_size)
618 writesl(info->mmio_base + NDDB,
619 info->data_buff + info->data_buff_pos,
620 DIV_ROUND_UP(info->step_chunk_size, 4));
Stefan Roese75659da2015-07-23 10:26:16 +0200621
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300622 if (info->step_spare_size)
Stefan Roese75659da2015-07-23 10:26:16 +0200623 writesl(info->mmio_base + NDDB,
624 info->oob_buff + info->oob_buff_pos,
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300625 DIV_ROUND_UP(info->step_spare_size, 4));
Stefan Roese75659da2015-07-23 10:26:16 +0200626 break;
627 case STATE_PIO_READING:
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300628 if (info->step_chunk_size)
629 drain_fifo(info,
630 info->data_buff + info->data_buff_pos,
631 DIV_ROUND_UP(info->step_chunk_size, 4));
Stefan Roese75659da2015-07-23 10:26:16 +0200632
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300633 if (info->step_spare_size)
Stefan Roese75659da2015-07-23 10:26:16 +0200634 drain_fifo(info,
635 info->oob_buff + info->oob_buff_pos,
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300636 DIV_ROUND_UP(info->step_spare_size, 4));
Stefan Roese75659da2015-07-23 10:26:16 +0200637 break;
638 default:
639 dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300640 info->state);
Stefan Roese75659da2015-07-23 10:26:16 +0200641 BUG();
642 }
643
644 /* Update buffer pointers for multi-page read/write */
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300645 info->data_buff_pos += info->step_chunk_size;
646 info->oob_buff_pos += info->step_spare_size;
Stefan Roese75659da2015-07-23 10:26:16 +0200647}
648
649static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
650{
651 handle_data_pio(info);
652
653 info->state = STATE_CMD_DONE;
654 nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
655}
656
657static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
658{
659 unsigned int status, is_completed = 0, is_ready = 0;
660 unsigned int ready, cmd_done;
661 irqreturn_t ret = IRQ_HANDLED;
662
663 if (info->cs == 0) {
664 ready = NDSR_FLASH_RDY;
665 cmd_done = NDSR_CS0_CMDD;
666 } else {
667 ready = NDSR_RDY;
668 cmd_done = NDSR_CS1_CMDD;
669 }
670
671 status = nand_readl(info, NDSR);
672
673 if (status & NDSR_UNCORERR)
674 info->retcode = ERR_UNCORERR;
675 if (status & NDSR_CORERR) {
676 info->retcode = ERR_CORERR;
677 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
678 info->ecc_bch)
679 info->ecc_err_cnt = NDSR_ERR_CNT(status);
680 else
681 info->ecc_err_cnt = 1;
682
683 /*
684 * Each chunk composing a page is corrected independently,
 685	 * and we need to store the maximum number of corrected bitflips
686 * to return it to the MTD layer in ecc.read_page().
687 */
688 info->max_bitflips = max_t(unsigned int,
689 info->max_bitflips,
690 info->ecc_err_cnt);
691 }
692 if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
693 info->state = (status & NDSR_RDDREQ) ?
694 STATE_PIO_READING : STATE_PIO_WRITING;
695 /* Call the IRQ thread in U-Boot directly */
696 pxa3xx_nand_irq_thread(info);
697 return 0;
698 }
699 if (status & cmd_done) {
700 info->state = STATE_CMD_DONE;
701 is_completed = 1;
702 }
703 if (status & ready) {
704 info->state = STATE_READY;
705 is_ready = 1;
706 }
707
Ofer Heifetzde323162018-08-29 11:56:04 +0300708 /*
 709	 * Clear all status bits before issuing the next command, which
 710	 * can and will alter the status bits and will raise a new
 711	 * interrupt of its own. This lets the controller exit the IRQ state.
 712	 */
713 nand_writel(info, NDSR, status);
714
Stefan Roese75659da2015-07-23 10:26:16 +0200715 if (status & NDSR_WRCMDREQ) {
Stefan Roese75659da2015-07-23 10:26:16 +0200716 status &= ~NDSR_WRCMDREQ;
717 info->state = STATE_CMD_HANDLE;
718
719 /*
720 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
 721	 * must be loaded by writing either 12 or 16 bytes
 722	 * directly to NDCB0, four bytes at a time.
723 *
724 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
725 * but each NDCBx register can be read.
726 */
727 nand_writel(info, NDCB0, info->ndcb0);
728 nand_writel(info, NDCB0, info->ndcb1);
729 nand_writel(info, NDCB0, info->ndcb2);
730
731 /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
732 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
733 nand_writel(info, NDCB0, info->ndcb3);
734 }
735
Stefan Roese75659da2015-07-23 10:26:16 +0200736 if (is_completed)
737 info->cmd_complete = 1;
738 if (is_ready)
739 info->dev_ready = 1;
740
741 return ret;
742}
743
744static inline int is_buf_blank(uint8_t *buf, size_t len)
745{
746 for (; len > 0; len--)
747 if (*buf++ != 0xff)
748 return 0;
749 return 1;
750}
751
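/*
 * Example of the address packing done below for a large-page device
 * (values are illustrative): page_addr = 0x12345 and column = 0 yield
 * ndcb1 = 0x23450000 (low 16 page address bits in [31:16], column in
 * [15:0]) and ndcb2 = 0x01 (page address bits [23:16]).
 */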
752static void set_command_address(struct pxa3xx_nand_info *info,
753 unsigned int page_size, uint16_t column, int page_addr)
754{
755 /* small page addr setting */
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300756 if (page_size < info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200757 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
758 | (column & 0xFF);
759
760 info->ndcb2 = 0;
761 } else {
762 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
763 | (column & 0xFFFF);
764
765 if (page_addr & 0xFF0000)
766 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
767 else
768 info->ndcb2 = 0;
769 }
770}
771
772static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
773{
774 struct pxa3xx_nand_host *host = info->host[info->cs];
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300775 struct mtd_info *mtd = nand_to_mtd(&host->chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200776
 777	/* reset data and oob column pointers before handling data */
778 info->buf_start = 0;
779 info->buf_count = 0;
Stefan Roese75659da2015-07-23 10:26:16 +0200780 info->data_buff_pos = 0;
781 info->oob_buff_pos = 0;
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300782 info->step_chunk_size = 0;
783 info->step_spare_size = 0;
784 info->cur_chunk = 0;
Stefan Roese75659da2015-07-23 10:26:16 +0200785 info->use_ecc = 0;
786 info->use_spare = 1;
787 info->retcode = ERR_NONE;
788 info->ecc_err_cnt = 0;
789 info->ndcb3 = 0;
790 info->need_wait = 0;
791
792 switch (command) {
793 case NAND_CMD_READ0:
Boris Brezillona558a392018-08-29 11:56:12 +0300794 case NAND_CMD_READOOB:
Stefan Roese75659da2015-07-23 10:26:16 +0200795 case NAND_CMD_PAGEPROG:
796 info->use_ecc = 1;
Stefan Roese75659da2015-07-23 10:26:16 +0200797 break;
798 case NAND_CMD_PARAM:
799 info->use_spare = 0;
800 break;
801 default:
802 info->ndcb1 = 0;
803 info->ndcb2 = 0;
804 break;
805 }
806
807 /*
808 * If we are about to issue a read command, or about to set
809 * the write address, then clean the data buffer.
810 */
811 if (command == NAND_CMD_READ0 ||
812 command == NAND_CMD_READOOB ||
813 command == NAND_CMD_SEQIN) {
814 info->buf_count = mtd->writesize + mtd->oobsize;
815 memset(info->data_buff, 0xFF, info->buf_count);
816 }
817}
818
819static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
820 int ext_cmd_type, uint16_t column, int page_addr)
821{
822 int addr_cycle, exec_cmd;
823 struct pxa3xx_nand_host *host;
824 struct mtd_info *mtd;
825
826 host = info->host[info->cs];
Ofer Heifetz0da35df2018-08-29 11:56:01 +0300827 mtd = nand_to_mtd(&host->chip);
Stefan Roese75659da2015-07-23 10:26:16 +0200828 addr_cycle = 0;
829 exec_cmd = 1;
830
831 if (info->cs != 0)
832 info->ndcb0 = NDCB0_CSEL;
833 else
834 info->ndcb0 = 0;
835
836 if (command == NAND_CMD_SEQIN)
837 exec_cmd = 0;
838
839 addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
840 + host->col_addr_cycles);
841
842 switch (command) {
843 case NAND_CMD_READOOB:
844 case NAND_CMD_READ0:
845 info->buf_start = column;
846 info->ndcb0 |= NDCB0_CMD_TYPE(0)
847 | addr_cycle
848 | NAND_CMD_READ0;
849
850 if (command == NAND_CMD_READOOB)
851 info->buf_start += mtd->writesize;
852
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300853 if (info->cur_chunk < info->nfullchunks) {
854 info->step_chunk_size = info->chunk_size;
855 info->step_spare_size = info->spare_size;
856 } else {
857 info->step_chunk_size = info->last_chunk_size;
858 info->step_spare_size = info->last_spare_size;
859 }
860
Stefan Roese75659da2015-07-23 10:26:16 +0200861 /*
862 * Multiple page read needs an 'extended command type' field,
863 * which is either naked-read or last-read according to the
864 * state.
865 */
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300866 if (mtd->writesize == info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200867 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300868 } else if (mtd->writesize > info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200869 info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
870 | NDCB0_LEN_OVRD
871 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300872 info->ndcb3 = info->step_chunk_size +
873 info->step_spare_size;
Stefan Roese75659da2015-07-23 10:26:16 +0200874 }
875
876 set_command_address(info, mtd->writesize, column, page_addr);
877 break;
878
879 case NAND_CMD_SEQIN:
880
881 info->buf_start = column;
882 set_command_address(info, mtd->writesize, 0, page_addr);
883
884 /*
885 * Multiple page programming needs to execute the initial
886 * SEQIN command that sets the page address.
887 */
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300888 if (mtd->writesize > info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200889 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
890 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
891 | addr_cycle
892 | command;
Stefan Roese75659da2015-07-23 10:26:16 +0200893 exec_cmd = 1;
894 }
895 break;
896
897 case NAND_CMD_PAGEPROG:
898 if (is_buf_blank(info->data_buff,
899 (mtd->writesize + mtd->oobsize))) {
900 exec_cmd = 0;
901 break;
902 }
903
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300904 if (info->cur_chunk < info->nfullchunks) {
905 info->step_chunk_size = info->chunk_size;
906 info->step_spare_size = info->spare_size;
907 } else {
908 info->step_chunk_size = info->last_chunk_size;
909 info->step_spare_size = info->last_spare_size;
910 }
911
Stefan Roese75659da2015-07-23 10:26:16 +0200912 /* Second command setting for large pages */
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300913 if (mtd->writesize > info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200914 /*
915 * Multiple page write uses the 'extended command'
916 * field. This can be used to issue a command dispatch
917 * or a naked-write depending on the current stage.
918 */
919 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
920 | NDCB0_LEN_OVRD
921 | NDCB0_EXT_CMD_TYPE(ext_cmd_type);
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300922 info->ndcb3 = info->step_chunk_size +
923 info->step_spare_size;
Stefan Roese75659da2015-07-23 10:26:16 +0200924
925 /*
926 * This is the command dispatch that completes a chunked
927 * page program operation.
928 */
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300929 if (info->cur_chunk == info->ntotalchunks) {
Stefan Roese75659da2015-07-23 10:26:16 +0200930 info->ndcb0 = NDCB0_CMD_TYPE(0x1)
931 | NDCB0_EXT_CMD_TYPE(ext_cmd_type)
932 | command;
933 info->ndcb1 = 0;
934 info->ndcb2 = 0;
935 info->ndcb3 = 0;
936 }
937 } else {
938 info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
939 | NDCB0_AUTO_RS
940 | NDCB0_ST_ROW_EN
941 | NDCB0_DBC
942 | (NAND_CMD_PAGEPROG << 8)
943 | NAND_CMD_SEQIN
944 | addr_cycle;
945 }
946 break;
947
948 case NAND_CMD_PARAM:
Ofer Heifetzfdf5b232018-08-29 11:56:00 +0300949 info->buf_count = INIT_BUFFER_SIZE;
Stefan Roese75659da2015-07-23 10:26:16 +0200950 info->ndcb0 |= NDCB0_CMD_TYPE(0)
951 | NDCB0_ADDR_CYC(1)
952 | NDCB0_LEN_OVRD
953 | command;
954 info->ndcb1 = (column & 0xFF);
Ofer Heifetzfdf5b232018-08-29 11:56:00 +0300955 info->ndcb3 = INIT_BUFFER_SIZE;
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300956 info->step_chunk_size = INIT_BUFFER_SIZE;
Stefan Roese75659da2015-07-23 10:26:16 +0200957 break;
958
959 case NAND_CMD_READID:
Ofer Heifetz4a574aa2018-08-29 11:56:05 +0300960 info->buf_count = READ_ID_BYTES;
Stefan Roese75659da2015-07-23 10:26:16 +0200961 info->ndcb0 |= NDCB0_CMD_TYPE(3)
962 | NDCB0_ADDR_CYC(1)
963 | command;
964 info->ndcb1 = (column & 0xFF);
965
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300966 info->step_chunk_size = 8;
Stefan Roese75659da2015-07-23 10:26:16 +0200967 break;
968 case NAND_CMD_STATUS:
969 info->buf_count = 1;
970 info->ndcb0 |= NDCB0_CMD_TYPE(4)
971 | NDCB0_ADDR_CYC(1)
972 | command;
973
Ofer Heifetz191b5be2018-08-29 11:56:09 +0300974 info->step_chunk_size = 8;
Stefan Roese75659da2015-07-23 10:26:16 +0200975 break;
976
977 case NAND_CMD_ERASE1:
978 info->ndcb0 |= NDCB0_CMD_TYPE(2)
979 | NDCB0_AUTO_RS
980 | NDCB0_ADDR_CYC(3)
981 | NDCB0_DBC
982 | (NAND_CMD_ERASE2 << 8)
983 | NAND_CMD_ERASE1;
984 info->ndcb1 = page_addr;
985 info->ndcb2 = 0;
986
987 break;
988 case NAND_CMD_RESET:
989 info->ndcb0 |= NDCB0_CMD_TYPE(5)
990 | command;
991
992 break;
993
994 case NAND_CMD_ERASE2:
995 exec_cmd = 0;
996 break;
997
998 default:
999 exec_cmd = 0;
1000 dev_err(&info->pdev->dev, "non-supported command %x\n",
1001 command);
1002 break;
1003 }
1004
1005 return exec_cmd;
1006}
1007
1008static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1009 int column, int page_addr)
1010{
Scott Wood17fed142016-05-30 13:57:56 -05001011 struct nand_chip *chip = mtd_to_nand(mtd);
1012 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001013 struct pxa3xx_nand_info *info = host->info_data;
1014 int exec_cmd;
1015
1016 /*
 1017	 * if this is an x16 device, then convert the input
1018 * "byte" address into a "word" address appropriate
1019 * for indexing a word-oriented device
1020 */
1021 if (info->reg_ndcr & NDCR_DWIDTH_M)
1022 column /= 2;
1023
1024 /*
 1025	 * There may be a different NAND chip hooked to each
 1026	 * chip select, so check whether the chip select has
 1027	 * changed and, if so, reload the timing registers.
1028 */
1029 if (info->cs != host->cs) {
1030 info->cs = host->cs;
1031 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1032 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1033 }
1034
1035 prepare_start_command(info, command);
1036
1037 info->state = STATE_PREPARED;
1038 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1039
1040 if (exec_cmd) {
1041 u32 ts;
1042
1043 info->cmd_complete = 0;
1044 info->dev_ready = 0;
1045 info->need_wait = 1;
1046 pxa3xx_nand_start(info);
1047
1048 ts = get_timer(0);
1049 while (1) {
1050 u32 status;
1051
1052 status = nand_readl(info, NDSR);
1053 if (status)
1054 pxa3xx_nand_irq(info);
1055
1056 if (info->cmd_complete)
1057 break;
1058
1059 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1060 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1061 return;
1062 }
1063 }
1064 }
1065 info->state = STATE_IDLE;
1066}
1067
1068static void nand_cmdfunc_extended(struct mtd_info *mtd,
1069 const unsigned command,
1070 int column, int page_addr)
1071{
Scott Wood17fed142016-05-30 13:57:56 -05001072 struct nand_chip *chip = mtd_to_nand(mtd);
1073 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001074 struct pxa3xx_nand_info *info = host->info_data;
1075 int exec_cmd, ext_cmd_type;
1076
1077 /*
 1078	 * if this is an x16 device, then convert the input
1079 * "byte" address into a "word" address appropriate
1080 * for indexing a word-oriented device
1081 */
1082 if (info->reg_ndcr & NDCR_DWIDTH_M)
1083 column /= 2;
1084
1085 /*
 1086	 * There may be a different NAND chip hooked to each
 1087	 * chip select, so check whether the chip select has
 1088	 * changed and, if so, reload the timing registers.
1089 */
1090 if (info->cs != host->cs) {
1091 info->cs = host->cs;
1092 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1093 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1094 }
1095
1096 /* Select the extended command for the first command */
1097 switch (command) {
1098 case NAND_CMD_READ0:
1099 case NAND_CMD_READOOB:
1100 ext_cmd_type = EXT_CMD_TYPE_MONO;
1101 break;
1102 case NAND_CMD_SEQIN:
1103 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1104 break;
1105 case NAND_CMD_PAGEPROG:
1106 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1107 break;
1108 default:
1109 ext_cmd_type = 0;
1110 break;
1111 }
1112
1113 prepare_start_command(info, command);
1114
1115 /*
1116 * Prepare the "is ready" completion before starting a command
 1117	 * transaction sequence. If the command is not executed, the
 1118	 * completion is marked done right away, see below.
1119 *
1120 * We can do that inside the loop because the command variable
1121 * is invariant and thus so is the exec_cmd.
1122 */
1123 info->need_wait = 1;
1124 info->dev_ready = 0;
1125
1126 do {
1127 u32 ts;
1128
1129 info->state = STATE_PREPARED;
1130 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1131 column, page_addr);
1132 if (!exec_cmd) {
1133 info->need_wait = 0;
1134 info->dev_ready = 1;
1135 break;
1136 }
1137
1138 info->cmd_complete = 0;
1139 pxa3xx_nand_start(info);
1140
1141 ts = get_timer(0);
1142 while (1) {
1143 u32 status;
1144
1145 status = nand_readl(info, NDSR);
1146 if (status)
1147 pxa3xx_nand_irq(info);
1148
1149 if (info->cmd_complete)
1150 break;
1151
1152 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1153 dev_err(&info->pdev->dev, "Wait timeout!!!\n");
1154 return;
1155 }
1156 }
1157
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001158 /* Only a few commands need several steps */
1159 if (command != NAND_CMD_PAGEPROG &&
1160 command != NAND_CMD_READ0 &&
1161 command != NAND_CMD_READOOB)
1162 break;
1163
1164 info->cur_chunk++;
1165
Stefan Roese75659da2015-07-23 10:26:16 +02001166 /* Check if the sequence is complete */
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001167 if (info->cur_chunk == info->ntotalchunks &&
1168 command != NAND_CMD_PAGEPROG)
Stefan Roese75659da2015-07-23 10:26:16 +02001169 break;
1170
1171 /*
 1172	 * After a split program command sequence has issued
1173 * the command dispatch, the command sequence is complete.
1174 */
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001175 if (info->cur_chunk == (info->ntotalchunks + 1) &&
Stefan Roese75659da2015-07-23 10:26:16 +02001176 command == NAND_CMD_PAGEPROG &&
1177 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1178 break;
1179
1180 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1181 /* Last read: issue a 'last naked read' */
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001182 if (info->cur_chunk == info->ntotalchunks - 1)
Stefan Roese75659da2015-07-23 10:26:16 +02001183 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1184 else
1185 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1186
1187 /*
 1188	 * If a split program command has no more data to transfer,
 1189	 * the command dispatch must be issued to complete it.
1190 */
1191 } else if (command == NAND_CMD_PAGEPROG &&
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001192 info->cur_chunk == info->ntotalchunks) {
Stefan Roese75659da2015-07-23 10:26:16 +02001193 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1194 }
1195 } while (1);
1196
1197 info->state = STATE_IDLE;
1198}
1199
1200static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
Scott Wood46e13102016-05-30 13:57:57 -05001201 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1202 int page)
Stefan Roese75659da2015-07-23 10:26:16 +02001203{
1204 chip->write_buf(mtd, buf, mtd->writesize);
1205 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1206
1207 return 0;
1208}
1209
1210static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
1211 struct nand_chip *chip, uint8_t *buf, int oob_required,
1212 int page)
1213{
Scott Wood17fed142016-05-30 13:57:56 -05001214 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001215 struct pxa3xx_nand_info *info = host->info_data;
1216
1217 chip->read_buf(mtd, buf, mtd->writesize);
1218 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1219
1220 if (info->retcode == ERR_CORERR && info->use_ecc) {
1221 mtd->ecc_stats.corrected += info->ecc_err_cnt;
1222
1223 } else if (info->retcode == ERR_UNCORERR) {
1224 /*
 1225	 * for a blank page (all 0xff), the HW calculates its ECC as
 1226	 * 0, which is different from the ECC information within the
 1227	 * OOB; ignore such uncorrectable errors
1228 */
1229 if (is_buf_blank(buf, mtd->writesize))
1230 info->retcode = ERR_NONE;
1231 else
1232 mtd->ecc_stats.failed++;
1233 }
1234
1235 return info->max_bitflips;
1236}
1237
1238static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1239{
Scott Wood17fed142016-05-30 13:57:56 -05001240 struct nand_chip *chip = mtd_to_nand(mtd);
1241 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001242 struct pxa3xx_nand_info *info = host->info_data;
1243 char retval = 0xFF;
1244
1245 if (info->buf_start < info->buf_count)
 1246		/* Has a new command just been sent? */
1247 retval = info->data_buff[info->buf_start++];
1248
1249 return retval;
1250}
1251
1252static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1253{
Scott Wood17fed142016-05-30 13:57:56 -05001254 struct nand_chip *chip = mtd_to_nand(mtd);
1255 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001256 struct pxa3xx_nand_info *info = host->info_data;
1257 u16 retval = 0xFFFF;
1258
1259 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1260 retval = *((u16 *)(info->data_buff+info->buf_start));
1261 info->buf_start += 2;
1262 }
1263 return retval;
1264}
1265
1266static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1267{
Scott Wood17fed142016-05-30 13:57:56 -05001268 struct nand_chip *chip = mtd_to_nand(mtd);
1269 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001270 struct pxa3xx_nand_info *info = host->info_data;
1271 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1272
1273 memcpy(buf, info->data_buff + info->buf_start, real_len);
1274 info->buf_start += real_len;
1275}
1276
1277static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1278 const uint8_t *buf, int len)
1279{
Scott Wood17fed142016-05-30 13:57:56 -05001280 struct nand_chip *chip = mtd_to_nand(mtd);
1281 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001282 struct pxa3xx_nand_info *info = host->info_data;
1283 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1284
1285 memcpy(info->data_buff + info->buf_start, buf, real_len);
1286 info->buf_start += real_len;
1287}
1288
1289static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
1290{
1291 return;
1292}
1293
1294static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
1295{
Scott Wood17fed142016-05-30 13:57:56 -05001296 struct nand_chip *chip = mtd_to_nand(mtd);
1297 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001298 struct pxa3xx_nand_info *info = host->info_data;
1299
1300 if (info->need_wait) {
1301 u32 ts;
1302
1303 info->need_wait = 0;
1304
1305 ts = get_timer(0);
1306 while (1) {
1307 u32 status;
1308
1309 status = nand_readl(info, NDSR);
1310 if (status)
1311 pxa3xx_nand_irq(info);
1312
1313 if (info->dev_ready)
1314 break;
1315
1316 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
1317 dev_err(&info->pdev->dev, "Ready timeout!!!\n");
1318 return NAND_STATUS_FAIL;
1319 }
1320 }
1321 }
1322
 1323	/* nand_cmdfunc() has already waited for the command to complete */
1324 if (this->state == FL_WRITING || this->state == FL_ERASING) {
1325 if (info->retcode == ERR_NONE)
1326 return 0;
1327 else
1328 return NAND_STATUS_FAIL;
1329 }
1330
1331 return NAND_STATUS_READY;
1332}
1333
Ofer Heifetz531816e2018-08-29 11:56:07 +03001334static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1335{
1336 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1337
1338 /* Configure default flash values */
Ofer Heifetz531816e2018-08-29 11:56:07 +03001339 info->reg_ndcr = 0x0; /* enable all interrupts */
1340 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1341 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1342 info->reg_ndcr |= NDCR_SPARE_EN;
1343
1344 return 0;
1345}
1346
1347static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001348{
1349 struct pxa3xx_nand_host *host = info->host[info->cs];
Ofer Heifetz531816e2018-08-29 11:56:07 +03001350 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001351 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001352
1353 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1354 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1355 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001356}
1357
Ofer Heifetz268979f2018-08-29 11:56:08 +03001358static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001359{
Ofer Heifetz531816e2018-08-29 11:56:07 +03001360 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001361 uint32_t ndcr = nand_readl(info, NDCR);
1362
Stefan Roese75659da2015-07-23 10:26:16 +02001363 /* Set an initial chunk size */
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001364 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001365 info->reg_ndcr = ndcr &
1366 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1367 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001368 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1369 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
Stefan Roese75659da2015-07-23 10:26:16 +02001370}
1371
1372static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1373{
1374 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1375 if (info->data_buff == NULL)
1376 return -ENOMEM;
1377 return 0;
1378}
1379
1380static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
1381{
1382 struct pxa3xx_nand_info *info = host->info_data;
1383 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1384 struct mtd_info *mtd;
1385 struct nand_chip *chip;
1386 const struct nand_sdr_timings *timings;
1387 int ret;
1388
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001389 mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001390 chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001391
1392 /* configure default flash values */
1393 info->reg_ndcr = 0x0; /* enable all interrupts */
1394 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001395 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
Stefan Roese75659da2015-07-23 10:26:16 +02001396 info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */
1397
 1398	/* use common timings for a first try */
1399 timings = onfi_async_timing_mode_to_sdr_timings(0);
1400 if (IS_ERR(timings))
1401 return PTR_ERR(timings);
1402
1403 pxa3xx_nand_set_sdr_timing(host, timings);
1404
1405 chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
1406 ret = chip->waitfunc(mtd, chip);
1407 if (ret & NAND_STATUS_FAIL)
1408 return -ENODEV;
1409
1410 return 0;
1411}
1412
1413static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1414 struct nand_ecc_ctrl *ecc,
1415 int strength, int ecc_stepsize, int page_size)
1416{
1417 if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001418 info->nfullchunks = 1;
1419 info->ntotalchunks = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001420 info->chunk_size = 2048;
1421 info->spare_size = 40;
1422 info->ecc_size = 24;
1423 ecc->mode = NAND_ECC_HW;
1424 ecc->size = 512;
1425 ecc->strength = 1;
1426
1427 } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001428 info->nfullchunks = 1;
1429 info->ntotalchunks = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001430 info->chunk_size = 512;
1431 info->spare_size = 8;
1432 info->ecc_size = 8;
1433 ecc->mode = NAND_ECC_HW;
1434 ecc->size = 512;
1435 ecc->strength = 1;
1436
1437 /*
1438 * Required ECC: 4-bit correction per 512 bytes
1439 * Select: 16-bit correction per 2048 bytes
1440 */
1441 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
1442 info->ecc_bch = 1;
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001443 info->nfullchunks = 1;
1444 info->ntotalchunks = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001445 info->chunk_size = 2048;
1446 info->spare_size = 32;
1447 info->ecc_size = 32;
1448 ecc->mode = NAND_ECC_HW;
1449 ecc->size = info->chunk_size;
1450 ecc->layout = &ecc_layout_2KB_bch4bit;
1451 ecc->strength = 16;
1452
1453 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
1454 info->ecc_bch = 1;
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001455 info->nfullchunks = 2;
1456 info->ntotalchunks = 2;
Stefan Roese75659da2015-07-23 10:26:16 +02001457 info->chunk_size = 2048;
1458 info->spare_size = 32;
1459 info->ecc_size = 32;
1460 ecc->mode = NAND_ECC_HW;
1461 ecc->size = info->chunk_size;
1462 ecc->layout = &ecc_layout_4KB_bch4bit;
1463 ecc->strength = 16;
1464
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001465 } else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
1466 info->ecc_bch = 1;
1467 info->nfullchunks = 4;
1468 info->ntotalchunks = 4;
1469 info->chunk_size = 2048;
1470 info->spare_size = 32;
1471 info->ecc_size = 32;
1472 ecc->mode = NAND_ECC_HW;
1473 ecc->size = info->chunk_size;
1474 ecc->layout = &ecc_layout_8KB_bch4bit;
1475 ecc->strength = 16;
1476
Stefan Roese75659da2015-07-23 10:26:16 +02001477 /*
1478 * Required ECC: 8-bit correction per 512 bytes
1479 * Select: 16-bit correction per 1024 bytes
1480 */
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001481 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
1482 info->ecc_bch = 1;
1483 info->nfullchunks = 1;
1484 info->ntotalchunks = 2;
1485 info->chunk_size = 1024;
1486 info->spare_size = 0;
1487 info->last_chunk_size = 1024;
1488 info->last_spare_size = 64;
1489 info->ecc_size = 32;
1490 ecc->mode = NAND_ECC_HW;
1491 ecc->size = info->chunk_size;
1492 ecc->layout = &ecc_layout_2KB_bch8bit;
1493 ecc->strength = 16;
1494
Stefan Roese75659da2015-07-23 10:26:16 +02001495 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
1496 info->ecc_bch = 1;
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001497 info->nfullchunks = 4;
1498 info->ntotalchunks = 5;
Stefan Roese75659da2015-07-23 10:26:16 +02001499 info->chunk_size = 1024;
1500 info->spare_size = 0;
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001501 info->last_chunk_size = 0;
1502 info->last_spare_size = 64;
Stefan Roese75659da2015-07-23 10:26:16 +02001503 info->ecc_size = 32;
1504 ecc->mode = NAND_ECC_HW;
1505 ecc->size = info->chunk_size;
1506 ecc->layout = &ecc_layout_4KB_bch8bit;
1507 ecc->strength = 16;
Konstantin Porotchkina692cde2018-08-29 11:56:16 +03001508
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001509 } else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
Victor Axelrodfdf9dfb2018-08-29 11:56:13 +03001510 info->ecc_bch = 1;
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001511 info->nfullchunks = 8;
1512 info->ntotalchunks = 9;
Victor Axelrodfdf9dfb2018-08-29 11:56:13 +03001513 info->chunk_size = 1024;
1514 info->spare_size = 0;
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001515 info->last_chunk_size = 0;
1516 info->last_spare_size = 160;
Victor Axelrodfdf9dfb2018-08-29 11:56:13 +03001517 info->ecc_size = 32;
1518 ecc->mode = NAND_ECC_HW;
1519 ecc->size = info->chunk_size;
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001520 ecc->layout = &ecc_layout_8KB_bch8bit;
Victor Axelrodfdf9dfb2018-08-29 11:56:13 +03001521 ecc->strength = 16;
Konstantin Porotchkina692cde2018-08-29 11:56:16 +03001522
Stefan Roese75659da2015-07-23 10:26:16 +02001523 } else {
1524 dev_err(&info->pdev->dev,
1525 "ECC strength %d at page size %d is not supported\n",
1526 strength, page_size);
1527 return -ENODEV;
1528 }
1529
1530 return 0;
1531}
1532
1533static int pxa3xx_nand_scan(struct mtd_info *mtd)
1534{
Scott Wood17fed142016-05-30 13:57:56 -05001535 struct nand_chip *chip = mtd_to_nand(mtd);
1536 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001537 struct pxa3xx_nand_info *info = host->info_data;
1538 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001539 int ret;
1540 uint16_t ecc_strength, ecc_step;
1541
Ofer Heifetz268979f2018-08-29 11:56:08 +03001542 if (pdata->keep_config) {
1543 pxa3xx_nand_detect_config(info);
1544 } else {
1545 ret = pxa3xx_nand_config_ident(info);
1546 if (ret)
1547 return ret;
1548 ret = pxa3xx_nand_sensing(host);
1549 if (ret) {
1550 dev_info(&info->pdev->dev,
1551 "There is no chip on cs %d!\n",
1552 info->cs);
1553 return ret;
1554 }
Stefan Roese75659da2015-07-23 10:26:16 +02001555 }
1556
Stefan Roese75659da2015-07-23 10:26:16 +02001557 /* Device detection must be done with ECC disabled */
1558 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1559 nand_writel(info, NDECCCTRL, 0x0);
1560
1561 if (nand_scan_ident(mtd, 1, NULL))
1562 return -ENODEV;
1563
1564 if (!pdata->keep_config) {
1565 ret = pxa3xx_nand_init_timings(host);
1566 if (ret) {
1567 dev_err(&info->pdev->dev,
1568 "Failed to set timings: %d\n", ret);
1569 return ret;
1570 }
1571 }
1572
Stefan Roese75659da2015-07-23 10:26:16 +02001573#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1574 /*
1575 * We'll use a bad block table stored in-flash and don't
1576 * allow writing the bad block marker to the flash.
1577 */
1578 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1579 chip->bbt_td = &bbt_main_descr;
1580 chip->bbt_md = &bbt_mirror_descr;
1581#endif
1582
Stefan Roese75659da2015-07-23 10:26:16 +02001583 if (pdata->ecc_strength && pdata->ecc_step_size) {
1584 ecc_strength = pdata->ecc_strength;
1585 ecc_step = pdata->ecc_step_size;
1586 } else {
1587 ecc_strength = chip->ecc_strength_ds;
1588 ecc_step = chip->ecc_step_ds;
1589 }
1590
1591 /* Set default ECC strength requirements on non-ONFI devices */
1592 if (ecc_strength < 1 && ecc_step < 1) {
1593 ecc_strength = 1;
1594 ecc_step = 512;
1595 }
1596
1597 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1598 ecc_step, mtd->writesize);
1599 if (ret)
1600 return ret;
1601
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +03001602 /*
1603 * If the page size is bigger than the FIFO size, let's check
1604 * we are given the right variant and then switch to the extended
 1605	 * (aka split) command handling.
1606 */
1607 if (mtd->writesize > info->chunk_size) {
1608 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1609 chip->cmdfunc = nand_cmdfunc_extended;
1610 } else {
1611 dev_err(&info->pdev->dev,
1612 "unsupported page size on this variant\n");
1613 return -ENODEV;
1614 }
1615 }
1616
Stefan Roese75659da2015-07-23 10:26:16 +02001617 /* calculate addressing information */
1618 if (mtd->writesize >= 2048)
1619 host->col_addr_cycles = 2;
1620 else
1621 host->col_addr_cycles = 1;
1622
1623 /* release the initial buffer */
1624 kfree(info->data_buff);
1625
1626 /* allocate the real data + oob buffer */
1627 info->buf_size = mtd->writesize + mtd->oobsize;
1628 ret = pxa3xx_nand_init_buff(info);
1629 if (ret)
1630 return ret;
1631 info->oob_buff = info->data_buff + mtd->writesize;
1632
1633 if ((mtd->size >> chip->page_shift) > 65536)
1634 host->row_addr_cycles = 3;
1635 else
1636 host->row_addr_cycles = 2;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001637
1638 if (!pdata->keep_config)
1639 pxa3xx_nand_config_tail(info);
1640
Stefan Roese75659da2015-07-23 10:26:16 +02001641 return nand_scan_tail(mtd);
1642}
1643
1644static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1645{
1646 struct pxa3xx_nand_platform_data *pdata;
1647 struct pxa3xx_nand_host *host;
1648 struct nand_chip *chip = NULL;
1649 struct mtd_info *mtd;
1650 int ret, cs;
1651
1652 pdata = info->pdata;
1653 if (pdata->num_cs <= 0)
1654 return -ENODEV;
1655
1656 info->variant = pxa3xx_nand_get_variant();
1657 for (cs = 0; cs < pdata->num_cs; cs++) {
Kevin Smith4d21b592016-01-14 16:01:38 +00001658 chip = (struct nand_chip *)
1659 ((u8 *)&info[1] + sizeof(*host) * cs);
Scott Wood17fed142016-05-30 13:57:56 -05001660 mtd = nand_to_mtd(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001661 host = (struct pxa3xx_nand_host *)chip;
1662 info->host[cs] = host;
Stefan Roese75659da2015-07-23 10:26:16 +02001663 host->cs = cs;
1664 host->info_data = info;
Stefan Roese75659da2015-07-23 10:26:16 +02001665 mtd->owner = THIS_MODULE;
1666
Chris Packham3c2170a2016-08-29 15:20:52 +12001667 nand_set_controller_data(chip, host);
Stefan Roese75659da2015-07-23 10:26:16 +02001668 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
1669 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1670 chip->controller = &info->controller;
1671 chip->waitfunc = pxa3xx_nand_waitfunc;
1672 chip->select_chip = pxa3xx_nand_select_chip;
1673 chip->read_word = pxa3xx_nand_read_word;
1674 chip->read_byte = pxa3xx_nand_read_byte;
1675 chip->read_buf = pxa3xx_nand_read_buf;
1676 chip->write_buf = pxa3xx_nand_write_buf;
1677 chip->options |= NAND_NO_SUBPAGE_WRITE;
1678 chip->cmdfunc = nand_cmdfunc;
1679 }
1680
Stefan Roese75659da2015-07-23 10:26:16 +02001681 /* Allocate a buffer to allow flash detection */
1682 info->buf_size = INIT_BUFFER_SIZE;
1683 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1684 if (info->data_buff == NULL) {
1685 ret = -ENOMEM;
1686 goto fail_disable_clk;
1687 }
1688
1689 /* initialize all interrupts to be disabled */
1690 disable_int(info, NDSR_MASK);
1691
1692 return 0;
1693
1694 kfree(info->data_buff);
1695fail_disable_clk:
1696 return ret;
1697}
1698
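/*
 * Sketch of the device tree node this parser expects (node name,
 * addresses and property values below are illustrative, not taken from
 * a real .dts):
 *
 *	nand-controller@d0000 {
 *		compatible = "marvell,mvebu-pxa3xx-nand";
 *		reg = <0xd0000 0x54>;
 *		num-cs = <1>;
 *		nand-enable-arbiter;
 *		nand-keep-config;
 *		nand-ecc-strength = <4>;
 *		nand-ecc-step-size = <512>;
 *	};
 */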
1699static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1700{
1701 struct pxa3xx_nand_platform_data *pdata;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001702 const void *blob = gd->fdt_blob;
1703 int node = -1;
Stefan Roese75659da2015-07-23 10:26:16 +02001704
1705 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1706 if (!pdata)
1707 return -ENOMEM;
1708
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001709 /* Get address decoding nodes from the FDT blob */
1710 do {
1711 node = fdt_node_offset_by_compatible(blob, node,
1712 "marvell,mvebu-pxa3xx-nand");
1713 if (node < 0)
1714 break;
1715
 1716		/* Skip disabled nodes */
1717 if (!fdtdec_get_is_enabled(blob, node))
1718 continue;
Stefan Roese75659da2015-07-23 10:26:16 +02001719
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001720		/* Get the first enabled NAND controller base address */
1721 info->mmio_base =
1722 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1723 blob, node, "reg", 0, NULL, true);
Stefan Roese75659da2015-07-23 10:26:16 +02001724
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001725 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1726 if (pdata->num_cs != 1) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001727 pr_err("pxa3xx driver supports single CS only\n");
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001728 break;
1729 }
1730
1731 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1732 pdata->enable_arbiter = 1;
1733
1734 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1735 pdata->keep_config = 1;
1736
1737 /*
1738 * ECC parameters.
1739 * If these are not set, they will be selected according
1740 * to the detected flash type.
1741 */
1742 /* ECC strength */
1743 pdata->ecc_strength = fdtdec_get_int(blob, node,
1744 "nand-ecc-strength", 0);
1745
1746 /* ECC step size */
1747 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1748 "nand-ecc-step-size", 0);
1749
1750 info->pdata = pdata;
1751
1752 /* Currently support only a single NAND controller */
1753 return 0;
1754
1755 } while (node >= 0);
1756
1757 return -EINVAL;
Stefan Roese75659da2015-07-23 10:26:16 +02001758}
1759
1760static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1761{
1762 struct pxa3xx_nand_platform_data *pdata;
1763 int ret, cs, probe_success;
1764
1765 ret = pxa3xx_nand_probe_dt(info);
1766 if (ret)
1767 return ret;
1768
1769 pdata = info->pdata;
1770
1771 ret = alloc_nand_resource(info);
1772 if (ret) {
1773 dev_err(&pdev->dev, "alloc nand resource failed\n");
1774 return ret;
1775 }
1776
1777 probe_success = 0;
1778 for (cs = 0; cs < pdata->num_cs; cs++) {
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001779 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001780
1781 /*
1782 * The mtd name matches the one used in 'mtdparts' kernel
 1783		 * parameter. This name cannot be changed, otherwise the
 1784		 * user's mtd partition configuration would break.
1785 */
1786 mtd->name = "pxa3xx_nand-0";
1787 info->cs = cs;
1788 ret = pxa3xx_nand_scan(mtd);
1789 if (ret) {
1790 dev_info(&pdev->dev, "failed to scan nand at cs %d\n",
1791 cs);
1792 continue;
1793 }
1794
Scott Wood2c1b7e12016-05-30 13:57:55 -05001795 if (nand_register(cs, mtd))
1796 continue;
1797
1798 probe_success = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001799 }
1800
1801 if (!probe_success)
1802 return -ENODEV;
1803
1804 return 0;
1805}
1806
1807/*
1808 * Main initialization routine
1809 */
1810void board_nand_init(void)
1811{
1812 struct pxa3xx_nand_info *info;
1813 struct pxa3xx_nand_host *host;
1814 int ret;
1815
Kevin Smithf6ca2a62016-01-14 16:01:39 +00001816 info = kzalloc(sizeof(*info) +
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001817 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1818 GFP_KERNEL);
Stefan Roese75659da2015-07-23 10:26:16 +02001819 if (!info)
1820 return;
1821
Stefan Roese75659da2015-07-23 10:26:16 +02001822 ret = pxa3xx_nand_probe(info);
1823 if (ret)
1824 return;
Stefan Roese75659da2015-07-23 10:26:16 +02001825}