blob: 5fb3081c8390dabbc7a9d1d25a83287ea783985e [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roese75659da2015-07-23 10:26:16 +02002/*
Miquel Raynal1f1ae152018-08-16 17:30:07 +02003 * drivers/mtd/nand/raw/pxa3xx_nand.c
Stefan Roese75659da2015-07-23 10:26:16 +02004 *
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
Stefan Roese75659da2015-07-23 10:26:16 +02007 */
8
9#include <common.h>
10#include <malloc.h>
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030011#include <fdtdec.h>
Stefan Roese75659da2015-07-23 10:26:16 +020012#include <nand.h>
Simon Glass9bc15642020-02-03 07:36:16 -070013#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070014#include <dm/devres.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060015#include <linux/bitops.h>
Simon Glassc06c1be2020-05-10 11:40:08 -060016#include <linux/bug.h>
Simon Glassdbd79542020-05-10 11:40:11 -060017#include <linux/delay.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070018#include <linux/err.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090019#include <linux/errno.h>
Stefan Roese75659da2015-07-23 10:26:16 +020020#include <asm/io.h>
21#include <asm/arch/cpu.h>
22#include <linux/mtd/mtd.h>
Masahiro Yamada2b7a8732017-11-30 13:45:24 +090023#include <linux/mtd/rawnand.h>
Stefan Roese75659da2015-07-23 10:26:16 +020024#include <linux/types.h>
25
26#include "pxa3xx_nand.h"
27
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030028DECLARE_GLOBAL_DATA_PTR;
29
Stefan Roese75659da2015-07-23 10:26:16 +020030#define TIMEOUT_DRAIN_FIFO 5 /* in ms */
31#define CHIP_DELAY_TIMEOUT 200
32#define NAND_STOP_DELAY 40
Stefan Roese75659da2015-07-23 10:26:16 +020033
34/*
35 * Define a buffer size for the initial command that detects the flash device:
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030036 * STATUS, READID and PARAM.
37 * ONFI param page is 256 bytes, and there are three redundant copies
38 * to be read. JEDEC param page is 512 bytes, and there are also three
39 * redundant copies to be read.
40 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
Stefan Roese75659da2015-07-23 10:26:16 +020041 */
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030042#define INIT_BUFFER_SIZE 2048
Stefan Roese75659da2015-07-23 10:26:16 +020043
44/* registers and bit definitions */
45#define NDCR (0x00) /* Control register */
46#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
47#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
48#define NDSR (0x14) /* Status Register */
49#define NDPCR (0x18) /* Page Count Register */
50#define NDBDR0 (0x1C) /* Bad Block Register 0 */
51#define NDBDR1 (0x20) /* Bad Block Register 1 */
52#define NDECCCTRL (0x28) /* ECC control */
53#define NDDB (0x40) /* Data Buffer */
54#define NDCB0 (0x48) /* Command Buffer0 */
55#define NDCB1 (0x4C) /* Command Buffer1 */
56#define NDCB2 (0x50) /* Command Buffer2 */
57
58#define NDCR_SPARE_EN (0x1 << 31)
59#define NDCR_ECC_EN (0x1 << 30)
60#define NDCR_DMA_EN (0x1 << 29)
61#define NDCR_ND_RUN (0x1 << 28)
62#define NDCR_DWIDTH_C (0x1 << 27)
63#define NDCR_DWIDTH_M (0x1 << 26)
64#define NDCR_PAGE_SZ (0x1 << 24)
65#define NDCR_NCSX (0x1 << 23)
66#define NDCR_ND_MODE (0x3 << 21)
67#define NDCR_NAND_MODE (0x0)
68#define NDCR_CLR_PG_CNT (0x1 << 20)
Ofer Heifetz531816e2018-08-29 11:56:07 +030069#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
Stefan Roese75659da2015-07-23 10:26:16 +020070#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
71#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
72
73#define NDCR_RA_START (0x1 << 15)
74#define NDCR_PG_PER_BLK (0x1 << 14)
75#define NDCR_ND_ARB_EN (0x1 << 12)
76#define NDCR_INT_MASK (0xFFF)
77
78#define NDSR_MASK (0xfff)
79#define NDSR_ERR_CNT_OFF (16)
80#define NDSR_ERR_CNT_MASK (0x1f)
81#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
82#define NDSR_RDY (0x1 << 12)
83#define NDSR_FLASH_RDY (0x1 << 11)
84#define NDSR_CS0_PAGED (0x1 << 10)
85#define NDSR_CS1_PAGED (0x1 << 9)
86#define NDSR_CS0_CMDD (0x1 << 8)
87#define NDSR_CS1_CMDD (0x1 << 7)
88#define NDSR_CS0_BBD (0x1 << 6)
89#define NDSR_CS1_BBD (0x1 << 5)
90#define NDSR_UNCORERR (0x1 << 4)
91#define NDSR_CORERR (0x1 << 3)
92#define NDSR_WRDREQ (0x1 << 2)
93#define NDSR_RDDREQ (0x1 << 1)
94#define NDSR_WRCMDREQ (0x1)
95
96#define NDCB0_LEN_OVRD (0x1 << 28)
97#define NDCB0_ST_ROW_EN (0x1 << 26)
98#define NDCB0_AUTO_RS (0x1 << 25)
99#define NDCB0_CSEL (0x1 << 24)
100#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
101#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
102#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
103#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
104#define NDCB0_NC (0x1 << 20)
105#define NDCB0_DBC (0x1 << 19)
106#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
107#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
108#define NDCB0_CMD2_MASK (0xff << 8)
109#define NDCB0_CMD1_MASK (0xff)
110#define NDCB0_ADDR_CYC_SHIFT (16)
111
112#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
113#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
114#define EXT_CMD_TYPE_READ 4 /* Read */
115#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
116#define EXT_CMD_TYPE_FINAL 3 /* Final command */
117#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
118#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
119
Ofer Heifetz4a574aa2018-08-29 11:56:05 +0300120/*
121 * This should be large enough to read 'ONFI' and 'JEDEC'.
122 * Let's use 7 bytes, which is the maximum ID count supported
123 * by the controller (see NDCR_RD_ID_CNT_MASK).
124 */
125#define READ_ID_BYTES 7
126
Stefan Roese75659da2015-07-23 10:26:16 +0200127/* macros for registers read/write */
128#define nand_writel(info, off, val) \
129 writel((val), (info)->mmio_base + (off))
130
131#define nand_readl(info, off) \
132 readl((info)->mmio_base + (off))
133
/* Error codes stored in info->retcode after a command completes */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,	/* DMA bus error */
	ERR_SENDCMD	= -2,	/* command could not be issued */
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error (NDSR_UNCORERR) */
	ERR_BBERR	= -4,	/* bad block detected */
	ERR_CORERR	= -5,	/* correctable ECC error (NDSR_CORERR) */
};

/* Controller state machine, tracked in info->state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,	/* command buffer being loaded (NDCB0..3) */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,	/* draining data FIFO by PIO */
	STATE_PIO_WRITING,	/* filling data FIFO by PIO */
	STATE_CMD_DONE,
	STATE_READY,
};

/* Controller generation: NFCv1 (PXA SoC) vs NFCv2 (Armada 370/XP) */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};
161
/* Per-chip-select state: one instance per attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;	/* back-pointer to pxa3xx_nand_info */

	int			use_ecc;	/* HW ECC enabled for this chip */
	int			cs;		/* chip select line (0 or 1) */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};
174
/* Driver-wide controller state, shared by all chip selects */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* controller register window */
	unsigned long		mmio_phys;
	/* completion flags polled by the command path, set from the IRQ path */
	int			cmd_complete, dev_ready;

	/* current position/extent inside data_buff/oob_buff */
	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* STATE_* machine state */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently selected chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			force_raw;	/* prevent use_ecc to be set */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;	/* ECC bytes per chunk */
	unsigned int		ecc_err_cnt;	/* bitflips in last corrected chunk */
	unsigned int		max_bitflips;	/* max bitflips over the whole page */
	int			retcode;	/* ERR_* result of last command */

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size is the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
258
/* Built-in timing sets, in nanoseconds (tR in ns as well) */
static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};

/* Known chips, matched by the 2-byte READID result when ONFI mode is unknown */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	/*
	 * chip_id
	 * flash_width	Width of Flash memory (DWIDTH_M)
	 * dfc_width	Width of flash controller(DWIDTH_C)
	 * *timing
	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};
297
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* On-flash bad block table signatures ("MVBbt0" / mirrored) */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main BBT: stored in one of the last 8 blocks of each chip */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror BBT: redundant copy with reversed signature */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif
Stefan Roese75659da2015-07-23 10:26:16 +0200322
/*
 * OOB layouts for the supported page-size / ECC-strength combinations.
 * eccpos lists the controller-consumed ECC byte positions in the OOB,
 * oobfree the {offset, length} ranges left for the MTD layer.
 */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95},
	.oobfree = { {1, 4}, {6, 26} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,
		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
	.eccbytes = 128,
	.eccpos = {
		32,  33,  34,  35,  36,  37,  38,  39,
		40,  41,  42,  43,  44,  45,  46,  47,
		48,  49,  50,  51,  52,  53,  54,  55,
		56,  57,  58,  59,  60,  61,  62,  63,

		96,  97,  98,  99,  100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127,

		160, 161, 162, 163, 164, 165, 166, 167,
		168, 169, 170, 171, 172, 173, 174, 175,
		176, 177, 178, 179, 180, 181, 182, 183,
		184, 185, 186, 187, 188, 189, 190, 191,

		224, 225, 226, 227, 228, 229, 230, 231,
		232, 233, 234, 235, 236, 237, 238, 239,
		240, 241, 242, 243, 244, 245, 246, 247,
		248, 249, 250, 251, 252, 253, 254, 255},

	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
	.eccbytes = 256,
	.eccpos = {},
	/* HW ECC handles all ECC data and all spare area is free for OOB */
	.oobfree = {{0, 160} }
};
405
Stefan Roese75659da2015-07-23 10:26:16 +0200406#define NDTR0_tCH(c) (min((c), 7) << 19)
407#define NDTR0_tCS(c) (min((c), 7) << 16)
408#define NDTR0_tWH(c) (min((c), 7) << 11)
409#define NDTR0_tWP(c) (min((c), 7) << 8)
410#define NDTR0_tRH(c) (min((c), 7) << 3)
411#define NDTR0_tRP(c) (min((c), 7) << 0)
412
413#define NDTR1_tR(c) (min((c), 65535) << 16)
414#define NDTR1_tWHR(c) (min((c), 15) << 4)
415#define NDTR1_tAR(c) (min((c), 15) << 0)
416
417/* convert nano-seconds to nand flash controller clock cycles */
418#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
419
/*
 * Return the controller generation this build targets.
 * The U-Boot port is hard-wired to NFCv2 (Armada 370/XP/38x).
 */
static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}
425
/*
 * Program NDTR0CS0/NDTR1CS0 from a legacy pxa3xx_nand_timing entry
 * (values in ns, converted to controller clock cycles) and cache the
 * computed values in info for later restore.
 */
static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
449
/*
 * Program NDTR0CS0/NDTR1CS0 from ONFI SDR timings (picoseconds in the
 * nand_sdr_timings struct, hence the DIV_ROUND_UP(..., 1000) to ns).
 * tWP/tRP are derived from the cycle times minus the high durations.
 */
static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}
488
/*
 * Initialize controller timings for the attached chip.
 *
 * If the chip reports an ONFI async timing mode, convert the best mode
 * to SDR timings. Otherwise issue READID, look the 2-byte ID up in
 * builtin_flash_types[] and use its legacy timing entry (also setting
 * the bus-width bits in reg_ndcr from that table).
 *
 * Returns 0 on success, -EINVAL if the chip ID is unknown, or the
 * PTR_ERR of the ONFI timing conversion.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(mtd->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		/* pick the highest supported ONFI async timing mode */
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}
541
/**
 * NOTE: it is a must to set ND_RUN first, then write
 * command buffer, otherwise, it does not work.
 * We enable all the interrupt at the same time, and
 * let pxa3xx_nand_irq to handle all logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	/* apply the per-command ECC choice (BCH bit lives in NDECCCTRL) */
	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	/* U-Boot driver is PIO-only */
	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}
578
/*
 * Disable the interrupts selected by @int_mask: in NDCR the low bits
 * (NDCR_INT_MASK) are *mask* bits, so setting them masks (disables)
 * the corresponding interrupt sources.
 */
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}
586
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into @data.
 * NOTE: @len counts 32-bit words, not bytes (data advances 32 bytes per
 * 8-word burst below).
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch && !info->force_raw) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			/* poll RDDREQ with a TIMEOUT_DRAIN_FIFO ms timeout */
			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(info->controller.active->mtd.dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}
619
/*
 * Transfer the current chunk between the data FIFO and data_buff/oob_buff
 * by PIO, in the direction given by info->state (STATE_PIO_READING or
 * STATE_PIO_WRITING), then advance the buffer positions.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	int data_len = info->step_chunk_size;

	/*
	 * In raw mode, include the spare area and the ECC bytes that are not
	 * consumed by the controller in the data section. Do not reorganize
	 * here, do it in the ->read_page_raw() handler instead.
	 */
	if (info->force_raw)
		data_len += info->step_spare_size + info->ecc_size;

	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(data_len, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (data_len)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(data_len, 4));

		/* raw mode already pulled spare+ECC in with the data */
		if (info->force_raw)
			break;

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(info->controller.active->mtd.dev,
			"%s: invalid state %d\n", __func__, info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += data_len;
	info->oob_buff_pos += info->step_spare_size;
}
668
/*
 * "Threaded" half of the IRQ handler (called synchronously in U-Boot):
 * perform the PIO data transfer, then mark the command done and ack the
 * data-request status bits.
 */
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
676
/*
 * Polled "interrupt" handler: decode NDSR, record ECC results, run PIO
 * transfers, load the command buffer on WRCMDREQ, and latch the
 * cmd_complete/dev_ready flags that the command path waits on.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* ready/command-done bits differ per chip select */
	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}
766
/* Return 1 if the first @len bytes of @buf are all 0xff (erased), else 0. */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
774
775static void set_command_address(struct pxa3xx_nand_info *info,
776 unsigned int page_size, uint16_t column, int page_addr)
777{
778 /* small page addr setting */
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300779 if (page_size < info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200780 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
781 | (column & 0xFF);
782
783 info->ndcb2 = 0;
784 } else {
785 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
786 | (column & 0xFFFF);
787
788 if (page_addr & 0xFF0000)
789 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
790 else
791 info->ndcb2 = 0;
792 }
793}
794
/*
 * Reset per-command bookkeeping in @info and apply command-specific
 * defaults (ECC on for page read/write unless raw mode, spare off for
 * PARAM) before the command is encoded and issued.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		if (!info->force_raw)
			info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}
842
/*
 * Translate a NAND_CMD_* opcode into the controller's NDCB0..NDCB3
 * command-buffer words stored in @info.
 *
 * @info:         controller state; ndcb0-3, buf_start/buf_count and the
 *                step_chunk/step_spare sizes are filled in here
 * @command:      NAND_CMD_* opcode to translate
 * @ext_cmd_type: extended command type (NDCB0_EXT_CMD_TYPE) used by the
 *                chunked (split) read/program sequences on large pages
 * @column:       column (byte offset) within the page
 * @page_addr:    page address
 *
 * Returns 1 if the prepared command must actually be started in the
 * controller, 0 if nothing is to be executed (plain SEQIN on small pages,
 * blank-buffer PAGEPROG, ERASE2, unknown commands).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Route the command to the active chip select */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/* SEQIN only latches the address; data is sent with PAGEPROG */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB data lives right behind the page data in data_buff */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/* Pick the transfer size for the current chunk */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			/* Raw read: also fetch the ECC bytes of the chunk */
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming entirely when the buffer is all-0xFF */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		/* Pick the transfer size for the current chunk */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small page: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* Double-byte command: ERASE1 + ERASE2 in one transaction */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already issued as the second byte of ERASE1 above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(mtd->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1037
/*
 * Legacy ->cmdfunc() hook: issue one controller transaction per NAND
 * command and busy-wait (polling NDSR) until it completes. Used when a
 * whole page fits the controller FIFO.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		/* Poll NDSR and feed events to the IRQ handler until done */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				/* NOTE: leaves info->state == STATE_PREPARED */
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}
1097
/*
 * Extended ->cmdfunc() hook (Armada 370/XP variant): splits page reads
 * and programs that exceed the controller FIFO into a sequence of
 * chunked transactions, driven by the NDCB0 'extended command type'
 * field (mono / naked / last / dispatch).
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to run: report ready immediately */
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		/* Poll NDSR and feed events to the IRQ handler until done */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				/* NOTE: leaves info->state == STATE_PREPARED */
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1229
1230static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
Scott Wood46e13102016-05-30 13:57:57 -05001231 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1232 int page)
Stefan Roese75659da2015-07-23 10:26:16 +02001233{
1234 chip->write_buf(mtd, buf, mtd->writesize);
1235 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1236
1237 return 0;
1238}
1239
/*
 * ->ecc.read_page hook: copy the page data and OOB (already transferred
 * to info->data_buff by the command layer) into @buf / chip->oob_poi and
 * translate the controller's ECC outcome into mtd->ecc_stats.
 *
 * Returns the maximum number of bitflips seen so far (info->max_bitflips).
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		/* Hardware corrected the errors; just account for them */
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more than the supported strength, then it means
		 * this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			/* Truly uncorrectable */
			mtd->ecc_stats.failed++;
		} else if (bf) {
			/* Erased page with a few bitflips: correctable */
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1286
/*
 * ->ecc.read_page_raw hook: read a page with the ECC engine disabled.
 * Only available with the BCH engine (returns -ENOTSUPP otherwise).
 *
 * Sets info->force_raw so the re-issued READ0 disables ECC, then
 * de-interleaves the on-flash chunked layout (data / spare / ECC per
 * chunk) into @buf and chip->oob_poi.
 */
static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, uint8_t *buf,
				     int oob_required, int page)
{
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int chunk, ecc_off_buf;

	if (!info->ecc_bch)
		return -ENOTSUPP;

	/*
	 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
	 * pxa3xx_nand_start(), which will actually disable the ECC engine.
	 */
	info->force_raw = true;
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

	/* ECC bytes are placed in oob_poi after all the spare areas */
	ecc_off_buf = (info->nfullchunks * info->spare_size) +
		      info->last_spare_size;
	for (chunk = 0; chunk < info->nfullchunks; chunk++) {
		chip->read_buf(mtd,
			       buf + (chunk * info->chunk_size),
			       info->chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (chunk * (info->spare_size)),
			       info->spare_size);
		/* NOTE(review): only ecc_size - 2 bytes are copied per chunk */
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (chunk * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	/* Trailing, differently-sized chunk (layouts where ntotal > nfull) */
	if (info->ntotalchunks > info->nfullchunks) {
		chip->read_buf(mtd,
			       buf + (info->nfullchunks * info->chunk_size),
			       info->last_chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (info->nfullchunks * (info->spare_size)),
			       info->last_spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (info->nfullchunks * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	info->force_raw = false;

	return 0;
}
1339
1340static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1341 struct nand_chip *chip, int page)
1342{
1343 /* Invalidate page cache */
1344 chip->pagebuf = -1;
1345
1346 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1347 page);
1348}
1349
Stefan Roese75659da2015-07-23 10:26:16 +02001350static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1351{
Scott Wood17fed142016-05-30 13:57:56 -05001352 struct nand_chip *chip = mtd_to_nand(mtd);
1353 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001354 struct pxa3xx_nand_info *info = host->info_data;
1355 char retval = 0xFF;
1356
1357 if (info->buf_start < info->buf_count)
1358 /* Has just send a new command? */
1359 retval = info->data_buff[info->buf_start++];
1360
1361 return retval;
1362}
1363
1364static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1365{
Scott Wood17fed142016-05-30 13:57:56 -05001366 struct nand_chip *chip = mtd_to_nand(mtd);
1367 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001368 struct pxa3xx_nand_info *info = host->info_data;
1369 u16 retval = 0xFFFF;
1370
1371 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1372 retval = *((u16 *)(info->data_buff+info->buf_start));
1373 info->buf_start += 2;
1374 }
1375 return retval;
1376}
1377
1378static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1379{
Scott Wood17fed142016-05-30 13:57:56 -05001380 struct nand_chip *chip = mtd_to_nand(mtd);
1381 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001382 struct pxa3xx_nand_info *info = host->info_data;
1383 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1384
1385 memcpy(buf, info->data_buff + info->buf_start, real_len);
1386 info->buf_start += real_len;
1387}
1388
1389static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1390 const uint8_t *buf, int len)
1391{
Scott Wood17fed142016-05-30 13:57:56 -05001392 struct nand_chip *chip = mtd_to_nand(mtd);
1393 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001394 struct pxa3xx_nand_info *info = host->info_data;
1395 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1396
1397 memcpy(info->data_buff + info->buf_start, buf, real_len);
1398 info->buf_start += real_len;
1399}
1400
/*
 * ->select_chip hook: intentionally a no-op; chip select is encoded per
 * command via NDCB0_CSEL in prepare_set_command().
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1405
/*
 * ->waitfunc hook: wait (by polling NDSR) for the device-ready event of
 * the last started command, then report the outcome of program/erase
 * operations.
 *
 * Returns 0 on successful write/erase, NAND_STATUS_FAIL on error or
 * timeout, NAND_STATUS_READY otherwise.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		/* Poll NDSR and feed events to the IRQ handler until ready */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1445
Ofer Heifetz531816e2018-08-29 11:56:07 +03001446static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1447{
1448 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1449
1450 /* Configure default flash values */
Ofer Heifetz531816e2018-08-29 11:56:07 +03001451 info->reg_ndcr = 0x0; /* enable all interrupts */
1452 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1453 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1454 info->reg_ndcr |= NDCR_SPARE_EN;
1455
1456 return 0;
1457}
1458
1459static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001460{
1461 struct pxa3xx_nand_host *host = info->host[info->cs];
Ofer Heifetz531816e2018-08-29 11:56:07 +03001462 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001463 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001464
1465 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1466 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1467 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001468}
1469
Ofer Heifetz268979f2018-08-29 11:56:08 +03001470static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001471{
Ofer Heifetz531816e2018-08-29 11:56:07 +03001472 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001473 uint32_t ndcr = nand_readl(info, NDCR);
1474
Stefan Roese75659da2015-07-23 10:26:16 +02001475 /* Set an initial chunk size */
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001476 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001477 info->reg_ndcr = ndcr &
1478 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1479 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001480 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1481 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
Stefan Roese75659da2015-07-23 10:26:16 +02001482}
1483
1484static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1485{
1486 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1487 if (info->data_buff == NULL)
1488 return -ENOMEM;
1489 return 0;
1490}
1491
/*
 * Probe for a chip on the current chip select: program default NDCR
 * settings and conservative (ONFI mode 0) timings, issue a RESET and
 * check that the chip responds.
 *
 * Returns 0 if a chip answered, -ENODEV if not, or a PTR_ERR from the
 * timing lookup.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	/* A present chip reports a non-failing status after RESET */
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
1524
/*
 * Select the controller ECC configuration (chunk layout, spare sizes,
 * ECC engine mode/strength/layout) for the requested ECC strength,
 * step size and page size.
 *
 * Each supported combination fixes the on-flash chunk geometry
 * (nfullchunks/ntotalchunks, chunk/spare/ecc sizes) and fills @ecc for
 * the NAND core. Note the controller provides stronger correction than
 * requested for the BCH cases (16 bits per chunk).
 *
 * Returns 0 on success, -ENODEV for an unsupported combination.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* Hamming, 1 bit per 512 bytes, 2 KiB pages */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* Hamming, 1 bit per 512 bytes, 512-byte pages */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 4;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		/* Spare-only trailing chunk (no data bytes) */
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		/* Spare-only trailing chunk (no data bytes) */
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
		ecc->strength = 16;

	} else {
		dev_err(info->controller.active->mtd.dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}
1644
/*
 * Full device scan for one chip select: configure (or keep) the
 * controller setup, identify the chip, program timings, select the ECC
 * scheme, pick the command handler and allocate the real data buffer.
 *
 * Returns 0 on success or a negative error code.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		/* Reuse the configuration left by an earlier boot stage */
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(mtd->dev, "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(mtd->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/* Platform-provided ECC settings win over the chip's datasheet ones */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling,
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(mtd->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* Chips with more than 65536 pages need a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1754
1755static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1756{
1757 struct pxa3xx_nand_platform_data *pdata;
1758 struct pxa3xx_nand_host *host;
1759 struct nand_chip *chip = NULL;
1760 struct mtd_info *mtd;
1761 int ret, cs;
1762
1763 pdata = info->pdata;
1764 if (pdata->num_cs <= 0)
1765 return -ENODEV;
1766
1767 info->variant = pxa3xx_nand_get_variant();
1768 for (cs = 0; cs < pdata->num_cs; cs++) {
Kevin Smith4d21b592016-01-14 16:01:38 +00001769 chip = (struct nand_chip *)
1770 ((u8 *)&info[1] + sizeof(*host) * cs);
Scott Wood17fed142016-05-30 13:57:56 -05001771 mtd = nand_to_mtd(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001772 host = (struct pxa3xx_nand_host *)chip;
1773 info->host[cs] = host;
Stefan Roese75659da2015-07-23 10:26:16 +02001774 host->cs = cs;
1775 host->info_data = info;
Stefan Roese75659da2015-07-23 10:26:16 +02001776 mtd->owner = THIS_MODULE;
1777
Chris Packham3c2170a2016-08-29 15:20:52 +12001778 nand_set_controller_data(chip, host);
Stefan Roese75659da2015-07-23 10:26:16 +02001779 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
Miquel Raynal30a016a2018-10-11 17:45:42 +02001780 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1781 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
Stefan Roese75659da2015-07-23 10:26:16 +02001782 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1783 chip->controller = &info->controller;
1784 chip->waitfunc = pxa3xx_nand_waitfunc;
1785 chip->select_chip = pxa3xx_nand_select_chip;
1786 chip->read_word = pxa3xx_nand_read_word;
1787 chip->read_byte = pxa3xx_nand_read_byte;
1788 chip->read_buf = pxa3xx_nand_read_buf;
1789 chip->write_buf = pxa3xx_nand_write_buf;
1790 chip->options |= NAND_NO_SUBPAGE_WRITE;
1791 chip->cmdfunc = nand_cmdfunc;
1792 }
1793
Stefan Roese75659da2015-07-23 10:26:16 +02001794 /* Allocate a buffer to allow flash detection */
1795 info->buf_size = INIT_BUFFER_SIZE;
1796 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1797 if (info->data_buff == NULL) {
1798 ret = -ENOMEM;
1799 goto fail_disable_clk;
1800 }
1801
1802 /* initialize all interrupts to be disabled */
1803 disable_int(info, NDSR_MASK);
1804
1805 return 0;
1806
1807 kfree(info->data_buff);
1808fail_disable_clk:
1809 return ret;
1810}
1811
1812static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1813{
1814 struct pxa3xx_nand_platform_data *pdata;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001815 const void *blob = gd->fdt_blob;
1816 int node = -1;
Stefan Roese75659da2015-07-23 10:26:16 +02001817
1818 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1819 if (!pdata)
1820 return -ENOMEM;
1821
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001822 /* Get address decoding nodes from the FDT blob */
1823 do {
1824 node = fdt_node_offset_by_compatible(blob, node,
1825 "marvell,mvebu-pxa3xx-nand");
1826 if (node < 0)
1827 break;
1828
1829 /* Bypass disabeld nodes */
1830 if (!fdtdec_get_is_enabled(blob, node))
1831 continue;
Stefan Roese75659da2015-07-23 10:26:16 +02001832
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001833 /* Get the first enabled NAND controler base address */
1834 info->mmio_base =
1835 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1836 blob, node, "reg", 0, NULL, true);
Stefan Roese75659da2015-07-23 10:26:16 +02001837
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001838 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1839 if (pdata->num_cs != 1) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001840 pr_err("pxa3xx driver supports single CS only\n");
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001841 break;
1842 }
1843
1844 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1845 pdata->enable_arbiter = 1;
1846
1847 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1848 pdata->keep_config = 1;
1849
1850 /*
1851 * ECC parameters.
1852 * If these are not set, they will be selected according
1853 * to the detected flash type.
1854 */
1855 /* ECC strength */
1856 pdata->ecc_strength = fdtdec_get_int(blob, node,
1857 "nand-ecc-strength", 0);
1858
1859 /* ECC step size */
1860 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1861 "nand-ecc-step-size", 0);
1862
1863 info->pdata = pdata;
1864
1865 /* Currently support only a single NAND controller */
1866 return 0;
1867
1868 } while (node >= 0);
1869
1870 return -EINVAL;
Stefan Roese75659da2015-07-23 10:26:16 +02001871}
1872
1873static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1874{
Sean Andersonc6302f02020-09-15 10:44:40 -04001875 struct mtd_info *mtd = &info->controller.active->mtd;
Stefan Roese75659da2015-07-23 10:26:16 +02001876 struct pxa3xx_nand_platform_data *pdata;
1877 int ret, cs, probe_success;
1878
1879 ret = pxa3xx_nand_probe_dt(info);
1880 if (ret)
1881 return ret;
1882
1883 pdata = info->pdata;
1884
1885 ret = alloc_nand_resource(info);
1886 if (ret) {
Sean Andersonc6302f02020-09-15 10:44:40 -04001887 dev_err(mtd->dev, "alloc nand resource failed\n");
Stefan Roese75659da2015-07-23 10:26:16 +02001888 return ret;
1889 }
1890
1891 probe_success = 0;
1892 for (cs = 0; cs < pdata->num_cs; cs++) {
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001893 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001894
1895 /*
1896 * The mtd name matches the one used in 'mtdparts' kernel
1897 * parameter. This name cannot be changed or otherwise
1898 * user's mtd partitions configuration would get broken.
1899 */
1900 mtd->name = "pxa3xx_nand-0";
1901 info->cs = cs;
1902 ret = pxa3xx_nand_scan(mtd);
1903 if (ret) {
Sean Andersonc6302f02020-09-15 10:44:40 -04001904 dev_info(mtd->dev, "failed to scan nand at cs %d\n",
Stefan Roese75659da2015-07-23 10:26:16 +02001905 cs);
1906 continue;
1907 }
1908
Scott Wood2c1b7e12016-05-30 13:57:55 -05001909 if (nand_register(cs, mtd))
1910 continue;
1911
1912 probe_success = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001913 }
1914
1915 if (!probe_success)
1916 return -ENODEV;
1917
1918 return 0;
1919}
1920
1921/*
1922 * Main initialization routine
1923 */
1924void board_nand_init(void)
1925{
1926 struct pxa3xx_nand_info *info;
1927 struct pxa3xx_nand_host *host;
1928 int ret;
1929
Kevin Smithf6ca2a62016-01-14 16:01:39 +00001930 info = kzalloc(sizeof(*info) +
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001931 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1932 GFP_KERNEL);
Stefan Roese75659da2015-07-23 10:26:16 +02001933 if (!info)
1934 return;
1935
Stefan Roese75659da2015-07-23 10:26:16 +02001936 ret = pxa3xx_nand_probe(info);
1937 if (ret)
1938 return;
Stefan Roese75659da2015-07-23 10:26:16 +02001939}