blob: 8481c6e3bf917251fc68adc4fe4bdd50b0e898fa [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roese75659da2015-07-23 10:26:16 +02002/*
Miquel Raynal1f1ae152018-08-16 17:30:07 +02003 * drivers/mtd/nand/raw/pxa3xx_nand.c
Stefan Roese75659da2015-07-23 10:26:16 +02004 *
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
Stefan Roese75659da2015-07-23 10:26:16 +02007 */
8
9#include <common.h>
10#include <malloc.h>
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030011#include <fdtdec.h>
Stefan Roese75659da2015-07-23 10:26:16 +020012#include <nand.h>
Simon Glass9bc15642020-02-03 07:36:16 -070013#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070014#include <dm/devres.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060015#include <linux/bitops.h>
Simon Glassc06c1be2020-05-10 11:40:08 -060016#include <linux/bug.h>
Simon Glassdbd79542020-05-10 11:40:11 -060017#include <linux/delay.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070018#include <linux/err.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090019#include <linux/errno.h>
Stefan Roese75659da2015-07-23 10:26:16 +020020#include <asm/io.h>
21#include <asm/arch/cpu.h>
22#include <linux/mtd/mtd.h>
Masahiro Yamada2b7a8732017-11-30 13:45:24 +090023#include <linux/mtd/rawnand.h>
Stefan Roese75659da2015-07-23 10:26:16 +020024#include <linux/types.h>
Shmuel Hazan759349e2020-10-29 08:52:18 +020025#include <dm/uclass.h>
26#include <dm/read.h>
Stefan Roese75659da2015-07-23 10:26:16 +020027
28#include "pxa3xx_nand.h"
29
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030030DECLARE_GLOBAL_DATA_PTR;
31
/* Polling timeout for draining the data FIFO (used by drain_fifo()), in ms */
#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define	CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

/* NDCR (control register) bits */
#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

/* NDSR (status register) bits */
#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

/* NDCB0 (command buffer word 0) bits */
#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

/* Extended command types (NDCB0_EXT_CMD_TYPE field values) */
#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))
135
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error (NDSR_UNCORERR) */
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,	/* corrected ECC error(s) (NDSR_CORERR) */
};

/* Command execution phases, tracked in pxa3xx_nand_info.state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

/* Controller generations handled by this driver */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,	/* NFCv1 (PXA SoC) */
	PXA3XX_NAND_VARIANT_ARMADA370,	/* NFCv2 (Armada 370/XP SoC) */
};
163
/* Per-chip-select (per attached chip) state. */
struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;	/* back-pointer to owning pxa3xx_nand_info */

	/* page size of attached chip */
	int			use_ecc;
	int			cs;		/* chip select this chip sits on */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};
176
/* Per-controller driver state. */
struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;	/* NFC register window */
	unsigned long		mmio_phys;
	int			cmd_complete, dev_ready; /* set from the IRQ path */

	unsigned int		buf_start;	/* current read/write offset into data_buff */
	unsigned int		buf_count;	/* total bytes involved in the current command */
	unsigned int		buf_size;
	unsigned int		data_buff_pos;	/* PIO progress within data_buff */
	unsigned int		oob_buff_pos;	/* PIO progress within oob_buff */

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;		/* one of the STATE_* values */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;		/* currently active chip select */
	int			use_ecc;	/* use HW ECC ? */
	int			force_raw;	/* prevent use_ecc to be set */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;	/* bitflips in the last corrected chunk */
	unsigned int		max_bitflips;	/* worst chunk of the current page */
	int			retcode;	/* one of the ERR_* values */

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size is the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};
260
/* Legacy (non-ONFI) timing sets, referenced by builtin_flash_types[]. */
static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};
280
/* ID-based fallback table used when a chip reports no ONFI timing mode. */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	/*
	 * chip_id
	 * flash_width	Width of Flash memory (DWIDTH_M)
	 * dfc_width	Width of flash controller(DWIDTH_C)
	 * *timing
	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};
299
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* On-flash bad block table signatures ("MVBbt0" and its mirror). */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif
Stefan Roese75659da2015-07-23 10:26:16 +0200324
/*
 * Static ECC layouts: where the HW BCH ECC bytes live in the OOB area and
 * which OOB bytes remain free, for each supported page size / ECC strength.
 */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95},
	.oobfree = { {1, 4}, {6, 26} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,

		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127,

		160, 161, 162, 163, 164, 165, 166, 167,
		168, 169, 170, 171, 172, 173, 174, 175,
		176, 177, 178, 179, 180, 181, 182, 183,
		184, 185, 186, 187, 188, 189, 190, 191,

		224, 225, 226, 227, 228, 229, 230, 231,
		232, 233, 234, 235, 236, 237, 238, 239,
		240, 241, 242, 243, 244, 245, 246, 247,
		248, 249, 250, 251, 252, 253, 254, 255},

	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
	.eccbytes = 256,
	.eccpos = {},
	/* HW ECC handles all ECC data and all spare area is free for OOB */
	.oobfree = {{0, 160} }
};
407
/* NDTR0/NDTR1 field encoders; each value is clamped to the field width */
#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
421
/* Device-tree match table; only the NFCv2 (mvebu/Armada) binding is listed */
static const struct udevice_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,mvebu-pxa3xx-nand",
		.data = PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{}
};
429
Stefan Roese75659da2015-07-23 10:26:16 +0200430static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
431{
432 /* We only support the Armada 370/XP/38x for now */
433 return PXA3XX_NAND_VARIANT_ARMADA370;
434}
435
436static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
437 const struct pxa3xx_nand_timing *t)
438{
439 struct pxa3xx_nand_info *info = host->info_data;
440 unsigned long nand_clk = mvebu_get_nand_clock();
441 uint32_t ndtr0, ndtr1;
442
443 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
444 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
445 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
446 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
447 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
448 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
449
450 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
451 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
452 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
453
454 info->ndtr0cs0 = ndtr0;
455 info->ndtr1cs0 = ndtr1;
456 nand_writel(info, NDTR0CS0, ndtr0);
457 nand_writel(info, NDTR1CS0, ndtr1);
458}
459
460static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
461 const struct nand_sdr_timings *t)
462{
463 struct pxa3xx_nand_info *info = host->info_data;
464 struct nand_chip *chip = &host->chip;
465 unsigned long nand_clk = mvebu_get_nand_clock();
466 uint32_t ndtr0, ndtr1;
467
468 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
469 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
470 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300471 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200472 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300473 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200474 u32 tR = chip->chip_delay * 1000;
475 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
476 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
477
478 /* fallback to a default value if tR = 0 */
479 if (!tR)
480 tR = 20000;
481
482 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
483 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
484 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
485 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
486 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
487 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
488
489 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
490 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
491 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
492
493 info->ndtr0cs0 = ndtr0;
494 info->ndtr1cs0 = ndtr1;
495 nand_writel(info, NDTR0CS0, ndtr0);
496 nand_writel(info, NDTR1CS0, ndtr1);
497}
498
/*
 * Pick and program controller timings for the chip attached to @host.
 *
 * If the chip advertises ONFI async timing modes, the fastest advertised
 * mode is used. Otherwise the 2-byte device ID is read and looked up in
 * builtin_flash_types[]; the matching entry also fixes the bus widths.
 *
 * Returns 0 on success, -EINVAL if the ID is not in the table, or a
 * PTR_ERR value from the ONFI timing-mode lookup.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		/* Non-ONFI chip: identify it by its READID bytes */
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(mtd->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		/* mode is a bitmask of supported modes; keep the highest */
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}
551
Stefan Roese75659da2015-07-23 10:26:16 +0200552/**
Vagrant Cascadianbeb288b2015-11-24 14:46:24 -0800553 * NOTE: it is a must to set ND_RUN first, then write
Stefan Roese75659da2015-07-23 10:26:16 +0200554 * command buffer, otherwise, it does not work.
555 * We enable all the interrupt at the same time, and
556 * let pxa3xx_nand_irq to handle all logic.
557 */
558static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
559{
560 uint32_t ndcr;
561
562 ndcr = info->reg_ndcr;
563
564 if (info->use_ecc) {
565 ndcr |= NDCR_ECC_EN;
566 if (info->ecc_bch)
567 nand_writel(info, NDECCCTRL, 0x1);
568 } else {
569 ndcr &= ~NDCR_ECC_EN;
570 if (info->ecc_bch)
571 nand_writel(info, NDECCCTRL, 0x0);
572 }
573
574 ndcr &= ~NDCR_DMA_EN;
575
576 if (info->use_spare)
577 ndcr |= NDCR_SPARE_EN;
578 else
579 ndcr &= ~NDCR_SPARE_EN;
580
581 ndcr |= NDCR_ND_RUN;
582
583 /* clear status bits and run */
Stefan Roese75659da2015-07-23 10:26:16 +0200584 nand_writel(info, NDSR, NDSR_MASK);
Ofer Heifetzd92d8992018-08-29 11:56:03 +0300585 nand_writel(info, NDCR, 0);
Stefan Roese75659da2015-07-23 10:26:16 +0200586 nand_writel(info, NDCR, ndcr);
587}
588
589static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
590{
591 uint32_t ndcr;
592
593 ndcr = nand_readl(info, NDCR);
594 nand_writel(info, NDCR, ndcr | int_mask);
595}
596
/*
 * Read @len 32-bit words from the controller data FIFO (NDDB) into @data.
 *
 * With BCH enabled (and not in raw mode), the datasheet requires checking
 * NDSR.RDDREQ after every 32 bytes read from NDDB, so the bulk of the
 * transfer is done 8 words at a time with a bounded poll in between.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch && !info->force_raw) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(info->controller.active->mtd.dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			/* 8 words == 32 bytes consumed */
			data += 32;
			len -= 8;
		}
	}

	/* Remainder (or the whole transfer when no BCH polling is needed) */
	readsl(info->mmio_base + NDDB, data, len);
}
629
/*
 * PIO-transfer the current chunk between the buffers and the NDDB FIFO,
 * in the direction given by info->state, then advance the buffer
 * positions for the next chunk. Byte counts are rounded up to whole
 * 32-bit FIFO words.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	int data_len = info->step_chunk_size;

	/*
	 * In raw mode, include the spare area and the ECC bytes that are not
	 * consumed by the controller in the data section. Do not reorganize
	 * here, do it in the ->read_page_raw() handler instead.
	 */
	if (info->force_raw)
		data_len += info->step_spare_size + info->ecc_size;

	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(data_len, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (data_len)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(data_len, 4));

		/* Raw reads already pulled spare+ECC as part of data_len */
		if (info->force_raw)
			break;

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(info->controller.active->mtd.dev,
			"%s: invalid state %d\n", __func__, info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += data_len;
	info->oob_buff_pos += info->step_spare_size;
}
678
/*
 * U-Boot stand-in for the Linux threaded-IRQ bottom half: do the PIO
 * transfer for the pending data request, mark the command phase done,
 * and acknowledge both data-request bits in NDSR.
 */
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
686
/*
 * Controller event handler (polled in U-Boot, not a real interrupt).
 *
 * Reads NDSR once and, in order: records ECC results, services data
 * requests via the "IRQ thread", notes command-done / device-ready,
 * acknowledges the status bits, and - if the controller asked for it -
 * loads the prepared NDCB0..NDCB3 words into the command buffer.
 *
 * Always returns IRQ_HANDLED except on the data-request path, which
 * returns 0 after servicing the transfer.
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* CS0 and CS1 report ready/command-done through different bits */
	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Only NFCv2 with BCH reports a real bitflip count */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}
776
/*
 * Check whether a buffer contains only 0xff (erased-flash) bytes.
 *
 * @buf: buffer to scan (const-qualified: the scan never writes; callers
 *       passing plain uint8_t* remain source-compatible)
 * @len: number of bytes to check
 *
 * Returns 1 if all @len bytes are 0xff (trivially true for @len == 0),
 * 0 otherwise. Used to skip programming pages that are entirely blank.
 */
static inline int is_buf_blank(const uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}
784
785static void set_command_address(struct pxa3xx_nand_info *info,
786 unsigned int page_size, uint16_t column, int page_addr)
787{
788 /* small page addr setting */
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300789 if (page_size < info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200790 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
791 | (column & 0xFF);
792
793 info->ndcb2 = 0;
794 } else {
795 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
796 | (column & 0xFFFF);
797
798 if (page_addr & 0xFF0000)
799 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
800 else
801 info->ndcb2 = 0;
802 }
803}
804
/*
 * Reset all per-command bookkeeping and choose ECC/spare usage for
 * @command before it is encoded and issued. Read and program-page
 * commands enable HW ECC (unless raw access is forced); PARAM reads
 * bypass the spare area. For reads and SEQIN the data buffer is also
 * pre-filled with 0xff.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		if (!info->force_raw)
			info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}
852
/*
 * Translate a NAND command into the NDCB0..NDCB3 command-buffer words
 * that pxa3xx_nand_start() will hand to the controller.
 *
 * @info:         controller state; ndcb0..ndcb3, buf_start/buf_count and
 *                step_chunk_size/step_spare_size are filled in here
 * @command:      NAND_CMD_* opcode to prepare
 * @ext_cmd_type: extended (naked/dispatch) command type; only used for
 *                chunked accesses on pages larger than the FIFO
 * @column:       byte offset within the page
 * @page_addr:    page address
 *
 * Returns 1 when the prepared command must actually be issued, 0 when no
 * controller transaction is needed (plain SEQIN on small pages, ERASE2,
 * an all-0xFF PAGEPROG buffer, or an unsupported opcode).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Chip select 1 needs the CSEL bit set in NDCB0 */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/*
	 * SEQIN only latches the address by default; the actual transfer
	 * happens at PAGEPROG time (re-enabled below for chunked writes).
	 */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB data sits right after the page data in data_buff */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/* Size of the chunk moved by the upcoming step */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			/* Raw read: ECC bytes are transferred too */
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
				| NDCB0_LEN_OVRD
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming entirely for an all-0xFF buffer */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small page: one combined SEQIN + PAGEPROG cycle */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* DBC chains the auto-issued ERASE2 after ERASE1 */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already issued together with ERASE1 above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(mtd->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1047
/*
 * ->cmdfunc() hook for devices whose page fits in the controller FIFO:
 * prepare the command buffer, kick the controller and poll NDSR (calling
 * the interrupt handler by hand) until the command completes or times out.
 */
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		/* Poll for completion; no interrupts in U-Boot */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}
1107
/*
 * ->cmdfunc() hook for large-page devices (page bigger than the FIFO):
 * a read or program is split into per-chunk "naked" transactions, driven
 * here as a small state machine over info->cur_chunk, with the extended
 * command type (mono / naked / last / dispatch) selecting the stage.
 */
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chip hooked to
	 * different chip select, so check whether
	 * chip select has been changed, if yes, reset the timing
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			/* Nothing to issue: report ready immediately */
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		/* Poll NDSR for this step's completion */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a splitted program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a splitted program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}
1239
1240static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
Scott Wood46e13102016-05-30 13:57:57 -05001241 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1242 int page)
Stefan Roese75659da2015-07-23 10:26:16 +02001243{
1244 chip->write_buf(mtd, buf, mtd->writesize);
1245 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1246
1247 return 0;
1248}
1249
/*
 * ->ecc.read_page hook: copy page + OOB out of the controller buffer and
 * translate the controller's ECC outcome (stored in info->retcode by the
 * interrupt handler) into MTD ECC statistics.
 *
 * Returns the maximum number of bitflips seen (info->max_bitflips).
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more than the supported strength, then it means
		 * this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			/* Too many zero bits: genuine uncorrectable error */
			mtd->ecc_stats.failed++;
		} else if (bf) {
			/* Mostly-erased page: count flips as corrected */
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1296
Miquel Raynal30a016a2018-10-11 17:45:42 +02001297static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1298 struct nand_chip *chip, uint8_t *buf,
1299 int oob_required, int page)
1300{
1301 struct pxa3xx_nand_host *host = chip->priv;
1302 struct pxa3xx_nand_info *info = host->info_data;
1303 int chunk, ecc_off_buf;
1304
1305 if (!info->ecc_bch)
1306 return -ENOTSUPP;
1307
1308 /*
1309 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1310 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1311 */
1312 info->force_raw = true;
1313 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1314
1315 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1316 info->last_spare_size;
1317 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1318 chip->read_buf(mtd,
1319 buf + (chunk * info->chunk_size),
1320 info->chunk_size);
1321 chip->read_buf(mtd,
1322 chip->oob_poi +
1323 (chunk * (info->spare_size)),
1324 info->spare_size);
1325 chip->read_buf(mtd,
1326 chip->oob_poi + ecc_off_buf +
1327 (chunk * (info->ecc_size)),
1328 info->ecc_size - 2);
1329 }
1330
1331 if (info->ntotalchunks > info->nfullchunks) {
1332 chip->read_buf(mtd,
1333 buf + (info->nfullchunks * info->chunk_size),
1334 info->last_chunk_size);
1335 chip->read_buf(mtd,
1336 chip->oob_poi +
1337 (info->nfullchunks * (info->spare_size)),
1338 info->last_spare_size);
1339 chip->read_buf(mtd,
1340 chip->oob_poi + ecc_off_buf +
1341 (info->nfullchunks * (info->ecc_size)),
1342 info->ecc_size - 2);
1343 }
1344
1345 info->force_raw = false;
1346
1347 return 0;
1348}
1349
1350static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1351 struct nand_chip *chip, int page)
1352{
1353 /* Invalidate page cache */
1354 chip->pagebuf = -1;
1355
1356 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1357 page);
1358}
1359
Stefan Roese75659da2015-07-23 10:26:16 +02001360static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1361{
Scott Wood17fed142016-05-30 13:57:56 -05001362 struct nand_chip *chip = mtd_to_nand(mtd);
1363 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001364 struct pxa3xx_nand_info *info = host->info_data;
1365 char retval = 0xFF;
1366
1367 if (info->buf_start < info->buf_count)
1368 /* Has just send a new command? */
1369 retval = info->data_buff[info->buf_start++];
1370
1371 return retval;
1372}
1373
1374static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1375{
Scott Wood17fed142016-05-30 13:57:56 -05001376 struct nand_chip *chip = mtd_to_nand(mtd);
1377 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001378 struct pxa3xx_nand_info *info = host->info_data;
1379 u16 retval = 0xFFFF;
1380
1381 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1382 retval = *((u16 *)(info->data_buff+info->buf_start));
1383 info->buf_start += 2;
1384 }
1385 return retval;
1386}
1387
1388static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1389{
Scott Wood17fed142016-05-30 13:57:56 -05001390 struct nand_chip *chip = mtd_to_nand(mtd);
1391 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001392 struct pxa3xx_nand_info *info = host->info_data;
1393 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1394
1395 memcpy(buf, info->data_buff + info->buf_start, real_len);
1396 info->buf_start += real_len;
1397}
1398
1399static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1400 const uint8_t *buf, int len)
1401{
Scott Wood17fed142016-05-30 13:57:56 -05001402 struct nand_chip *chip = mtd_to_nand(mtd);
1403 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001404 struct pxa3xx_nand_info *info = host->info_data;
1405 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1406
1407 memcpy(info->data_buff + info->buf_start, buf, real_len);
1408 info->buf_start += real_len;
1409}
1410
/*
 * ->select_chip hook: intentionally empty — the chip select is encoded
 * per command via NDCB0_CSEL in prepare_set_command().
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1415
/*
 * ->waitfunc hook: if a command is still in flight, poll NDSR until the
 * device reports ready or the timeout expires, then map the recorded
 * command outcome to a NAND_STATUS_* value for write/erase operations.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		/* Poll for ready; interrupts are serviced by hand */
		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1455
Ofer Heifetz531816e2018-08-29 11:56:07 +03001456static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1457{
1458 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1459
1460 /* Configure default flash values */
Ofer Heifetz531816e2018-08-29 11:56:07 +03001461 info->reg_ndcr = 0x0; /* enable all interrupts */
1462 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1463 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1464 info->reg_ndcr |= NDCR_SPARE_EN;
1465
1466 return 0;
1467}
1468
1469static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001470{
1471 struct pxa3xx_nand_host *host = info->host[info->cs];
Ofer Heifetz531816e2018-08-29 11:56:07 +03001472 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001473 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001474
1475 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1476 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1477 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001478}
1479
/*
 * keep_config path: snapshot the configuration the firmware/bootrom left
 * in the controller (NDCR minus interrupt-mask and arbiter bits, plus
 * both timing registers) instead of reprogramming it from scratch.
 */
static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	/* Arbiter enable is taken from platform data, not the register */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}
1493
1494static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1495{
1496 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1497 if (info->data_buff == NULL)
1498 return -ENOMEM;
1499 return 0;
1500}
1501
/*
 * Probe for a chip on the current chip select: program conservative
 * defaults and ONFI timing mode 0, then issue a RESET and check that the
 * device answers. Returns 0 if a chip responded, negative errno otherwise.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	/* A chip that is present must answer a RESET */
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
1534
/*
 * Select the controller ECC configuration (chunk layout, spare/ECC sizes
 * and the matching OOB layout) for a required @strength/@ecc_stepsize at
 * the given @page_size. The per-case constants are the exact hardware
 * layouts the engine supports; only the listed combinations are valid.
 *
 * Returns 0 on success, -ENODEV for an unsupported combination.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		/* Hamming, 1 bit per 512 bytes, 2KB page */
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		/* Hamming, 1 bit per 512 bytes, 512B page */
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 4;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		/* 1 full 1KB chunk + a trailing 1KB chunk carrying the spare */
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		/* 4 full 1KB chunks + a data-less spare-only chunk */
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		/* 8 full 1KB chunks + a data-less spare-only chunk */
		info->ecc_bch = 1;
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
		ecc->strength = 16;

	} else {
		dev_err(info->controller.active->mtd.dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}
1654
/*
 * Full device scan: configure (or snapshot) the controller, identify the
 * chip, program timings, pick an ECC configuration, switch to the
 * extended cmdfunc for pages larger than the FIFO, size the real data
 * buffer and finish with nand_scan_tail(). Order matters: identification
 * must run with ECC disabled and timings are set only after ident.
 *
 * Returns 0 on success, negative errno on any failure.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		/* Trust the configuration already in the controller */
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(mtd->dev, "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(mtd->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/* Platform ECC requirements override what the chip advertises */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling,
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(mtd->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* Chips with more than 65536 pages need a third row cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1764
/*
 * Initialize the per-chip-select host structures and wire up all the
 * nand_chip hooks. The hosts live in memory allocated directly after
 * *info (hence the &info[1] + cs * sizeof(*host) arithmetic — the caller
 * must have allocated info plus num_cs host slots in one block; note
 * that struct pxa3xx_nand_host embeds the nand_chip as its first member,
 * which is what makes the chip/host casts below valid).
 *
 * Returns 0 on success, -ENODEV when no chip select is configured,
 * -ENOMEM when the detection buffer cannot be allocated.
 */
static int alloc_nand_resource(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	int cs;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)
		return -ENODEV;

	info->variant = pxa3xx_nand_get_variant();
	for (cs = 0; cs < pdata->num_cs; cs++) {
		/* cs-th host slot in the trailing allocation after *info */
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->owner = THIS_MODULE;

		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
		chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;

	/* initialize all interrupts to be disabled */
	disable_int(info, NDSR_MASK);

	return 0;
}
1815
Shmuel Hazan759349e2020-10-29 08:52:18 +02001816static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001817{
1818 struct pxa3xx_nand_platform_data *pdata;
1819
1820 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1821 if (!pdata)
1822 return -ENOMEM;
1823
Shmuel Hazan759349e2020-10-29 08:52:18 +02001824 info->mmio_base = dev_read_addr_ptr(dev);
Stefan Roese75659da2015-07-23 10:26:16 +02001825
Shmuel Hazan759349e2020-10-29 08:52:18 +02001826 pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
1827 if (pdata->num_cs != 1) {
1828 pr_err("pxa3xx driver supports single CS only\n");
1829 return -EINVAL;
1830 }
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001831
Shmuel Hazan759349e2020-10-29 08:52:18 +02001832 if (dev_read_bool(dev, "nand-enable-arbiter"))
1833 pdata->enable_arbiter = 1;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001834
Shmuel Hazan759349e2020-10-29 08:52:18 +02001835 if (dev_read_bool(dev, "nand-keep-config"))
1836 pdata->keep_config = 1;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001837
Shmuel Hazan759349e2020-10-29 08:52:18 +02001838 /*
1839 * ECC parameters.
1840 * If these are not set, they will be selected according
1841 * to the detected flash type.
1842 */
1843 /* ECC strength */
1844 pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0);
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001845
Shmuel Hazan759349e2020-10-29 08:52:18 +02001846 /* ECC step size */
1847 pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size",
1848 0);
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001849
Shmuel Hazan759349e2020-10-29 08:52:18 +02001850 info->pdata = pdata;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001851
Shmuel Hazan759349e2020-10-29 08:52:18 +02001852 return 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001853}
1854
Shmuel Hazan759349e2020-10-29 08:52:18 +02001855static int pxa3xx_nand_probe(struct udevice *dev)
Stefan Roese75659da2015-07-23 10:26:16 +02001856{
1857 struct pxa3xx_nand_platform_data *pdata;
1858 int ret, cs, probe_success;
Shmuel Hazan759349e2020-10-29 08:52:18 +02001859 struct pxa3xx_nand_info *info = dev_get_priv(dev);
Stefan Roese75659da2015-07-23 10:26:16 +02001860
Shmuel Hazan759349e2020-10-29 08:52:18 +02001861 ret = pxa3xx_nand_probe_dt(dev, info);
Stefan Roese75659da2015-07-23 10:26:16 +02001862 if (ret)
1863 return ret;
1864
1865 pdata = info->pdata;
1866
1867 ret = alloc_nand_resource(info);
1868 if (ret) {
Shmuel Hazan759349e2020-10-29 08:52:18 +02001869 dev_err(dev, "alloc nand resource failed\n");
Stefan Roese75659da2015-07-23 10:26:16 +02001870 return ret;
1871 }
1872
1873 probe_success = 0;
1874 for (cs = 0; cs < pdata->num_cs; cs++) {
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001875 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001876
1877 /*
1878 * The mtd name matches the one used in 'mtdparts' kernel
1879 * parameter. This name cannot be changed or otherwise
1880 * user's mtd partitions configuration would get broken.
1881 */
1882 mtd->name = "pxa3xx_nand-0";
1883 info->cs = cs;
1884 ret = pxa3xx_nand_scan(mtd);
1885 if (ret) {
Sean Andersonc6302f02020-09-15 10:44:40 -04001886 dev_info(mtd->dev, "failed to scan nand at cs %d\n",
Stefan Roese75659da2015-07-23 10:26:16 +02001887 cs);
1888 continue;
1889 }
1890
Scott Wood2c1b7e12016-05-30 13:57:55 -05001891 if (nand_register(cs, mtd))
1892 continue;
1893
1894 probe_success = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001895 }
1896
1897 if (!probe_success)
1898 return -ENODEV;
1899
1900 return 0;
1901}
1902
Shmuel Hazan759349e2020-10-29 08:52:18 +02001903U_BOOT_DRIVER(pxa3xx_nand) = {
1904 .name = "pxa3xx-nand",
1905 .id = UCLASS_MTD,
1906 .of_match = pxa3xx_nand_dt_ids,
1907 .probe = pxa3xx_nand_probe,
1908 .priv_auto_alloc_size = sizeof(struct pxa3xx_nand_info) +
1909 sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE,
1910};
1911
Stefan Roese75659da2015-07-23 10:26:16 +02001912void board_nand_init(void)
1913{
Shmuel Hazan759349e2020-10-29 08:52:18 +02001914 struct udevice *dev;
Stefan Roese75659da2015-07-23 10:26:16 +02001915 int ret;
1916
Shmuel Hazan759349e2020-10-29 08:52:18 +02001917 ret = uclass_get_device_by_driver(UCLASS_MTD,
1918 DM_GET_DRIVER(pxa3xx_nand), &dev);
1919 if (ret && ret != -ENODEV) {
1920 pr_err("Failed to initialize %s. (error %d)\n", dev->name,
1921 ret);
1922 }
Stefan Roese75659da2015-07-23 10:26:16 +02001923}