// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/mtd/nand/raw/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 */

#include <common.h>
#include <malloc.h>
#include <fdtdec.h>
#include <nand.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/types.h>

#include "pxa3xx_nand.h"

DECLARE_GLOBAL_DATA_PTR;

#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	int			cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC ? */
	int			force_raw;	/* prevent use_ecc from being set */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare ? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e. chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size are the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};

static struct pxa3xx_nand_flash builtin_flash_types[] = {
	/*
	 * chip_id
	 * flash_width	Width of Flash memory (DWIDTH_M)
	 * dfc_width	Width of flash controller (DWIDTH_C)
	 * *timing
	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif

static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95},
	.oobfree = { {1, 4}, {6, 26} }
};

static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,

		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127,

		160, 161, 162, 163, 164, 165, 166, 167,
		168, 169, 170, 171, 172, 173, 174, 175,
		176, 177, 178, 179, 180, 181, 182, 183,
		184, 185, 186, 187, 188, 189, 190, 191,

		224, 225, 226, 227, 228, 229, 230, 231,
		232, 233, 234, 235, 236, 237, 238, 239,
		240, 241, 242, 243, 244, 245, 246, 247,
		248, 249, 250, 251, 252, 253, 254, 255},

	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};

static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
	.eccbytes = 256,
	.eccpos = {},
	/* HW ECC handles all ECC data and all spare area is free for OOB */
	.oobfree = {{0, 160} }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nano-seconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(void)
{
	/* We only support the Armada 370/XP/38x for now */
	return PXA3XX_NAND_VARIANT_ARMADA370;
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(&info->pdev->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}

/**
 * NOTE: ND_RUN must be set first and the command buffer written
 * afterwards, otherwise the controller does not work.
 * We enable all the interrupts at the same time and let
 * pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

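/*
 * Mask the interrupts given in int_mask by setting the corresponding
 * bits in NDCR (a set mask bit disables that interrupt source).
 */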
static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch && !info->force_raw) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32-byte read we have
		 * to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO, 8 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(&info->pdev->dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

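/*
 * Transfer the current chunk (and its spare area) between the controller
 * data FIFO and the driver buffers by PIO, in the direction given by
 * info->state.
 */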
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	int data_len = info->step_chunk_size;

	/*
	 * In raw mode, include the spare area and the ECC bytes that are not
	 * consumed by the controller in the data section. Do not reorganize
	 * here, do it in the ->read_page_raw() handler instead.
	 */
	if (info->force_raw)
		data_len += info->step_spare_size + info->ecc_size;

	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(data_len, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (data_len)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(data_len, 4));

		if (info->force_raw)
			break;

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
			info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += data_len;
	info->oob_buff_pos += info->step_spare_size;
}

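/*
 * Threaded part of the interrupt handler: performs the PIO data transfer
 * and acknowledges the data-request status bits.
 */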
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes directly
		 * to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}

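/* Return 1 if the buffer contains only 0xff bytes (i.e. an erased page) */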
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

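/* Pack the column and page address into the NDCB1/NDCB2 command words */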
static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < info->chunk_size) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

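/*
 * Reset the per-command bookkeeping (buffer positions, chunk counters,
 * ECC/spare usage) before a new NAND command is started.
 */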
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		if (!info->force_raw)
			info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

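/*
 * Build the NDCB0..NDCB3 command words for the requested NAND command and
 * return 1 if the command must actually be issued to the controller.
 */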
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(&info->pdev->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

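/*
 * ->cmdfunc() hook for monolithic (single-step) commands: program the
 * command words, start the controller and poll NDSR until the command
 * completes or times out.
 */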
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if so, reset the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}

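/*
 * ->cmdfunc() hook for pages split into several chunks: the same command is
 * re-issued once per chunk with the appropriate extended command type
 * (naked read/write, last read/write or command dispatch).
 */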
static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has been changed;
	 * if so, reset the timing.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed the
	 * completion will be completed, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete it.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more than the supported strength, then it means
		 * this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			mtd->ecc_stats.failed++;
		} else if (bf) {
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, uint8_t *buf,
				     int oob_required, int page)
{
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int chunk, ecc_off_buf;

	if (!info->ecc_bch)
		return -ENOTSUPP;

	/*
	 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
	 * pxa3xx_nand_start(), which will actually disable the ECC engine.
	 */
	info->force_raw = true;
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

	ecc_off_buf = (info->nfullchunks * info->spare_size) +
		      info->last_spare_size;
	for (chunk = 0; chunk < info->nfullchunks; chunk++) {
		chip->read_buf(mtd,
			       buf + (chunk * info->chunk_size),
			       info->chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (chunk * (info->spare_size)),
			       info->spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (chunk * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	if (info->ntotalchunks > info->nfullchunks) {
		chip->read_buf(mtd,
			       buf + (info->nfullchunks * info->chunk_size),
			       info->last_chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (info->nfullchunks * (info->spare_size)),
			       info->last_spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (info->nfullchunks * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	info->force_raw = false;

	return 0;
}

static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
				       page);
}

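/*
 * Data access helpers registered with the NAND core: they only move bytes
 * between the caller and the driver's internal data buffer, which is filled
 * or drained by the controller during command execution.
 */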
static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
				  const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(&info->pdev->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;

	/* Configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}

static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

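/*
 * Probe for a chip on the current chip select by programming safe default
 * timings and issuing a RESET command.
 */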
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

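/*
 * Select the chunk geometry (chunk and spare sizes, number of chunks) and
 * the ECC layout/strength for the requested strength/step/page size
 * combination, or fail if the combination is not supported.
 */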
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 4;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;

	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
		ecc->strength = 16;

	} else {
		dev_err(&info->pdev->dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}

1644static int pxa3xx_nand_scan(struct mtd_info *mtd)
1645{
Scott Wood17fed142016-05-30 13:57:56 -05001646 struct nand_chip *chip = mtd_to_nand(mtd);
1647 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001648 struct pxa3xx_nand_info *info = host->info_data;
1649 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001650 int ret;
1651 uint16_t ecc_strength, ecc_step;
1652
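	/*
	 * With the 'nand-keep-config' property set, reuse the controller
	 * configuration left by the firmware; otherwise configure the
	 * controller for identification and probe for a chip on this CS.
	 */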
Ofer Heifetz268979f2018-08-29 11:56:08 +03001653 if (pdata->keep_config) {
1654 pxa3xx_nand_detect_config(info);
1655 } else {
1656 ret = pxa3xx_nand_config_ident(info);
1657 if (ret)
1658 return ret;
1659 ret = pxa3xx_nand_sensing(host);
1660 if (ret) {
1661 dev_info(&info->pdev->dev,
1662 "There is no chip on cs %d!\n",
1663 info->cs);
1664 return ret;
1665 }
Stefan Roese75659da2015-07-23 10:26:16 +02001666 }
1667
Stefan Roese75659da2015-07-23 10:26:16 +02001668 /* Device detection must be done with ECC disabled */
1669 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
1670 nand_writel(info, NDECCCTRL, 0x0);
1671
1672 if (nand_scan_ident(mtd, 1, NULL))
1673 return -ENODEV;
1674
1675 if (!pdata->keep_config) {
1676 ret = pxa3xx_nand_init_timings(host);
1677 if (ret) {
1678 dev_err(&info->pdev->dev,
1679 "Failed to set timings: %d\n", ret);
1680 return ret;
1681 }
1682 }
1683
Stefan Roese75659da2015-07-23 10:26:16 +02001684#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1685 /*
1686 * We'll use a bad block table stored in-flash and don't
1687 * allow writing the bad block marker to the flash.
1688 */
1689 chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
1690 chip->bbt_td = &bbt_main_descr;
1691 chip->bbt_md = &bbt_mirror_descr;
1692#endif
1693
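	/*
	 * ECC strength/step selection: explicit platform (device tree)
	 * settings take precedence, otherwise fall back to the values
	 * reported by the detected chip (e.g. via ONFI).
	 */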
Stefan Roese75659da2015-07-23 10:26:16 +02001694 if (pdata->ecc_strength && pdata->ecc_step_size) {
1695 ecc_strength = pdata->ecc_strength;
1696 ecc_step = pdata->ecc_step_size;
1697 } else {
1698 ecc_strength = chip->ecc_strength_ds;
1699 ecc_step = chip->ecc_step_ds;
1700 }
1701
1702 /* Set default ECC strength requirements on non-ONFI devices */
1703 if (ecc_strength < 1 && ecc_step < 1) {
1704 ecc_strength = 1;
1705 ecc_step = 512;
1706 }
1707
1708 ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
1709 ecc_step, mtd->writesize);
1710 if (ret)
1711 return ret;
1712
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +03001713 /*
1714 * If the page size is bigger than the FIFO size, let's check
1715 * we are given the right variant and then switch to the extended
1716	 * (aka split) command handling.
1717 */
1718 if (mtd->writesize > info->chunk_size) {
1719 if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
1720 chip->cmdfunc = nand_cmdfunc_extended;
1721 } else {
1722 dev_err(&info->pdev->dev,
1723 "unsupported page size on this variant\n");
1724 return -ENODEV;
1725 }
1726 }
1727
Stefan Roese75659da2015-07-23 10:26:16 +02001728 /* calculate addressing information */
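	/* Large-page (>= 2 KiB) devices need two column address cycles */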
1729 if (mtd->writesize >= 2048)
1730 host->col_addr_cycles = 2;
1731 else
1732 host->col_addr_cycles = 1;
1733
1734 /* release the initial buffer */
1735 kfree(info->data_buff);
1736
1737 /* allocate the real data + oob buffer */
1738 info->buf_size = mtd->writesize + mtd->oobsize;
1739 ret = pxa3xx_nand_init_buff(info);
1740 if (ret)
1741 return ret;
1742 info->oob_buff = info->data_buff + mtd->writesize;
1743
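	/* Chips with more than 65536 pages need a third row address cycle */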
1744 if ((mtd->size >> chip->page_shift) > 65536)
1745 host->row_addr_cycles = 3;
1746 else
1747 host->row_addr_cycles = 2;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001748
1749 if (!pdata->keep_config)
1750 pxa3xx_nand_config_tail(info);
1751
Stefan Roese75659da2015-07-23 10:26:16 +02001752 return nand_scan_tail(mtd);
1753}
1754
1755static int alloc_nand_resource(struct pxa3xx_nand_info *info)
1756{
1757 struct pxa3xx_nand_platform_data *pdata;
1758 struct pxa3xx_nand_host *host;
1759 struct nand_chip *chip = NULL;
1760 struct mtd_info *mtd;
1761 int ret, cs;
1762
1763 pdata = info->pdata;
1764 if (pdata->num_cs <= 0)
1765 return -ENODEV;
1766
1767 info->variant = pxa3xx_nand_get_variant();
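	/*
	 * The per-CS host structures (which embed the nand_chip as their
	 * first member) live in the memory allocated right after 'info'
	 * in board_nand_init(), hence the pointer arithmetic below.
	 */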
1768 for (cs = 0; cs < pdata->num_cs; cs++) {
Kevin Smith4d21b592016-01-14 16:01:38 +00001769 chip = (struct nand_chip *)
1770 ((u8 *)&info[1] + sizeof(*host) * cs);
Scott Wood17fed142016-05-30 13:57:56 -05001771 mtd = nand_to_mtd(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001772 host = (struct pxa3xx_nand_host *)chip;
1773 info->host[cs] = host;
Stefan Roese75659da2015-07-23 10:26:16 +02001774 host->cs = cs;
1775 host->info_data = info;
Stefan Roese75659da2015-07-23 10:26:16 +02001776 mtd->owner = THIS_MODULE;
1777
Chris Packham3c2170a2016-08-29 15:20:52 +12001778 nand_set_controller_data(chip, host);
Stefan Roese75659da2015-07-23 10:26:16 +02001779 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
Miquel Raynal30a016a2018-10-11 17:45:42 +02001780 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1781 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
Stefan Roese75659da2015-07-23 10:26:16 +02001782 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1783 chip->controller = &info->controller;
1784 chip->waitfunc = pxa3xx_nand_waitfunc;
1785 chip->select_chip = pxa3xx_nand_select_chip;
1786 chip->read_word = pxa3xx_nand_read_word;
1787 chip->read_byte = pxa3xx_nand_read_byte;
1788 chip->read_buf = pxa3xx_nand_read_buf;
1789 chip->write_buf = pxa3xx_nand_write_buf;
1790 chip->options |= NAND_NO_SUBPAGE_WRITE;
1791 chip->cmdfunc = nand_cmdfunc;
1792 }
1793
Stefan Roese75659da2015-07-23 10:26:16 +02001794 /* Allocate a buffer to allow flash detection */
1795 info->buf_size = INIT_BUFFER_SIZE;
1796 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1797 if (info->data_buff == NULL) {
1798 ret = -ENOMEM;
1799 goto fail_disable_clk;
1800 }
1801
1802 /* initialize all interrupts to be disabled */
1803 disable_int(info, NDSR_MASK);
1804
1805 return 0;
1806
1807 kfree(info->data_buff);
1808fail_disable_clk:
1809 return ret;
1810}
1811
1812static int pxa3xx_nand_probe_dt(struct pxa3xx_nand_info *info)
1813{
1814 struct pxa3xx_nand_platform_data *pdata;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001815 const void *blob = gd->fdt_blob;
1816 int node = -1;
Stefan Roese75659da2015-07-23 10:26:16 +02001817
1818 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1819 if (!pdata)
1820 return -ENOMEM;
1821
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001822	/* Look up enabled NAND controller nodes in the FDT blob */
1823 do {
1824 node = fdt_node_offset_by_compatible(blob, node,
1825 "marvell,mvebu-pxa3xx-nand");
1826 if (node < 0)
1827 break;
1828
1829		/* Bypass disabled nodes */
1830 if (!fdtdec_get_is_enabled(blob, node))
1831 continue;
Stefan Roese75659da2015-07-23 10:26:16 +02001832
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001833		/* Get the first enabled NAND controller base address */
1834 info->mmio_base =
1835 (void __iomem *)fdtdec_get_addr_size_auto_noparent(
1836 blob, node, "reg", 0, NULL, true);
Stefan Roese75659da2015-07-23 10:26:16 +02001837
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001838 pdata->num_cs = fdtdec_get_int(blob, node, "num-cs", 1);
1839 if (pdata->num_cs != 1) {
Masahiro Yamada81e10422017-09-16 14:10:41 +09001840 pr_err("pxa3xx driver supports single CS only\n");
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001841 break;
1842 }
1843
1844 if (fdtdec_get_bool(blob, node, "nand-enable-arbiter"))
1845 pdata->enable_arbiter = 1;
1846
1847 if (fdtdec_get_bool(blob, node, "nand-keep-config"))
1848 pdata->keep_config = 1;
1849
1850 /*
1851 * ECC parameters.
1852 * If these are not set, they will be selected according
1853 * to the detected flash type.
1854 */
1855 /* ECC strength */
1856 pdata->ecc_strength = fdtdec_get_int(blob, node,
1857 "nand-ecc-strength", 0);
1858
1859 /* ECC step size */
1860 pdata->ecc_step_size = fdtdec_get_int(blob, node,
1861 "nand-ecc-step-size", 0);
1862
1863 info->pdata = pdata;
1864
1865 /* Currently support only a single NAND controller */
1866 return 0;
1867
1868 } while (node >= 0);
1869
1870 return -EINVAL;
Stefan Roese75659da2015-07-23 10:26:16 +02001871}
1872
1873static int pxa3xx_nand_probe(struct pxa3xx_nand_info *info)
1874{
1875 struct pxa3xx_nand_platform_data *pdata;
1876 int ret, cs, probe_success;
1877
1878 ret = pxa3xx_nand_probe_dt(info);
1879 if (ret)
1880 return ret;
1881
1882 pdata = info->pdata;
1883
1884 ret = alloc_nand_resource(info);
1885 if (ret) {
1886		dev_err(&info->pdev->dev, "alloc nand resource failed\n");
1887 return ret;
1888 }
1889
1890 probe_success = 0;
1891 for (cs = 0; cs < pdata->num_cs; cs++) {
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001892 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001893
1894 /*
1895 * The mtd name matches the one used in 'mtdparts' kernel
1896		 * parameter. This name cannot be changed, otherwise the
1897		 * user's mtd partition configuration would break.
1898 */
1899 mtd->name = "pxa3xx_nand-0";
1900 info->cs = cs;
1901 ret = pxa3xx_nand_scan(mtd);
1902 if (ret) {
1903			dev_info(&info->pdev->dev, "failed to scan nand at cs %d\n",
1904 cs);
1905 continue;
1906 }
1907
Scott Wood2c1b7e12016-05-30 13:57:55 -05001908 if (nand_register(cs, mtd))
1909 continue;
1910
1911 probe_success = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001912 }
1913
1914 if (!probe_success)
1915 return -ENODEV;
1916
1917 return 0;
1918}
1919
1920/*
1921 * Main initialization routine
1922 */
1923void board_nand_init(void)
1924{
1925 struct pxa3xx_nand_info *info;
1926 struct pxa3xx_nand_host *host;
1927 int ret;
1928
Kevin Smithf6ca2a62016-01-14 16:01:39 +00001929 info = kzalloc(sizeof(*info) +
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001930 sizeof(*host) * CONFIG_SYS_MAX_NAND_DEVICE,
1931 GFP_KERNEL);
Stefan Roese75659da2015-07-23 10:26:16 +02001932 if (!info)
1933 return;
1934
Stefan Roese75659da2015-07-23 10:26:16 +02001935 ret = pxa3xx_nand_probe(info);
1936 if (ret)
1937 return;
Stefan Roese75659da2015-07-23 10:26:16 +02001938}