// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/mtd/nand/raw/pxa3xx_nand.c
 *
 * Copyright © 2005 Intel Corporation
 * Copyright © 2006 Marvell International Ltd.
 */

#include <common.h>
#include <malloc.h>
#include <fdtdec.h>
#include <nand.h>
#include <asm/global_data.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/rawnand.h>
#include <linux/types.h>
#include <syscon.h>
#include <regmap.h>
#include <dm/uclass.h>
#include <dm/read.h>

#include "pxa3xx_nand.h"

DECLARE_GLOBAL_DATA_PTR;

#define TIMEOUT_DRAIN_FIFO	5	/* in ms */
#define CHIP_DELAY_TIMEOUT	200
#define NAND_STOP_DELAY		40

/*
 * Define a buffer size for the initial command that detects the flash device:
 * STATUS, READID and PARAM.
 * ONFI param page is 256 bytes, and there are three redundant copies
 * to be read. JEDEC param page is 512 bytes, and there are also three
 * redundant copies to be read.
 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
 */
#define INIT_BUFFER_SIZE	2048

/* registers and bit definitions */
#define NDCR		(0x00) /* Control register */
#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
#define NDSR		(0x14) /* Status Register */
#define NDPCR		(0x18) /* Page Count Register */
#define NDBDR0		(0x1C) /* Bad Block Register 0 */
#define NDBDR1		(0x20) /* Bad Block Register 1 */
#define NDECCCTRL	(0x28) /* ECC control */
#define NDDB		(0x40) /* Data Buffer */
#define NDCB0		(0x48) /* Command Buffer0 */
#define NDCB1		(0x4C) /* Command Buffer1 */
#define NDCB2		(0x50) /* Command Buffer2 */

#define NDCR_SPARE_EN		(0x1 << 31)
#define NDCR_ECC_EN		(0x1 << 30)
#define NDCR_DMA_EN		(0x1 << 29)
#define NDCR_ND_RUN		(0x1 << 28)
#define NDCR_DWIDTH_C		(0x1 << 27)
#define NDCR_DWIDTH_M		(0x1 << 26)
#define NDCR_PAGE_SZ		(0x1 << 24)
#define NDCR_NCSX		(0x1 << 23)
#define NDCR_ND_MODE		(0x3 << 21)
#define NDCR_NAND_MODE		(0x0)
#define NDCR_CLR_PG_CNT		(0x1 << 20)
#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)

#define NDCR_RA_START		(0x1 << 15)
#define NDCR_PG_PER_BLK		(0x1 << 14)
#define NDCR_ND_ARB_EN		(0x1 << 12)
#define NDCR_INT_MASK		(0xFFF)

#define NDSR_MASK		(0xfff)
#define NDSR_ERR_CNT_OFF	(16)
#define NDSR_ERR_CNT_MASK	(0x1f)
#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
#define NDSR_RDY		(0x1 << 12)
#define NDSR_FLASH_RDY		(0x1 << 11)
#define NDSR_CS0_PAGED		(0x1 << 10)
#define NDSR_CS1_PAGED		(0x1 << 9)
#define NDSR_CS0_CMDD		(0x1 << 8)
#define NDSR_CS1_CMDD		(0x1 << 7)
#define NDSR_CS0_BBD		(0x1 << 6)
#define NDSR_CS1_BBD		(0x1 << 5)
#define NDSR_UNCORERR		(0x1 << 4)
#define NDSR_CORERR		(0x1 << 3)
#define NDSR_WRDREQ		(0x1 << 2)
#define NDSR_RDDREQ		(0x1 << 1)
#define NDSR_WRCMDREQ		(0x1)

#define NDCB0_LEN_OVRD		(0x1 << 28)
#define NDCB0_ST_ROW_EN		(0x1 << 26)
#define NDCB0_AUTO_RS		(0x1 << 25)
#define NDCB0_CSEL		(0x1 << 24)
#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
#define NDCB0_NC		(0x1 << 20)
#define NDCB0_DBC		(0x1 << 19)
#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
#define NDCB0_CMD2_MASK		(0xff << 8)
#define NDCB0_CMD1_MASK		(0xff)
#define NDCB0_ADDR_CYC_SHIFT	(16)

#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
#define EXT_CMD_TYPE_READ	4 /* Read */
#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
#define EXT_CMD_TYPE_FINAL	3 /* Final command */
#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */

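/*
 * For pages larger than one chunk, nand_cmdfunc_extended() below chains
 * these types chunk by chunk: a read starts as EXT_CMD_TYPE_MONO,
 * intermediate chunks use EXT_CMD_TYPE_NAKED_RW, the last chunk uses
 * EXT_CMD_TYPE_LAST_RW, and a chunked program is completed by a final
 * EXT_CMD_TYPE_DISPATCH command.
 */
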
/* System control register and bit to enable NAND on some SoCs */
#define GENCONF_SOC_DEVICE_MUX	0x208
#define GENCONF_SOC_DEVICE_MUX_NFC_EN	BIT(0)

/*
 * This should be large enough to read 'ONFI' and 'JEDEC'.
 * Let's use 7 bytes, which is the maximum ID count supported
 * by the controller (see NDCR_RD_ID_CNT_MASK).
 */
#define READ_ID_BYTES		7

/* macros for registers read/write */
#define nand_writel(info, off, val)	\
	writel((val), (info)->mmio_base + (off))

#define nand_readl(info, off)		\
	readl((info)->mmio_base + (off))

/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,
};

enum {
	STATE_IDLE = 0,
	STATE_PREPARED,
	STATE_CMD_HANDLE,
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,
	STATE_PIO_WRITING,
	STATE_CMD_DONE,
	STATE_READY,
};

enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,
	PXA3XX_NAND_VARIANT_ARMADA370,
	PXA3XX_NAND_VARIANT_ARMADA_8K,
};

struct pxa3xx_nand_host {
	struct nand_chip	chip;
	void			*info_data;

	/* page size of attached chip */
	int			use_ecc;
	int			cs;

	/* calculated from pxa3xx_nand_flash data */
	unsigned int		col_addr_cycles;
	unsigned int		row_addr_cycles;
};

struct pxa3xx_nand_info {
	struct nand_hw_control	controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk		*clk;
	void __iomem		*mmio_base;
	unsigned long		mmio_phys;
	int			cmd_complete, dev_ready;

	unsigned int		buf_start;
	unsigned int		buf_count;
	unsigned int		buf_size;
	unsigned int		data_buff_pos;
	unsigned int		oob_buff_pos;

	unsigned char		*data_buff;
	unsigned char		*oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int		state;

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int			cs;
	int			use_ecc;	/* use HW ECC? */
	int			force_raw;	/* prevent use_ecc from being set */
	int			ecc_bch;	/* using BCH ECC? */
	int			use_spare;	/* use spare? */
	int			need_wait;

	/* Amount of real data per full chunk */
	unsigned int		chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int		spare_size;

	/* Number of full chunks (i.e. chunk_size + spare_size) */
	unsigned int		nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int		ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int		last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int		last_spare_size;

	unsigned int		ecc_size;
	unsigned int		ecc_err_cnt;
	unsigned int		max_bitflips;
	int			retcode;

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size are the
	 * amounts of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int		step_chunk_size;
	unsigned int		step_spare_size;
	unsigned int		cur_chunk;

	/* cached register value */
	uint32_t		reg_ndcr;
	uint32_t		ndtr0cs0;
	uint32_t		ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t		ndcb0;
	uint32_t		ndcb1;
	uint32_t		ndcb2;
	uint32_t		ndcb3;
};

static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh  wp   rh  rp   r      whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};

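/*
 * Note: the values above are in nanoseconds. They are not written to the
 * hardware as-is; pxa3xx_nand_set_timing() below converts each one to
 * controller clock cycles with ns2cycle() and packs the results into the
 * NDTR0CS0/NDTR1CS0 registers.
 */
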
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	/*
	 * chip_id
	 * flash_width	Width of Flash memory (DWIDTH_M)
	 * dfc_width	Width of flash controller (DWIDTH_C)
	 * *timing
	 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
	 */
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs = 8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif

struct marvell_hw_ecc_layout {
	int page_size;
	int strength;
	unsigned int ecc_size;
	unsigned int nfullchunks;
	unsigned int chunk_size;
	unsigned int spare_size;
	unsigned int last_chunk_size;
	unsigned int last_spare_size;
};

static const struct marvell_hw_ecc_layout nfc_layouts[] = {
	/* page_size  strength  ecc_size  nfullchunks  chunk_size  spare_size  last_chunk  last_spare */
	{      512,       1,        8,        1,          512,         8,          0,          0 },
	{     2048,       1,       24,        1,         2048,        40,          0,          0 },

	{     2048,       4,       32,        1,         2048,        32,          0,          0 },
	{     2048,       8,       32,        1,         1024,         0,       1024,         32 },
	{     2048,      12,       32,        2,          704,         0,        640,          0 },
	{     2048,      16,       32,        4,          512,         0,          0,         32 },
	{     4096,       4,       32,        2,         2048,        32,          0,          0 },
	{     4096,       8,       32,        4,         1024,         0,          0,         64 },
	{     4096,      12,       32,        5,          704,         0,        576,         32 },
	{     4096,      16,       32,        8,          512,         0,          0,         32 },

	{     8192,       4,       32,        4,         2048,        32,          0,          0 },
	{     8192,       8,       32,        8,         1024,         0,          0,        160 },
	{     8192,      12,       32,       11,          704,         0,        448,         64 },
	{     8192,      16,       32,       16,          512,         0,          0,         32 },
	{ },
};

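/*
 * A worked example of how one entry maps onto a page (derived from the
 * table above): for page_size 4096 and strength 12, the page is split
 * into 5 full chunks of 704 bytes plus one last chunk of 576 bytes,
 * i.e. 5 * 704 + 576 = 4096 bytes of real data, with 32 bytes of spare
 * carried by the last chunk and 32 bytes of BCH ECC appended to each of
 * the 6 chunks.
 */
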
static struct nand_ecclayout ecc_layout_empty = {
	.eccbytes = 0,
	.eccpos = { },
	.oobfree = { }
};

#define NDTR0_tCH(c)	(min((c), 7) << 19)
#define NDTR0_tCS(c)	(min((c), 7) << 16)
#define NDTR0_tWH(c)	(min((c), 7) << 11)
#define NDTR0_tWP(c)	(min((c), 7) << 8)
#define NDTR0_tRH(c)	(min((c), 7) << 3)
#define NDTR0_tRP(c)	(min((c), 7) << 0)

#define NDTR1_tR(c)	(min((c), 65535) << 16)
#define NDTR1_tWHR(c)	(min((c), 15) << 4)
#define NDTR1_tAR(c)	(min((c), 15) << 0)

/* convert nanoseconds to nand flash controller clock cycles */
#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)

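/*
 * Worked example (clock rate assumed for illustration only): with a
 * 250 MHz controller clock, clk / 1000000 = 250 cycles per us, so
 * ns2cycle(40, 250000000) = 40 * 250 / 1000 = 10 cycles. Each NDTR0_*
 * field then saturates its argument, e.g. NDTR0_tCH() clamps anything
 * above 7 cycles to 7 before shifting it into place.
 */
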
static const struct udevice_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,armada370-nand-controller",
		.data = PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{
		.compatible = "marvell,armada-8k-nand-controller",
		.data = PXA3XX_NAND_VARIANT_ARMADA_8K,
	},
	{}
};

static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(struct udevice *dev)
{
	return dev_get_driver_data(dev);
}

static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
				   const struct pxa3xx_nand_timing *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
				       const struct nand_sdr_timings *t)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct nand_chip *chip = &host->chip;
	unsigned long nand_clk = mvebu_get_nand_clock();
	uint32_t ndtr0, ndtr1;

	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
	u32 tR = chip->chip_delay * 1000;
	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);

	/* fallback to a default value if tR = 0 */
	if (!tR)
		tR = 20000;

	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));

	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));

	info->ndtr0cs0 = ndtr0;
	info->ndtr1cs0 = ndtr1;
	nand_writel(info, NDTR0CS0, ndtr0);
	nand_writel(info, NDTR1CS0, ndtr1);
}

static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(mtd->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}

/**
 * NOTE: ND_RUN must be set first, then the command buffer written;
 * otherwise the controller does not work.
 * We enable all the interrupts at the same time, and
 * let pxa3xx_nand_irq handle all the logic.
 */
static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
{
	uint32_t ndcr;

	ndcr = info->reg_ndcr;

	if (info->use_ecc) {
		ndcr |= NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x1);
	} else {
		ndcr &= ~NDCR_ECC_EN;
		if (info->ecc_bch)
			nand_writel(info, NDECCCTRL, 0x0);
	}

	ndcr &= ~NDCR_DMA_EN;

	if (info->use_spare)
		ndcr |= NDCR_SPARE_EN;
	else
		ndcr &= ~NDCR_SPARE_EN;

	ndcr |= NDCR_ND_RUN;

	/* clear status bits and run */
	nand_writel(info, NDSR, NDSR_MASK);
	nand_writel(info, NDCR, 0);
	nand_writel(info, NDCR, ndcr);
}

static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
{
	uint32_t ndcr;

	ndcr = nand_readl(info, NDCR);
	nand_writel(info, NDCR, ndcr | int_mask);
}

static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch && !info->force_raw) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after every 32-byte read, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO eight 32-bit reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(info->controller.active->mtd.dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			data += 32;
			len -= 8;
		}
	}

	readsl(info->mmio_base + NDDB, data, len);
}

static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	int data_len = info->step_chunk_size;

	/*
	 * In raw mode, include the spare area and the ECC bytes that are not
	 * consumed by the controller in the data section. Do not reorganize
	 * here, do it in the ->read_page_raw() handler instead.
	 */
	if (info->force_raw)
		data_len += info->step_spare_size + info->ecc_size;

	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(data_len, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (data_len)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(data_len, 4));

		if (info->force_raw)
			break;

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(info->controller.active->mtd.dev,
			"%s: invalid state %d\n", __func__, info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += data_len;
	info->oob_buff_pos += info->step_spare_size;
}

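/*
 * Sizing note (derived from the code above, with an illustrative layout):
 * the FIFO is moved in 32-bit words, so a 2048-byte chunk with 32 bytes
 * of spare and 32 bytes of ECC is drained in raw mode as
 * DIV_ROUND_UP(2048 + 32 + 32, 4) = 528 word reads.
 */
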
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}

static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		     info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store the maximum number of corrected
		 * bitflips to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bits before issuing the next command, which
	 * can and will alter the status bits and deserves a new
	 * interrupt on its own. This lets the controller exit the IRQ.
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing either 12 or 16 bytes
		 * directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}

static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	for (; len > 0; len--)
		if (*buf++ != 0xff)
			return 0;
	return 1;
}

static void set_command_address(struct pxa3xx_nand_info *info,
		unsigned int page_size, uint16_t column, int page_addr)
{
	/* small page addr setting */
	if (page_size < info->chunk_size) {
		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
				| (column & 0xFF);

		info->ndcb2 = 0;
	} else {
		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
				| (column & 0xFFFF);

		if (page_addr & 0xFF0000)
			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
		else
			info->ndcb2 = 0;
	}
}

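/*
 * A worked example (values chosen for illustration): for a large-page
 * device, reading page 0x12345 at column 0 yields
 * ndcb1 = (0x2345 << 16) | 0x0000 = 0x23450000 and ndcb2 = 0x01, i.e.
 * the low 16 bits of the row address share NDCB1 with the column and
 * the third row-address byte spills into NDCB2.
 */
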
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column pointers to handle data */
	info->buf_start		= 0;
	info->buf_count		= 0;
	info->data_buff_pos	= 0;
	info->oob_buff_pos	= 0;
	info->step_chunk_size	= 0;
	info->step_spare_size	= 0;
	info->cur_chunk		= 0;
	info->use_ecc		= 0;
	info->use_spare		= 1;
	info->retcode		= ERR_NONE;
	info->ecc_err_cnt	= 0;
	info->ndcb3		= 0;
	info->need_wait		= 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		if (!info->force_raw)
			info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}

static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(mtd->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}

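/*
 * An illustrative NDCB0 composition for a read (derived from the code
 * above): every READ0 starts from
 * NDCB0_CMD_TYPE(0) | NDCB0_ADDR_CYC(n) | NAND_CMD_READ0; when the page
 * equals the chunk size, NDCB0_DBC is also set and NAND_CMD_READSTART
 * becomes the second command byte, while larger pages additionally use
 * NDCB0_LEN_OVRD with the transfer length placed in NDCB3.
 */
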
static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
			 int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * yes, reset the timings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	prepare_start_command(info, command);

	info->state = STATE_PREPARED;
	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);

	if (exec_cmd) {
		u32 ts;

		info->cmd_complete = 0;
		info->dev_ready = 0;
		info->need_wait = 1;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				return;
			}
		}
	}
	info->state = STATE_IDLE;
}

static void nand_cmdfunc_extended(struct mtd_info *mtd,
				  const unsigned command,
				  int column, int page_addr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int exec_cmd, ext_cmd_type;

	/*
	 * if this is a x16 device, then convert the input
	 * "byte" address into a "word" address appropriate
	 * for indexing a word-oriented device
	 */
	if (info->reg_ndcr & NDCR_DWIDTH_M)
		column /= 2;

	/*
	 * There may be different NAND chips hooked to different chip
	 * selects, so check whether the chip select has changed; if
	 * yes, reset the timings.
	 */
	if (info->cs != host->cs) {
		info->cs = host->cs;
		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
	}

	/* Select the extended command for the first command */
	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
		ext_cmd_type = EXT_CMD_TYPE_MONO;
		break;
	case NAND_CMD_SEQIN:
		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		break;
	case NAND_CMD_PAGEPROG:
		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
		break;
	default:
		ext_cmd_type = 0;
		break;
	}

	prepare_start_command(info, command);

	/*
	 * Prepare the "is ready" completion before starting a command
	 * transaction sequence. If the command is not executed, the
	 * completion is completed anyway, see below.
	 *
	 * We can do that inside the loop because the command variable
	 * is invariant and thus so is the exec_cmd.
	 */
	info->need_wait = 1;
	info->dev_ready = 0;

	do {
		u32 ts;

		info->state = STATE_PREPARED;
		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
					       column, page_addr);
		if (!exec_cmd) {
			info->need_wait = 0;
			info->dev_ready = 1;
			break;
		}

		info->cmd_complete = 0;
		pxa3xx_nand_start(info);

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->cmd_complete)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Wait timeout!!!\n");
				return;
			}
		}

		/* Only a few commands need several steps */
		if (command != NAND_CMD_PAGEPROG &&
		    command != NAND_CMD_READ0 &&
		    command != NAND_CMD_READOOB)
			break;

		info->cur_chunk++;

		/* Check if the sequence is complete */
		if (info->cur_chunk == info->ntotalchunks &&
		    command != NAND_CMD_PAGEPROG)
			break;

		/*
		 * After a split program command sequence has issued
		 * the command dispatch, the command sequence is complete.
		 */
		if (info->cur_chunk == (info->ntotalchunks + 1) &&
		    command == NAND_CMD_PAGEPROG &&
		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
			break;

		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
			/* Last read: issue a 'last naked read' */
			if (info->cur_chunk == info->ntotalchunks - 1)
				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
			else
				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;

		/*
		 * If a split program command has no more data to transfer,
		 * the command dispatch must be issued to complete.
		 */
		} else if (command == NAND_CMD_PAGEPROG &&
			   info->cur_chunk == info->ntotalchunks) {
			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
		}
	} while (1);

	info->state = STATE_IDLE;
}

static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, const uint8_t *buf, int oob_required,
		int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more such bits than the supported strength,
		 * then it means this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			mtd->ecc_stats.failed++;
		} else if (bf) {
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}

static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
				     struct nand_chip *chip, uint8_t *buf,
				     int oob_required, int page)
{
	struct pxa3xx_nand_host *host = chip->priv;
	struct pxa3xx_nand_info *info = host->info_data;
	int chunk, ecc_off_buf;

	if (!info->ecc_bch)
		return -ENOTSUPP;

	/*
	 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
	 * pxa3xx_nand_start(), which will actually disable the ECC engine.
	 */
	info->force_raw = true;
	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);

	ecc_off_buf = (info->nfullchunks * info->spare_size) +
		      info->last_spare_size;
	for (chunk = 0; chunk < info->nfullchunks; chunk++) {
		chip->read_buf(mtd,
			       buf + (chunk * info->chunk_size),
			       info->chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (chunk * (info->spare_size)),
			       info->spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (chunk * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	if (info->ntotalchunks > info->nfullchunks) {
		chip->read_buf(mtd,
			       buf + (info->nfullchunks * info->chunk_size),
			       info->last_chunk_size);
		chip->read_buf(mtd,
			       chip->oob_poi +
			       (info->nfullchunks * (info->spare_size)),
			       info->last_spare_size);
		chip->read_buf(mtd,
			       chip->oob_poi + ecc_off_buf +
			       (info->nfullchunks * (info->ecc_size)),
			       info->ecc_size - 2);
	}

	info->force_raw = false;

	return 0;
}

static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
				    struct nand_chip *chip, int page)
{
	/* Invalidate page cache */
	chip->pagebuf = -1;

	return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
				       page);
}

static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	char retval = 0xFF;

	if (info->buf_start < info->buf_count)
		/* Has just sent a new command? */
		retval = info->data_buff[info->buf_start++];

	return retval;
}

static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	u16 retval = 0xFFFF;

	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
		retval = *((u16 *)(info->data_buff + info->buf_start));
		info->buf_start += 2;
	}
	return retval;
}

static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(buf, info->data_buff + info->buf_start, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
				  const uint8_t *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);

	memcpy(info->data_buff + info->buf_start, buf, real_len);
	info->buf_start += real_len;
}

static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
	return;
}

static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}

static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;

	/* Configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN;

	return 0;
}

static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
	struct nand_chip *chip = mtd_to_nand(mtd);

	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
}

static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	uint32_t ndcr = nand_readl(info, NDCR);

	/* Set an initial chunk size */
	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
	info->reg_ndcr = ndcr &
		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
}

static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
{
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;
	return 0;
}

static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timings as a first try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}

1503static int pxa_ecc_init(struct pxa3xx_nand_info *info,
1504 struct nand_ecc_ctrl *ecc,
1505 int strength, int ecc_stepsize, int page_size)
1506{
Chris Packham03085ca2022-08-25 16:59:49 +12001507 int i = 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001508
Chris Packham03085ca2022-08-25 16:59:49 +12001509 /* if ecc strength is 1 ecc algo is Hamming else bch */
1510 info->ecc_bch = (strength == 1) ? 0 : 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001511
Chris Packham03085ca2022-08-25 16:59:49 +12001512 ecc->mode = NAND_ECC_HW;
Stefan Roese75659da2015-07-23 10:26:16 +02001513
Chris Packham03085ca2022-08-25 16:59:49 +12001514 /* ecc->layout is not in use for pxa driver (but shouldn't be NULL)*/
1515 if (info->ecc_bch == 1)
1516 ecc->layout = &ecc_layout_empty;
Stefan Roese75659da2015-07-23 10:26:16 +02001517
Chris Packham03085ca2022-08-25 16:59:49 +12001518 /* for bch actual ecc strength is 16 per chunk */
1519 ecc->strength = (info->ecc_bch == 1) ? 16 : 1;
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001520
Chris Packham03085ca2022-08-25 16:59:49 +12001521 while (nfc_layouts[i].strength) {
1522 if (strength == nfc_layouts[i].strength && page_size == nfc_layouts[i].page_size) {
1523 info->nfullchunks = nfc_layouts[i].nfullchunks;
1524 info->chunk_size = nfc_layouts[i].chunk_size;
1525 info->spare_size = nfc_layouts[i].spare_size;
1526 info->last_chunk_size = nfc_layouts[i].last_chunk_size;
1527 info->last_spare_size = nfc_layouts[i].last_spare_size;
1528 info->ntotalchunks = (info->last_spare_size || info->last_chunk_size) ?
1529 info->nfullchunks + 1 : info->nfullchunks;
1530 info->ecc_size = nfc_layouts[i].ecc_size;
1531 break;
1532 }
1533 ++i;
1534 }
Konstantin Porotchkine0e232e2018-08-29 11:56:17 +03001535
Chris Packham03085ca2022-08-25 16:59:49 +12001536 /* for bch the ecc is calculated per chunk size and for Hamming it is 512 */
1537 ecc->size = (info->ecc_bch) ? info->chunk_size : 512;
Konstantin Porotchkina692cde2018-08-29 11:56:16 +03001538
Chris Packham03085ca2022-08-25 16:59:49 +12001539 /* nand_scan_tail func perform validity tests for ECC strength, and it
1540 * assumes that all chunks are with same size. in our case when ecc is 12
1541 * the chunk size is 704 but the last chunk is with different size so
1542 * we cheat it nand_scan_tail validity tests by set info->ecc_size value to 512
1543 */
1544 if (strength == 12)
1545 ecc->size = 512;
Konstantin Porotchkina692cde2018-08-29 11:56:16 +03001546
Chris Packham03085ca2022-08-25 16:59:49 +12001547 if (ecc_stepsize != 512 || !(nfc_layouts[i].strength)) {
Sean Andersonc6302f02020-09-15 10:44:40 -04001548 dev_err(info->controller.active->mtd.dev,
Stefan Roese75659da2015-07-23 10:26:16 +02001549 "ECC strength %d at page size %d is not supported\n",
1550 strength, page_size);
1551 return -ENODEV;
1552 }
1553
1554 return 0;
1555}
1556
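/*
 * pxa3xx_nand_scan - detect the chip, set up ECC and buffers, and finish
 * the MTD scan.
 *
 * Either trusts the configuration left by the bootloader (keep_config) or
 * probes the chip from scratch, then derives the ECC parameters, address
 * cycle counts and the real data/OOB buffer before handing control to
 * nand_scan_tail().
 */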
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(mtd->dev, "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
	    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(mtd->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * Use a bad block table stored in-flash and do not allow writing
	 * bad block markers to the flash itself.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/*
	 * If the page size is bigger than the FIFO size, check that we are
	 * given the right variant and then switch to the extended (aka
	 * split) command handling.
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(mtd->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

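	/*
	 * Worked example for the addressing rules below: a 256 MiB chip
	 * with 2 KiB pages holds 131072 pages, so it needs two column
	 * address cycles (writesize >= 2048) and, since 131072 > 65536,
	 * three row address cycles.
	 */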
	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}

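/*
 * alloc_nand_resource - set up the per-chip-select host structures and the
 * initial detection buffer.
 *
 * The host structures live directly after the pxa3xx_nand_info in the
 * driver's private data (see the priv_auto sizing in U_BOOT_DRIVER below),
 * which is why plain pointer arithmetic on &info[1] is used here.
 */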
static int alloc_nand_resource(struct udevice *dev, struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;
	struct pxa3xx_nand_host *host;
	struct nand_chip *chip = NULL;
	struct mtd_info *mtd;
	int cs;

	pdata = info->pdata;
	if (pdata->num_cs <= 0)
		return -ENODEV;

	info->variant = pxa3xx_nand_get_variant(dev);
	for (cs = 0; cs < pdata->num_cs; cs++) {
		chip = (struct nand_chip *)
			((u8 *)&info[1] + sizeof(*host) * cs);
		mtd = nand_to_mtd(chip);
		host = (struct pxa3xx_nand_host *)chip;
		info->host[cs] = host;
		host->cs = cs;
		host->info_data = info;
		mtd->owner = THIS_MODULE;

		nand_set_controller_data(chip, host);
		chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
		chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
		chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
		chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
		chip->controller = &info->controller;
		chip->waitfunc = pxa3xx_nand_waitfunc;
		chip->select_chip = pxa3xx_nand_select_chip;
		chip->read_word = pxa3xx_nand_read_word;
		chip->read_byte = pxa3xx_nand_read_byte;
		chip->read_buf = pxa3xx_nand_read_buf;
		chip->write_buf = pxa3xx_nand_write_buf;
		chip->options |= NAND_NO_SUBPAGE_WRITE;
		chip->cmdfunc = nand_cmdfunc;
	}

	/* Allocate a buffer to allow flash detection */
	info->buf_size = INIT_BUFFER_SIZE;
	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
	if (info->data_buff == NULL)
		return -ENOMEM;

	/* start with all interrupts disabled */
	disable_int(info, NDSR_MASK);

	/*
	 * Some SoCs, such as A7k/A8k, need the NAND controller to be
	 * enabled manually so that they do not depend on the bootloader
	 * having done so. This is done through a single bit in the System
	 * Functions registers.
	 */
	if (pxa3xx_nand_get_variant(dev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
		struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
				dev, "marvell,system-controller");
		u32 reg;

		if (IS_ERR(sysctrl_base))
			return PTR_ERR(sysctrl_base);

		regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
		reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
		regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
	}

	return 0;
}

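/*
 * pxa3xx_nand_probe_dt - fill the platform data from the device tree.
 *
 * A minimal, purely illustrative node as consumed below; the unit address,
 * compatible string and property values are assumptions, not taken from a
 * real board file:
 *
 *	nand-controller@d0000 {
 *		compatible = "...";          // must match pxa3xx_nand_dt_ids
 *		reg = <0xd0000 0x54>;
 *		num-cs = <1>;                // only a single CS is supported
 *		marvell,nand-keep-config;    // optional: trust bootloader setup
 *		nand-ecc-strength = <4>;     // optional: else derived from chip
 *		nand-ecc-step-size = <512>;  // optional: else derived from chip
 *	};
 */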
static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info)
{
	struct pxa3xx_nand_platform_data *pdata;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	info->mmio_base = dev_read_addr_ptr(dev);

	pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
	if (pdata->num_cs != 1) {
		pr_err("pxa3xx driver supports single CS only\n");
		kfree(pdata);
		return -EINVAL;
	}

	if (dev_read_bool(dev, "marvell,nand-enable-arbiter"))
		pdata->enable_arbiter = 1;

	if (dev_read_bool(dev, "marvell,nand-keep-config"))
		pdata->keep_config = 1;

	/*
	 * ECC parameters (strength and step size).
	 * If these are not set, they will be selected according
	 * to the detected flash type.
	 */
	pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0);
	pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size",
						    0);

	info->pdata = pdata;

	return 0;
}

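/*
 * pxa3xx_nand_probe - driver model probe: parse the device tree, allocate
 * controller resources, then scan and register one MTD device per chip
 * select, succeeding if at least one chip select probes.
 */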
static int pxa3xx_nand_probe(struct udevice *dev)
{
	struct pxa3xx_nand_platform_data *pdata;
	int ret, cs, probe_success;
	struct pxa3xx_nand_info *info = dev_get_priv(dev);

	ret = pxa3xx_nand_probe_dt(dev, info);
	if (ret)
		return ret;

	pdata = info->pdata;

	ret = alloc_nand_resource(dev, info);
	if (ret) {
		dev_err(dev, "alloc nand resource failed\n");
		return ret;
	}

	probe_success = 0;
	for (cs = 0; cs < pdata->num_cs; cs++) {
		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);

		/*
		 * The mtd name matches the one used by the 'mtdparts' kernel
		 * parameter. It must not be changed, or existing user mtd
		 * partition configurations would break.
		 */
		mtd->name = "pxa3xx_nand-0";
		mtd->dev = dev;
		info->cs = cs;
		ret = pxa3xx_nand_scan(mtd);
		if (ret) {
			dev_info(mtd->dev, "failed to scan nand at cs %d\n",
				 cs);
			continue;
		}

		if (nand_register(cs, mtd))
			continue;

		probe_success = 1;
	}

	if (!probe_success)
		return -ENODEV;

	return 0;
}

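/*
 * priv_auto reserves room for the pxa3xx_nand_info plus one host structure
 * per supported chip select, matching the layout that alloc_nand_resource()
 * carves out of the private data with pointer arithmetic.
 */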
U_BOOT_DRIVER(pxa3xx_nand) = {
	.name = "pxa3xx-nand",
	.id = UCLASS_MTD,
	.of_match = pxa3xx_nand_dt_ids,
	.probe = pxa3xx_nand_probe,
	.priv_auto = sizeof(struct pxa3xx_nand_info) +
		     sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE,
};

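/*
 * board_nand_init - legacy entry point that simply binds and probes the
 * driver model device; -ENODEV (no controller in the device tree) is not
 * treated as an error.
 */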
void board_nand_init(void)
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_driver(UCLASS_MTD,
					  DM_DRIVER_GET(pxa3xx_nand), &dev);
	if (ret && ret != -ENODEV) {
		/* dev is not valid on failure, so do not dereference it */
		pr_err("Failed to initialize pxa3xx-nand driver (error %d)\n",
		       ret);
	}
}