blob: 20d1aee7b3e15b3befcdec51a1c7cf03d32e8ae0 [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0
Stefan Roese75659da2015-07-23 10:26:16 +02002/*
Miquel Raynal1f1ae152018-08-16 17:30:07 +02003 * drivers/mtd/nand/raw/pxa3xx_nand.c
Stefan Roese75659da2015-07-23 10:26:16 +02004 *
5 * Copyright © 2005 Intel Corporation
6 * Copyright © 2006 Marvell International Ltd.
Stefan Roese75659da2015-07-23 10:26:16 +02007 */
8
9#include <common.h>
10#include <malloc.h>
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030011#include <fdtdec.h>
Stefan Roese75659da2015-07-23 10:26:16 +020012#include <nand.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060013#include <asm/global_data.h>
Simon Glass9bc15642020-02-03 07:36:16 -070014#include <dm/device_compat.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070015#include <dm/devres.h>
Simon Glass4dcacfc2020-05-10 11:40:13 -060016#include <linux/bitops.h>
Simon Glassc06c1be2020-05-10 11:40:08 -060017#include <linux/bug.h>
Simon Glassdbd79542020-05-10 11:40:11 -060018#include <linux/delay.h>
Simon Glassd66c5f72020-02-03 07:36:15 -070019#include <linux/err.h>
Masahiro Yamada56a931c2016-09-21 11:28:55 +090020#include <linux/errno.h>
Stefan Roese75659da2015-07-23 10:26:16 +020021#include <asm/io.h>
22#include <asm/arch/cpu.h>
23#include <linux/mtd/mtd.h>
Masahiro Yamada2b7a8732017-11-30 13:45:24 +090024#include <linux/mtd/rawnand.h>
Stefan Roese75659da2015-07-23 10:26:16 +020025#include <linux/types.h>
Shmuel Hazan58983222020-10-29 08:52:20 +020026#include <syscon.h>
27#include <regmap.h>
Shmuel Hazan759349e2020-10-29 08:52:18 +020028#include <dm/uclass.h>
29#include <dm/read.h>
Stefan Roese75659da2015-07-23 10:26:16 +020030
31#include "pxa3xx_nand.h"
32
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +030033DECLARE_GLOBAL_DATA_PTR;
34
Stefan Roese75659da2015-07-23 10:26:16 +020035#define TIMEOUT_DRAIN_FIFO 5 /* in ms */
36#define CHIP_DELAY_TIMEOUT 200
37#define NAND_STOP_DELAY 40
Stefan Roese75659da2015-07-23 10:26:16 +020038
39/*
40 * Define a buffer size for the initial command that detects the flash device:
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030041 * STATUS, READID and PARAM.
42 * ONFI param page is 256 bytes, and there are three redundant copies
43 * to be read. JEDEC param page is 512 bytes, and there are also three
44 * redundant copies to be read.
45 * Hence this buffer should be at least 512 x 3. Let's pick 2048.
Stefan Roese75659da2015-07-23 10:26:16 +020046 */
Ofer Heifetzfdf5b232018-08-29 11:56:00 +030047#define INIT_BUFFER_SIZE 2048
Stefan Roese75659da2015-07-23 10:26:16 +020048
49/* registers and bit definitions */
50#define NDCR (0x00) /* Control register */
51#define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */
52#define NDTR1CS0 (0x0C) /* Timing Parameter 1 for CS0 */
53#define NDSR (0x14) /* Status Register */
54#define NDPCR (0x18) /* Page Count Register */
55#define NDBDR0 (0x1C) /* Bad Block Register 0 */
56#define NDBDR1 (0x20) /* Bad Block Register 1 */
57#define NDECCCTRL (0x28) /* ECC control */
58#define NDDB (0x40) /* Data Buffer */
59#define NDCB0 (0x48) /* Command Buffer0 */
60#define NDCB1 (0x4C) /* Command Buffer1 */
61#define NDCB2 (0x50) /* Command Buffer2 */
62
63#define NDCR_SPARE_EN (0x1 << 31)
64#define NDCR_ECC_EN (0x1 << 30)
65#define NDCR_DMA_EN (0x1 << 29)
66#define NDCR_ND_RUN (0x1 << 28)
67#define NDCR_DWIDTH_C (0x1 << 27)
68#define NDCR_DWIDTH_M (0x1 << 26)
69#define NDCR_PAGE_SZ (0x1 << 24)
70#define NDCR_NCSX (0x1 << 23)
71#define NDCR_ND_MODE (0x3 << 21)
72#define NDCR_NAND_MODE (0x0)
73#define NDCR_CLR_PG_CNT (0x1 << 20)
Ofer Heifetz531816e2018-08-29 11:56:07 +030074#define NFCV1_NDCR_ARB_CNTL (0x1 << 19)
Stefan Roese75659da2015-07-23 10:26:16 +020075#define NDCR_RD_ID_CNT_MASK (0x7 << 16)
76#define NDCR_RD_ID_CNT(x) (((x) << 16) & NDCR_RD_ID_CNT_MASK)
77
78#define NDCR_RA_START (0x1 << 15)
79#define NDCR_PG_PER_BLK (0x1 << 14)
80#define NDCR_ND_ARB_EN (0x1 << 12)
81#define NDCR_INT_MASK (0xFFF)
82
83#define NDSR_MASK (0xfff)
84#define NDSR_ERR_CNT_OFF (16)
85#define NDSR_ERR_CNT_MASK (0x1f)
86#define NDSR_ERR_CNT(sr) ((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
87#define NDSR_RDY (0x1 << 12)
88#define NDSR_FLASH_RDY (0x1 << 11)
89#define NDSR_CS0_PAGED (0x1 << 10)
90#define NDSR_CS1_PAGED (0x1 << 9)
91#define NDSR_CS0_CMDD (0x1 << 8)
92#define NDSR_CS1_CMDD (0x1 << 7)
93#define NDSR_CS0_BBD (0x1 << 6)
94#define NDSR_CS1_BBD (0x1 << 5)
95#define NDSR_UNCORERR (0x1 << 4)
96#define NDSR_CORERR (0x1 << 3)
97#define NDSR_WRDREQ (0x1 << 2)
98#define NDSR_RDDREQ (0x1 << 1)
99#define NDSR_WRCMDREQ (0x1)
100
101#define NDCB0_LEN_OVRD (0x1 << 28)
102#define NDCB0_ST_ROW_EN (0x1 << 26)
103#define NDCB0_AUTO_RS (0x1 << 25)
104#define NDCB0_CSEL (0x1 << 24)
105#define NDCB0_EXT_CMD_TYPE_MASK (0x7 << 29)
106#define NDCB0_EXT_CMD_TYPE(x) (((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
107#define NDCB0_CMD_TYPE_MASK (0x7 << 21)
108#define NDCB0_CMD_TYPE(x) (((x) << 21) & NDCB0_CMD_TYPE_MASK)
109#define NDCB0_NC (0x1 << 20)
110#define NDCB0_DBC (0x1 << 19)
111#define NDCB0_ADDR_CYC_MASK (0x7 << 16)
112#define NDCB0_ADDR_CYC(x) (((x) << 16) & NDCB0_ADDR_CYC_MASK)
113#define NDCB0_CMD2_MASK (0xff << 8)
114#define NDCB0_CMD1_MASK (0xff)
115#define NDCB0_ADDR_CYC_SHIFT (16)
116
117#define EXT_CMD_TYPE_DISPATCH 6 /* Command dispatch */
118#define EXT_CMD_TYPE_NAKED_RW 5 /* Naked read or Naked write */
119#define EXT_CMD_TYPE_READ 4 /* Read */
120#define EXT_CMD_TYPE_DISP_WR 4 /* Command dispatch with write */
121#define EXT_CMD_TYPE_FINAL 3 /* Final command */
122#define EXT_CMD_TYPE_LAST_RW 1 /* Last naked read/write */
123#define EXT_CMD_TYPE_MONO 0 /* Monolithic read/write */
124
Shmuel Hazan58983222020-10-29 08:52:20 +0200125/* System control register and bit to enable NAND on some SoCs */
126#define GENCONF_SOC_DEVICE_MUX 0x208
127#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0)
128
Ofer Heifetz4a574aa2018-08-29 11:56:05 +0300129/*
130 * This should be large enough to read 'ONFI' and 'JEDEC'.
131 * Let's use 7 bytes, which is the maximum ID count supported
132 * by the controller (see NDCR_RD_ID_CNT_MASK).
133 */
134#define READ_ID_BYTES 7
135
Stefan Roese75659da2015-07-23 10:26:16 +0200136/* macros for registers read/write */
137#define nand_writel(info, off, val) \
138 writel((val), (info)->mmio_base + (off))
139
140#define nand_readl(info, off) \
141 readl((info)->mmio_base + (off))
142
/* error code and state */
enum {
	ERR_NONE	= 0,
	ERR_DMABUSERR	= -1,
	ERR_SENDCMD	= -2,
	ERR_UNCORERR	= -3,	/* uncorrectable ECC error (NDSR_UNCORERR) */
	ERR_BBERR	= -4,
	ERR_CORERR	= -5,	/* correctable ECC error (NDSR_CORERR) */
};

/* Driver state machine, tracked in pxa3xx_nand_info->state */
enum {
	STATE_IDLE = 0,
	STATE_PREPARED,		/* command prepared, not yet running */
	STATE_CMD_HANDLE,	/* loading the NDCBx command registers */
	STATE_DMA_READING,
	STATE_DMA_WRITING,
	STATE_DMA_DONE,
	STATE_PIO_READING,	/* draining read data from the FIFO */
	STATE_PIO_WRITING,	/* pushing write data into the FIFO */
	STATE_CMD_DONE,		/* controller signalled command done */
	STATE_READY,		/* flash signalled ready */
};

/* Supported NAND flash controller flavours */
enum pxa3xx_nand_variant {
	PXA3XX_NAND_VARIANT_PXA,	/* NFCv1 (PXA SoCs) */
	PXA3XX_NAND_VARIANT_ARMADA370,	/* NFCv2 (Armada 370/XP) */
	PXA3XX_NAND_VARIANT_ARMADA_8K,	/* NFCv2 behind a syscon mux */
};
171
/* Per-chip-select state: one instance per attached NAND chip */
struct pxa3xx_nand_host {
	struct nand_chip chip;		/* MTD/NAND core state for this chip */
	void *info_data;		/* back-pointer to struct pxa3xx_nand_info */

	/* page size of attached chip */
	int use_ecc;			/* enable HW ECC for this chip */
	int cs;				/* chip select this chip is wired to */

	/* calculated from pxa3xx_nand_flash data */
	unsigned int col_addr_cycles;	/* column address cycles to issue */
	unsigned int row_addr_cycles;	/* row address cycles to issue */
};
184
/* Controller-wide state, shared by all chip selects */
struct pxa3xx_nand_info {
	struct nand_hw_control controller;
	struct pxa3xx_nand_platform_data *pdata;

	struct clk *clk;
	void __iomem *mmio_base;	/* controller register base (nand_readl/writel) */
	unsigned long mmio_phys;
	int cmd_complete, dev_ready;	/* completion flags set by pxa3xx_nand_irq() */

	unsigned int buf_start;		/* current read offset into data_buff */
	unsigned int buf_count;		/* number of valid bytes in data_buff */
	unsigned int buf_size;		/* allocated size of data_buff */
	unsigned int data_buff_pos;	/* PIO position within the data area */
	unsigned int oob_buff_pos;	/* PIO position within the OOB area */

	unsigned char *data_buff;
	unsigned char *oob_buff;

	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
	unsigned int state;		/* one of the STATE_* values */

	/*
	 * This driver supports NFCv1 (as found in PXA SoC)
	 * and NFCv2 (as found in Armada 370/XP SoC).
	 */
	enum pxa3xx_nand_variant variant;

	int cs;				/* currently selected chip select */
	int use_ecc;			/* use HW ECC ? */
	int force_raw;			/* prevent use_ecc to be set */
	int ecc_bch;			/* using BCH ECC? */
	int use_spare;			/* use spare ? */
	int need_wait;

	/* Amount of real data per full chunk */
	unsigned int chunk_size;

	/* Amount of spare data per full chunk */
	unsigned int spare_size;

	/* Number of full chunks (i.e chunk_size + spare_size) */
	unsigned int nfullchunks;

	/*
	 * Total number of chunks. If equal to nfullchunks, then there
	 * are only full chunks. Otherwise, there is one last chunk of
	 * size (last_chunk_size + last_spare_size)
	 */
	unsigned int ntotalchunks;

	/* Amount of real data in the last chunk */
	unsigned int last_chunk_size;

	/* Amount of spare data in the last chunk */
	unsigned int last_spare_size;

	unsigned int ecc_size;		/* ECC bytes per chunk */
	unsigned int ecc_err_cnt;	/* corrected bitflips in the last chunk */
	unsigned int max_bitflips;	/* max bitflips over the whole page */
	int retcode;			/* one of the ERR_* values */

	/*
	 * Variables only valid during command
	 * execution. step_chunk_size and step_spare_size is the
	 * amount of real data and spare data in the current
	 * chunk. cur_chunk is the current chunk being
	 * read/programmed.
	 */
	unsigned int step_chunk_size;
	unsigned int step_spare_size;
	unsigned int cur_chunk;

	/* cached register value */
	uint32_t reg_ndcr;
	uint32_t ndtr0cs0;
	uint32_t ndtr1cs0;

	/* generated NDCBx register values */
	uint32_t ndcb0;
	uint32_t ndcb1;
	uint32_t ndcb2;
	uint32_t ndcb3;
};
268
/* Fallback timing sets, referenced from builtin_flash_types[] when the
 * chip does not advertise an ONFI async timing mode. */
static struct pxa3xx_nand_timing timing[] = {
	/*
	 * tCH	Enable signal hold time
	 * tCS	Enable signal setup time
	 * tWH	ND_nWE high duration
	 * tWP	ND_nWE pulse time
	 * tRH	ND_nRE high duration
	 * tRP	ND_nRE pulse width
	 * tR	ND_nWE high to ND_nRE low for read
	 * tWHR	ND_nWE high to ND_nRE low for status read
	 * tAR	ND_ALE low to ND_nRE low delay
	 */
	/*ch  cs  wh   wp   rh   rp     r    whr  ar */
	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
	{  5, 20, 10,  12, 10,  12, 25000,  60, 10, },
};
288
/*
 * Known chips, matched against the two READID bytes when ONFI timing
 * detection is unavailable (see pxa3xx_nand_init_timings()).
 *
 * chip_id
 * flash_width	Width of Flash memory (DWIDTH_M)
 * dfc_width	Width of flash controller(DWIDTH_C)
 * *timing	entry in timing[] above
 * http://www.linux-mtd.infradead.org/nand-data/nanddata.html
 */
static struct pxa3xx_nand_flash builtin_flash_types[] = {
	{ 0x46ec, 16, 16, &timing[1] },
	{ 0xdaec,  8,  8, &timing[1] },
	{ 0xd7ec,  8,  8, &timing[1] },
	{ 0xa12c,  8,  8, &timing[2] },
	{ 0xb12c, 16, 16, &timing[2] },
	{ 0xdc2c,  8,  8, &timing[2] },
	{ 0xcc2c, 16, 16, &timing[2] },
	{ 0xba20, 16, 16, &timing[3] },
	{ 0xda98,  8,  8, &timing[4] },
};
307
#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
/* OOB signature patterns identifying the main and mirror bad-block tables */
static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };

/* Main BBT descriptor: table lives in the last blocks of the chip */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_pattern
};

/* Mirror BBT descriptor: same layout, reversed signature pattern */
static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION,
	.offs =	8,
	.len = 6,
	.veroffs = 14,
	.maxblocks = 8,		/* Last 8 blocks in each chip */
	.pattern = bbt_mirror_pattern
};
#endif
Stefan Roese75659da2015-07-23 10:26:16 +0200332
/* HW ECC OOB layout: 2KB page, 4-bit BCH */
static struct nand_ecclayout ecc_layout_2KB_bch4bit = {
	.eccbytes = 32,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { {2, 30} }
};

/* HW ECC OOB layout: 2KB page, 8-bit BCH */
static struct nand_ecclayout ecc_layout_2KB_bch8bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		64, 65, 66, 67, 68, 69, 70, 71,
		72, 73, 74, 75, 76, 77, 78, 79,
		80, 81, 82, 83, 84, 85, 86, 87,
		88, 89, 90, 91, 92, 93, 94, 95},
	.oobfree = { {1, 4}, {6, 26} }
};

/* HW ECC OOB layout: 4KB page, 4-bit BCH */
static struct nand_ecclayout ecc_layout_4KB_bch4bit = {
	.eccbytes = 64,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,
		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127},
	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {6, 26}, { 64, 32} }
};

/* HW ECC OOB layout: 8KB page, 4-bit BCH */
static struct nand_ecclayout ecc_layout_8KB_bch4bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63,

		96, 97, 98, 99, 100, 101, 102, 103,
		104, 105, 106, 107, 108, 109, 110, 111,
		112, 113, 114, 115, 116, 117, 118, 119,
		120, 121, 122, 123, 124, 125, 126, 127,

		160, 161, 162, 163, 164, 165, 166, 167,
		168, 169, 170, 171, 172, 173, 174, 175,
		176, 177, 178, 179, 180, 181, 182, 183,
		184, 185, 186, 187, 188, 189, 190, 191,

		224, 225, 226, 227, 228, 229, 230, 231,
		232, 233, 234, 235, 236, 237, 238, 239,
		240, 241, 242, 243, 244, 245, 246, 247,
		248, 249, 250, 251, 252, 253, 254, 255},

	/* Bootrom looks in bytes 0 & 5 for bad blocks */
	.oobfree = { {1, 4}, {6, 26}, { 64, 32}, {128, 32}, {192, 32} }
};

/* HW ECC OOB layout: 4KB page, 8-bit BCH (no free OOB left for clients) */
static struct nand_ecclayout ecc_layout_4KB_bch8bit = {
	.eccbytes = 128,
	.eccpos = {
		32, 33, 34, 35, 36, 37, 38, 39,
		40, 41, 42, 43, 44, 45, 46, 47,
		48, 49, 50, 51, 52, 53, 54, 55,
		56, 57, 58, 59, 60, 61, 62, 63},
	.oobfree = { }
};

/* HW ECC OOB layout: 8KB page, 8-bit BCH */
static struct nand_ecclayout ecc_layout_8KB_bch8bit = {
	.eccbytes = 256,
	.eccpos = {},
	/* HW ECC handles all ECC data and all spare area is free for OOB */
	.oobfree = {{0, 160} }
};
415
Stefan Roese75659da2015-07-23 10:26:16 +0200416#define NDTR0_tCH(c) (min((c), 7) << 19)
417#define NDTR0_tCS(c) (min((c), 7) << 16)
418#define NDTR0_tWH(c) (min((c), 7) << 11)
419#define NDTR0_tWP(c) (min((c), 7) << 8)
420#define NDTR0_tRH(c) (min((c), 7) << 3)
421#define NDTR0_tRP(c) (min((c), 7) << 0)
422
423#define NDTR1_tR(c) (min((c), 65535) << 16)
424#define NDTR1_tWHR(c) (min((c), 15) << 4)
425#define NDTR1_tAR(c) (min((c), 15) << 0)
426
427/* convert nano-seconds to nand flash controller clock cycles */
428#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000)
429
/* Device-tree match table; .data carries the enum pxa3xx_nand_variant */
static const struct udevice_id pxa3xx_nand_dt_ids[] = {
	{
		.compatible = "marvell,mvebu-pxa3xx-nand",
		.data = PXA3XX_NAND_VARIANT_ARMADA370,
	},
	{
		.compatible = "marvell,armada-8k-nand-controller",
		.data = PXA3XX_NAND_VARIANT_ARMADA_8K,
	},
	{}
};
441
/*
 * Return the controller variant for @dev, i.e. the .data value of the
 * pxa3xx_nand_dt_ids[] entry that matched at bind time.
 */
static enum pxa3xx_nand_variant pxa3xx_nand_get_variant(struct udevice *dev)
{
	return dev_get_driver_data(dev);
}
446
447static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
448 const struct pxa3xx_nand_timing *t)
449{
450 struct pxa3xx_nand_info *info = host->info_data;
451 unsigned long nand_clk = mvebu_get_nand_clock();
452 uint32_t ndtr0, ndtr1;
453
454 ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
455 NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
456 NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
457 NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
458 NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
459 NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
460
461 ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
462 NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
463 NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
464
465 info->ndtr0cs0 = ndtr0;
466 info->ndtr1cs0 = ndtr1;
467 nand_writel(info, NDTR0CS0, ndtr0);
468 nand_writel(info, NDTR1CS0, ndtr1);
469}
470
471static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
472 const struct nand_sdr_timings *t)
473{
474 struct pxa3xx_nand_info *info = host->info_data;
475 struct nand_chip *chip = &host->chip;
476 unsigned long nand_clk = mvebu_get_nand_clock();
477 uint32_t ndtr0, ndtr1;
478
479 u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
480 u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
481 u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300482 u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200483 u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
Ofer Heifetz8f8d4582018-08-29 11:56:02 +0300484 u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
Stefan Roese75659da2015-07-23 10:26:16 +0200485 u32 tR = chip->chip_delay * 1000;
486 u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
487 u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
488
489 /* fallback to a default value if tR = 0 */
490 if (!tR)
491 tR = 20000;
492
493 ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
494 NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
495 NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
496 NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
497 NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
498 NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
499
500 ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
501 NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
502 NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
503
504 info->ndtr0cs0 = ndtr0;
505 info->ndtr1cs0 = ndtr1;
506 nand_writel(info, NDTR0CS0, ndtr0);
507 nand_writel(info, NDTR1CS0, ndtr1);
508}
509
/*
 * Initialize controller timings for the attached chip.
 *
 * Prefer the ONFI async SDR timing mode advertised by the chip; if the
 * chip is not ONFI, fall back to the builtin_flash_types[] table keyed
 * by the first two READID bytes.
 *
 * Returns 0 on success, -EINVAL if the chip matches neither source, or
 * the PTR_ERR() from the ONFI timing-mode lookup.
 */
static int pxa3xx_nand_init_timings(struct pxa3xx_nand_host *host)
{
	const struct nand_sdr_timings *timings;
	struct nand_chip *chip = &host->chip;
	struct pxa3xx_nand_info *info = host->info_data;
	const struct pxa3xx_nand_flash *f = NULL;
	struct mtd_info *mtd = nand_to_mtd(&host->chip);
	int mode, id, ntypes, i;

	mode = onfi_get_async_timing_mode(chip);
	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
		/* Non-ONFI chip: identify by its two READID bytes */
		ntypes = ARRAY_SIZE(builtin_flash_types);

		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);

		id = chip->read_byte(mtd);
		id |= chip->read_byte(mtd) << 0x8;

		for (i = 0; i < ntypes; i++) {
			f = &builtin_flash_types[i];

			if (f->chip_id == id)
				break;
		}

		if (i == ntypes) {
			dev_err(mtd->dev, "Error: timings not found\n");
			return -EINVAL;
		}

		pxa3xx_nand_set_timing(host, f->timing);

		/* Mirror the chip's bus width into the controller config */
		if (f->flash_width == 16) {
			info->reg_ndcr |= NDCR_DWIDTH_M;
			chip->options |= NAND_BUSWIDTH_16;
		}

		info->reg_ndcr |= (f->dfc_width == 16) ? NDCR_DWIDTH_C : 0;
	} else {
		/* Use the highest async timing mode the chip advertises */
		mode = fls(mode) - 1;
		if (mode < 0)
			mode = 0;

		timings = onfi_async_timing_mode_to_sdr_timings(mode);
		if (IS_ERR(timings))
			return PTR_ERR(timings);

		pxa3xx_nand_set_sdr_timing(host, timings);
	}

	return 0;
}
562
Stefan Roese75659da2015-07-23 10:26:16 +0200563/**
Vagrant Cascadianbeb288b2015-11-24 14:46:24 -0800564 * NOTE: it is a must to set ND_RUN first, then write
Stefan Roese75659da2015-07-23 10:26:16 +0200565 * command buffer, otherwise, it does not work.
566 * We enable all the interrupt at the same time, and
567 * let pxa3xx_nand_irq to handle all logic.
568 */
569static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
570{
571 uint32_t ndcr;
572
573 ndcr = info->reg_ndcr;
574
575 if (info->use_ecc) {
576 ndcr |= NDCR_ECC_EN;
577 if (info->ecc_bch)
578 nand_writel(info, NDECCCTRL, 0x1);
579 } else {
580 ndcr &= ~NDCR_ECC_EN;
581 if (info->ecc_bch)
582 nand_writel(info, NDECCCTRL, 0x0);
583 }
584
585 ndcr &= ~NDCR_DMA_EN;
586
587 if (info->use_spare)
588 ndcr |= NDCR_SPARE_EN;
589 else
590 ndcr &= ~NDCR_SPARE_EN;
591
592 ndcr |= NDCR_ND_RUN;
593
594 /* clear status bits and run */
Stefan Roese75659da2015-07-23 10:26:16 +0200595 nand_writel(info, NDSR, NDSR_MASK);
Ofer Heifetzd92d8992018-08-29 11:56:03 +0300596 nand_writel(info, NDCR, 0);
Stefan Roese75659da2015-07-23 10:26:16 +0200597 nand_writel(info, NDCR, ndcr);
598}
599
600static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
601{
602 uint32_t ndcr;
603
604 ndcr = nand_readl(info, NDCR);
605 nand_writel(info, NDCR, ndcr | int_mask);
606}
607
/*
 * Read @len 32-bit words from the controller data buffer (NDDB) into
 * @data by PIO. With BCH ECC enabled (and not in raw mode) the FIFO
 * must be drained in bursts of eight words with an RDDREQ poll in
 * between, per the datasheet note below.
 */
static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
{
	if (info->ecc_bch && !info->force_raw) {
		u32 ts;

		/*
		 * According to the datasheet, when reading from NDDB
		 * with BCH enabled, after each 32 bytes reads, we
		 * have to make sure that the NDSR.RDDREQ bit is set.
		 *
		 * Drain the FIFO 8 32 bits reads at a time, and skip
		 * the polling on the last read.
		 */
		while (len > 8) {
			readsl(info->mmio_base + NDDB, data, 8);

			/* Bounded poll for RDDREQ before the next burst */
			ts = get_timer(0);
			while (!(nand_readl(info, NDSR) & NDSR_RDDREQ)) {
				if (get_timer(ts) > TIMEOUT_DRAIN_FIFO) {
					dev_err(info->controller.active->mtd.dev,
						"Timeout on RDDREQ while draining the FIFO\n");
					return;
				}
			}

			/* 8 words == 32 bytes consumed */
			data += 32;
			len -= 8;
		}
	}

	/* Remaining words (or the whole transfer when not BCH) */
	readsl(info->mmio_base + NDDB, data, len);
}
640
/*
 * Transfer the current chunk between data_buff/oob_buff and the
 * controller FIFO (NDDB) by PIO, based on the PIO read/write state,
 * then advance the buffer positions for the next chunk.
 */
static void handle_data_pio(struct pxa3xx_nand_info *info)
{
	int data_len = info->step_chunk_size;

	/*
	 * In raw mode, include the spare area and the ECC bytes that are not
	 * consumed by the controller in the data section. Do not reorganize
	 * here, do it in the ->read_page_raw() handler instead.
	 */
	if (info->force_raw)
		data_len += info->step_spare_size + info->ecc_size;

	switch (info->state) {
	case STATE_PIO_WRITING:
		if (info->step_chunk_size)
			writesl(info->mmio_base + NDDB,
				info->data_buff + info->data_buff_pos,
				DIV_ROUND_UP(data_len, 4));

		if (info->step_spare_size)
			writesl(info->mmio_base + NDDB,
				info->oob_buff + info->oob_buff_pos,
				DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	case STATE_PIO_READING:
		if (data_len)
			drain_fifo(info,
				   info->data_buff + info->data_buff_pos,
				   DIV_ROUND_UP(data_len, 4));

		/* Raw read: spare + ECC were already folded into data_len */
		if (info->force_raw)
			break;

		if (info->step_spare_size)
			drain_fifo(info,
				   info->oob_buff + info->oob_buff_pos,
				   DIV_ROUND_UP(info->step_spare_size, 4));
		break;
	default:
		dev_err(info->controller.active->mtd.dev,
			"%s: invalid state %d\n", __func__, info->state);
		BUG();
	}

	/* Update buffer pointers for multi-page read/write */
	info->data_buff_pos += data_len;
	info->oob_buff_pos += info->step_spare_size;
}
689
/*
 * "Threaded" half of the interrupt handler (called synchronously in
 * U-Boot): perform the PIO transfer for the pending data request, then
 * acknowledge the read/write data-request status bits.
 */
static void pxa3xx_nand_irq_thread(struct pxa3xx_nand_info *info)
{
	handle_data_pio(info);

	info->state = STATE_CMD_DONE;
	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
}
697
/*
 * Main status/interrupt handler.
 *
 * Decodes NDSR: records ECC results (retcode, ecc_err_cnt,
 * max_bitflips), dispatches PIO data transfers, loads the NDCBx
 * command-buffer registers when the controller requests them, and sets
 * the cmd_complete/dev_ready flags for waiters. Returns IRQ_HANDLED
 * (or 0 after servicing a data request).
 */
static irqreturn_t pxa3xx_nand_irq(struct pxa3xx_nand_info *info)
{
	unsigned int status, is_completed = 0, is_ready = 0;
	unsigned int ready, cmd_done;
	irqreturn_t ret = IRQ_HANDLED;

	/* CS0 and CS1 report ready / command-done in different NDSR bits */
	if (info->cs == 0) {
		ready = NDSR_FLASH_RDY;
		cmd_done = NDSR_CS0_CMDD;
	} else {
		ready = NDSR_RDY;
		cmd_done = NDSR_CS1_CMDD;
	}

	/* TODO - find out why we need the delay during write operation. */
	ndelay(1);

	status = nand_readl(info, NDSR);

	if (status & NDSR_UNCORERR)
		info->retcode = ERR_UNCORERR;
	if (status & NDSR_CORERR) {
		info->retcode = ERR_CORERR;
		/* Armada NFCv2 with BCH reports a correction count in NDSR */
		if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		     info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) &&
		    info->ecc_bch)
			info->ecc_err_cnt = NDSR_ERR_CNT(status);
		else
			info->ecc_err_cnt = 1;

		/*
		 * Each chunk composing a page is corrected independently,
		 * and we need to store maximum number of corrected bitflips
		 * to return it to the MTD layer in ecc.read_page().
		 */
		info->max_bitflips = max_t(unsigned int,
					   info->max_bitflips,
					   info->ecc_err_cnt);
	}
	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
		info->state = (status & NDSR_RDDREQ) ?
			STATE_PIO_READING : STATE_PIO_WRITING;
		/* Call the IRQ thread in U-Boot directly */
		pxa3xx_nand_irq_thread(info);
		return 0;
	}
	if (status & cmd_done) {
		info->state = STATE_CMD_DONE;
		is_completed = 1;
	}
	if (status & ready) {
		info->state = STATE_READY;
		is_ready = 1;
	}

	/*
	 * Clear all status bit before issuing the next command, which
	 * can and will alter the status bits and will deserve a new
	 * interrupt on its own. This lets the controller exit the IRQ
	 */
	nand_writel(info, NDSR, status);

	if (status & NDSR_WRCMDREQ) {
		status &= ~NDSR_WRCMDREQ;
		info->state = STATE_CMD_HANDLE;

		/*
		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
		 * must be loaded by writing directly either 12 or 16
		 * bytes directly to NDCB0, four bytes at a time.
		 *
		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
		 * but each NDCBx register can be read.
		 */
		nand_writel(info, NDCB0, info->ndcb0);
		nand_writel(info, NDCB0, info->ndcb1);
		nand_writel(info, NDCB0, info->ndcb2);

		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
			nand_writel(info, NDCB0, info->ndcb3);
	}

	if (is_completed)
		info->cmd_complete = 1;
	if (is_ready)
		info->dev_ready = 1;

	return ret;
}
789
/*
 * Return 1 if the first @len bytes of @buf are all 0xff (erased flash),
 * 0 otherwise. A zero-length buffer counts as blank.
 */
static inline int is_buf_blank(uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (buf[i] != 0xff)
			return 0;
	}

	return 1;
}
797
798static void set_command_address(struct pxa3xx_nand_info *info,
799 unsigned int page_size, uint16_t column, int page_addr)
800{
801 /* small page addr setting */
Konstantin Porotchkin06f9b6b2018-08-29 11:56:15 +0300802 if (page_size < info->chunk_size) {
Stefan Roese75659da2015-07-23 10:26:16 +0200803 info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
804 | (column & 0xFF);
805
806 info->ndcb2 = 0;
807 } else {
808 info->ndcb1 = ((page_addr & 0xFFFF) << 16)
809 | (column & 0xFFFF);
810
811 if (page_addr & 0xFF0000)
812 info->ndcb2 = (page_addr & 0xFF0000) >> 16;
813 else
814 info->ndcb2 = 0;
815 }
816}
817
/*
 * Reset the per-command bookkeeping in @info and apply per-command
 * defaults (HW ECC for page reads/writes unless raw mode is forced,
 * spare area disabled for PARAM page reads) before @command is issued.
 */
static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
{
	struct pxa3xx_nand_host *host = info->host[info->cs];
	struct mtd_info *mtd = nand_to_mtd(&host->chip);

	/* reset data and oob column point to handle data */
	info->buf_start = 0;
	info->buf_count = 0;
	info->data_buff_pos = 0;
	info->oob_buff_pos = 0;
	info->step_chunk_size = 0;
	info->step_spare_size = 0;
	info->cur_chunk = 0;
	info->use_ecc = 0;
	info->use_spare = 1;
	info->retcode = ERR_NONE;
	info->ecc_err_cnt = 0;
	info->ndcb3 = 0;
	info->need_wait = 0;

	switch (command) {
	case NAND_CMD_READ0:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
		/* raw mode bypasses the ECC engine entirely */
		if (!info->force_raw)
			info->use_ecc = 1;
		break;
	case NAND_CMD_PARAM:
		info->use_spare = 0;
		break;
	default:
		info->ndcb1 = 0;
		info->ndcb2 = 0;
		break;
	}

	/*
	 * If we are about to issue a read command, or about to set
	 * the write address, then clean the data buffer.
	 */
	if (command == NAND_CMD_READ0 ||
	    command == NAND_CMD_READOOB ||
	    command == NAND_CMD_SEQIN) {
		info->buf_count = mtd->writesize + mtd->oobsize;
		memset(info->data_buff, 0xFF, info->buf_count);
	}
}
865
/*
 * prepare_set_command() - build the NDCB0..NDCB3 command-buffer words for
 * @command before it is pushed to the controller.
 *
 * @info:         controller state; ndcb0..ndcb3 and the step_* fields are
 *                written here as output
 * @ext_cmd_type: NDCB0 'extended command type' used by the chunked
 *                (split page) read/write sequences
 * @column:       byte (or word, for x16) offset within the page
 * @page_addr:    page (row) address
 *
 * Returns 1 when the prepared command must actually be started on the
 * controller, 0 when nothing is to be executed (e.g. plain SEQIN on small
 * pages, ERASE2, blank page program).
 */
static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
		int ext_cmd_type, uint16_t column, int page_addr)
{
	int addr_cycle, exec_cmd;
	struct pxa3xx_nand_host *host;
	struct mtd_info *mtd;

	host = info->host[info->cs];
	mtd = nand_to_mtd(&host->chip);
	addr_cycle = 0;
	exec_cmd = 1;

	/* Route the command to the right chip-select */
	if (info->cs != 0)
		info->ndcb0 = NDCB0_CSEL;
	else
		info->ndcb0 = 0;

	/*
	 * SEQIN on its own only latches the address; the transfer is issued
	 * later by PAGEPROG, so by default nothing is executed here.
	 */
	if (command == NAND_CMD_SEQIN)
		exec_cmd = 0;

	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
				    + host->col_addr_cycles);

	switch (command) {
	case NAND_CMD_READOOB:
	case NAND_CMD_READ0:
		info->buf_start = column;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| addr_cycle
				| NAND_CMD_READ0;

		/* OOB read is a full-page read with the offset past the data */
		if (command == NAND_CMD_READOOB)
			info->buf_start += mtd->writesize;

		/* Size of the chunk transferred by this step of the sequence */
		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/*
		 * Multiple page read needs an 'extended command type' field,
		 * which is either naked-read or last-read according to the
		 * state.
		 */
		if (info->force_raw) {
			/* Raw read: ECC bytes are transferred as well */
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) |
				       NDCB0_LEN_OVRD |
				       NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size + info->ecc_size;
		} else if (mtd->writesize == info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
		} else if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				info->step_spare_size;
		}

		set_command_address(info, mtd->writesize, column, page_addr);
		break;

	case NAND_CMD_SEQIN:

		info->buf_start = column;
		set_command_address(info, mtd->writesize, 0, page_addr);

		/*
		 * Multiple page programming needs to execute the initial
		 * SEQIN command that sets the page address.
		 */
		if (mtd->writesize > info->chunk_size) {
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
				| addr_cycle
				| command;
			exec_cmd = 1;
		}
		break;

	case NAND_CMD_PAGEPROG:
		/* Skip programming an all-0xFF buffer entirely */
		if (is_buf_blank(info->data_buff,
				 (mtd->writesize + mtd->oobsize))) {
			exec_cmd = 0;
			break;
		}

		if (info->cur_chunk < info->nfullchunks) {
			info->step_chunk_size = info->chunk_size;
			info->step_spare_size = info->spare_size;
		} else {
			info->step_chunk_size = info->last_chunk_size;
			info->step_spare_size = info->last_spare_size;
		}

		/* Second command setting for large pages */
		if (mtd->writesize > info->chunk_size) {
			/*
			 * Multiple page write uses the 'extended command'
			 * field. This can be used to issue a command dispatch
			 * or a naked-write depending on the current stage.
			 */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_LEN_OVRD
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
			info->ndcb3 = info->step_chunk_size +
				      info->step_spare_size;

			/*
			 * This is the command dispatch that completes a chunked
			 * page program operation.
			 */
			if (info->cur_chunk == info->ntotalchunks) {
				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
					| command;
				info->ndcb1 = 0;
				info->ndcb2 = 0;
				info->ndcb3 = 0;
			}
		} else {
			/* Small pages: single SEQIN+PAGEPROG double-byte cmd */
			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
					| NDCB0_AUTO_RS
					| NDCB0_ST_ROW_EN
					| NDCB0_DBC
					| (NAND_CMD_PAGEPROG << 8)
					| NAND_CMD_SEQIN
					| addr_cycle;
		}
		break;

	case NAND_CMD_PARAM:
		/* ONFI parameter page read, length overridden via NDCB3 */
		info->buf_count = INIT_BUFFER_SIZE;
		info->ndcb0 |= NDCB0_CMD_TYPE(0)
				| NDCB0_ADDR_CYC(1)
				| NDCB0_LEN_OVRD
				| command;
		info->ndcb1 = (column & 0xFF);
		info->ndcb3 = INIT_BUFFER_SIZE;
		info->step_chunk_size = INIT_BUFFER_SIZE;
		break;

	case NAND_CMD_READID:
		info->buf_count = READ_ID_BYTES;
		info->ndcb0 |= NDCB0_CMD_TYPE(3)
				| NDCB0_ADDR_CYC(1)
				| command;
		info->ndcb1 = (column & 0xFF);

		/* Controller transfers ID/status in 8-byte FIFO units */
		info->step_chunk_size = 8;
		break;
	case NAND_CMD_STATUS:
		info->buf_count = 1;
		info->ndcb0 |= NDCB0_CMD_TYPE(4)
				| NDCB0_ADDR_CYC(1)
				| command;

		info->step_chunk_size = 8;
		break;

	case NAND_CMD_ERASE1:
		/* ERASE1 + ERASE2 issued back-to-back as a double-byte cmd */
		info->ndcb0 |= NDCB0_CMD_TYPE(2)
				| NDCB0_AUTO_RS
				| NDCB0_ADDR_CYC(3)
				| NDCB0_DBC
				| (NAND_CMD_ERASE2 << 8)
				| NAND_CMD_ERASE1;
		info->ndcb1 = page_addr;
		info->ndcb2 = 0;

		break;
	case NAND_CMD_RESET:
		info->ndcb0 |= NDCB0_CMD_TYPE(5)
				| command;

		break;

	case NAND_CMD_ERASE2:
		/* Already folded into the ERASE1 double-byte command above */
		exec_cmd = 0;
		break;

	default:
		exec_cmd = 0;
		dev_err(mtd->dev, "non-supported command %x\n",
			command);
		break;
	}

	return exec_cmd;
}
1060
1061static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
1062 int column, int page_addr)
1063{
Scott Wood17fed142016-05-30 13:57:56 -05001064 struct nand_chip *chip = mtd_to_nand(mtd);
1065 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001066 struct pxa3xx_nand_info *info = host->info_data;
1067 int exec_cmd;
1068
1069 /*
1070 * if this is a x16 device ,then convert the input
1071 * "byte" address into a "word" address appropriate
1072 * for indexing a word-oriented device
1073 */
1074 if (info->reg_ndcr & NDCR_DWIDTH_M)
1075 column /= 2;
1076
1077 /*
1078 * There may be different NAND chip hooked to
1079 * different chip select, so check whether
1080 * chip select has been changed, if yes, reset the timing
1081 */
1082 if (info->cs != host->cs) {
1083 info->cs = host->cs;
1084 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1085 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1086 }
1087
1088 prepare_start_command(info, command);
1089
1090 info->state = STATE_PREPARED;
1091 exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
1092
1093 if (exec_cmd) {
1094 u32 ts;
1095
1096 info->cmd_complete = 0;
1097 info->dev_ready = 0;
1098 info->need_wait = 1;
1099 pxa3xx_nand_start(info);
1100
1101 ts = get_timer(0);
1102 while (1) {
1103 u32 status;
1104
1105 status = nand_readl(info, NDSR);
1106 if (status)
1107 pxa3xx_nand_irq(info);
1108
1109 if (info->cmd_complete)
1110 break;
1111
1112 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
Sean Andersonc6302f02020-09-15 10:44:40 -04001113 dev_err(mtd->dev, "Wait timeout!!!\n");
Stefan Roese75659da2015-07-23 10:26:16 +02001114 return;
1115 }
1116 }
1117 }
1118 info->state = STATE_IDLE;
1119}
1120
1121static void nand_cmdfunc_extended(struct mtd_info *mtd,
1122 const unsigned command,
1123 int column, int page_addr)
1124{
Scott Wood17fed142016-05-30 13:57:56 -05001125 struct nand_chip *chip = mtd_to_nand(mtd);
1126 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001127 struct pxa3xx_nand_info *info = host->info_data;
1128 int exec_cmd, ext_cmd_type;
1129
1130 /*
1131 * if this is a x16 device then convert the input
1132 * "byte" address into a "word" address appropriate
1133 * for indexing a word-oriented device
1134 */
1135 if (info->reg_ndcr & NDCR_DWIDTH_M)
1136 column /= 2;
1137
1138 /*
1139 * There may be different NAND chip hooked to
1140 * different chip select, so check whether
1141 * chip select has been changed, if yes, reset the timing
1142 */
1143 if (info->cs != host->cs) {
1144 info->cs = host->cs;
1145 nand_writel(info, NDTR0CS0, info->ndtr0cs0);
1146 nand_writel(info, NDTR1CS0, info->ndtr1cs0);
1147 }
1148
1149 /* Select the extended command for the first command */
1150 switch (command) {
1151 case NAND_CMD_READ0:
1152 case NAND_CMD_READOOB:
1153 ext_cmd_type = EXT_CMD_TYPE_MONO;
1154 break;
1155 case NAND_CMD_SEQIN:
1156 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1157 break;
1158 case NAND_CMD_PAGEPROG:
1159 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1160 break;
1161 default:
1162 ext_cmd_type = 0;
1163 break;
1164 }
1165
1166 prepare_start_command(info, command);
1167
1168 /*
1169 * Prepare the "is ready" completion before starting a command
1170 * transaction sequence. If the command is not executed the
1171 * completion will be completed, see below.
1172 *
1173 * We can do that inside the loop because the command variable
1174 * is invariant and thus so is the exec_cmd.
1175 */
1176 info->need_wait = 1;
1177 info->dev_ready = 0;
1178
1179 do {
1180 u32 ts;
1181
1182 info->state = STATE_PREPARED;
1183 exec_cmd = prepare_set_command(info, command, ext_cmd_type,
1184 column, page_addr);
1185 if (!exec_cmd) {
1186 info->need_wait = 0;
1187 info->dev_ready = 1;
1188 break;
1189 }
1190
1191 info->cmd_complete = 0;
1192 pxa3xx_nand_start(info);
1193
1194 ts = get_timer(0);
1195 while (1) {
1196 u32 status;
1197
1198 status = nand_readl(info, NDSR);
1199 if (status)
1200 pxa3xx_nand_irq(info);
1201
1202 if (info->cmd_complete)
1203 break;
1204
1205 if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
Sean Andersonc6302f02020-09-15 10:44:40 -04001206 dev_err(mtd->dev, "Wait timeout!!!\n");
Stefan Roese75659da2015-07-23 10:26:16 +02001207 return;
1208 }
1209 }
1210
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001211 /* Only a few commands need several steps */
1212 if (command != NAND_CMD_PAGEPROG &&
1213 command != NAND_CMD_READ0 &&
1214 command != NAND_CMD_READOOB)
1215 break;
1216
1217 info->cur_chunk++;
1218
Stefan Roese75659da2015-07-23 10:26:16 +02001219 /* Check if the sequence is complete */
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001220 if (info->cur_chunk == info->ntotalchunks &&
1221 command != NAND_CMD_PAGEPROG)
Stefan Roese75659da2015-07-23 10:26:16 +02001222 break;
1223
1224 /*
1225 * After a splitted program command sequence has issued
1226 * the command dispatch, the command sequence is complete.
1227 */
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001228 if (info->cur_chunk == (info->ntotalchunks + 1) &&
Stefan Roese75659da2015-07-23 10:26:16 +02001229 command == NAND_CMD_PAGEPROG &&
1230 ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
1231 break;
1232
1233 if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
1234 /* Last read: issue a 'last naked read' */
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001235 if (info->cur_chunk == info->ntotalchunks - 1)
Stefan Roese75659da2015-07-23 10:26:16 +02001236 ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
1237 else
1238 ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
1239
1240 /*
1241 * If a splitted program command has no more data to transfer,
1242 * the command dispatch must be issued to complete.
1243 */
1244 } else if (command == NAND_CMD_PAGEPROG &&
Ofer Heifetz191b5be2018-08-29 11:56:09 +03001245 info->cur_chunk == info->ntotalchunks) {
Stefan Roese75659da2015-07-23 10:26:16 +02001246 ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
1247 }
1248 } while (1);
1249
1250 info->state = STATE_IDLE;
1251}
1252
1253static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
Scott Wood46e13102016-05-30 13:57:57 -05001254 struct nand_chip *chip, const uint8_t *buf, int oob_required,
1255 int page)
Stefan Roese75659da2015-07-23 10:26:16 +02001256{
1257 chip->write_buf(mtd, buf, mtd->writesize);
1258 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1259
1260 return 0;
1261}
1262
/*
 * ->ecc.read_page hook: copy the page and OOB out of the driver buffer
 * (the controller already ran ECC during the READ0 command) and translate
 * the controller's ECC result into mtd->ecc_stats.
 *
 * Returns the maximum number of bitflips seen so far (info->max_bitflips),
 * as required by the MTD read path.
 */
static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
		struct nand_chip *chip, uint8_t *buf, int oob_required,
		int page)
{
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	int bf;

	chip->read_buf(mtd, buf, mtd->writesize);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	if (info->retcode == ERR_CORERR && info->use_ecc) {
		/* Controller corrected some bitflips: account for them */
		mtd->ecc_stats.corrected += info->ecc_err_cnt;

	} else if (info->retcode == ERR_UNCORERR && info->ecc_bch) {
		/*
		 * Empty pages will trigger uncorrectable errors. Re-read the
		 * entire page in raw mode and check for bits not being "1".
		 * If there are more than the supported strength, then it means
		 * this is an actual uncorrectable error.
		 */
		chip->ecc.read_page_raw(mtd, chip, buf, oob_required, page);
		bf = nand_check_erased_ecc_chunk(buf, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 NULL, 0, chip->ecc.strength);
		if (bf < 0) {
			/* Genuinely uncorrectable */
			mtd->ecc_stats.failed++;
		} else if (bf) {
			/* Erased page with a few bitflips: report corrected */
			mtd->ecc_stats.corrected += bf;
			info->max_bitflips = max_t(unsigned int,
						   info->max_bitflips, bf);
			info->retcode = ERR_CORERR;
		} else {
			info->retcode = ERR_NONE;
		}

	} else if (info->retcode == ERR_UNCORERR && !info->ecc_bch) {
		/* Raw read is not supported with Hamming ECC engine */
		if (is_buf_blank(buf, mtd->writesize))
			info->retcode = ERR_NONE;
		else
			mtd->ecc_stats.failed++;
	}

	return info->max_bitflips;
}
1309
Miquel Raynal30a016a2018-10-11 17:45:42 +02001310static int pxa3xx_nand_read_page_raw(struct mtd_info *mtd,
1311 struct nand_chip *chip, uint8_t *buf,
1312 int oob_required, int page)
1313{
1314 struct pxa3xx_nand_host *host = chip->priv;
1315 struct pxa3xx_nand_info *info = host->info_data;
1316 int chunk, ecc_off_buf;
1317
1318 if (!info->ecc_bch)
1319 return -ENOTSUPP;
1320
1321 /*
1322 * Set the force_raw boolean, then re-call ->cmdfunc() that will run
1323 * pxa3xx_nand_start(), which will actually disable the ECC engine.
1324 */
1325 info->force_raw = true;
1326 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1327
1328 ecc_off_buf = (info->nfullchunks * info->spare_size) +
1329 info->last_spare_size;
1330 for (chunk = 0; chunk < info->nfullchunks; chunk++) {
1331 chip->read_buf(mtd,
1332 buf + (chunk * info->chunk_size),
1333 info->chunk_size);
1334 chip->read_buf(mtd,
1335 chip->oob_poi +
1336 (chunk * (info->spare_size)),
1337 info->spare_size);
1338 chip->read_buf(mtd,
1339 chip->oob_poi + ecc_off_buf +
1340 (chunk * (info->ecc_size)),
1341 info->ecc_size - 2);
1342 }
1343
1344 if (info->ntotalchunks > info->nfullchunks) {
1345 chip->read_buf(mtd,
1346 buf + (info->nfullchunks * info->chunk_size),
1347 info->last_chunk_size);
1348 chip->read_buf(mtd,
1349 chip->oob_poi +
1350 (info->nfullchunks * (info->spare_size)),
1351 info->last_spare_size);
1352 chip->read_buf(mtd,
1353 chip->oob_poi + ecc_off_buf +
1354 (info->nfullchunks * (info->ecc_size)),
1355 info->ecc_size - 2);
1356 }
1357
1358 info->force_raw = false;
1359
1360 return 0;
1361}
1362
1363static int pxa3xx_nand_read_oob_raw(struct mtd_info *mtd,
1364 struct nand_chip *chip, int page)
1365{
1366 /* Invalidate page cache */
1367 chip->pagebuf = -1;
1368
1369 return chip->ecc.read_page_raw(mtd, chip, chip->buffers->databuf, true,
1370 page);
1371}
1372
Stefan Roese75659da2015-07-23 10:26:16 +02001373static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
1374{
Scott Wood17fed142016-05-30 13:57:56 -05001375 struct nand_chip *chip = mtd_to_nand(mtd);
1376 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001377 struct pxa3xx_nand_info *info = host->info_data;
1378 char retval = 0xFF;
1379
1380 if (info->buf_start < info->buf_count)
1381 /* Has just send a new command? */
1382 retval = info->data_buff[info->buf_start++];
1383
1384 return retval;
1385}
1386
1387static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
1388{
Scott Wood17fed142016-05-30 13:57:56 -05001389 struct nand_chip *chip = mtd_to_nand(mtd);
1390 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001391 struct pxa3xx_nand_info *info = host->info_data;
1392 u16 retval = 0xFFFF;
1393
1394 if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
1395 retval = *((u16 *)(info->data_buff+info->buf_start));
1396 info->buf_start += 2;
1397 }
1398 return retval;
1399}
1400
1401static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
1402{
Scott Wood17fed142016-05-30 13:57:56 -05001403 struct nand_chip *chip = mtd_to_nand(mtd);
1404 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001405 struct pxa3xx_nand_info *info = host->info_data;
1406 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1407
1408 memcpy(buf, info->data_buff + info->buf_start, real_len);
1409 info->buf_start += real_len;
1410}
1411
1412static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
1413 const uint8_t *buf, int len)
1414{
Scott Wood17fed142016-05-30 13:57:56 -05001415 struct nand_chip *chip = mtd_to_nand(mtd);
1416 struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001417 struct pxa3xx_nand_info *info = host->info_data;
1418 int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
1419
1420 memcpy(info->data_buff + info->buf_start, buf, real_len);
1421 info->buf_start += real_len;
1422}
1423
/*
 * ->select_chip hook: intentionally a no-op. Chip-select handling is done
 * per command in nand_cmdfunc()/nand_cmdfunc_extended(), which reprogram
 * the timing registers when the CS changes.
 */
static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
1428
/*
 * ->waitfunc hook: if a started command still needs completion
 * (info->need_wait), busy-poll NDSR and feed status bits through
 * pxa3xx_nand_irq() until the device reports ready or
 * CHIP_DELAY_TIMEOUT expires.
 *
 * Returns 0 on successful write/erase, NAND_STATUS_FAIL on error or
 * timeout, NAND_STATUS_READY otherwise.
 */
static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;

	if (info->need_wait) {
		u32 ts;

		info->need_wait = 0;

		ts = get_timer(0);
		while (1) {
			u32 status;

			/* Poll NDSR in lieu of a real interrupt handler */
			status = nand_readl(info, NDSR);
			if (status)
				pxa3xx_nand_irq(info);

			if (info->dev_ready)
				break;

			if (get_timer(ts) > CHIP_DELAY_TIMEOUT) {
				dev_err(mtd->dev, "Ready timeout!!!\n");
				return NAND_STATUS_FAIL;
			}
		}
	}

	/* pxa3xx_nand_send_command has waited for command complete */
	if (this->state == FL_WRITING || this->state == FL_ERASING) {
		if (info->retcode == ERR_NONE)
			return 0;
		else
			return NAND_STATUS_FAIL;
	}

	return NAND_STATUS_READY;
}
1468
Ofer Heifetz531816e2018-08-29 11:56:07 +03001469static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
1470{
1471 struct pxa3xx_nand_platform_data *pdata = info->pdata;
1472
1473 /* Configure default flash values */
Ofer Heifetz531816e2018-08-29 11:56:07 +03001474 info->reg_ndcr = 0x0; /* enable all interrupts */
1475 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
1476 info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
1477 info->reg_ndcr |= NDCR_SPARE_EN;
1478
1479 return 0;
1480}
1481
1482static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001483{
1484 struct pxa3xx_nand_host *host = info->host[info->cs];
Ofer Heifetz531816e2018-08-29 11:56:07 +03001485 struct mtd_info *mtd = nand_to_mtd(&info->host[info->cs]->chip);
Scott Wood17fed142016-05-30 13:57:56 -05001486 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Roese75659da2015-07-23 10:26:16 +02001487
1488 info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
1489 info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
1490 info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001491}
1492
Ofer Heifetz268979f2018-08-29 11:56:08 +03001493static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001494{
Ofer Heifetz531816e2018-08-29 11:56:07 +03001495 struct pxa3xx_nand_platform_data *pdata = info->pdata;
Stefan Roese75659da2015-07-23 10:26:16 +02001496 uint32_t ndcr = nand_readl(info, NDCR);
1497
Stefan Roese75659da2015-07-23 10:26:16 +02001498 /* Set an initial chunk size */
Ofer Heifetz4a574aa2018-08-29 11:56:05 +03001499 info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
Ofer Heifetz531816e2018-08-29 11:56:07 +03001500 info->reg_ndcr = ndcr &
1501 ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
1502 info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001503 info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
1504 info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
Stefan Roese75659da2015-07-23 10:26:16 +02001505}
1506
1507static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
1508{
1509 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
1510 if (info->data_buff == NULL)
1511 return -ENOMEM;
1512 return 0;
1513}
1514
/*
 * Probe for a chip on the current chip-select: program conservative
 * defaults (ONFI timing mode 0), issue a RESET and check that the chip
 * answers. Returns 0 when a chip responded, -ENODEV when the RESET
 * failed, or the error from the timing lookup.
 */
static int pxa3xx_nand_sensing(struct pxa3xx_nand_host *host)
{
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	struct mtd_info *mtd;
	struct nand_chip *chip;
	const struct nand_sdr_timings *timings;
	int ret;

	mtd = nand_to_mtd(&info->host[info->cs]->chip);
	chip = mtd_to_nand(mtd);

	/* configure default flash values */
	info->reg_ndcr = 0x0; /* enable all interrupts */
	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
	info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */

	/* use the common timing to make a try */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);

	pxa3xx_nand_set_sdr_timing(host, timings);

	/* A successful RESET proves something is wired to this CS */
	chip->cmdfunc(mtd, NAND_CMD_RESET, 0, 0);
	ret = chip->waitfunc(mtd, chip);
	if (ret & NAND_STATUS_FAIL)
		return -ENODEV;

	return 0;
}
1547
/*
 * pxa_ecc_init() - map a (strength, step size, page size) requirement onto
 * one of the controller's fixed ECC geometries.
 *
 * Each supported combination fixes the chunk layout (nfullchunks /
 * ntotalchunks / chunk_size / spare sizes / ecc_size) in @info and the
 * NAND core ECC parameters (mode/size/strength/layout) in @ecc. Note the
 * controller corrects per bigger internal units, so ecc->strength is
 * reported as 16 for all BCH geometries even when the requirement was
 * 4 or 8 bits per 512 bytes.
 *
 * Returns 0 on success, -ENODEV for an unsupported combination.
 */
static int pxa_ecc_init(struct pxa3xx_nand_info *info,
			struct nand_ecc_ctrl *ecc,
			int strength, int ecc_stepsize, int page_size)
{
	/* Hamming, 1 bit / 512 B on a 2 KiB page: one 2 KiB chunk */
	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 40;
		info->ecc_size = 24;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/* Hamming, 1 bit / 512 B on a 512 B page */
	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 512;
		info->spare_size = 8;
		info->ecc_size = 8;
		ecc->mode = NAND_ECC_HW;
		ecc->size = 512;
		ecc->strength = 1;

	/*
	 * Required ECC: 4-bit correction per 512 bytes
	 * Select: 16-bit correction per 2048 bytes
	 */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 1;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch4bit;
		ecc->strength = 16;

	/* BCH-4, 4 KiB page: two full 2 KiB chunks */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 2;
		info->ntotalchunks = 2;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch4bit;
		ecc->strength = 16;

	/* BCH-4, 8 KiB page: four full 2 KiB chunks */
	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 4;
		info->chunk_size = 2048;
		info->spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch4bit;
		ecc->strength = 16;

	/*
	 * Required ECC: 8-bit correction per 512 bytes
	 * Select: 16-bit correction per 1024 bytes
	 */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 2048) {
		info->ecc_bch = 1;
		info->nfullchunks = 1;
		info->ntotalchunks = 2;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 1024;
		info->last_spare_size = 32;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_2KB_bch8bit;
		ecc->strength = 16;

	/* BCH-8, 4 KiB page: 4 data chunks + a spare-only trailing chunk */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
		info->ecc_bch = 1;
		info->nfullchunks = 4;
		info->ntotalchunks = 5;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 64;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_4KB_bch8bit;
		ecc->strength = 16;

	/* BCH-8, 8 KiB page: 8 data chunks + a spare-only trailing chunk */
	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 8192) {
		info->ecc_bch = 1;
		info->nfullchunks = 8;
		info->ntotalchunks = 9;
		info->chunk_size = 1024;
		info->spare_size = 0;
		info->last_chunk_size = 0;
		info->last_spare_size = 160;
		info->ecc_size = 32;
		ecc->mode = NAND_ECC_HW;
		ecc->size = info->chunk_size;
		ecc->layout = &ecc_layout_8KB_bch8bit;
		ecc->strength = 16;

	} else {
		dev_err(info->controller.active->mtd.dev,
			"ECC strength %d at page size %d is not supported\n",
			strength, page_size);
		return -ENODEV;
	}

	return 0;
}
1667
/*
 * pxa3xx_nand_scan() - driver-specific replacement for nand_scan():
 * identify the chip, pick the ECC geometry, choose the plain vs extended
 * (chunked) command path, size the transfer buffer and finish with
 * nand_scan_tail().
 *
 * Returns 0 on success or a negative error code.
 */
static int pxa3xx_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
	struct pxa3xx_nand_info *info = host->info_data;
	struct pxa3xx_nand_platform_data *pdata = info->pdata;
	int ret;
	uint16_t ecc_strength, ecc_step;

	/* Either inherit the bootloader's setup, or probe from scratch */
	if (pdata->keep_config) {
		pxa3xx_nand_detect_config(info);
	} else {
		ret = pxa3xx_nand_config_ident(info);
		if (ret)
			return ret;
		ret = pxa3xx_nand_sensing(host);
		if (ret) {
			dev_info(mtd->dev, "There is no chip on cs %d!\n",
				 info->cs);
			return ret;
		}
	}

	/* Device detection must be done with ECC disabled */
	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
	    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K)
		nand_writel(info, NDECCCTRL, 0x0);

	if (nand_scan_ident(mtd, 1, NULL))
		return -ENODEV;

	if (!pdata->keep_config) {
		ret = pxa3xx_nand_init_timings(host);
		if (ret) {
			dev_err(mtd->dev,
				"Failed to set timings: %d\n", ret);
			return ret;
		}
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/*
	 * We'll use a bad block table stored in-flash and don't
	 * allow writing the bad block marker to the flash.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB_BBM;
	chip->bbt_td = &bbt_main_descr;
	chip->bbt_md = &bbt_mirror_descr;
#endif

	/* Platform ECC settings win over the chip's advertised requirement */
	if (pdata->ecc_strength && pdata->ecc_step_size) {
		ecc_strength = pdata->ecc_strength;
		ecc_step = pdata->ecc_step_size;
	} else {
		ecc_strength = chip->ecc_strength_ds;
		ecc_step = chip->ecc_step_ds;
	}

	/* Set default ECC strength requirements on non-ONFI devices */
	if (ecc_strength < 1 && ecc_step < 1) {
		ecc_strength = 1;
		ecc_step = 512;
	}

	ret = pxa_ecc_init(info, &chip->ecc, ecc_strength,
			   ecc_step, mtd->writesize);
	if (ret)
		return ret;

	/*
	 * If the page size is bigger than the FIFO size, let's check
	 * we are given the right variant and then switch to the extended
	 * (aka split) command handling,
	 */
	if (mtd->writesize > info->chunk_size) {
		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 ||
		    info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) {
			chip->cmdfunc = nand_cmdfunc_extended;
		} else {
			dev_err(mtd->dev,
				"unsupported page size on this variant\n");
			return -ENODEV;
		}
	}

	/* calculate addressing information */
	if (mtd->writesize >= 2048)
		host->col_addr_cycles = 2;
	else
		host->col_addr_cycles = 1;

	/* release the initial buffer */
	kfree(info->data_buff);

	/* allocate the real data + oob buffer */
	info->buf_size = mtd->writesize + mtd->oobsize;
	ret = pxa3xx_nand_init_buff(info);
	if (ret)
		return ret;
	info->oob_buff = info->data_buff + mtd->writesize;

	/* Chips larger than 65536 pages need a third row address cycle */
	if ((mtd->size >> chip->page_shift) > 65536)
		host->row_addr_cycles = 3;
	else
		host->row_addr_cycles = 2;

	if (!pdata->keep_config)
		pxa3xx_nand_config_tail(info);

	return nand_scan_tail(mtd);
}
1779
Shmuel Hazan58983222020-10-29 08:52:20 +02001780static int alloc_nand_resource(struct udevice *dev, struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001781{
1782 struct pxa3xx_nand_platform_data *pdata;
1783 struct pxa3xx_nand_host *host;
1784 struct nand_chip *chip = NULL;
1785 struct mtd_info *mtd;
Baruch Siach807ae582020-10-29 08:52:19 +02001786 int cs;
Stefan Roese75659da2015-07-23 10:26:16 +02001787
1788 pdata = info->pdata;
1789 if (pdata->num_cs <= 0)
1790 return -ENODEV;
1791
Shmuel Hazan58983222020-10-29 08:52:20 +02001792 info->variant = pxa3xx_nand_get_variant(dev);
Stefan Roese75659da2015-07-23 10:26:16 +02001793 for (cs = 0; cs < pdata->num_cs; cs++) {
Kevin Smith4d21b592016-01-14 16:01:38 +00001794 chip = (struct nand_chip *)
1795 ((u8 *)&info[1] + sizeof(*host) * cs);
Scott Wood17fed142016-05-30 13:57:56 -05001796 mtd = nand_to_mtd(chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001797 host = (struct pxa3xx_nand_host *)chip;
1798 info->host[cs] = host;
Stefan Roese75659da2015-07-23 10:26:16 +02001799 host->cs = cs;
1800 host->info_data = info;
Stefan Roese75659da2015-07-23 10:26:16 +02001801 mtd->owner = THIS_MODULE;
1802
Chris Packham3c2170a2016-08-29 15:20:52 +12001803 nand_set_controller_data(chip, host);
Stefan Roese75659da2015-07-23 10:26:16 +02001804 chip->ecc.read_page = pxa3xx_nand_read_page_hwecc;
Miquel Raynal30a016a2018-10-11 17:45:42 +02001805 chip->ecc.read_page_raw = pxa3xx_nand_read_page_raw;
1806 chip->ecc.read_oob_raw = pxa3xx_nand_read_oob_raw;
Stefan Roese75659da2015-07-23 10:26:16 +02001807 chip->ecc.write_page = pxa3xx_nand_write_page_hwecc;
1808 chip->controller = &info->controller;
1809 chip->waitfunc = pxa3xx_nand_waitfunc;
1810 chip->select_chip = pxa3xx_nand_select_chip;
1811 chip->read_word = pxa3xx_nand_read_word;
1812 chip->read_byte = pxa3xx_nand_read_byte;
1813 chip->read_buf = pxa3xx_nand_read_buf;
1814 chip->write_buf = pxa3xx_nand_write_buf;
1815 chip->options |= NAND_NO_SUBPAGE_WRITE;
1816 chip->cmdfunc = nand_cmdfunc;
1817 }
1818
Stefan Roese75659da2015-07-23 10:26:16 +02001819 /* Allocate a buffer to allow flash detection */
1820 info->buf_size = INIT_BUFFER_SIZE;
1821 info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
Baruch Siach807ae582020-10-29 08:52:19 +02001822 if (info->data_buff == NULL)
1823 return -ENOMEM;
Stefan Roese75659da2015-07-23 10:26:16 +02001824
1825 /* initialize all interrupts to be disabled */
1826 disable_int(info, NDSR_MASK);
1827
Shmuel Hazan58983222020-10-29 08:52:20 +02001828 /*
1829 * Some SoCs like A7k/A8k need to enable manually the NAND
1830 * controller to avoid being bootloader dependent. This is done
1831 * through the use of a single bit in the System Functions registers.
1832 */
1833 if (pxa3xx_nand_get_variant(dev) == PXA3XX_NAND_VARIANT_ARMADA_8K) {
1834 struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle(
1835 dev, "marvell,system-controller");
1836 u32 reg;
1837
1838 if (IS_ERR(sysctrl_base))
1839 return PTR_ERR(sysctrl_base);
1840
1841 regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, &reg);
1842 reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN;
1843 regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg);
1844 }
1845
Stefan Roese75659da2015-07-23 10:26:16 +02001846 return 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001847}
1848
Shmuel Hazan759349e2020-10-29 08:52:18 +02001849static int pxa3xx_nand_probe_dt(struct udevice *dev, struct pxa3xx_nand_info *info)
Stefan Roese75659da2015-07-23 10:26:16 +02001850{
1851 struct pxa3xx_nand_platform_data *pdata;
1852
1853 pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
1854 if (!pdata)
1855 return -ENOMEM;
1856
Shmuel Hazan759349e2020-10-29 08:52:18 +02001857 info->mmio_base = dev_read_addr_ptr(dev);
Stefan Roese75659da2015-07-23 10:26:16 +02001858
Shmuel Hazan759349e2020-10-29 08:52:18 +02001859 pdata->num_cs = dev_read_u32_default(dev, "num-cs", 1);
1860 if (pdata->num_cs != 1) {
1861 pr_err("pxa3xx driver supports single CS only\n");
1862 return -EINVAL;
1863 }
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001864
Pierre Bourdonfa7890e2021-12-25 05:46:29 +01001865 if (dev_read_bool(dev, "marvell,nand-enable-arbiter"))
Shmuel Hazan759349e2020-10-29 08:52:18 +02001866 pdata->enable_arbiter = 1;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001867
Pierre Bourdonfa7890e2021-12-25 05:46:29 +01001868 if (dev_read_bool(dev, "marvell,nand-keep-config"))
Shmuel Hazan759349e2020-10-29 08:52:18 +02001869 pdata->keep_config = 1;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001870
Shmuel Hazan759349e2020-10-29 08:52:18 +02001871 /*
1872 * ECC parameters.
1873 * If these are not set, they will be selected according
1874 * to the detected flash type.
1875 */
1876 /* ECC strength */
1877 pdata->ecc_strength = dev_read_u32_default(dev, "nand-ecc-strength", 0);
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001878
Shmuel Hazan759349e2020-10-29 08:52:18 +02001879 /* ECC step size */
1880 pdata->ecc_step_size = dev_read_u32_default(dev, "nand-ecc-step-size",
1881 0);
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001882
Shmuel Hazan759349e2020-10-29 08:52:18 +02001883 info->pdata = pdata;
Konstantin Porotchkin93c9f392017-03-28 18:16:54 +03001884
Shmuel Hazan759349e2020-10-29 08:52:18 +02001885 return 0;
Stefan Roese75659da2015-07-23 10:26:16 +02001886}
1887
Shmuel Hazan759349e2020-10-29 08:52:18 +02001888static int pxa3xx_nand_probe(struct udevice *dev)
Stefan Roese75659da2015-07-23 10:26:16 +02001889{
1890 struct pxa3xx_nand_platform_data *pdata;
1891 int ret, cs, probe_success;
Shmuel Hazan759349e2020-10-29 08:52:18 +02001892 struct pxa3xx_nand_info *info = dev_get_priv(dev);
Stefan Roese75659da2015-07-23 10:26:16 +02001893
Shmuel Hazan759349e2020-10-29 08:52:18 +02001894 ret = pxa3xx_nand_probe_dt(dev, info);
Stefan Roese75659da2015-07-23 10:26:16 +02001895 if (ret)
1896 return ret;
1897
1898 pdata = info->pdata;
1899
Shmuel Hazan58983222020-10-29 08:52:20 +02001900 ret = alloc_nand_resource(dev, info);
Stefan Roese75659da2015-07-23 10:26:16 +02001901 if (ret) {
Shmuel Hazan759349e2020-10-29 08:52:18 +02001902 dev_err(dev, "alloc nand resource failed\n");
Stefan Roese75659da2015-07-23 10:26:16 +02001903 return ret;
1904 }
1905
1906 probe_success = 0;
1907 for (cs = 0; cs < pdata->num_cs; cs++) {
Ofer Heifetz0da35df2018-08-29 11:56:01 +03001908 struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
Stefan Roese75659da2015-07-23 10:26:16 +02001909
1910 /*
1911 * The mtd name matches the one used in 'mtdparts' kernel
1912 * parameter. This name cannot be changed or otherwise
1913 * user's mtd partitions configuration would get broken.
1914 */
1915 mtd->name = "pxa3xx_nand-0";
Robert Marko142f41a2022-01-05 16:01:00 +01001916 mtd->dev = dev;
Stefan Roese75659da2015-07-23 10:26:16 +02001917 info->cs = cs;
1918 ret = pxa3xx_nand_scan(mtd);
1919 if (ret) {
Sean Andersonc6302f02020-09-15 10:44:40 -04001920 dev_info(mtd->dev, "failed to scan nand at cs %d\n",
Stefan Roese75659da2015-07-23 10:26:16 +02001921 cs);
1922 continue;
1923 }
1924
Scott Wood2c1b7e12016-05-30 13:57:55 -05001925 if (nand_register(cs, mtd))
1926 continue;
1927
1928 probe_success = 1;
Stefan Roese75659da2015-07-23 10:26:16 +02001929 }
1930
1931 if (!probe_success)
1932 return -ENODEV;
1933
1934 return 0;
1935}
1936
/*
 * Driver-model registration. .priv_auto reserves the controller state
 * plus one pxa3xx_nand_host per possible NAND device, matching the
 * per-chip-select carving done in alloc_nand_resource().
 */
U_BOOT_DRIVER(pxa3xx_nand) = {
	.name = "pxa3xx-nand",
	.id = UCLASS_MTD,
	.of_match = pxa3xx_nand_dt_ids,
	.probe = pxa3xx_nand_probe,
	.priv_auto	= sizeof(struct pxa3xx_nand_info) +
		sizeof(struct pxa3xx_nand_host) * CONFIG_SYS_MAX_NAND_DEVICE,
};
1945
Stefan Roese75659da2015-07-23 10:26:16 +02001946void board_nand_init(void)
1947{
Shmuel Hazan759349e2020-10-29 08:52:18 +02001948 struct udevice *dev;
Stefan Roese75659da2015-07-23 10:26:16 +02001949 int ret;
1950
Shmuel Hazan759349e2020-10-29 08:52:18 +02001951 ret = uclass_get_device_by_driver(UCLASS_MTD,
Simon Glass65130cd2020-12-28 20:34:56 -07001952 DM_DRIVER_GET(pxa3xx_nand), &dev);
Shmuel Hazan759349e2020-10-29 08:52:18 +02001953 if (ret && ret != -ENODEV) {
1954 pr_err("Failed to initialize %s. (error %d)\n", dev->name,
1955 ret);
1956 }
Stefan Roese75659da2015-07-23 10:26:16 +02001957}