// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <asm/cache.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include <mxs_nand.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}
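
/*
 * With MXS_NAND_METADATA_SIZE = 10 this rounds up to the next 4-byte
 * boundary, (10 + 0x3) & ~0x3 = 12, so the BCH status bytes start 12
 * bytes into the auxiliary buffer.
 */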

static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal that of chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}
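
/*
 * Illustrative arithmetic for the check above (hypothetical geometry,
 * not a specific chip): writesize = 2048, MXS_NAND_METADATA_SIZE = 10,
 * gf_len = 13, ecc_strength = 8, ecc_chunkn_size = 512. Each chunk then
 * occupies gf_len * ecc_strength + chunk bits = 104 + 4096 = 4200 bits,
 * i = (16384 - 80) / 4200 = 3 whole chunks precede the bad block marker
 * position, and j = 16304 - 3 * 4200 = 3704 bits remain. Since
 * 3704 < 4096 the marker falls in a data area and *chunk_num = 4.
 */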

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the chunk size (C) >= the OOB size (O). */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
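
/*
 * Worked example of the formula above (hypothetical 2048+64 page): with
 * oobsize = 64, MXS_NAND_METADATA_SIZE = 10, gf_len = 13 and
 * ecc_chunk_count = 2048 / 512 = 4, the raw strength is
 * (64 - 10) * 8 / (13 * 4) = 432 / 52 = 8 (integer division), left at 8
 * by round_down(..., 2). The block mark then sits at bit
 * 2048 * 8 - (8 * 13 * 3 + 10 * 8) = 16384 - 392 = 15992, i.e. byte
 * offset 1999, bit offset 0.
 */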

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check for the minimum ECC the NAND chip requires. */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC the platform can support. */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * Search for a supported ECC strength that places the bbm in a
	 * data chunk.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND chip
	 * requires, but change the ECC page layout.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata. */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* Check if the OOB can afford this extra ECC chunk. */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate in which chunk the bbm is located. */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			      geo->ecc_chunkn_size * 8) + 1;
	}

	/* Calculate the number of ECC chunks behind the bbm. */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
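
/*
 * Note on the fallback above (an interpretation of the code, not taken
 * from a datasheet): when no even strength up to max_ecc puts the bad
 * block marker inside a data chunk, the driver keeps the chip's minimum
 * strength and instead gives the metadata its own ECC chunk
 * (ecc_chunk0_size = 0, ecc_for_meta = 1), growing ecc_chunk_count by
 * one; the oobsize check guards that the extra ECC payload still fits
 * in the spare area.
 */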

/*
 * Wait for BCH complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends the command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
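
/*
 * Illustrative splice (hypothetical values): with bit_offset = 4,
 * buf_offset = 1999, data_buf[1999] = 0xAB and data_buf[2000] = 0xCD,
 * the byte delivered in oob_buf[0] is
 * (0xAB >> 4) | ((0xCD << 4) & 0xff) = 0x0A | 0xD0 = 0xDA, while the
 * previous oob_buf[0] is scattered back across the high nibble of
 * data_buf[1999] and the low nibble of data_buf[2000].
 */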

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;

	mxs_nand_read_buf(mtd, &buf, 1);

	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n",
	       page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}
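
/*
 * Illustrative numbers for the threshold above: with gf_len = 13 the
 * starting threshold is 13 / 2 = 6, capped at the ECC strength, so for
 * an 8-bit geometry a chunk flagged 0xfe counts as erased only if it
 * shows at most 6 zero bits, and the raw re-read of the whole page must
 * also stay within 6 flipped bits before the buffer is forced to 0xff.
 */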

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * The number of the NAND page to be randomized needs to be
		 * written to the GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details check
		 * section 9.6.6.4 of the i.MX7D Applications Processor
		 * Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ecc required %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
	     mtd->oobsize < 1024) || nand_info->legacy_bch_geometry) {
		dev_warn(mtd->dev, "use legacy bch geometry\n");
		return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
	}

	if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
		return mxs_nand_calc_ecc_for_large_oob(geo, mtd);

	return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
						chip->ecc_strength_ds,
						chip->ecc_step_ds);
}

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}
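
/*
 * Sanity check of the register packing above against the hypothetical
 * 2048+64 legacy geometry from the earlier examples: NBLOCKS = 4 - 1 = 3,
 * META_SIZE = 10, ECC0 = ECCN = 8 >> 1 = 4 (the layout registers encode
 * half the ECC strength), DATA0_SIZE = DATAN_SIZE = 512 >>
 * MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT, GF13_0_GF14_1 = 0 and
 * PAGE_SIZE = 2048 + 64 = 2112.
 */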

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;

	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
			    MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;
	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;

	nand->read_byte = mxs_nand_read_byte;
	nand->read_buf = mxs_nand_read_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;

	nand->ecc.mode = NAND_ECC_HW;

	return 0;
}

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_of_offset(nand_info->dev);

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	/* First scan to find the device and get the page size. */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		goto err_free_buffers;

	if (mxs_nand_setup_ecc(mtd))
		goto err_free_buffers;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
	nand->ecc.strength = nand_info->bch_geometry.ecc_strength;

	/* Second phase scan. */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}
1427
#ifndef CONFIG_NAND_MXS_DT
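/*
 * Legacy, non-devicetree probe path; with CONFIG_NAND_MXS_DT the
 * controller is bound and probed through the driver model instead.
 */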
void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/*
	 * Max BCH strength per the SoC reference manual: Chapter 17 for
	 * i.MX6DQ, Chapter 18 for i.MX6SX.
	 */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}
#endif

/*
 * Read NAND layout for FCB block generation.
 */
void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	u32 tmp;

	/* NBLOCKS, META_SIZE, DATA0_SIZE and ECC0 all live in FLASHLAYOUT0 */
	tmp = readl(&bch_regs->hw_bch_flash0layout0);
	l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
			BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* chunk sizes are stored as 32-bit word counts, convert to bytes */
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
			BCH_FLASHLAYOUT0_ECC0_OFFSET;

	tmp = readl(&bch_regs->hw_bch_flash0layout1);
	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
			BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
	l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
			BCH_FLASHLAYOUT1_ECCN_OFFSET;
	l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
			BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
}
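
/*
 * A minimal usage sketch (hypothetical caller, e.g. an FCB generator that
 * mirrors the live BCH geometry into the structure it is about to write;
 * fcb_fill_ecc_fields() is illustrative, not a real helper):
 *
 *	struct mxs_nand_layout l;
 *
 *	mxs_nand_get_layout(mtd, &l);
 *	fcb_fill_ecc_fields(fcb, &l);
 */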

/*
 * Set BCH to the 62-bit ECC layout used by the ROM bootloader to read the FCB.
 */
void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

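	/*
	 * Resulting page budget: 32 bytes metadata + 8 * 128 bytes data +
	 * 8 * 806 parity bits (62 bits corrected * 13-bit GF) = 1862 bytes.
	 */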
	/* 8 ECC chunks (NBLOCKS = number of chunks - 1) */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes of metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* 62-bit ECC (register encoding 0x1F) */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 chunk of 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	/* GF(2^13), per the GF13_0_GF14_1 encoding */
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* total page: 1024 bytes of data + 838 bytes of OOB */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* 62-bit ECC for the remaining chunks as well */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* datan chunks of 0x20 * 4 = 128 bytes each */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}

/*
 * Set BCH to the 40-bit ECC layout used by the ROM bootloader to read the FCB
 * (no randomizer in this mode).
 */
void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/* no randomizer in this setting */
	nand_info->en_randomizer = 0;

	mtd->writesize = 1024;
	mtd->oobsize = 1576 - 1024;

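	/*
	 * Resulting page budget: 32 bytes metadata + 8 * 128 bytes data +
	 * 8 * 520 parity bits (40 bits corrected * 13-bit GF) = 1576 bytes.
	 */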
	/* 8 ECC chunks (NBLOCKS = number of chunks - 1) */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes of metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* 40-bit ECC (register encoding 0x14) */
	tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 chunk of 0x20 * 4 = 128 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	/* GF(2^13), per the GF13_0_GF14_1 encoding */
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* total page: 1024 bytes of data + 552 bytes of OOB */
	tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* 40-bit ECC for the remaining chunks as well */
	tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* datan chunks of 0x20 * 4 = 128 bytes each */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}

/*
 * Restore BCH to the normal settings saved before switching to an FCB layout.
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}

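/*
 * A typical (hypothetical) caller sequence around an FCB write:
 *
 *	mxs_nand_mode_fcb_62bit(mtd);	(or mxs_nand_mode_fcb_40bit)
 *	...write the FCB page(s)...
 *	mxs_nand_mode_normal(mtd);
 */

/*
 * Once the BCH layout is applied, the factory bad-block marker lands inside
 * an ECC-protected chunk, so the driver swaps it with a data byte. These
 * helpers report where the swapped byte and bit live, for callers (e.g.
 * boot-stream tooling) that must perform the same swap.
 */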
uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_byte_offset;
}

uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_bit_offset;
}