blob: ee5d7fde9ce1087cd31588883623dde6ee029adf [file] [log] [blame]
Tom Rini10e47792018-05-06 17:58:06 -04001// SPDX-License-Identifier: GPL-2.0+
Marek Vasut913a7252011-11-08 23:18:16 +00002/*
3 * Freescale i.MX28 NAND flash driver
4 *
5 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
6 * on behalf of DENX Software Engineering GmbH
7 *
8 * Based on code from LTIB:
9 * Freescale GPMI NFC NAND Flash Driver
10 *
11 * Copyright (C) 2010 Freescale Semiconductor, Inc.
12 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
Peng Fan9e813732020-05-04 22:08:53 +080013 * Copyright 2017-2019 NXP
Marek Vasut913a7252011-11-08 23:18:16 +000014 */
15
Tom Warrenc88d30f2012-09-10 08:47:51 -070016#include <common.h>
Simon Glass63334482019-11-14 12:57:39 -070017#include <cpu_func.h>
Stefan Agner19f90512018-06-22 18:06:16 +020018#include <dm.h>
Sean Anderson0a749442020-10-04 21:39:45 -040019#include <dm/device_compat.h>
Marek Vasut913a7252011-11-08 23:18:16 +000020#include <malloc.h>
Sean Anderson0a749442020-10-04 21:39:45 -040021#include <mxs_nand.h>
Marek Vasut913a7252011-11-08 23:18:16 +000022#include <asm/arch/clock.h>
23#include <asm/arch/imx-regs.h>
Sean Anderson0a749442020-10-04 21:39:45 -040024#include <asm/arch/sys_proto.h>
25#include <asm/cache.h>
26#include <asm/io.h>
Stefano Babic33731bc2017-06-29 10:16:06 +020027#include <asm/mach-imx/regs-bch.h>
28#include <asm/mach-imx/regs-gpmi.h>
Sean Anderson0a749442020-10-04 21:39:45 -040029#include <linux/errno.h>
30#include <linux/mtd/rawnand.h>
31#include <linux/sizes.h>
32#include <linux/types.h>
Marek Vasut913a7252011-11-08 23:18:16 +000033
34#define MXS_NAND_DMA_DESCRIPTOR_COUNT 4
35
Peng Fan128abf42020-05-04 22:09:00 +080036#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
37 defined(CONFIG_IMX8M)
Stefan Roese8338d1d2013-04-15 21:14:12 +000038#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 2
39#else
40#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT 0
41#endif
Marek Vasut913a7252011-11-08 23:18:16 +000042#define MXS_NAND_METADATA_SIZE 10
Jörg Krause1d870262015-04-15 09:27:22 +020043#define MXS_NAND_BITS_PER_ECC_LEVEL 13
Stefan Agner54bf8082016-08-01 23:55:18 -070044
45#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
Marek Vasut913a7252011-11-08 23:18:16 +000046#define MXS_NAND_COMMAND_BUFFER_SIZE 32
Stefan Agner54bf8082016-08-01 23:55:18 -070047#else
48#define MXS_NAND_COMMAND_BUFFER_SIZE CONFIG_SYS_CACHELINE_SIZE
49#endif
Marek Vasut913a7252011-11-08 23:18:16 +000050
51#define MXS_NAND_BCH_TIMEOUT 10000
52
Marek Vasut913a7252011-11-08 23:18:16 +000053struct nand_ecclayout fake_ecc_layout;
54
Marek Vasut1b120e82012-03-15 18:33:19 +000055/*
56 * Cache management functions
57 */
Trevor Woerner43ec7e02019-05-03 09:41:00 -040058#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
Marek Vasut1b120e82012-03-15 18:33:19 +000059static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
60{
Peng Fan128abf42020-05-04 22:09:00 +080061 uint32_t addr = (uintptr_t)info->data_buf;
Marek Vasut1b120e82012-03-15 18:33:19 +000062
63 flush_dcache_range(addr, addr + info->data_buf_size);
64}
65
66static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
67{
Peng Fan128abf42020-05-04 22:09:00 +080068 uint32_t addr = (uintptr_t)info->data_buf;
Marek Vasut1b120e82012-03-15 18:33:19 +000069
70 invalidate_dcache_range(addr, addr + info->data_buf_size);
71}
72
73static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
74{
Peng Fan128abf42020-05-04 22:09:00 +080075 uint32_t addr = (uintptr_t)info->cmd_buf;
Marek Vasut1b120e82012-03-15 18:33:19 +000076
77 flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
78}
79#else
80static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
81static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
82static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
83#endif
84
Marek Vasut913a7252011-11-08 23:18:16 +000085static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
86{
87 struct mxs_dma_desc *desc;
88
89 if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
90 printf("MXS NAND: Too many DMA descriptors requested\n");
91 return NULL;
92 }
93
94 desc = info->desc[info->desc_index];
95 info->desc_index++;
96
97 return desc;
98}
99
100static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
101{
102 int i;
103 struct mxs_dma_desc *desc;
104
105 for (i = 0; i < info->desc_index; i++) {
106 desc = info->desc[i];
107 memset(desc, 0, sizeof(struct mxs_dma_desc));
108 desc->address = (dma_addr_t)desc;
109 }
110
111 info->desc_index = 0;
112}
113
Marek Vasut913a7252011-11-08 23:18:16 +0000114static uint32_t mxs_nand_aux_status_offset(void)
115{
116 return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
117}
118
/*
 * Check whether, with the current BCH geometry, the factory bad-block
 * marker (the first physical OOB byte) falls inside a data chunk rather
 * than inside an ECC parity region.
 *
 * @geo:       geometry under test (ecc_strength, gf_len, chunk sizes)
 * @mtd:       owning MTD, provides writesize
 * @chunk_num: on success, receives the index of the data chunk that
 *             contains the marker
 *
 * Returns true when the marker lands in a data chunk (geometry usable
 * without an extra meta chunk), false otherwise. Also returns false if
 * chunk0 and chunkn sizes differ, since the division below assumes
 * uniform chunks.
 */
static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal to chunkn\n");
		return false;
	}

	/*
	 * i = number of whole (data + parity) units that precede the
	 * marker's bit position; j = bits left over inside unit i.
	 */
	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
				geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
				geo->ecc_chunkn_size * 8) * i;

	/* The marker is in data iff the remainder falls before the parity. */
	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}
147
/*
 * Build the BCH geometry from externally supplied ECC parameters
 * (e.g. from the device tree or ONFI data) instead of deriving them.
 *
 * @geo:          geometry to fill in
 * @mtd:          owning MTD (writesize/oobsize)
 * @ecc_strength: requested correctability, bits per chunk
 * @ecc_step:     ECC chunk size; only 512 or 1024 bytes are supported
 *
 * Returns 0 on success, -EINVAL when the step size is unsupported, the
 * chunk is smaller than the OOB area, or the strength exceeds what the
 * platform's BCH engine supports.
 */
static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* Galois field order follows the chunk size: GF(2^13)/GF(2^14). */
	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	/* BCH strength must be even; round the request up. */
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the C >= O (chunk size at least the OOB size). */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * For bit swap: locate the physical bad-block-marker bit within
	 * the BCH-scrambled page layout (see mxs_nand_swap_block_mark).
	 */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
191
/*
 * Derive the BCH geometry the legacy way: pick the chunk size from the
 * OOB size, then spend all remaining spare bits on ECC strength.
 *
 * @geo: geometry to fill in
 * @mtd: owning MTD (writesize/oobsize)
 *
 * Returns 0 on success, -EINVAL when the derived strength is weaker
 * than what the chip's datasheet requires.
 */
static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	int corr, ds_corr;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	/* Large-OOB devices use 1 KiB chunks and a wider Galois field. */
	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	/* Strength must be even and within the BCH engine's limit. */
	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	/* Check ecc strength, same as nand_ecc_is_strong_enough() does. */
	if (chip->ecc_step_ds) {
		corr = mtd->writesize * geo->ecc_strength /
		       geo->ecc_chunkn_size;
		ds_corr = mtd->writesize * chip->ecc_strength_ds /
			  chip->ecc_step_ds;
		if (corr < ds_corr ||
		    geo->ecc_strength < chip->ecc_strength_ds)
			return -EINVAL;
	}

	/* Bit position of the physical BBM inside the scrambled layout. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
249
/*
 * Build a BCH geometry for chips with large OOB areas, searching for an
 * ECC strength that keeps the bad-block marker inside a data chunk.
 * If no such strength exists, fall back to the chip's minimum required
 * strength and prepend a dedicated (size-0 data) chunk for the
 * metadata, flagged via geo->ecc_for_meta.
 *
 * @geo: geometry to fill in
 * @mtd: owning MTD (writesize/oobsize)
 *
 * Returns 0 on success, -EINVAL when the chip reports no datasheet ECC
 * requirement or the OOB cannot hold the extra metadata ECC chunk.
 */
static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check for the minimum ECC the NAND requires. */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC the platform can support. */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);


	/*
	 * Search (in steps of 2, since BCH strength is even) for a
	 * supported ECC strength that places the BBM in a data chunk.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND
	 * requires but switch to the layout with an extra meta chunk.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata. */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* Check if the OOB can afford this extra ECC chunk. */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate in which chunk the BBM is located. */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			     geo->ecc_chunkn_size * 8) + 1;
	}

	/* Number of ECC chunks at or behind the BBM chunk. */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	/* Bit position of the physical BBM inside the scrambled layout. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
320
321/*
322 * Wait for BCH complete IRQ and clear the IRQ
323 */
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200324static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
Marek Vasut913a7252011-11-08 23:18:16 +0000325{
Marek Vasut913a7252011-11-08 23:18:16 +0000326 int timeout = MXS_NAND_BCH_TIMEOUT;
327 int ret;
328
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200329 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
Marek Vasut913a7252011-11-08 23:18:16 +0000330 BCH_CTRL_COMPLETE_IRQ, timeout);
331
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200332 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);
Marek Vasut913a7252011-11-08 23:18:16 +0000333
334 return ret;
335}
336
/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 *
 * @mtd:  the owning MTD
 * @data: command/address byte to queue, or NAND_CMD_NONE
 * @ctrl: latch flags; while NAND_ALE or NAND_CLE is set, bytes are only
 *        queued — when both are deasserted the whole queue is sent in
 *        one DMA transfer
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	/* CLE with address-increment: first byte is the command, rest ALE. */
	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches so the DMA engine sees the queued command bytes. */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}
422
423/*
424 * Test if the NAND flash is ready.
425 */
426static int mxs_nand_device_ready(struct mtd_info *mtd)
427{
Scott Wood17fed142016-05-30 13:57:56 -0500428 struct nand_chip *chip = mtd_to_nand(mtd);
429 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
Marek Vasut913a7252011-11-08 23:18:16 +0000430 uint32_t tmp;
431
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200432 tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
Marek Vasut913a7252011-11-08 23:18:16 +0000433 tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);
434
435 return tmp & 1;
436}
437
438/*
439 * Select the NAND chip.
440 */
441static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
442{
Scott Wood17fed142016-05-30 13:57:56 -0500443 struct nand_chip *nand = mtd_to_nand(mtd);
444 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
Marek Vasut913a7252011-11-08 23:18:16 +0000445
446 nand_info->cur_chip = chip;
447}
448
449/*
450 * Handle block mark swapping.
451 *
452 * Note that, when this function is called, it doesn't know whether it's
453 * swapping the block mark, or swapping it *back* -- but it doesn't matter
454 * because the the operation is the same.
455 */
Stefan Agnerd0778b32018-06-22 17:19:49 +0200456static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
457 uint8_t *data_buf, uint8_t *oob_buf)
Marek Vasut913a7252011-11-08 23:18:16 +0000458{
Stefan Agnerd0778b32018-06-22 17:19:49 +0200459 uint32_t bit_offset = geo->block_mark_bit_offset;
460 uint32_t buf_offset = geo->block_mark_byte_offset;
Marek Vasut913a7252011-11-08 23:18:16 +0000461
462 uint32_t src;
463 uint32_t dst;
464
Marek Vasut913a7252011-11-08 23:18:16 +0000465 /*
466 * Get the byte from the data area that overlays the block mark. Since
467 * the ECC engine applies its own view to the bits in the page, the
468 * physical block mark won't (in general) appear on a byte boundary in
469 * the data.
470 */
471 src = data_buf[buf_offset] >> bit_offset;
472 src |= data_buf[buf_offset + 1] << (8 - bit_offset);
473
474 dst = oob_buf[0];
475
476 oob_buf[0] = src;
477
478 data_buf[buf_offset] &= ~(0xff << bit_offset);
479 data_buf[buf_offset + 1] &= 0xff << bit_offset;
480
481 data_buf[buf_offset] |= dst << bit_offset;
482 data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
483}
484
/*
 * Read @length raw bytes from the NAND into @buf.
 *
 * The transfer lands in the driver's DMA-safe bounce buffer
 * (nand_info->data_buf) and is then memcpy'd to the caller's buffer,
 * so @buf itself need not be DMA-capable or cache-aligned. Errors are
 * only logged; the MTD raw read path has no way to report them here.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches before DMA writes into the bounce buffer. */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate again so the CPU sees the freshly DMA-written data. */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}
567
/*
 * Write @length raw bytes from @buf to the NAND.
 *
 * The caller's data is first copied into the driver's DMA-safe bounce
 * buffer (nand_info->data_buf), so @buf need not be DMA-capable or
 * cache-aligned. Errors are only logged; the MTD raw write path has no
 * way to report them here.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches so the DMA engine sees the copied data. */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}
621
622/*
623 * Read a single byte from NAND.
624 */
625static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
626{
627 uint8_t buf;
628 mxs_nand_read_buf(mtd, &buf, 1);
629 return buf;
630}
631
/*
 * Decide whether a chunk that BCH reported as uncorrectable is really
 * just an erased (all-0xff) page with a few bitflips.
 *
 * First count zero bits in the failing chunk; if they stay within the
 * threshold, re-read the whole page raw (ECC bypassed) and count zero
 * bits across it. If that also stays within the threshold, treat the
 * page as erased: fold the flips into the ECC stats, return an
 * all-0xff page to the caller, and report true. Returns false as soon
 * as either count exceeds the threshold.
 *
 * NOTE(review): the raw re-read overwrites @buf, so on the true path
 * the caller receives 0xff data rather than the original DMA contents.
 */
static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	/* Tolerate at most gf_len/2 flips, capped by the ECC strength. */
	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	/* Raw re-read of the page, bypassing the ECC pipeline. */
	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	/* Present a clean erased page to the caller. */
	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}
670
/*
 * Read a page from NAND with hardware BCH ECC.
 *
 * Builds a four-descriptor DMA chain (wait-ready, BCH-decode read,
 * BCH disable, release/IRQ), runs it, then post-processes: block-mark
 * swap, per-chunk ECC status accounting, erased-page detection, and
 * delivery of data + minimal OOB to the caller.
 *
 * Returns 0 on success or a negative error from the DMA/BCH wait.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t	*status;
	int i, ret;
	int flag = 0;	/* set when the whole page should read as erased */

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	/* Optional data randomizer; seed depends on the page number. */
	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches before DMA writes into the bounce buffers. */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	/* Free descriptors early: the erased-page path re-reads the page. */
	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate again so the CPU sees the DMA-written data. */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/*
	 * Loop over status bytes, accumulating ECC status.
	 * Per chunk: 0x00 = no errors; 0xff = erased (presumably; on the
	 * listed SoCs hw_bch_debug1 != 0 flags flips in the erased page);
	 * 0xfe = uncorrectable; anything else = number of corrected bits.
	 */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			/* Possibly just an erased page with bitflips. */
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	/* Erased page with correctable flips: hand back clean 0xff data. */
	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}
842
843/*
844 * Write a page to NAND.
845 */
Sergey Lapin3a38a552013-01-14 03:46:50 +0000846static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
847 struct nand_chip *nand, const uint8_t *buf,
Scott Wood46e13102016-05-30 13:57:57 -0500848 int oob_required, int page)
Marek Vasut913a7252011-11-08 23:18:16 +0000849{
Scott Wood17fed142016-05-30 13:57:56 -0500850 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
Stefan Agnerd0778b32018-06-22 17:19:49 +0200851 struct bch_geometry *geo = &nand_info->bch_geometry;
Marek Vasut913a7252011-11-08 23:18:16 +0000852 struct mxs_dma_desc *d;
853 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
854 int ret;
855
856 memcpy(nand_info->data_buf, buf, mtd->writesize);
857 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);
858
859 /* Handle block mark swapping. */
Stefan Agnerd0778b32018-06-22 17:19:49 +0200860 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
Marek Vasut913a7252011-11-08 23:18:16 +0000861
862 /* Compile the DMA descriptor - write data. */
863 d = mxs_nand_get_dma_desc(nand_info);
864 d->cmd.data =
865 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
866 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
867 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
868
869 d->cmd.address = 0;
870
871 d->cmd.pio_words[0] =
872 GPMI_CTRL0_COMMAND_MODE_WRITE |
873 GPMI_CTRL0_WORD_LENGTH |
874 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
875 GPMI_CTRL0_ADDRESS_NAND_DATA;
876 d->cmd.pio_words[1] = 0;
877 d->cmd.pio_words[2] =
878 GPMI_ECCCTRL_ENABLE_ECC |
879 GPMI_ECCCTRL_ECC_CMD_ENCODE |
880 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
881 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
882 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
883 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
884
Han Xuafed2a12020-05-06 20:59:19 +0800885 if (nand_info->en_randomizer) {
Igor Opaniukc55401372019-11-03 16:49:43 +0100886 d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
887 GPMI_ECCCTRL_RANDOMIZER_TYPE2;
888 /*
889 * Write NAND page number needed to be randomized
890 * to GPMI_ECCCOUNT register.
891 *
892 * The value is between 0-255. For additional details
893 * check 9.6.6.4 of i.MX7D Applications Processor reference
894 */
Alice Guo3f277782020-05-04 22:09:03 +0800895 d->cmd.pio_words[3] |= (page % 256) << 16;
Igor Opaniukc55401372019-11-03 16:49:43 +0100896 }
897
Marek Vasut913a7252011-11-08 23:18:16 +0000898 mxs_dma_desc_append(channel, d);
899
Marek Vasut1b120e82012-03-15 18:33:19 +0000900 /* Flush caches */
901 mxs_nand_flush_data_buf(nand_info);
902
Marek Vasut913a7252011-11-08 23:18:16 +0000903 /* Execute the DMA chain. */
904 ret = mxs_dma_go(channel);
905 if (ret) {
906 printf("MXS NAND: DMA write error\n");
907 goto rtn;
908 }
909
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200910 ret = mxs_nand_wait_for_bch_complete(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +0000911 if (ret) {
912 printf("MXS NAND: BCH write timeout\n");
913 goto rtn;
914 }
915
916rtn:
917 mxs_nand_return_dma_descs(nand_info);
Sergey Lapin3a38a552013-01-14 03:46:50 +0000918 return 0;
Marek Vasut913a7252011-11-08 23:18:16 +0000919}
920
921/*
922 * Read OOB from NAND.
923 *
924 * This function is a veneer that replaces the function originally installed by
925 * the NAND Flash MTD code.
926 */
927static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
928 struct mtd_oob_ops *ops)
929{
Scott Wood17fed142016-05-30 13:57:56 -0500930 struct nand_chip *chip = mtd_to_nand(mtd);
931 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
Marek Vasut913a7252011-11-08 23:18:16 +0000932 int ret;
933
Sergey Lapin3a38a552013-01-14 03:46:50 +0000934 if (ops->mode == MTD_OPS_RAW)
Marek Vasut913a7252011-11-08 23:18:16 +0000935 nand_info->raw_oob_mode = 1;
936 else
937 nand_info->raw_oob_mode = 0;
938
939 ret = nand_info->hooked_read_oob(mtd, from, ops);
940
941 nand_info->raw_oob_mode = 0;
942
943 return ret;
944}
945
946/*
947 * Write OOB to NAND.
948 *
949 * This function is a veneer that replaces the function originally installed by
950 * the NAND Flash MTD code.
951 */
952static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
953 struct mtd_oob_ops *ops)
954{
Scott Wood17fed142016-05-30 13:57:56 -0500955 struct nand_chip *chip = mtd_to_nand(mtd);
956 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
Marek Vasut913a7252011-11-08 23:18:16 +0000957 int ret;
958
Sergey Lapin3a38a552013-01-14 03:46:50 +0000959 if (ops->mode == MTD_OPS_RAW)
Marek Vasut913a7252011-11-08 23:18:16 +0000960 nand_info->raw_oob_mode = 1;
961 else
962 nand_info->raw_oob_mode = 0;
963
964 ret = nand_info->hooked_write_oob(mtd, to, ops);
965
966 nand_info->raw_oob_mode = 0;
967
968 return ret;
969}
970
971/*
972 * Mark a block bad in NAND.
973 *
974 * This function is a veneer that replaces the function originally installed by
975 * the NAND Flash MTD code.
976 */
977static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
978{
Scott Wood17fed142016-05-30 13:57:56 -0500979 struct nand_chip *chip = mtd_to_nand(mtd);
980 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
Marek Vasut913a7252011-11-08 23:18:16 +0000981 int ret;
982
983 nand_info->marking_block_bad = 1;
984
985 ret = nand_info->hooked_block_markbad(mtd, ofs);
986
987 nand_info->marking_block_bad = 0;
988
989 return ret;
990}
991
992/*
993 * There are several places in this driver where we have to handle the OOB and
994 * block marks. This is the function where things are the most complicated, so
995 * this is where we try to explain it all. All the other places refer back to
996 * here.
997 *
998 * These are the rules, in order of decreasing importance:
999 *
1000 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
1001 * write operations take measures to protect it.
1002 *
1003 * 2) In read operations, the first byte of the OOB we return must reflect the
1004 * true state of the block mark, no matter where that block mark appears in
1005 * the physical page.
1006 *
1007 * 3) ECC-based read operations return an OOB full of set bits (since we never
1008 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1009 * return).
1010 *
1011 * 4) "Raw" read operations return a direct view of the physical bytes in the
1012 * page, using the conventional definition of which bytes are data and which
1013 * are OOB. This gives the caller a way to see the actual, physical bytes
1014 * in the page, without the distortions applied by our ECC engine.
1015 *
1016 * What we do for this specific read operation depends on whether we're doing
1017 * "raw" read, or an ECC-based read.
1018 *
1019 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1020 * easy. When reading a page, for example, the NAND Flash MTD code calls our
1021 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1022 * ECC-based or raw view of the page is implicit in which function it calls
1023 * (there is a similar pair of ECC-based/raw functions for writing).
1024 *
1025 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
1026 * ECC-based/raw functions for reading or or writing the OOB. The fact that the
1027 * caller wants an ECC-based or raw view of the page is not propagated down to
1028 * this driver.
1029 *
1030 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
1031 * ecc.read_oob and ecc.write_oob function pointers in the owning
1032 * struct mtd_info with our own functions. These hook functions set the
1033 * raw_oob_mode field so that, when control finally arrives here, we'll know
1034 * what to do.
1035 */
1036static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
Sergey Lapin3a38a552013-01-14 03:46:50 +00001037 int page)
Marek Vasut913a7252011-11-08 23:18:16 +00001038{
Scott Wood17fed142016-05-30 13:57:56 -05001039 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
Marek Vasut913a7252011-11-08 23:18:16 +00001040
1041 /*
1042 * First, fill in the OOB buffer. If we're doing a raw read, we need to
1043 * get the bytes from the physical page. If we're not doing a raw read,
1044 * we need to fill the buffer with set bits.
1045 */
1046 if (nand_info->raw_oob_mode) {
1047 /*
1048 * If control arrives here, we're doing a "raw" read. Send the
1049 * command to read the conventional OOB and read it.
1050 */
1051 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1052 nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
1053 } else {
1054 /*
1055 * If control arrives here, we're not doing a "raw" read. Fill
1056 * the OOB buffer with set bits and correct the block mark.
1057 */
1058 memset(nand->oob_poi, 0xff, mtd->oobsize);
1059
1060 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
1061 mxs_nand_read_buf(mtd, nand->oob_poi, 1);
1062 }
1063
1064 return 0;
1065
1066}
1067
1068/*
1069 * Write OOB data to NAND.
1070 */
1071static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
1072 int page)
1073{
Scott Wood17fed142016-05-30 13:57:56 -05001074 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
Marek Vasut913a7252011-11-08 23:18:16 +00001075 uint8_t block_mark = 0;
1076
1077 /*
1078 * There are fundamental incompatibilities between the i.MX GPMI NFC and
1079 * the NAND Flash MTD model that make it essentially impossible to write
1080 * the out-of-band bytes.
1081 *
1082 * We permit *ONE* exception. If the *intent* of writing the OOB is to
1083 * mark a block bad, we can do that.
1084 */
1085
1086 if (!nand_info->marking_block_bad) {
1087 printf("NXS NAND: Writing OOB isn't supported\n");
1088 return -EIO;
1089 }
1090
1091 /* Write the block mark. */
1092 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
1093 nand->write_buf(mtd, &block_mark, 1);
1094 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1095
1096 /* Check if it worked. */
1097 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
1098 return -EIO;
1099
1100 return 0;
1101}
1102
1103/*
1104 * Claims all blocks are good.
1105 *
1106 * In principle, this function is *only* called when the NAND Flash MTD system
1107 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
1108 * the driver for bad block information.
1109 *
1110 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
1111 * this function is *only* called when we take it away.
1112 *
1113 * Thus, this function is only called when we want *all* blocks to look good,
1114 * so it *always* return success.
1115 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	/* Deliberately claim every block is good; see the comment above. */
	return 0;
}
1120
1121static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
1122{
1123 struct nand_chip *chip = mtd_to_nand(mtd);
1124 struct nand_chip *nand = mtd_to_nand(mtd);
1125 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
Han Xu2ee499e2022-03-25 08:36:38 -05001126 int err;
Stefan Agneread66eb2018-06-22 18:06:18 +02001127
Ye Li94547442020-05-04 22:08:50 +08001128 if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
1129 printf("unsupported NAND chip, minimum ecc required %d\n"
1130 , chip->ecc_strength_ds);
1131 return -EINVAL;
1132 }
Stefan Agneread66eb2018-06-22 18:06:18 +02001133
Han Xu2ee499e2022-03-25 08:36:38 -05001134 /* use the legacy bch setting by default */
1135 if ((!nand_info->use_minimum_ecc && mtd->oobsize < 1024) ||
1136 !(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) {
1137 dev_dbg(mtd->dev, "use legacy bch geometry\n");
1138 err = mxs_nand_legacy_calc_ecc_layout(geo, mtd);
1139 if (!err)
1140 return 0;
Ye Li94547442020-05-04 22:08:50 +08001141 }
Stefan Agneread66eb2018-06-22 18:06:18 +02001142
Han Xu2ee499e2022-03-25 08:36:38 -05001143 /* for large oob nand */
1144 if (mtd->oobsize > 1024) {
1145 dev_dbg(mtd->dev, "use large oob bch geometry\n");
1146 err = mxs_nand_calc_ecc_for_large_oob(geo, mtd);
1147 if (!err)
1148 return 0;
1149 }
Ye Li94547442020-05-04 22:08:50 +08001150
Han Xu2ee499e2022-03-25 08:36:38 -05001151 /* otherwise use the minimum ecc nand chips required */
1152 dev_dbg(mtd->dev, "use minimum ecc bch geometry\n");
1153 err = mxs_nand_calc_ecc_layout_by_info(geo, mtd, chip->ecc_strength_ds,
1154 chip->ecc_step_ds);
Stefan Agneread66eb2018-06-22 18:06:18 +02001155
Han Xu2ee499e2022-03-25 08:36:38 -05001156 if (err)
1157 dev_err(mtd->dev, "none of the bch geometry setting works\n");
1158
1159 return err;
1160}
1161
1162void mxs_nand_dump_geo(struct mtd_info *mtd)
1163{
1164 struct nand_chip *nand = mtd_to_nand(mtd);
1165 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1166 struct bch_geometry *geo = &nand_info->bch_geometry;
1167
1168 dev_dbg(mtd->dev, "BCH Geometry :\n"
1169 "GF Length\t\t: %u\n"
1170 "ECC Strength\t\t: %u\n"
1171 "ECC for Meta\t\t: %u\n"
1172 "ECC Chunk0 Size\t\t: %u\n"
1173 "ECC Chunkn Size\t\t: %u\n"
1174 "ECC Chunk Count\t\t: %u\n"
1175 "Block Mark Byte Offset\t: %u\n"
1176 "Block Mark Bit Offset\t: %u\n",
1177 geo->gf_len,
1178 geo->ecc_strength,
1179 geo->ecc_for_meta,
1180 geo->ecc_chunk0_size,
1181 geo->ecc_chunkn_size,
1182 geo->ecc_chunk_count,
1183 geo->block_mark_byte_offset,
1184 geo->block_mark_bit_offset);
Marek Vasut913a7252011-11-08 23:18:16 +00001185}
1186
1187/*
Marek Vasut913a7252011-11-08 23:18:16 +00001188 * At this point, the physical NAND Flash chips have been identified and
1189 * counted, so we know the physical geometry. This enables us to make some
1190 * important configuration decisions.
1191 *
Robert P. J. Day8d56db92016-07-15 13:44:45 -04001192 * The return value of this function propagates directly back to this driver's
Stefan Agner5883e552018-06-22 17:19:47 +02001193 * board_nand_init(). Anything other than zero will cause this driver to
Marek Vasut913a7252011-11-08 23:18:16 +00001194 * tear everything down and declare failure.
1195 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	/* Start in normal (non-randomized) mode and remember the chip's
	 * native page/OOB sizes so the FCB helpers can restore them later. */
	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	/* Choose a BCH geometry; fail hard if none fits this chip. */
	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	mxs_nand_dump_geo(mtd);

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* The register encodes half the ECC strength (strength is even). */
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	/* Cache the programmed value so mxs_nand_mode_normal() can restore
	 * it after an FCB-mode layout switch. */
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. See the large comment
	 * above mxs_nand_ecc_read_oob() for why these veneers exist. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}
1267
1268/*
1269 * Allocate DMA buffers
1270 */
1271int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
1272{
1273 uint8_t *buf;
1274 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
1275
Marek Vasut1b120e82012-03-15 18:33:19 +00001276 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);
1277
Marek Vasut913a7252011-11-08 23:18:16 +00001278 /* DMA buffers */
Marek Vasut1b120e82012-03-15 18:33:19 +00001279 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
Marek Vasut913a7252011-11-08 23:18:16 +00001280 if (!buf) {
1281 printf("MXS NAND: Error allocating DMA buffers\n");
1282 return -ENOMEM;
1283 }
1284
Marek Vasut1b120e82012-03-15 18:33:19 +00001285 memset(buf, 0, nand_info->data_buf_size);
Marek Vasut913a7252011-11-08 23:18:16 +00001286
1287 nand_info->data_buf = buf;
1288 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
Marek Vasut913a7252011-11-08 23:18:16 +00001289 /* Command buffers */
1290 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
1291 MXS_NAND_COMMAND_BUFFER_SIZE);
1292 if (!nand_info->cmd_buf) {
1293 free(buf);
1294 printf("MXS NAND: Error allocating command buffers\n");
1295 return -ENOMEM;
1296 }
1297 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
1298 nand_info->cmd_queue_len = 0;
1299
1300 return 0;
1301}
1302
1303/*
1304 * Initializes the NFC hardware.
1305 */
/*
 * Allocate the DMA descriptor pool, bring up the APBH DMA channels used
 * by the GPMI block, reset GPMI/BCH and select BCH-ECC NAND mode.
 *
 * Unwinds in reverse order on failure (channels, then descriptors, then
 * the descriptor pointer array). Returns 0 or a negative error code.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	/* Claim every GPMI channel (one per possible chip select). */
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	/* Release only the channels initialized before the failure. */
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	/* Free only the descriptors allocated before the failure. */
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}
1362
Stefan Agner7152f342018-06-22 17:19:46 +02001363int mxs_nand_init_spl(struct nand_chip *nand)
1364{
1365 struct mxs_nand_info *nand_info;
1366 int err;
1367
1368 nand_info = malloc(sizeof(struct mxs_nand_info));
1369 if (!nand_info) {
1370 printf("MXS NAND: Failed to allocate private data\n");
1371 return -ENOMEM;
1372 }
1373 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1374
Stefan Agnerdc8af6d2018-06-22 18:06:12 +02001375 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1376 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
Adam Ford10210732019-01-02 20:36:52 -06001377
Peng Fan128abf42020-05-04 22:09:00 +08001378 if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
Adam Ford10210732019-01-02 20:36:52 -06001379 nand_info->max_ecc_strength_supported = 62;
1380 else
1381 nand_info->max_ecc_strength_supported = 40;
1382
Stefan Agner7152f342018-06-22 17:19:46 +02001383 err = mxs_nand_alloc_buffers(nand_info);
1384 if (err)
1385 return err;
1386
Stefan Agner00e65162018-06-22 18:06:13 +02001387 err = mxs_nand_init_dma(nand_info);
Stefan Agner7152f342018-06-22 17:19:46 +02001388 if (err)
1389 return err;
1390
1391 nand_set_controller_data(nand, nand_info);
1392
1393 nand->options |= NAND_NO_SUBPAGE_WRITE;
1394
1395 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1396 nand->dev_ready = mxs_nand_device_ready;
1397 nand->select_chip = mxs_nand_select_chip;
Stefan Agner7152f342018-06-22 17:19:46 +02001398
1399 nand->read_byte = mxs_nand_read_byte;
1400 nand->read_buf = mxs_nand_read_buf;
1401
1402 nand->ecc.read_page = mxs_nand_ecc_read_page;
1403
1404 nand->ecc.mode = NAND_ECC_HW;
Stefan Agner7152f342018-06-22 17:19:46 +02001405
1406 return 0;
1407}
1408
Stefan Agner19f90512018-06-22 18:06:16 +02001409int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
Marek Vasut913a7252011-11-08 23:18:16 +00001410{
Stefan Agner5883e552018-06-22 17:19:47 +02001411 struct mtd_info *mtd;
Stefan Agner5883e552018-06-22 17:19:47 +02001412 struct nand_chip *nand;
Marek Vasut913a7252011-11-08 23:18:16 +00001413 int err;
1414
Stefan Agner5883e552018-06-22 17:19:47 +02001415 nand = &nand_info->chip;
1416 mtd = nand_to_mtd(nand);
Marek Vasut913a7252011-11-08 23:18:16 +00001417 err = mxs_nand_alloc_buffers(nand_info);
1418 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001419 return err;
Marek Vasut913a7252011-11-08 23:18:16 +00001420
Stefan Agner00e65162018-06-22 18:06:13 +02001421 err = mxs_nand_init_dma(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001422 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001423 goto err_free_buffers;
Marek Vasut913a7252011-11-08 23:18:16 +00001424
1425 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));
1426
Stefan Agner95f376f2018-06-22 17:19:48 +02001427#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1428 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1429#endif
1430
Scott Wood17fed142016-05-30 13:57:56 -05001431 nand_set_controller_data(nand, nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001432 nand->options |= NAND_NO_SUBPAGE_WRITE;
1433
Stefan Agner150ddbc2018-06-22 18:06:17 +02001434 if (nand_info->dev)
Patrice Chotard33d2cf92021-09-13 16:25:53 +02001435 nand->flash_node = dev_ofnode(nand_info->dev);
Stefan Agner150ddbc2018-06-22 18:06:17 +02001436
Marek Vasut913a7252011-11-08 23:18:16 +00001437 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1438
1439 nand->dev_ready = mxs_nand_device_ready;
1440 nand->select_chip = mxs_nand_select_chip;
1441 nand->block_bad = mxs_nand_block_bad;
Marek Vasut913a7252011-11-08 23:18:16 +00001442
1443 nand->read_byte = mxs_nand_read_byte;
1444
1445 nand->read_buf = mxs_nand_read_buf;
1446 nand->write_buf = mxs_nand_write_buf;
1447
Stefan Agner5883e552018-06-22 17:19:47 +02001448 /* first scan to find the device and get the page size */
1449 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
Stefan Agner404b1102018-06-22 18:06:14 +02001450 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001451
1452 if (mxs_nand_setup_ecc(mtd))
Stefan Agner404b1102018-06-22 18:06:14 +02001453 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001454
Marek Vasut913a7252011-11-08 23:18:16 +00001455 nand->ecc.read_page = mxs_nand_ecc_read_page;
1456 nand->ecc.write_page = mxs_nand_ecc_write_page;
1457 nand->ecc.read_oob = mxs_nand_ecc_read_oob;
1458 nand->ecc.write_oob = mxs_nand_ecc_write_oob;
1459
1460 nand->ecc.layout = &fake_ecc_layout;
1461 nand->ecc.mode = NAND_ECC_HW;
Ye Li94547442020-05-04 22:08:50 +08001462 nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
Stefan Agner72d627d2018-06-22 17:19:50 +02001463 nand->ecc.strength = nand_info->bch_geometry.ecc_strength;
Marek Vasut913a7252011-11-08 23:18:16 +00001464
Stefan Agner5883e552018-06-22 17:19:47 +02001465 /* second phase scan */
1466 err = nand_scan_tail(mtd);
1467 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001468 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001469
1470 err = nand_register(0, mtd);
1471 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001472 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001473
Stefan Agner404b1102018-06-22 18:06:14 +02001474 return 0;
Marek Vasut913a7252011-11-08 23:18:16 +00001475
Stefan Agner404b1102018-06-22 18:06:14 +02001476err_free_buffers:
Marek Vasut913a7252011-11-08 23:18:16 +00001477 free(nand_info->data_buf);
1478 free(nand_info->cmd_buf);
Stefan Agner404b1102018-06-22 18:06:14 +02001479
1480 return err;
1481}
1482
Stefan Agner150ddbc2018-06-22 18:06:17 +02001483#ifndef CONFIG_NAND_MXS_DT
Stefan Agner404b1102018-06-22 18:06:14 +02001484void board_nand_init(void)
1485{
1486 struct mxs_nand_info *nand_info;
1487
1488 nand_info = malloc(sizeof(struct mxs_nand_info));
1489 if (!nand_info) {
1490 printf("MXS NAND: Failed to allocate private data\n");
1491 return;
1492 }
1493 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1494
1495 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1496 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1497
Stefan Agner4dc98db2018-06-22 18:06:15 +02001498 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
1499 if (is_mx6sx() || is_mx7())
1500 nand_info->max_ecc_strength_supported = 62;
1501 else
1502 nand_info->max_ecc_strength_supported = 40;
1503
1504#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
1505 nand_info->use_minimum_ecc = true;
1506#endif
1507
Stefan Agner19f90512018-06-22 18:06:16 +02001508 if (mxs_nand_init_ctrl(nand_info) < 0)
Stefan Agner404b1102018-06-22 18:06:14 +02001509 goto err;
1510
Stefan Agner5883e552018-06-22 17:19:47 +02001511 return;
Stefan Agner404b1102018-06-22 18:06:14 +02001512
1513err:
1514 free(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001515}
Stefan Agner150ddbc2018-06-22 18:06:17 +02001516#endif
Igor Opaniukc55401372019-11-03 16:49:43 +01001517
1518/*
1519 * Read NAND layout for FCB block generation.
1520 */
1521void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
1522{
1523 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1524 u32 tmp;
1525
1526 tmp = readl(&bch_regs->hw_bch_flash0layout0);
1527 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
1528 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1529 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
1530 BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1531
1532 tmp = readl(&bch_regs->hw_bch_flash0layout1);
1533 l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
1534 BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
1535 l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
1536 BCH_FLASHLAYOUT0_ECC0_OFFSET;
1537 l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
1538 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
1539 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
1540 BCH_FLASHLAYOUT1_ECCN_OFFSET;
Han Xu33543b52020-05-04 22:08:58 +08001541 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
1542 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
Igor Opaniukc55401372019-11-03 16:49:43 +01001543}
1544
1545/*
1546 * Set BCH to specific layout used by ROM bootloader to read FCB.
1547 */
Han Xuafed2a12020-05-06 20:59:19 +08001548void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
Igor Opaniukc55401372019-11-03 16:49:43 +01001549{
1550 u32 tmp;
1551 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1552 struct nand_chip *nand = mtd_to_nand(mtd);
1553 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1554
1555 nand_info->en_randomizer = 1;
1556
1557 mtd->writesize = 1024;
1558 mtd->oobsize = 1862 - 1024;
1559
1560 /* 8 ecc_chunks_*/
1561 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1562 /* 32 bytes for metadata */
1563 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1564 /* using ECC62 level to be performed */
1565 tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1566 /* 0x20 * 4 bytes of the data0 block */
1567 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1568 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1569 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1570
1571 /* 1024 for data + 838 for OOB */
1572 tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1573 /* using ECC62 level to be performed */
1574 tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1575 /* 0x20 * 4 bytes of the data0 block */
1576 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1577 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1578 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1579}
1580
1581/*
Han Xuafed2a12020-05-06 20:59:19 +08001582 * Set BCH to specific layout used by ROM bootloader to read FCB.
1583 */
1584void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
1585{
1586 u32 tmp;
1587 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1588 struct nand_chip *nand = mtd_to_nand(mtd);
1589 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1590
1591 /* no randomizer in this setting*/
1592 nand_info->en_randomizer = 0;
1593
1594 mtd->writesize = 1024;
1595 mtd->oobsize = 1576 - 1024;
1596
1597 /* 8 ecc_chunks_*/
1598 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1599 /* 32 bytes for metadata */
1600 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1601 /* using ECC40 level to be performed */
1602 tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1603 /* 0x20 * 4 bytes of the data0 block */
1604 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1605 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1606 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1607
1608 /* 1024 for data + 552 for OOB */
1609 tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1610 /* using ECC40 level to be performed */
1611 tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1612 /* 0x20 * 4 bytes of the data0 block */
1613 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1614 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1615 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1616}
1617
1618/*
Igor Opaniukc55401372019-11-03 16:49:43 +01001619 * Restore BCH to normal settings.
1620 */
1621void mxs_nand_mode_normal(struct mtd_info *mtd)
1622{
1623 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1624 struct nand_chip *nand = mtd_to_nand(mtd);
1625 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1626
1627 nand_info->en_randomizer = 0;
1628
1629 mtd->writesize = nand_info->writesize;
1630 mtd->oobsize = nand_info->oobsize;
1631
1632 writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
1633 writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
1634}
1635
1636uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
1637{
1638 struct nand_chip *chip = mtd_to_nand(mtd);
1639 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1640 struct bch_geometry *geo = &nand_info->bch_geometry;
1641
1642 return geo->block_mark_byte_offset;
1643}
1644
1645uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
1646{
1647 struct nand_chip *chip = mtd_to_nand(mtd);
1648 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1649 struct bch_geometry *geo = &nand_info->bch_geometry;
1650
1651 return geo->block_mark_bit_offset;
1652}