// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <malloc.h>
#include <mxs_nand.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/math64.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
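	/*
	 * The ECC status bytes follow the metadata in the auxiliary buffer,
	 * rounded up to the next 32-bit boundary (10 -> 12).
	 */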
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

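/*
 * Check whether the physical bad block mark falls inside one of the ECC data
 * chunks (rather than inside an ECC parity area) for the given geometry, and
 * if so report which chunk it lands in.
 */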
static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal that of chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the chunk size (C) >= the OOB size (O) */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	int corr, ds_corr;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

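	/*
	 * Pages with more than 512 bytes of OOB use 1 KiB chunks, which in
	 * turn need a 14-bit Galois field for the BCH parity.
	 */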
	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);
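
	/*
	 * Worked example (illustrative figures, not from the original code):
	 * a 4096-byte page with 224 bytes of OOB has 8 chunks of 512 bytes,
	 * so ecc_strength = (224 - 10) * 8 / (13 * 8) = 16 (integer
	 * division), which is already even and thus kept, subject to the
	 * platform maximum.
	 */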

	/* Check the ECC strength, the same check nand_ecc_is_strong_enough() does */
	if (chip->ecc_step_ds) {
		corr = mtd->writesize * geo->ecc_strength /
		       geo->ecc_chunkn_size;
		ds_corr = mtd->writesize * chip->ecc_strength_ds /
			  chip->ecc_step_ds;
		if (corr < ds_corr ||
		    geo->ecc_strength < chip->ecc_strength_ds)
			return -EINVAL;
	}

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check for the minimum ECC the NAND chip requires */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC strength the platform can support */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * Search for a supported ECC strength that places the BBM
	 * in a data chunk
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND chip
	 * requires, but change the ECC page layout
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* Check if the OOB can afford this extra ECC chunk */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate in which chunk the BBM is located */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			      geo->ecc_chunkn_size * 8) + 1;
	}

	/* Calculate the number of ECC chunks behind the BBM */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
326
327/*
328 * Wait for BCH complete IRQ and clear the IRQ
329 */
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200330static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
Marek Vasut913a7252011-11-08 23:18:16 +0000331{
Marek Vasut913a7252011-11-08 23:18:16 +0000332 int timeout = MXS_NAND_BCH_TIMEOUT;
333 int ret;
334
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200335 ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
Marek Vasut913a7252011-11-08 23:18:16 +0000336 BCH_CTRL_COMPLETE_IRQ, timeout);
337
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200338 writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);
Marek Vasut913a7252011-11-08 23:18:16 +0000339
340 return ret;
341}
342
/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in the MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends the command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

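	/* Splice the byte taken from the OOB into the data area, at the same bit position. */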
	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;

	mxs_nand_read_buf(mtd, &buf, 1);
	return buf;
}

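/*
 * Check whether an ECC-failed page is in fact an erased page: count the zero
 * bits first in the failing chunk of the corrected data, then in a raw
 * re-read of the whole page. If both counts stay below the threshold, treat
 * the page as erased: report it as all 0xff and account the flipped bits as
 * corrected.
 */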
static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n",
	       page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

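	/*
	 * When the randomizer is enabled, the page number seeds the
	 * derandomizer here, mirroring the write path below.
	 */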
	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
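	/*
	 * Per-chunk status from the BCH engine: 0x00 means the chunk was
	 * error-free, 0xff means it was erased, 0xfe means it was
	 * uncorrectable, and any other value is the number of corrected
	 * bitflips.
	 */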
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write the NAND page number to be randomized
		 * to the GPMI_ECCCOUNT register.
		 *
		 * The value is between 0 and 255. For additional details,
		 * see section 9.6.6.4 of the i.MX7D Applications Processor
		 * Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	int err;

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ecc required %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	/* Use the legacy BCH setting by default */
	if ((!nand_info->use_minimum_ecc && mtd->oobsize < 1024) ||
	    !(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) {
		dev_dbg(mtd->dev, "use legacy bch geometry\n");
		err = mxs_nand_legacy_calc_ecc_layout(geo, mtd);
		if (!err)
			return 0;
	}

	/* For large-OOB NAND */
	if (mtd->oobsize > 1024) {
		dev_dbg(mtd->dev, "use large oob bch geometry\n");
		err = mxs_nand_calc_ecc_for_large_oob(geo, mtd);
		if (!err)
			return 0;
	}

	/* Otherwise use the minimum ECC the NAND chip requires */
	dev_dbg(mtd->dev, "use minimum ecc bch geometry\n");
	err = mxs_nand_calc_ecc_layout_by_info(geo, mtd, chip->ecc_strength_ds,
					       chip->ecc_step_ds);

	if (err)
		dev_err(mtd->dev, "none of the bch geometry settings works\n");

	return err;
}

void mxs_nand_dump_geo(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	dev_dbg(mtd->dev, "BCH Geometry :\n"
		"GF Length\t\t: %u\n"
		"ECC Strength\t\t: %u\n"
		"ECC for Meta\t\t: %u\n"
		"ECC Chunk0 Size\t\t: %u\n"
		"ECC Chunkn Size\t\t: %u\n"
		"ECC Chunk Count\t\t: %u\n"
		"Block Mark Byte Offset\t: %u\n"
		"Block Mark Bit Offset\t: %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->ecc_for_meta,
		geo->ecc_chunk0_size,
		geo->ecc_chunkn_size,
		geo->ecc_chunk_count,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	mxs_nand_dump_geo(mtd);

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

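	/*
	 * Configure layout 1: the total page size plus the ECC strength and
	 * chunk size used for the remaining chunks (1..n).
	 */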
1234 tmp = (mtd->writesize + mtd->oobsize)
1235 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
Stefan Agnerd0778b32018-06-22 17:19:49 +02001236 tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
Ye Li94547442020-05-04 22:08:50 +08001237 tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
Stefan Agnerd0778b32018-06-22 17:19:49 +02001238 tmp |= (geo->gf_len == 14 ? 1 : 0) <<
Peng Fanc94f09d2015-07-21 16:15:19 +08001239 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
Marek Vasut913a7252011-11-08 23:18:16 +00001240 writel(tmp, &bch_regs->hw_bch_flash0layout1);
Igor Opaniukc55401372019-11-03 16:49:43 +01001241 nand_info->bch_flash0layout1 = tmp;
Marek Vasut913a7252011-11-08 23:18:16 +00001242
Peng Fan9e813732020-05-04 22:08:53 +08001243 /* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
1244 if (is_mx6dqp() || is_mx7() ||
Peng Fan128abf42020-05-04 22:09:00 +08001245 is_mx6ul() || is_imx8() || is_imx8m())
Peng Fan9e813732020-05-04 22:08:53 +08001246 writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
1247 &bch_regs->hw_bch_mode);
1248
Marek Vasut913a7252011-11-08 23:18:16 +00001249 /* Set *all* chip selects to use layout 0 */
1250 writel(0, &bch_regs->hw_bch_layoutselect);
1251
1252 /* Enable BCH complete interrupt */
1253 writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);
1254
Stefan Agner5883e552018-06-22 17:19:47 +02001255 return 0;
Marek Vasut913a7252011-11-08 23:18:16 +00001256}
1257
/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;

	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
			    MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

Michael Trimarchifd6e13e2022-08-30 16:48:47 +02001353/*
1354 * <1> Firstly, we should know what's the GPMI-clock means.
1355 * The GPMI-clock is the internal clock in the gpmi nand controller.
1356 * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
1357 * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
1358 *
1359 * <2> Secondly, we should know what's the frequency on the nand chip pins.
1360 * The frequency on the nand chip pins is derived from the GPMI-clock.
1361 * We can get it from the following equation:
1362 *
1363 * F = G / (DS + DH)
1364 *
1365 * F : the frequency on the nand chip pins.
1366 * G : the GPMI clock, such as 100MHz.
1367 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
1368 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
1369 *
1370 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
1371 * the nand EDO(extended Data Out) timing could be applied.
1372 * The GPMI implements a feedback read strobe to sample the read data.
1373 * The feedback read strobe can be delayed to support the nand EDO timing
1374 * where the read strobe may deasserts before the read data is valid, and
1375 * read data is valid for some time after read strobe.
1376 *
1377 * The following figure illustrates some aspects of a NAND Flash read:
1378 *
1379 * |<---tREA---->|
1380 * | |
1381 * | | |
1382 * |<--tRP-->| |
1383 * | | |
1384 * __ ___|__________________________________
1385 * RDN \________/ |
1386 * |
1387 * /---------\
1388 * Read Data --------------< >---------
1389 * \---------/
1390 * | |
1391 * |<-D->|
1392 * FeedbackRDN ________ ____________
1393 * \___________/
1394 *
1395 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
1396 *
1397 *
1398 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
1399 *
1400 * 4.1) From the aspect of the nand chip pins:
1401 * Delay = (tREA + C - tRP) {1}
1402 *
1403 * tREA : the maximum read access time.
1404 * C : a constant to adjust the delay. default is 4000ps.
1405 * tRP : the read pulse width, which is exactly:
1406 * tRP = (GPMI-clock-period) * DATA_SETUP
1407 *
1408 * 4.2) From the aspect of the GPMI nand controller:
1409 * Delay = RDN_DELAY * 0.125 * RP {2}
1410 *
1411 * RP : the DLL reference period.
1412 * if (GPMI-clock-period > DLL_THRETHOLD)
1413 * RP = GPMI-clock-period / 2;
1414 * else
1415 * RP = GPMI-clock-period;
1416 *
1417 * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
 1418 * is greater than DLL_THRESHOLD. In other SoCs the DLL_THRESHOLD
 1419 * is 16000ps, but in mx6q we use 12000ps.
1420 *
 1421 * 4.3) Since {1} equals {2}, we get:
1422 *
1423 * (tREA + 4000 - tRP) * 8
1424 * RDN_DELAY = ----------------------- {3}
1425 * RP
1426 */
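
/*
 * A worked example of {3}, using ONFI EDO mode 5 numbers (tREA_max = 16ns,
 * tDS_min = 10ns, per the ONFI SDR timing table; treat the figures as
 * illustrative rather than as a statement about any particular chip):
 *
 *   GPMI clock = 100MHz                  -> GPMI-clock-period = 10000ps
 *   10000ps <= 12000ps (mx6q DLL_THRESHOLD), so RP = 10000ps
 *   DATA_SETUP = ceil(10000ps / 10000ps) = 1 -> tRP = 1 * 10000ps = 10000ps
 *
 *                (16000 + 4000 - 10000) * 8
 *   RDN_DELAY = ---------------------------- = 8
 *                          10000
 */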
1427static void mxs_compute_timings(struct nand_chip *chip,
1428 const struct nand_sdr_timings *sdr)
1429{
1430 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1431 unsigned long clk_rate;
1432 unsigned int dll_wait_time_us;
1433 unsigned int dll_threshold_ps = nand_info->max_chain_delay;
1434 unsigned int period_ps, reference_period_ps;
1435 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
1436 unsigned int tRP_ps;
1437 bool use_half_period;
1438 int sample_delay_ps, sample_delay_factor;
1439 u16 busy_timeout_cycles;
1440 u8 wrn_dly_sel;
1441 u32 timing0;
1442 u32 timing1;
1443 u32 ctrl1n;
1444
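	/*
	 * Bucket the chip's tRC_min into ONFI timing classes: mode 3 has
	 * tRC = 30ns, mode 4 (EDO) 25ns and mode 5 (EDO) 20ns, so anything
	 * at or above 30000ps can only be a non-EDO mode [0-3].
	 */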
1445 if (sdr->tRC_min >= 30000) {
1446 /* ONFI non-EDO modes [0-3] */
1447 clk_rate = 22000000;
1448 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
1449 } else if (sdr->tRC_min >= 25000) {
1450 /* ONFI EDO mode 4 */
1451 clk_rate = 80000000;
1452 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 1453		debug("%s, setting ONFI EDO mode 4\n", __func__);
1454 } else {
1455 /* ONFI EDO mode 5 */
1456 clk_rate = 100000000;
1457 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
 1458		debug("%s, setting ONFI EDO mode 5\n", __func__);
1459 }
1460
1461 /* SDR core timings are given in picoseconds */
1462 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, clk_rate);
1463
1464 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
1465 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
1466 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
1467 busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
1468
1469 timing0 = (addr_setup_cycles << GPMI_TIMING0_ADDRESS_SETUP_OFFSET) |
1470 (data_hold_cycles << GPMI_TIMING0_DATA_HOLD_OFFSET) |
1471 (data_setup_cycles << GPMI_TIMING0_DATA_SETUP_OFFSET);
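	/*
	 * The DEVICE_BUSY_TIMEOUT field counts in ticks of 4096 GPMI clock
	 * cycles (per the i.MX reference manuals), so the raw cycle count
	 * must be scaled down before it is programmed.
	 */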
 1472	timing1 = DIV_ROUND_UP(busy_timeout_cycles, 4096) << GPMI_TIMING1_DEVICE_BUSY_TIMEOUT_OFFSET;
1473
1474 /*
1475 * Derive NFC ideal delay from {3}:
1476 *
1477 * (tREA + 4000 - tRP) * 8
1478 * RDN_DELAY = -----------------------
1479 * RP
1480 */
1481 if (period_ps > dll_threshold_ps) {
1482 use_half_period = true;
1483 reference_period_ps = period_ps / 2;
1484 } else {
1485 use_half_period = false;
1486 reference_period_ps = period_ps;
1487 }
1488
1489 tRP_ps = data_setup_cycles * period_ps;
1490 sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
1491 if (sample_delay_ps > 0)
1492 sample_delay_factor = sample_delay_ps / reference_period_ps;
1493 else
1494 sample_delay_factor = 0;
1495
1496 ctrl1n = (wrn_dly_sel << GPMI_CTRL1_WRN_DLY_SEL_OFFSET);
1497 if (sample_delay_factor)
1498 ctrl1n |= (sample_delay_factor << GPMI_CTRL1_RDN_DELAY_OFFSET) |
1499 GPMI_CTRL1_DLL_ENABLE |
1500 (use_half_period ? GPMI_CTRL1_HALF_PERIOD : 0);
1501
1502 writel(timing0, &nand_info->gpmi_regs->hw_gpmi_timing0);
1503 writel(timing1, &nand_info->gpmi_regs->hw_gpmi_timing1);
1504
1505 /*
 1506	 * Clear several CTRL1 fields; the DLL must be disabled when setting
1507 * RDN_DELAY or HALF_PERIOD.
1508 */
1509 writel(GPMI_CTRL1_CLEAR_MASK, &nand_info->gpmi_regs->hw_gpmi_ctrl1_clr);
1510 writel(ctrl1n, &nand_info->gpmi_regs->hw_gpmi_ctrl1_set);
1511
1512 clk_set_rate(nand_info->gpmi_clk, clk_rate);
1513
1514 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
 1515	dll_wait_time_us = DIV_ROUND_UP(64 * USEC_PER_SEC, clk_rate);
1516 if (!dll_wait_time_us)
1517 dll_wait_time_us = 1;
1518
1519 /* Wait for the DLL to settle. */
1520 udelay(dll_wait_time_us);
1521}
1522
1523static int mxs_nand_setup_interface(struct mtd_info *mtd, int chipnr,
1524 const struct nand_data_interface *conf)
1525{
1526 struct nand_chip *chip = mtd_to_nand(mtd);
1527 const struct nand_sdr_timings *sdr;
1528
1529 sdr = nand_get_sdr_timings(conf);
1530 if (IS_ERR(sdr))
1531 return PTR_ERR(sdr);
1532
1533 /* Stop here if this call was just a check */
1534 if (chipnr < 0)
1535 return 0;
1536
1537 /* Do the actual derivation of the controller timings */
1538 mxs_compute_timings(chip, sdr);
1539
1540 return 0;
1541}
1542
Stefan Agner7152f342018-06-22 17:19:46 +02001543int mxs_nand_init_spl(struct nand_chip *nand)
1544{
1545 struct mxs_nand_info *nand_info;
1546 int err;
1547
1548 nand_info = malloc(sizeof(struct mxs_nand_info));
1549 if (!nand_info) {
1550 printf("MXS NAND: Failed to allocate private data\n");
1551 return -ENOMEM;
1552 }
1553 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1554
Stefan Agnerdc8af6d2018-06-22 18:06:12 +02001555 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1556 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
Adam Ford10210732019-01-02 20:36:52 -06001557
Peng Fan128abf42020-05-04 22:09:00 +08001558 if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
Adam Ford10210732019-01-02 20:36:52 -06001559 nand_info->max_ecc_strength_supported = 62;
1560 else
1561 nand_info->max_ecc_strength_supported = 40;
1562
Ye Li61771d22022-03-31 13:27:47 +08001563 if (IS_ENABLED(CONFIG_NAND_MXS_USE_MINIMUM_ECC))
1564 nand_info->use_minimum_ecc = true;
1565
Stefan Agner7152f342018-06-22 17:19:46 +02001566 err = mxs_nand_alloc_buffers(nand_info);
1567 if (err)
1568 return err;
1569
Stefan Agner00e65162018-06-22 18:06:13 +02001570 err = mxs_nand_init_dma(nand_info);
Stefan Agner7152f342018-06-22 17:19:46 +02001571 if (err)
1572 return err;
1573
1574 nand_set_controller_data(nand, nand_info);
1575
1576 nand->options |= NAND_NO_SUBPAGE_WRITE;
1577
1578 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1579 nand->dev_ready = mxs_nand_device_ready;
1580 nand->select_chip = mxs_nand_select_chip;
Stefan Agner7152f342018-06-22 17:19:46 +02001581
1582 nand->read_byte = mxs_nand_read_byte;
1583 nand->read_buf = mxs_nand_read_buf;
1584
1585 nand->ecc.read_page = mxs_nand_ecc_read_page;
1586
1587 nand->ecc.mode = NAND_ECC_HW;
Stefan Agner7152f342018-06-22 17:19:46 +02001588
1589 return 0;
1590}
1591
Stefan Agner19f90512018-06-22 18:06:16 +02001592int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
Marek Vasut913a7252011-11-08 23:18:16 +00001593{
Stefan Agner5883e552018-06-22 17:19:47 +02001594 struct mtd_info *mtd;
Stefan Agner5883e552018-06-22 17:19:47 +02001595 struct nand_chip *nand;
Marek Vasut913a7252011-11-08 23:18:16 +00001596 int err;
1597
Stefan Agner5883e552018-06-22 17:19:47 +02001598 nand = &nand_info->chip;
1599 mtd = nand_to_mtd(nand);
Marek Vasut913a7252011-11-08 23:18:16 +00001600 err = mxs_nand_alloc_buffers(nand_info);
1601 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001602 return err;
Marek Vasut913a7252011-11-08 23:18:16 +00001603
Stefan Agner00e65162018-06-22 18:06:13 +02001604 err = mxs_nand_init_dma(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001605 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001606 goto err_free_buffers;
Marek Vasut913a7252011-11-08 23:18:16 +00001607
1608 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));
1609
Stefan Agner95f376f2018-06-22 17:19:48 +02001610#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1611 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1612#endif
1613
Scott Wood17fed142016-05-30 13:57:56 -05001614 nand_set_controller_data(nand, nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001615 nand->options |= NAND_NO_SUBPAGE_WRITE;
1616
Stefan Agner150ddbc2018-06-22 18:06:17 +02001617 if (nand_info->dev)
Patrice Chotard33d2cf92021-09-13 16:25:53 +02001618 nand->flash_node = dev_ofnode(nand_info->dev);
Stefan Agner150ddbc2018-06-22 18:06:17 +02001619
Marek Vasut913a7252011-11-08 23:18:16 +00001620 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1621
1622 nand->dev_ready = mxs_nand_device_ready;
1623 nand->select_chip = mxs_nand_select_chip;
1624 nand->block_bad = mxs_nand_block_bad;
Marek Vasut913a7252011-11-08 23:18:16 +00001625
1626 nand->read_byte = mxs_nand_read_byte;
1627
1628 nand->read_buf = mxs_nand_read_buf;
1629 nand->write_buf = mxs_nand_write_buf;
1630
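	/*
	 * mxs_compute_timings() calls clk_set_rate(), so only hook
	 * setup_data_interface when a GPMI clock was actually provided
	 * (e.g. by the DM/DT probe path).
	 */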
Michael Trimarchifd6e13e2022-08-30 16:48:47 +02001631 if (nand_info->gpmi_clk)
1632 nand->setup_data_interface = mxs_nand_setup_interface;
1633
Stefan Agner5883e552018-06-22 17:19:47 +02001634 /* first scan to find the device and get the page size */
1635 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
Stefan Agner404b1102018-06-22 18:06:14 +02001636 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001637
1638 if (mxs_nand_setup_ecc(mtd))
Stefan Agner404b1102018-06-22 18:06:14 +02001639 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001640
Marek Vasut913a7252011-11-08 23:18:16 +00001641 nand->ecc.read_page = mxs_nand_ecc_read_page;
1642 nand->ecc.write_page = mxs_nand_ecc_write_page;
1643 nand->ecc.read_oob = mxs_nand_ecc_read_oob;
1644 nand->ecc.write_oob = mxs_nand_ecc_write_oob;
1645
1646 nand->ecc.layout = &fake_ecc_layout;
1647 nand->ecc.mode = NAND_ECC_HW;
Ye Li94547442020-05-04 22:08:50 +08001648 nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
Stefan Agner72d627d2018-06-22 17:19:50 +02001649 nand->ecc.strength = nand_info->bch_geometry.ecc_strength;
Marek Vasut913a7252011-11-08 23:18:16 +00001650
Stefan Agner5883e552018-06-22 17:19:47 +02001651 /* second phase scan */
1652 err = nand_scan_tail(mtd);
1653 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001654 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001655
Michael Trimarchidc3da882022-05-15 11:35:30 +02001656 /* Hook some operations at the MTD level. */
1657 if (mtd->_read_oob != mxs_nand_hook_read_oob) {
1658 nand_info->hooked_read_oob = mtd->_read_oob;
1659 mtd->_read_oob = mxs_nand_hook_read_oob;
1660 }
1661
1662 if (mtd->_write_oob != mxs_nand_hook_write_oob) {
1663 nand_info->hooked_write_oob = mtd->_write_oob;
1664 mtd->_write_oob = mxs_nand_hook_write_oob;
1665 }
1666
1667 if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
1668 nand_info->hooked_block_markbad = mtd->_block_markbad;
1669 mtd->_block_markbad = mxs_nand_hook_block_markbad;
1670 }
1671
Stefan Agner5883e552018-06-22 17:19:47 +02001672 err = nand_register(0, mtd);
1673 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001674 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001675
Stefan Agner404b1102018-06-22 18:06:14 +02001676 return 0;
Marek Vasut913a7252011-11-08 23:18:16 +00001677
Stefan Agner404b1102018-06-22 18:06:14 +02001678err_free_buffers:
Marek Vasut913a7252011-11-08 23:18:16 +00001679 free(nand_info->data_buf);
1680 free(nand_info->cmd_buf);
Stefan Agner404b1102018-06-22 18:06:14 +02001681
1682 return err;
1683}
1684
Stefan Agner150ddbc2018-06-22 18:06:17 +02001685#ifndef CONFIG_NAND_MXS_DT
Stefan Agner404b1102018-06-22 18:06:14 +02001686void board_nand_init(void)
1687{
1688 struct mxs_nand_info *nand_info;
1689
1690 nand_info = malloc(sizeof(struct mxs_nand_info));
1691 if (!nand_info) {
1692 printf("MXS NAND: Failed to allocate private data\n");
1693 return;
1694 }
1695 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1696
1697 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1698 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1699
Stefan Agner4dc98db2018-06-22 18:06:15 +02001700 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
1701 if (is_mx6sx() || is_mx7())
1702 nand_info->max_ecc_strength_supported = 62;
1703 else
1704 nand_info->max_ecc_strength_supported = 40;
1705
1706#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
1707 nand_info->use_minimum_ecc = true;
1708#endif
1709
Stefan Agner19f90512018-06-22 18:06:16 +02001710 if (mxs_nand_init_ctrl(nand_info) < 0)
Stefan Agner404b1102018-06-22 18:06:14 +02001711 goto err;
1712
Stefan Agner5883e552018-06-22 17:19:47 +02001713 return;
Stefan Agner404b1102018-06-22 18:06:14 +02001714
1715err:
1716 free(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001717}
Stefan Agner150ddbc2018-06-22 18:06:17 +02001718#endif
Igor Opaniukc55401372019-11-03 16:49:43 +01001719
1720/*
1721 * Read NAND layout for FCB block generation.
1722 */
1723void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
1724{
1725 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1726 u32 tmp;
1727
1728 tmp = readl(&bch_regs->hw_bch_flash0layout0);
1729 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
1730 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1731 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
1732 BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1733
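	/*
	 * The DATA0_SIZE/DATAN_SIZE fields hold the chunk size in 32-bit
	 * words on these SoCs (cf. MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT),
	 * hence the multiplication by 4 below.
	 */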
 1734	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
 1735			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
 1736	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
 1737			BCH_FLASHLAYOUT0_ECC0_OFFSET;
 1738	tmp = readl(&bch_regs->hw_bch_flash0layout1);
 1739	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
1740 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
1741 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
1742 BCH_FLASHLAYOUT1_ECCN_OFFSET;
Han Xu33543b52020-05-04 22:08:58 +08001743 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
1744 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
Igor Opaniukc55401372019-11-03 16:49:43 +01001745}
1746
1747/*
 1748 * Set BCH to the specific 62-bit layout used by the ROM bootloader to read the FCB.
1749 */
Han Xuafed2a12020-05-06 20:59:19 +08001750void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
Igor Opaniukc55401372019-11-03 16:49:43 +01001751{
1752 u32 tmp;
1753 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1754 struct nand_chip *nand = mtd_to_nand(mtd);
1755 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1756
1757 nand_info->en_randomizer = 1;
1758
1759 mtd->writesize = 1024;
1760 mtd->oobsize = 1862 - 1024;
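	/*
	 * Layout sanity check (a derivation, not a quote from the manual):
	 * 32 bytes metadata + 8 chunks * 128 bytes data
	 * + 8 chunks * 62 bits * 13 (GF13) / 8 = 32 + 1024 + 806 = 1862,
	 * matching the PAGE_SIZE programmed below.
	 */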
1761
 1762	/* 8 ECC chunks */
1763 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1764 /* 32 bytes for metadata */
1765 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
 1766	/* ECC level 62 (register field 0x1F = 62 / 2) */
1767 tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
 1768	/* data0 chunk size: 0x20 words * 4 = 128 bytes */
1769 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1770 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1771 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1772
1773 /* 1024 for data + 838 for OOB */
1774 tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
 1775	/* ECC level 62 (register field 0x1F = 62 / 2) */
1776 tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
 1777	/* datan chunk size: 0x20 words * 4 = 128 bytes */
1778 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1779 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1780 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1781}
1782
1783/*
Han Xuafed2a12020-05-06 20:59:19 +08001784 * Set BCH to the specific 40-bit layout used by the ROM bootloader to read the FCB.
1785 */
1786void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
1787{
1788 u32 tmp;
1789 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1790 struct nand_chip *nand = mtd_to_nand(mtd);
1791 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1792
 1793	/* No randomizer in this setting */
1794 nand_info->en_randomizer = 0;
1795
1796 mtd->writesize = 1024;
1797 mtd->oobsize = 1576 - 1024;
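	/*
	 * Layout sanity check (a derivation, not a quote from the manual):
	 * 32 bytes metadata + 8 chunks * 128 bytes data
	 * + 8 chunks * 40 bits * 13 (GF13) / 8 = 32 + 1024 + 520 = 1576,
	 * matching the PAGE_SIZE programmed below.
	 */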
1798
 1799	/* 8 ECC chunks */
1800 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1801 /* 32 bytes for metadata */
1802 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
 1803	/* ECC level 40 (register field 0x14 = 40 / 2) */
1804 tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
 1805	/* data0 chunk size: 0x20 words * 4 = 128 bytes */
1806 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1807 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1808 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1809
1810 /* 1024 for data + 552 for OOB */
1811 tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
 1812	/* ECC level 40 (register field 0x14 = 40 / 2) */
1813 tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
 1814	/* datan chunk size: 0x20 words * 4 = 128 bytes */
1815 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1816 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1817 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1818}
1819
1820/*
Igor Opaniukc55401372019-11-03 16:49:43 +01001821 * Restore BCH to normal settings.
1822 */
1823void mxs_nand_mode_normal(struct mtd_info *mtd)
1824{
1825 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1826 struct nand_chip *nand = mtd_to_nand(mtd);
1827 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1828
1829 nand_info->en_randomizer = 0;
1830
1831 mtd->writesize = nand_info->writesize;
1832 mtd->oobsize = nand_info->oobsize;
1833
1834 writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
1835 writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
1836}
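
/*
 * A hypothetical caller, sketching how the mode helpers above are meant to
 * be paired around an FCB write (the write step is illustrative only, not a
 * specific API):
 *
 *	mxs_nand_mode_fcb_62bit(mtd);	// switch BCH to the ROM's FCB layout
 *	// ... write the FCB page(s) through the usual mtd/nand write path ...
 *	mxs_nand_mode_normal(mtd);	// restore the layout saved at init
 */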
1837
1838uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
1839{
1840 struct nand_chip *chip = mtd_to_nand(mtd);
1841 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1842 struct bch_geometry *geo = &nand_info->bch_geometry;
1843
1844 return geo->block_mark_byte_offset;
1845}
1846
1847uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
1848{
1849 struct nand_chip *chip = mtd_to_nand(mtd);
1850 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1851 struct bch_geometry *geo = &nand_info->bch_geometry;
1852
1853 return geo->block_mark_bit_offset;
1854}