// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <malloc.h>
#include <mxs_nand.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/math64.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

#define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}
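
/*
 * Worked example (illustrative only): with MXS_NAND_METADATA_SIZE = 10,
 * (10 + 0x3) & ~0x3 = 12, so the per-chunk BCH status bytes start at a
 * 4-byte-aligned offset of 12 bytes into the auxiliary (OOB) buffer.
 */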

static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must be equal to that of chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}
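
/*
 * Worked example (illustrative only): for a 4096-byte page with gf_len = 14,
 * ecc_strength = 14 and 1024-byte chunks, each chunk plus its parity
 * occupies 14 * 14 + 1024 * 8 = 8388 bits.  After the metadata, the page
 * holds 4096 * 8 - 10 * 8 = 32688 payload bits, so i = 32688 / 8388 = 3
 * full chunks and j = 32688 - 3 * 8388 = 7524 bits remain.  Since
 * 7524 < 8192 (one chunk of data), the physical bad block marker byte
 * falls inside data chunk i + 1 = 4.
 */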

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the chunk size (C) >= the OOB size (O) */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
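
/*
 * Note (illustrative): the Galois field width follows the chunk size
 * because the whole BCH code word must be addressable within GF(2^m).
 * A 512-byte chunk is 4096 data bits plus parity, which fits in a
 * 13-bit field (2^13 - 1 = 8191 bits), while a 1024-byte chunk is
 * already 8192 data bits and therefore needs GF(2^14).
 */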

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	int corr, ds_corr;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	/* Check the ECC strength, the same check nand_ecc_is_strong_enough() does */
	if (chip->ecc_step_ds) {
		corr = mtd->writesize * geo->ecc_strength /
		       geo->ecc_chunkn_size;
		ds_corr = mtd->writesize * chip->ecc_strength_ds /
			  chip->ecc_step_ds;
		if (corr < ds_corr ||
		    geo->ecc_strength < chip->ecc_strength_ds)
			return -EINVAL;
	}

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
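
/*
 * Worked example (illustrative only): a 2048-byte page with 64 bytes of
 * OOB uses 512-byte chunks and gf_len = 13, so ecc_chunk_count = 4 and
 * ecc_strength = ((64 - 10) * 8) / (13 * 4) = 432 / 52 = 8 (already a
 * multiple of 2).  The block mark then sits at bit offset
 * 2048 * 8 - (8 * 13 * 3 + 10 * 8) = 16384 - 392 = 15992, i.e. byte
 * 1999, bit 0, of the uncorrected page data.
 */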

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check for the minimum ECC the NAND chip requires */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC the platform can support */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * Search for a supported ECC strength that places the bad block
	 * marker in a data chunk.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND chip
	 * requires, but change the ECC page layout.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* Check if the OOB can afford this extra ECC chunk */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate in which chunk the BBM is located */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			      geo->ecc_chunkn_size * 8) + 1;
	}

	/* Calculate the number of ECC chunks behind the BBM */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
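
/*
 * Note (illustrative): with the "ecc_for_meta" layout above, chunk 0
 * shrinks to zero data bytes and only protects the 10 metadata bytes,
 * so one extra parity block is spent on the metadata while the page
 * data is still split into ecc_chunkn_size chunks.
 */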

/*
 * Wait for BCH complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
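
/*
 * Worked example (illustrative only): with block_mark_byte_offset = 1999
 * and block_mark_bit_offset = 2, the byte overlaying the block mark is
 * reassembled from the top 6 bits of data_buf[1999] and the low 2 bits
 * of data_buf[2000], then exchanged with oob_buf[0] (metadata byte 0).
 * The factory bad-block-marker position of the physical page thus keeps
 * the real marker, while the displaced data byte travels in the metadata.
 */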

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;
	mxs_nand_read_buf(mtd, &buf, 1);
	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}
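
/*
 * Note (illustrative): an all-0xff erased page carries no valid BCH
 * parity, so the engine reports it as uncorrectable (status 0xfe).  The
 * check above tolerates up to min(gf_len / 2, ecc_strength) bitflips --
 * e.g. 7 flips for gf_len = 14 -- first in the suspect chunk and then
 * across a raw re-read of the whole page, before declaring the page
 * erased and returning clean 0xff data.
 */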

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}
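
/*
 * Note (illustrative): each per-chunk BCH status byte decodes as
 * 0x00 = no errors, 0xff = erased chunk, 0xfe = uncorrectable, and any
 * other value is the number of corrected bitflips in that chunk.
 */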
846
847/*
848 * Write a page to NAND.
849 */
Sergey Lapin3a38a552013-01-14 03:46:50 +0000850static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
851 struct nand_chip *nand, const uint8_t *buf,
Scott Wood46e13102016-05-30 13:57:57 -0500852 int oob_required, int page)
Marek Vasut913a7252011-11-08 23:18:16 +0000853{
Scott Wood17fed142016-05-30 13:57:56 -0500854 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
Stefan Agnerd0778b32018-06-22 17:19:49 +0200855 struct bch_geometry *geo = &nand_info->bch_geometry;
Marek Vasut913a7252011-11-08 23:18:16 +0000856 struct mxs_dma_desc *d;
857 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
858 int ret;
859
860 memcpy(nand_info->data_buf, buf, mtd->writesize);
861 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);
862
863 /* Handle block mark swapping. */
Stefan Agnerd0778b32018-06-22 17:19:49 +0200864 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);
Marek Vasut913a7252011-11-08 23:18:16 +0000865
866 /* Compile the DMA descriptor - write data. */
867 d = mxs_nand_get_dma_desc(nand_info);
868 d->cmd.data =
869 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
870 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
871 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);
872
873 d->cmd.address = 0;
874
875 d->cmd.pio_words[0] =
876 GPMI_CTRL0_COMMAND_MODE_WRITE |
877 GPMI_CTRL0_WORD_LENGTH |
878 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
879 GPMI_CTRL0_ADDRESS_NAND_DATA;
880 d->cmd.pio_words[1] = 0;
881 d->cmd.pio_words[2] =
882 GPMI_ECCCTRL_ENABLE_ECC |
883 GPMI_ECCCTRL_ECC_CMD_ENCODE |
884 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
885 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
886 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
887 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;
888
Han Xuafed2a12020-05-06 20:59:19 +0800889 if (nand_info->en_randomizer) {
Igor Opaniukc55401372019-11-03 16:49:43 +0100890 d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
891 GPMI_ECCCTRL_RANDOMIZER_TYPE2;
892 /*
893 * Write NAND page number needed to be randomized
894 * to GPMI_ECCCOUNT register.
895 *
896 * The value is between 0-255. For additional details
897 * check 9.6.6.4 of i.MX7D Applications Processor reference
898 */
Alice Guo3f277782020-05-04 22:09:03 +0800899 d->cmd.pio_words[3] |= (page % 256) << 16;
Igor Opaniukc55401372019-11-03 16:49:43 +0100900 }
901
Marek Vasut913a7252011-11-08 23:18:16 +0000902 mxs_dma_desc_append(channel, d);
903
Marek Vasut1b120e82012-03-15 18:33:19 +0000904 /* Flush caches */
905 mxs_nand_flush_data_buf(nand_info);
906
Marek Vasut913a7252011-11-08 23:18:16 +0000907 /* Execute the DMA chain. */
908 ret = mxs_dma_go(channel);
909 if (ret) {
910 printf("MXS NAND: DMA write error\n");
911 goto rtn;
912 }
913
Stefan Agnerdc8af6d2018-06-22 18:06:12 +0200914 ret = mxs_nand_wait_for_bch_complete(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +0000915 if (ret) {
916 printf("MXS NAND: BCH write timeout\n");
917 goto rtn;
918 }
919
920rtn:
921 mxs_nand_return_dma_descs(nand_info);
Sergey Lapin3a38a552013-01-14 03:46:50 +0000922 return 0;
Marek Vasut913a7252011-11-08 23:18:16 +0000923}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	int err;

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ecc required %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	/* Use the legacy bch geometry by default */
	if ((!nand_info->use_minimum_ecc && mtd->oobsize < 1024) ||
	    !(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) {
		dev_dbg(mtd->dev, "use legacy bch geometry\n");
		err = mxs_nand_legacy_calc_ecc_layout(geo, mtd);
		if (!err)
			return 0;
	}

	/* For large-oob NAND */
	if (mtd->oobsize > 1024) {
		dev_dbg(mtd->dev, "use large oob bch geometry\n");
		err = mxs_nand_calc_ecc_for_large_oob(geo, mtd);
		if (!err)
			return 0;
	}

	/* Otherwise use the minimum ecc the NAND chip requires */
	dev_dbg(mtd->dev, "use minimum ecc bch geometry\n");
	err = mxs_nand_calc_ecc_layout_by_info(geo, mtd, chip->ecc_strength_ds,
					       chip->ecc_step_ds);

	if (err)
		dev_err(mtd->dev, "none of the bch geometry settings works\n");

	return err;
}

void mxs_nand_dump_geo(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	dev_dbg(mtd->dev, "BCH Geometry :\n"
		"GF Length\t\t: %u\n"
		"ECC Strength\t\t: %u\n"
		"ECC for Meta\t\t: %u\n"
		"ECC Chunk0 Size\t\t: %u\n"
		"ECC Chunkn Size\t\t: %u\n"
		"ECC Chunk Count\t\t: %u\n"
		"Block Mark Byte Offset\t: %u\n"
		"Block Mark Bit Offset\t: %u\n",
		geo->gf_len,
		geo->ecc_strength,
		geo->ecc_for_meta,
		geo->ecc_chunk0_size,
		geo->ecc_chunkn_size,
		geo->ecc_chunk_count,
		geo->block_mark_byte_offset,
		geo->block_mark_bit_offset);
}

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	mxs_nand_dump_geo(mtd);

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	return 0;
}
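
/*
 * Worked example (illustrative only): for the 2048+64 geometry computed
 * earlier (4 chunks of 512 bytes, ECC8, gf_len = 13), FLASH0LAYOUT0
 * encodes NBLOCKS = 3, META_SIZE = 10, ECC0 = 8 >> 1 = 4 and
 * DATA0_SIZE = 512 >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT, while
 * FLASH0LAYOUT1 encodes PAGE_SIZE = 2048 + 64 = 2112 plus the same
 * per-chunk ECCN/DATAN_SIZE fields.
 */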
1255
1256/*
1257 * Allocate DMA buffers
1258 */
1259int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
1260{
1261 uint8_t *buf;
1262 const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
1263
Marek Vasut1b120e82012-03-15 18:33:19 +00001264 nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);
1265
Marek Vasut913a7252011-11-08 23:18:16 +00001266 /* DMA buffers */
Marek Vasut1b120e82012-03-15 18:33:19 +00001267 buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
Marek Vasut913a7252011-11-08 23:18:16 +00001268 if (!buf) {
1269 printf("MXS NAND: Error allocating DMA buffers\n");
1270 return -ENOMEM;
1271 }
1272
Marek Vasut1b120e82012-03-15 18:33:19 +00001273 memset(buf, 0, nand_info->data_buf_size);
Marek Vasut913a7252011-11-08 23:18:16 +00001274
1275 nand_info->data_buf = buf;
1276 nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
Marek Vasut913a7252011-11-08 23:18:16 +00001277 /* Command buffers */
1278 nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
1279 MXS_NAND_COMMAND_BUFFER_SIZE);
1280 if (!nand_info->cmd_buf) {
1281 free(buf);
1282 printf("MXS NAND: Error allocating command buffers\n");
1283 return -ENOMEM;
1284 }
1285 memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
1286 nand_info->cmd_queue_len = 0;
1287
1288 return 0;
1289}
1290
1291/*
1292 * Initializes the NFC hardware.
1293 */
Adam Ford6edb91a2019-01-12 06:25:48 -06001294static int mxs_nand_init_dma(struct mxs_nand_info *info)
Marek Vasut913a7252011-11-08 23:18:16 +00001295{
Peng Fane37d5a92016-01-27 10:38:02 +08001296 int i = 0, j, ret = 0;
Marek Vasut913a7252011-11-08 23:18:16 +00001297
1298 info->desc = malloc(sizeof(struct mxs_dma_desc *) *
1299 MXS_NAND_DMA_DESCRIPTOR_COUNT);
Peng Fane37d5a92016-01-27 10:38:02 +08001300 if (!info->desc) {
1301 ret = -ENOMEM;
Marek Vasut913a7252011-11-08 23:18:16 +00001302 goto err1;
Peng Fane37d5a92016-01-27 10:38:02 +08001303 }
Marek Vasut913a7252011-11-08 23:18:16 +00001304
1305 /* Allocate the DMA descriptors. */
1306 for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
1307 info->desc[i] = mxs_dma_desc_alloc();
Peng Fane37d5a92016-01-27 10:38:02 +08001308 if (!info->desc[i]) {
1309 ret = -ENOMEM;
Marek Vasut913a7252011-11-08 23:18:16 +00001310 goto err2;
Peng Fane37d5a92016-01-27 10:38:02 +08001311 }
Marek Vasut913a7252011-11-08 23:18:16 +00001312 }
1313
1314 /* Init the DMA controller. */
Fabio Estevam17156222017-06-29 09:33:44 -03001315 mxs_dma_init();
Marek Vasut93541b42012-04-08 17:34:46 +00001316 for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
1317 j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
Peng Fane37d5a92016-01-27 10:38:02 +08001318 ret = mxs_dma_init_channel(j);
1319 if (ret)
Marek Vasut93541b42012-04-08 17:34:46 +00001320 goto err3;
1321 }
Marek Vasut913a7252011-11-08 23:18:16 +00001322
1323 /* Reset the GPMI block. */
Stefan Agnerdc8af6d2018-06-22 18:06:12 +02001324 mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
1325 mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);
Marek Vasut913a7252011-11-08 23:18:16 +00001326
1327 /*
1328 * Choose NAND mode, set IRQ polarity, disable write protection and
1329 * select BCH ECC.
1330 */
Stefan Agnerdc8af6d2018-06-22 18:06:12 +02001331 clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
Marek Vasut913a7252011-11-08 23:18:16 +00001332 GPMI_CTRL1_GPMI_MODE,
1333 GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
1334 GPMI_CTRL1_BCH_MODE);
1335
1336 return 0;
1337
Marek Vasut93541b42012-04-08 17:34:46 +00001338err3:
Peng Fane37d5a92016-01-27 10:38:02 +08001339 for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
Marek Vasut93541b42012-04-08 17:34:46 +00001340 mxs_dma_release(j);
Marek Vasut913a7252011-11-08 23:18:16 +00001341err2:
Marek Vasut913a7252011-11-08 23:18:16 +00001342 for (--i; i >= 0; i--)
1343 mxs_dma_desc_free(info->desc[i]);
Peng Fane37d5a92016-01-27 10:38:02 +08001344 free(info->desc);
1345err1:
1346 if (ret == -ENOMEM)
1347 printf("MXS NAND: Unable to allocate DMA descriptors\n");
1348 return ret;
Marek Vasut913a7252011-11-08 23:18:16 +00001349}
1350
Michael Trimarchifd6e13e2022-08-30 16:48:47 +02001351/*
1352 * <1> Firstly, we should know what's the GPMI-clock means.
1353 * The GPMI-clock is the internal clock in the gpmi nand controller.
1354 * If you set 100MHz to gpmi nand controller, the GPMI-clock's period
1355 * is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
1356 *
1357 * <2> Secondly, we should know what's the frequency on the nand chip pins.
1358 * The frequency on the nand chip pins is derived from the GPMI-clock.
1359 * We can get it from the following equation:
1360 *
1361 * F = G / (DS + DH)
1362 *
1363 * F : the frequency on the nand chip pins.
1364 * G : the GPMI clock, such as 100MHz.
1365 * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
1366 * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
1367 *
1368 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
1369 * the nand EDO(extended Data Out) timing could be applied.
1370 * The GPMI implements a feedback read strobe to sample the read data.
1371 * The feedback read strobe can be delayed to support the nand EDO timing
1372 * where the read strobe may deasserts before the read data is valid, and
1373 * read data is valid for some time after read strobe.
1374 *
1375 * The following figure illustrates some aspects of a NAND Flash read:
1376 *
1377 * |<---tREA---->|
1378 * | |
1379 * | | |
1380 * |<--tRP-->| |
1381 * | | |
1382 * __ ___|__________________________________
1383 * RDN \________/ |
1384 * |
1385 * /---------\
1386 * Read Data --------------< >---------
1387 * \---------/
1388 * | |
1389 * |<-D->|
1390 * FeedbackRDN ________ ____________
1391 * \___________/
1392 *
1393 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
1394 *
1395 *
1396 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
1397 *
1398 * 4.1) From the aspect of the nand chip pins:
1399 * Delay = (tREA + C - tRP) {1}
1400 *
1401 * tREA : the maximum read access time.
1402 * C : a constant to adjust the delay. default is 4000ps.
1403 * tRP : the read pulse width, which is exactly:
1404 * tRP = (GPMI-clock-period) * DATA_SETUP
1405 *
1406 * 4.2) From the aspect of the GPMI nand controller:
1407 * Delay = RDN_DELAY * 0.125 * RP {2}
1408 *
1409 * RP : the DLL reference period.
1410 * if (GPMI-clock-period > DLL_THRETHOLD)
1411 * RP = GPMI-clock-period / 2;
1412 * else
1413 * RP = GPMI-clock-period;
1414 *
1415 * Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period
1416 * is greater than DLL_THRESHOLD. On most SoCs the DLL_THRESHOLD
1417 * is 16000ps, but on i.MX6Q we use 12000ps.
1418 *
1419 * 4.3) Since {1} equals {2}, we get:
1420 *
1421 * (tREA + 4000 - tRP) * 8
1422 * RDN_DELAY = ----------------------- {3}
1423 * RP
1424 */
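/*
 * Worked example (illustrative only; the real inputs come from the
 * chip's SDR timings): assume EDO mode 5, GPMI-clock = 100MHz, the
 * i.MX6Q DLL threshold of 12000ps, and worst-case tREA = 16000ps with
 * tDS = 10000ps:
 *
 *   GPMI-clock-period = 10000ps
 *   DATA_SETUP = ceil(10000 / 10000) = 1  =>  tRP = 10000ps
 *   10000ps <= 12000ps                    =>  RP = 10000ps, no HALF_PERIOD
 *
 *               (16000 + 4000 - 10000) * 8
 *   RDN_DELAY = --------------------------- = 8
 *                         10000
 */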
1425static void mxs_compute_timings(struct nand_chip *chip,
1426 const struct nand_sdr_timings *sdr)
1427{
1428 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1429 unsigned long clk_rate;
1430 unsigned int dll_wait_time_us;
1431 unsigned int dll_threshold_ps = nand_info->max_chain_delay;
1432 unsigned int period_ps, reference_period_ps;
1433 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
1434 unsigned int tRP_ps;
1435 bool use_half_period;
1436 int sample_delay_ps, sample_delay_factor;
1437 u16 busy_timeout_cycles;
1438 u8 wrn_dly_sel;
1439 u32 timing0;
1440 u32 timing1;
1441 u32 ctrl1n;
1442
1443 if (sdr->tRC_min >= 30000) {
1444 /* ONFI non-EDO modes [0-3] */
1445 clk_rate = 22000000;
1446 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
1447 } else if (sdr->tRC_min >= 25000) {
1448 /* ONFI EDO mode 4 */
1449 clk_rate = 80000000;
1450 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
1451 debug("%s, setting ONFI onfi edo 4\n", __func__);
1452 } else {
1453 /* ONFI EDO mode 5 */
1454 clk_rate = 100000000;
1455 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
1456 debug("%s, setting ONFI onfi edo 5\n", __func__);
1457 }
1458
1459 /* SDR core timings are given in picoseconds */
1460 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, clk_rate);
1461
1462 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
1463 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
1464 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
1465 busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps);
1466
1467 timing0 = (addr_setup_cycles << GPMI_TIMING0_ADDRESS_SETUP_OFFSET) |
1468 (data_hold_cycles << GPMI_TIMING0_DATA_HOLD_OFFSET) |
1469 (data_setup_cycles << GPMI_TIMING0_DATA_SETUP_OFFSET);
	/* The DEVICE_BUSY_TIMEOUT field is in units of 4096 GPMI cycles */
	timing1 = DIV_ROUND_UP(busy_timeout_cycles, 4096) <<
		  GPMI_TIMING1_DEVICE_BUSY_TIMEOUT_OFFSET;
1471
1472 /*
1473 * Derive NFC ideal delay from {3}:
1474 *
1475 * (tREA + 4000 - tRP) * 8
1476 * RDN_DELAY = -----------------------
1477 * RP
1478 */
1479 if (period_ps > dll_threshold_ps) {
1480 use_half_period = true;
1481 reference_period_ps = period_ps / 2;
1482 } else {
1483 use_half_period = false;
1484 reference_period_ps = period_ps;
1485 }
1486
1487 tRP_ps = data_setup_cycles * period_ps;
1488 sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
1489 if (sample_delay_ps > 0)
1490 sample_delay_factor = sample_delay_ps / reference_period_ps;
1491 else
1492 sample_delay_factor = 0;
1493
1494 ctrl1n = (wrn_dly_sel << GPMI_CTRL1_WRN_DLY_SEL_OFFSET);
1495 if (sample_delay_factor)
1496 ctrl1n |= (sample_delay_factor << GPMI_CTRL1_RDN_DELAY_OFFSET) |
1497 GPMI_CTRL1_DLL_ENABLE |
1498 (use_half_period ? GPMI_CTRL1_HALF_PERIOD : 0);
1499
1500 writel(timing0, &nand_info->gpmi_regs->hw_gpmi_timing0);
1501 writel(timing1, &nand_info->gpmi_regs->hw_gpmi_timing1);
1502
1503 /*
1504	 * Clear several CTRL1 fields; the DLL must be disabled when setting
1505	 * RDN_DELAY or HALF_PERIOD.
1506 */
1507 writel(GPMI_CTRL1_CLEAR_MASK, &nand_info->gpmi_regs->hw_gpmi_ctrl1_clr);
1508 writel(ctrl1n, &nand_info->gpmi_regs->hw_gpmi_ctrl1_set);
1509
1510 clk_set_rate(nand_info->gpmi_clk, clk_rate);
1511
1512 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
1513	dll_wait_time_us = DIV_ROUND_UP(64 * USEC_PER_SEC, clk_rate);
1514	if (!dll_wait_time_us)
1515		dll_wait_time_us = 1;
1516
1517 /* Wait for the DLL to settle. */
1518 udelay(dll_wait_time_us);
1519}
1520
1521static int mxs_nand_setup_interface(struct mtd_info *mtd, int chipnr,
1522 const struct nand_data_interface *conf)
1523{
1524 struct nand_chip *chip = mtd_to_nand(mtd);
1525 const struct nand_sdr_timings *sdr;
1526
1527 sdr = nand_get_sdr_timings(conf);
1528 if (IS_ERR(sdr))
1529 return PTR_ERR(sdr);
1530
1531 /* Stop here if this call was just a check */
1532 if (chipnr < 0)
1533 return 0;
1534
1535 /* Do the actual derivation of the controller timings */
1536 mxs_compute_timings(chip, sdr);
1537
1538 return 0;
1539}
1540
Stefan Agner7152f342018-06-22 17:19:46 +02001541int mxs_nand_init_spl(struct nand_chip *nand)
1542{
1543 struct mxs_nand_info *nand_info;
1544 int err;
1545
1546 nand_info = malloc(sizeof(struct mxs_nand_info));
1547 if (!nand_info) {
1548 printf("MXS NAND: Failed to allocate private data\n");
1549 return -ENOMEM;
1550 }
1551 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1552
Stefan Agnerdc8af6d2018-06-22 18:06:12 +02001553 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1554 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
Adam Ford10210732019-01-02 20:36:52 -06001555
Peng Fan128abf42020-05-04 22:09:00 +08001556 if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
Adam Ford10210732019-01-02 20:36:52 -06001557 nand_info->max_ecc_strength_supported = 62;
1558 else
1559 nand_info->max_ecc_strength_supported = 40;
1560
Ye Li61771d22022-03-31 13:27:47 +08001561 if (IS_ENABLED(CONFIG_NAND_MXS_USE_MINIMUM_ECC))
1562 nand_info->use_minimum_ecc = true;
1563
Stefan Agner7152f342018-06-22 17:19:46 +02001564 err = mxs_nand_alloc_buffers(nand_info);
1565 if (err)
1566 return err;
1567
Stefan Agner00e65162018-06-22 18:06:13 +02001568 err = mxs_nand_init_dma(nand_info);
Stefan Agner7152f342018-06-22 17:19:46 +02001569 if (err)
1570 return err;
1571
1572 nand_set_controller_data(nand, nand_info);
1573
1574 nand->options |= NAND_NO_SUBPAGE_WRITE;
1575
1576 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1577 nand->dev_ready = mxs_nand_device_ready;
1578 nand->select_chip = mxs_nand_select_chip;
Stefan Agner7152f342018-06-22 17:19:46 +02001579
1580 nand->read_byte = mxs_nand_read_byte;
1581 nand->read_buf = mxs_nand_read_buf;
1582
1583 nand->ecc.read_page = mxs_nand_ecc_read_page;
1584
1585 nand->ecc.mode = NAND_ECC_HW;
Stefan Agner7152f342018-06-22 17:19:46 +02001586
1587 return 0;
1588}
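
/*
 * Hedged usage sketch (not part of this file): SPL code such as
 * mxs_nand_spl.c is expected to drive the minimal init above roughly
 * like this; the body of nand_init() here is illustrative only.
 */
#if 0
static struct nand_chip nand_chip;

void nand_init(void)
{
	if (mxs_nand_init_spl(&nand_chip)) {
		printf("MXS NAND: SPL init failed\n");
		return;
	}
	/*
	 * ... identify the chip via nand_chip.cmd_ctrl()/read_byte(),
	 * then load pages with nand_chip.ecc.read_page() ...
	 */
}
#endif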
1589
Stefan Agner19f90512018-06-22 18:06:16 +02001590int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
Marek Vasut913a7252011-11-08 23:18:16 +00001591{
Stefan Agner5883e552018-06-22 17:19:47 +02001592 struct mtd_info *mtd;
Stefan Agner5883e552018-06-22 17:19:47 +02001593 struct nand_chip *nand;
Marek Vasut913a7252011-11-08 23:18:16 +00001594 int err;
1595
Stefan Agner5883e552018-06-22 17:19:47 +02001596 nand = &nand_info->chip;
1597 mtd = nand_to_mtd(nand);
Marek Vasut913a7252011-11-08 23:18:16 +00001598 err = mxs_nand_alloc_buffers(nand_info);
1599 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001600 return err;
Marek Vasut913a7252011-11-08 23:18:16 +00001601
Stefan Agner00e65162018-06-22 18:06:13 +02001602 err = mxs_nand_init_dma(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001603 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001604 goto err_free_buffers;
Marek Vasut913a7252011-11-08 23:18:16 +00001605
1606 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));
1607
Stefan Agner95f376f2018-06-22 17:19:48 +02001608#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
1609 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
1610#endif
1611
Scott Wood17fed142016-05-30 13:57:56 -05001612 nand_set_controller_data(nand, nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001613 nand->options |= NAND_NO_SUBPAGE_WRITE;
1614
Stefan Agner150ddbc2018-06-22 18:06:17 +02001615 if (nand_info->dev)
Patrice Chotard33d2cf92021-09-13 16:25:53 +02001616 nand->flash_node = dev_ofnode(nand_info->dev);
Stefan Agner150ddbc2018-06-22 18:06:17 +02001617
Marek Vasut913a7252011-11-08 23:18:16 +00001618 nand->cmd_ctrl = mxs_nand_cmd_ctrl;
1619
1620 nand->dev_ready = mxs_nand_device_ready;
1621 nand->select_chip = mxs_nand_select_chip;
1622 nand->block_bad = mxs_nand_block_bad;
Marek Vasut913a7252011-11-08 23:18:16 +00001623
1624 nand->read_byte = mxs_nand_read_byte;
1625
1626 nand->read_buf = mxs_nand_read_buf;
1627 nand->write_buf = mxs_nand_write_buf;
1628
Michael Trimarchifd6e13e2022-08-30 16:48:47 +02001629 if (nand_info->gpmi_clk)
1630 nand->setup_data_interface = mxs_nand_setup_interface;
1631
	/* first scan to find the device and get the page size */
	err = nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL);
	if (err)
		goto err_free_buffers;

	err = mxs_nand_setup_ecc(mtd);
	if (err)
		goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001638
Marek Vasut913a7252011-11-08 23:18:16 +00001639 nand->ecc.read_page = mxs_nand_ecc_read_page;
1640 nand->ecc.write_page = mxs_nand_ecc_write_page;
1641 nand->ecc.read_oob = mxs_nand_ecc_read_oob;
1642 nand->ecc.write_oob = mxs_nand_ecc_write_oob;
1643
1644 nand->ecc.layout = &fake_ecc_layout;
1645 nand->ecc.mode = NAND_ECC_HW;
Ye Li94547442020-05-04 22:08:50 +08001646 nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
Stefan Agner72d627d2018-06-22 17:19:50 +02001647 nand->ecc.strength = nand_info->bch_geometry.ecc_strength;
Marek Vasut913a7252011-11-08 23:18:16 +00001648
Stefan Agner5883e552018-06-22 17:19:47 +02001649 /* second phase scan */
1650 err = nand_scan_tail(mtd);
1651 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001652 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001653
Michael Trimarchidc3da882022-05-15 11:35:30 +02001654 /* Hook some operations at the MTD level. */
1655 if (mtd->_read_oob != mxs_nand_hook_read_oob) {
1656 nand_info->hooked_read_oob = mtd->_read_oob;
1657 mtd->_read_oob = mxs_nand_hook_read_oob;
1658 }
1659
1660 if (mtd->_write_oob != mxs_nand_hook_write_oob) {
1661 nand_info->hooked_write_oob = mtd->_write_oob;
1662 mtd->_write_oob = mxs_nand_hook_write_oob;
1663 }
1664
1665 if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
1666 nand_info->hooked_block_markbad = mtd->_block_markbad;
1667 mtd->_block_markbad = mxs_nand_hook_block_markbad;
1668 }
1669
Stefan Agner5883e552018-06-22 17:19:47 +02001670 err = nand_register(0, mtd);
1671 if (err)
Stefan Agner404b1102018-06-22 18:06:14 +02001672 goto err_free_buffers;
Stefan Agner5883e552018-06-22 17:19:47 +02001673
Stefan Agner404b1102018-06-22 18:06:14 +02001674 return 0;
Marek Vasut913a7252011-11-08 23:18:16 +00001675
Stefan Agner404b1102018-06-22 18:06:14 +02001676err_free_buffers:
Marek Vasut913a7252011-11-08 23:18:16 +00001677 free(nand_info->data_buf);
1678 free(nand_info->cmd_buf);
Stefan Agner404b1102018-06-22 18:06:14 +02001679
1680 return err;
1681}
1682
Stefan Agner150ddbc2018-06-22 18:06:17 +02001683#ifndef CONFIG_NAND_MXS_DT
Stefan Agner404b1102018-06-22 18:06:14 +02001684void board_nand_init(void)
1685{
1686 struct mxs_nand_info *nand_info;
1687
1688 nand_info = malloc(sizeof(struct mxs_nand_info));
1689 if (!nand_info) {
1690 printf("MXS NAND: Failed to allocate private data\n");
1691 return;
1692 }
1693 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1694
1695 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1696 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1697
Stefan Agner4dc98db2018-06-22 18:06:15 +02001698 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
1699 if (is_mx6sx() || is_mx7())
1700 nand_info->max_ecc_strength_supported = 62;
1701 else
1702 nand_info->max_ecc_strength_supported = 40;
1703
1704#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
1705 nand_info->use_minimum_ecc = true;
1706#endif
1707
Stefan Agner19f90512018-06-22 18:06:16 +02001708 if (mxs_nand_init_ctrl(nand_info) < 0)
Stefan Agner404b1102018-06-22 18:06:14 +02001709 goto err;
1710
Stefan Agner5883e552018-06-22 17:19:47 +02001711 return;
Stefan Agner404b1102018-06-22 18:06:14 +02001712
1713err:
1714 free(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001715}
Stefan Agner150ddbc2018-06-22 18:06:17 +02001716#endif
Igor Opaniukc55401372019-11-03 16:49:43 +01001717
1718/*
1719 * Read NAND layout for FCB block generation.
1720 */
1721void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
1722{
1723 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1724 u32 tmp;
1725
1726 tmp = readl(&bch_regs->hw_bch_flash0layout0);
1727 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
1728 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1729 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
1730 BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1731
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
			BCH_FLASHLAYOUT0_ECC0_OFFSET;

	/* DATA0_SIZE and ECC0 live in FLASH0LAYOUT0; the data-N fields
	 * below come from FLASH0LAYOUT1 */
	tmp = readl(&bch_regs->hw_bch_flash0layout1);
1737 l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
1738 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
1739 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
1740 BCH_FLASHLAYOUT1_ECCN_OFFSET;
Han Xu33543b52020-05-04 22:08:58 +08001741 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
1742 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
Igor Opaniukc55401372019-11-03 16:49:43 +01001743}
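
/*
 * Illustrative cross-check (assuming the i.MX6-class encoding, where the
 * ECC fields hold level/2 and gf_len selects GF13 (0) or GF14 (1)): the
 * fields read above must tile the physical page exactly:
 *
 *   page_size = meta_size
 *             + data0_size + (2 * ecc0) * (13 + gf_len) / 8
 *             + nblocks * (datan_size + (2 * eccn) * (13 + gf_len) / 8)
 */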
1744
1745/*
1746 * Set BCH to the specific 62-bit ECC layout used by the ROM bootloader
1747 * to read the FCB.
1747 */
Han Xuafed2a12020-05-06 20:59:19 +08001748void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
Igor Opaniukc55401372019-11-03 16:49:43 +01001749{
1750 u32 tmp;
1751 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1752 struct nand_chip *nand = mtd_to_nand(mtd);
1753 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1754
1755 nand_info->en_randomizer = 1;
1756
1757 mtd->writesize = 1024;
1758 mtd->oobsize = 1862 - 1024;
1759
1760	/* 8 ECC chunks (NBLOCKS counts the 7 data-N chunks after data0) */
1761 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1762 /* 32 bytes for metadata */
1763 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1764	/* ECC level 62 (the field holds level/2, hence 0x1F) */
1765 tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1766	/* data0 chunk size is 0x20 * 4 = 128 bytes */
1767 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1768 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1769 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1770
1771 /* 1024 for data + 838 for OOB */
1772 tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1773	/* ECC level 62 (the field holds level/2, hence 0x1F) */
1774 tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1775	/* data-N chunk size is 0x20 * 4 = 128 bytes */
1776 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1777 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1778 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1779}
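
/*
 * Cross-check of the arithmetic above: with GF13, ECC62 costs
 * 62 * 13 / 8 = 100.75 bytes of parity per 128-byte chunk, so the page
 * tiles as 32 + 8 * 128 + 8 * 100.75 = 1862 bytes, matching the
 * PAGE_SIZE programmed above. The 40-bit variant below tiles as
 * 32 + 8 * 128 + 8 * 65 = 1576 bytes.
 */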
1780
1781/*
Han Xuafed2a12020-05-06 20:59:19 +08001782 * Set BCH to the specific 40-bit ECC layout used by the ROM bootloader
 * to read the FCB.
1783 */
1784void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
1785{
1786 u32 tmp;
1787 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1788 struct nand_chip *nand = mtd_to_nand(mtd);
1789 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1790
1791	/* no randomizer in this setting */
1792 nand_info->en_randomizer = 0;
1793
1794 mtd->writesize = 1024;
1795 mtd->oobsize = 1576 - 1024;
1796
1797	/* 8 ECC chunks (NBLOCKS counts the 7 data-N chunks after data0) */
1798 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1799 /* 32 bytes for metadata */
1800 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1801	/* ECC level 40 (the field holds level/2, hence 0x14) */
1802 tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1803	/* data0 chunk size is 0x20 * 4 = 128 bytes */
1804 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1805 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1806 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1807
1808 /* 1024 for data + 552 for OOB */
1809 tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1810	/* ECC level 40 (the field holds level/2, hence 0x14) */
1811 tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1812	/* data-N chunk size is 0x20 * 4 = 128 bytes */
1813 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1814 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1815 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1816}
1817
1818/*
Igor Opaniukc55401372019-11-03 16:49:43 +01001819 * Restore BCH to normal settings.
1820 */
1821void mxs_nand_mode_normal(struct mtd_info *mtd)
1822{
1823 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1824 struct nand_chip *nand = mtd_to_nand(mtd);
1825 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1826
1827 nand_info->en_randomizer = 0;
1828
1829 mtd->writesize = nand_info->writesize;
1830 mtd->oobsize = nand_info->oobsize;
1831
1832 writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
1833 writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
1834}
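
/*
 * Hedged usage sketch: FCB writers are expected to bracket raw page I/O
 * with the mode helpers so that normal operation resumes with the saved
 * layout:
 *
 *	mxs_nand_mode_fcb_62bit(mtd);
 *	... raw-write or raw-read the FCB page ...
 *	mxs_nand_mode_normal(mtd);
 */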
1835
1836uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
1837{
1838 struct nand_chip *chip = mtd_to_nand(mtd);
1839 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1840 struct bch_geometry *geo = &nand_info->bch_geometry;
1841
1842 return geo->block_mark_byte_offset;
1843}
1844
1845uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
1846{
1847 struct nand_chip *chip = mtd_to_nand(mtd);
1848 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1849 struct bch_geometry *geo = &nand_info->bch_geometry;
1850
1851 return geo->block_mark_bit_offset;
1852}
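
/*
 * Hedged sketch (assumption: this mirrors the block-mark swap the driver
 * performs internally): a raw-page tool can use the two helpers above to
 * exchange the data byte that the BCH layout maps onto the factory
 * bad-block mark with the first OOB byte of a raw page image.
 */
#if 0
static void swap_block_mark(struct mtd_info *mtd, uint8_t *page)
{
	uint32_t byte_off = mxs_nand_mark_byte_offset(mtd);
	uint32_t bit_off = mxs_nand_mark_bit_offset(mtd);
	uint8_t from_data, from_oob;

	/* data byte straddling the mark position */
	from_data = (page[byte_off] >> bit_off) |
		    (page[byte_off + 1] << (8 - bit_off));
	/* byte at the conventional MTD bad-block-mark position */
	from_oob = page[mtd->writesize];

	page[byte_off] &= ~(0xff << bit_off);
	page[byte_off] |= from_oob << bit_off;
	page[byte_off + 1] &= 0xff << bit_off;
	page[byte_off + 1] |= from_oob >> (8 - bit_off);
	page[mtd->writesize] = from_data;
}
#endif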