// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include <mxs_nand.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}
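
/*
 * Worked example (illustrative): with MXS_NAND_METADATA_SIZE = 10, the
 * expression (10 + 0x3) & ~0x3 rounds up to 12, so the per-chunk ECC
 * status bytes start at byte 12 of the auxiliary buffer, just past the
 * metadata and aligned to a 4-byte boundary.
 */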

static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo, struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must be equal to chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}
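
/*
 * Illustrative arithmetic (numbers are not from the original source): for
 * a 4096-byte page with 1024-byte chunks, gf_len = 14 and ecc_strength = 8,
 * the computation above gives
 *   i = (4096*8 - 10*8) / (14*8 + 1024*8) = 32688 / 8304 = 3
 *   j = 32688 - 8304*3 = 7776
 * Since 7776 < 8192 (one chunk worth of bits), the factory bad block mark
 * falls inside a data chunk and *chunk_num is set to i + 1 = 4.
 */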

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the chunk size (C) >= the OOB size (O) */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
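
/*
 * Example (illustrative numbers): a 2048-byte page with 64-byte OOB whose
 * datasheet asks for 4-bit ECC per 512-byte step yields gf_len = 13,
 * ecc_strength = 4, ecc_chunk_count = 4 and
 *   block_mark_bit_offset = 2048*8 - (4*13*3 + 10*8) = 16148
 * i.e. the bad block mark straddles byte 2018, bit 4, of the raw page.
 */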

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
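
/*
 * Worked example (illustrative): a 2048-byte page with 64-byte OOB keeps
 * 512-byte chunks and gf_len = 13, so ecc_chunk_count = 4 and
 *   ecc_strength = ((64 - 10) * 8) / (13 * 4) = 432 / 52 = 8 (already even)
 * giving block_mark_bit_offset = 16384 - (8*13*3 + 80) = 15992, i.e. byte
 * 1999, bit 0.
 */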

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check: the chip must report its minimum required ECC. */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC strength the platform can support. */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * Search for a supported ECC strength that puts the bad block mark
	 * in a data chunk.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND
	 * requires, but change the ECC page layout.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata. */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* Check if the OOB can afford this extra ECC chunk. */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate which chunk the bbm is located in. */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			      geo->ecc_chunkn_size * 8) + 1;
	}

	/* Calculate the number of ECC chunks behind the bbm. */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
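
/*
 * Illustrative sizing (not from the original source): a 4096-byte page
 * with 224-byte OOB uses four 1024-byte chunks, so
 *   max_ecc = ((224 - 10) * 8) / (14 * 4) = 1712 / 56 = 30
 * (already even), then capped by max_ecc_strength_supported. The search
 * loop above starts from the datasheet minimum and grows in steps of 2
 * until the bad block mark lands in a data chunk or max_ecc is exceeded.
 */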

/*
 * Wait for BCH complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}
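
/*
 * Illustration (assumed flow, based on the MTD reference cmdfunc for
 * large-page chips): a NAND_CMD_READ0 arrives here as
 *   cmd_ctrl(0x00, CLE)        -> queue the command byte
 *   cmd_ctrl(addr, ALE)  x5    -> queue the column/row address bytes
 *   cmd_ctrl(NAND_CMD_NONE, 0) -> latches deasserted: the 6 queued bytes
 *                                 go out in a single DMA transfer
 *   cmd_ctrl(0x30, CLE), cmd_ctrl(NAND_CMD_NONE, 0)
 *                              -> READSTART goes out as a second transfer
 */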

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
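
/*
 * Worked example (illustrative bytes): with bit_offset = 4 and
 * data_buf[buf_offset..buf_offset+1] = {0xAB, 0xCD}, the mark byte
 * assembled above is
 *   src = (0xAB >> 4) | (0xCD << 4) = 0xDA (once truncated to a byte)
 * which lands in oob_buf[0]; the old oob_buf[0] is spliced back into the
 * same straddling bit position across the two data bytes.
 */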

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;
	mxs_nand_read_buf(mtd, &buf, 1);
	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}
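
/*
 * Note on the threshold (illustrative): with gf_len = 13 the bitflip
 * budget is 13 / 2 = 6, further capped at the configured ecc_strength.
 * An almost-all-0xff page with at most that many zero bits in the failing
 * chunk (and in the raw page re-read) is treated as erased rather than as
 * an uncorrectable ECC error.
 */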

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t	*status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}
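
/*
 * Note on the per-chunk status bytes parsed above: the BCH engine writes
 * one byte per chunk into the auxiliary buffer, where 0x00 means no
 * errors, 0xff means the chunk was erased, 0xfe means an uncorrectable
 * chunk, and any other value is the number of corrected bitflips.
 */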

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if ((is_mx7() || is_imx8m()) && nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write the NAND page number that needs to be randomized
		 * to the GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details check
		 * section 9.6.6.4 of the i.MX7D Applications Processor
		 * Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 255) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ecc required %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
	     mtd->oobsize < 1024) || nand_info->legacy_bch_geometry) {
		dev_warn(mtd->dev, "use legacy bch geometry\n");
		return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
	}

	if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
		return mxs_nand_calc_ecc_for_large_oob(geo, mtd);

	return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
						chip->ecc_strength_ds,
						chip->ecc_step_ds);
}
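
/*
 * Decision summary (illustrative): a chip that reports no minimum ECC and
 * has a small (< 1024 byte) OOB, or a board that forces
 * legacy_bch_geometry, gets the legacy layout; a chip whose OOB exceeds
 * 1024 bytes or whose ECC step is smaller than its OOB gets the large-OOB
 * layout; everything else follows the datasheet strength/step via
 * mxs_nand_calc_ecc_layout_by_info().
 */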

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}
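
/*
 * Register sketch (illustrative, for a 2048+64 page on i.MX6 with the
 * legacy layout computed above: 4 chunks of 512 bytes, strength 8,
 * gf_len 13):
 *   FLASHLAYOUT0: NBLOCKS = 3, META_SIZE = 10, ECC0 = 8 >> 1 = 4,
 *                 DATA0_SIZE = 512 >> 2 = 128, GF13_0_GF14_1 = 0
 *   FLASHLAYOUT1: PAGE_SIZE = 2112, ECCN = 4, DATAN_SIZE = 128
 * The data-size fields are in 4-byte units on i.MX6/7/8, hence the
 * MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT of 2.
 */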

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
			    MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;
	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;

	nand->read_byte = mxs_nand_read_byte;
	nand->read_buf = mxs_nand_read_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;

	nand->ecc.mode = NAND_ECC_HW;

	return 0;
}

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_of_offset(nand_info->dev);

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	/* first scan to find the device and get the page size */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		goto err_free_buffers;

	if (mxs_nand_setup_ecc(mtd))
		goto err_free_buffers;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
	nand->ecc.strength = nand_info->bch_geometry.ecc_strength;

	/* second phase scan */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}

#ifndef CONFIG_NAND_MXS_DT
void board_nand_init(void)
{
	struct mxs_nand_info *nand_info;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	/* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
	if (is_mx6sx() || is_mx7())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
	nand_info->use_minimum_ecc = true;
#endif

	if (mxs_nand_init_ctrl(nand_info) < 0)
		goto err;

	return;

err:
	free(nand_info);
}
#endif

/*
 * Read NAND layout for FCB block generation.
 */
void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	u32 tmp;

	tmp = readl(&bch_regs->hw_bch_flash0layout0);
	l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
		     BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
		       BCH_FLASHLAYOUT0_META_SIZE_OFFSET;

	tmp = readl(&bch_regs->hw_bch_flash0layout1);
	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
			     BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
		  BCH_FLASHLAYOUT0_ECC0_OFFSET;
	l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
			     BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
	l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
		  BCH_FLASHLAYOUT1_ECCN_OFFSET;
	l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
		    BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
}
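
/*
 * Hypothetical usage sketch (callers live outside this file): an FCB
 * generator would snapshot the live BCH geometry before building the
 * firmware config block, e.g.
 *
 *	struct mxs_nand_layout l;
 *
 *	mxs_nand_get_layout(mtd, &l);
 *	fcb->ecc_block_0_size = l.data0_size;
 *	fcb->ecc_block_n_size = l.datan_size;
 *
 * where the fcb field names are illustrative only.
 */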

/*
 * Set BCH to the specific layout used by the ROM bootloader to read the FCB.
 */
void mxs_nand_mode_fcb(struct mtd_info *mtd)
{
	u32 tmp;
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 1;

	mtd->writesize = 1024;
	mtd->oobsize = 1862 - 1024;

	/* 8 ECC chunks */
	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	/* 32 bytes for metadata */
	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* use ECC level 62 */
	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* data0 block is 0x20 * 4 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);

	/* 1024 bytes for data + 838 for OOB */
	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	/* use ECC level 62 */
	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	/* datan blocks are 0x20 * 4 bytes */
	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
}

/*
 * Restore BCH to normal settings.
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}
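
/*
 * Hypothetical usage sketch (callers live elsewhere, e.g. in FCB update
 * code): the two mode helpers are meant to bracket a raw FCB page write:
 *
 *	mxs_nand_mode_fcb(mtd);
 *	ret = mtd_write(mtd, fcb_off, len, &written, fcb_buf);
 *	mxs_nand_mode_normal(mtd);
 *
 * where fcb_off, len and fcb_buf are illustrative names.
 */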

uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_byte_offset;
}

uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_bit_offset;
}