// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <cpu_func.h>
#include <dm.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <malloc.h>
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <asm/arch/sys_proto.h>
#include <mxs_nand.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}
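
/*
 * Worked example for mxs_nand_aux_status_offset(): with
 * MXS_NAND_METADATA_SIZE = 10, (10 + 0x3) & ~0x3 = 12, so the per-chunk BCH
 * status bytes start at byte 12 of the auxiliary buffer, i.e. at the first
 * 32-bit boundary past the metadata.
 */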

static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal that of chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}
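
/*
 * Illustration (hypothetical 4KiB page + 256B OOB chip, 1KiB chunks,
 * gf_len 14, ecc_strength 16): the bits preceding the physical bad block
 * marker amount to (4096 - 10) * 8 = 32688, each chunk+parity group
 * occupies 14 * 16 + 1024 * 8 = 8416 bits, so i = 3 full groups fit and
 * the remainder j = 32688 - 3 * 8416 = 7440 < 8192 bits falls inside a
 * data chunk; *chunk_num is then reported as i + 1 = 4.
 */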
144
Stefan Agner4d42ac12018-06-22 17:19:51 +0200145static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
Stefan Agneread66eb2018-06-22 18:06:18 +0200146 struct mtd_info *mtd,
147 unsigned int ecc_strength,
148 unsigned int ecc_step)
Stefan Agner4d42ac12018-06-22 17:19:51 +0200149{
150 struct nand_chip *chip = mtd_to_nand(mtd);
Stefan Agner4dc98db2018-06-22 18:06:15 +0200151 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
Ye Li94547442020-05-04 22:08:50 +0800152 unsigned int block_mark_bit_offset;
Stefan Agner4d42ac12018-06-22 17:19:51 +0200153
Stefan Agneread66eb2018-06-22 18:06:18 +0200154 switch (ecc_step) {
Stefan Agner4d42ac12018-06-22 17:19:51 +0200155 case SZ_512:
156 geo->gf_len = 13;
157 break;
158 case SZ_1K:
159 geo->gf_len = 14;
160 break;
161 default:
162 return -EINVAL;
163 }
164
Ye Li94547442020-05-04 22:08:50 +0800165 geo->ecc_chunk0_size = ecc_step;
166 geo->ecc_chunkn_size = ecc_step;
Stefan Agneread66eb2018-06-22 18:06:18 +0200167 geo->ecc_strength = round_up(ecc_strength, 2);
Stefan Agner4d42ac12018-06-22 17:19:51 +0200168
169 /* Keep the C >= O */
Ye Li94547442020-05-04 22:08:50 +0800170 if (geo->ecc_chunkn_size < mtd->oobsize)
Stefan Agner4d42ac12018-06-22 17:19:51 +0200171 return -EINVAL;
172
Stefan Agner4dc98db2018-06-22 18:06:15 +0200173 if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
Stefan Agner4d42ac12018-06-22 17:19:51 +0200174 return -EINVAL;
175
Ye Li94547442020-05-04 22:08:50 +0800176 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
177
178 /* For bit swap. */
179 block_mark_bit_offset = mtd->writesize * 8 -
180 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
181 + MXS_NAND_METADATA_SIZE * 8);
182
183 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
184 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
Stefan Agner4d42ac12018-06-22 17:19:51 +0200185
186 return 0;
187}
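
/*
 * Example for the bit-swap offset above (values for illustration only):
 * a 4KiB page with 512-byte steps gives ecc_chunk_count = 8; with
 * ecc_strength 8 and gf_len 13 the block mark sits at bit
 * 4096 * 8 - (8 * 13 * 7 + 10 * 8) = 31960, i.e. byte 3995, bit 0.
 */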
188
Ye Li94547442020-05-04 22:08:50 +0800189static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
Stefan Agnerd0778b32018-06-22 17:19:49 +0200190 struct mtd_info *mtd)
Marek Vasut913a7252011-11-08 23:18:16 +0000191{
Stefan Agner4dc98db2018-06-22 18:06:15 +0200192 struct nand_chip *chip = mtd_to_nand(mtd);
193 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
Ye Li94547442020-05-04 22:08:50 +0800194 unsigned int block_mark_bit_offset;
Stefan Agner4dc98db2018-06-22 18:06:15 +0200195
Stefan Agnerd0778b32018-06-22 17:19:49 +0200196 /* The default for the length of Galois Field. */
197 geo->gf_len = 13;
198
199 /* The default for chunk size. */
Ye Li94547442020-05-04 22:08:50 +0800200 geo->ecc_chunk0_size = 512;
201 geo->ecc_chunkn_size = 512;
Stefan Agnerd0778b32018-06-22 17:19:49 +0200202
Ye Li94547442020-05-04 22:08:50 +0800203 if (geo->ecc_chunkn_size < mtd->oobsize) {
Stefan Agnerd0778b32018-06-22 17:19:49 +0200204 geo->gf_len = 14;
Ye Li94547442020-05-04 22:08:50 +0800205 geo->ecc_chunk0_size *= 2;
206 geo->ecc_chunkn_size *= 2;
Stefan Agnerd0778b32018-06-22 17:19:49 +0200207 }
208
Ye Li94547442020-05-04 22:08:50 +0800209 geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
Stefan Agnerd0778b32018-06-22 17:19:49 +0200210
Stefan Agnerd0778b32018-06-22 17:19:49 +0200211 /*
212 * Determine the ECC layout with the formula:
213 * ECC bits per chunk = (total page spare data bits) /
214 * (bits per ECC level) / (chunks per page)
215 * where:
216 * total page spare data bits =
217 * (page oob size - meta data size) * (bits per byte)
218 */
219 geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
220 / (geo->gf_len * geo->ecc_chunk_count);
221
Stefan Agner4d42ac12018-06-22 17:19:51 +0200222 geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
Stefan Agner4dc98db2018-06-22 18:06:15 +0200223 nand_info->max_ecc_strength_supported);
Stefan Agnerd0778b32018-06-22 17:19:49 +0200224
Ye Li94547442020-05-04 22:08:50 +0800225 block_mark_bit_offset = mtd->writesize * 8 -
226 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
227 + MXS_NAND_METADATA_SIZE * 8);
228
229 geo->block_mark_byte_offset = block_mark_bit_offset / 8;
230 geo->block_mark_bit_offset = block_mark_bit_offset % 8;
231
232 return 0;
233}
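
/*
 * Worked example of the legacy formula: a 2KiB page with 64-byte OOB uses
 * 512-byte chunks and gf_len 13, so ecc_chunk_count = 4 and
 * ecc_strength = (64 - 10) * 8 / (13 * 4) = 432 / 52 = 8 (already even),
 * which is the typical BCH8 layout for such parts.
 */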

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check for the minimum ECC the NAND chip requires. */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC strength the platform can support. */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * Search for a supported ECC strength that puts the bad block
	 * marker inside a data chunk.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND chip
	 * requires, but change the ECC page layout.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata. */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* Check if the OOB can afford this extra ECC chunk. */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate in which chunk the bad block marker is located. */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			    (geo->gf_len * geo->ecc_strength +
			     geo->ecc_chunkn_size * 8) + 1;
	}

	/* Calculate the number of ECC chunks behind the bad block marker. */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}
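
/*
 * When ecc_for_meta is set above, the resulting page layout is, as a sketch:
 *   | metadata | ecc | chunk0 | ecc | chunk1 | ecc | ... | chunkN | ecc |
 * i.e. the metadata gets its own parity area (chunk0 size 0) and one extra
 * ECC chunk is accounted for in ecc_chunk_count.
 */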

/*
 * Wait for BCH complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}
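
/*
 * Note: HW_GPMI_STAT carries one READY_BUSY bit per chip select, so the
 * shift above picks the bit that belongs to nand_info->cur_chip.
 */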

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}
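
/*
 * Example (illustrative numbers): with block_mark_bit_offset = 4 the
 * logical block mark byte is reassembled from two physical bytes as
 * src = data[off] >> 4 | data[off + 1] << 4; the caller then sees the real
 * marker in oob[0], while the old oob[0] is split back across the same
 * 4-bit boundary in the data bytes.
 */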

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;

	mxs_nand_read_buf(mtd, &buf, 1);

	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n",
	       page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}
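
/*
 * The check above treats a chunk as erased when its bitflip count stays
 * within min(gf_len / 2, ecc_strength), re-reads the page raw to confirm
 * the whole page is close enough to all-0xff, and only then reports a
 * clean 0xff page with the flips accounted as corrected.
 */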

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
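	/*
	 * Per-chunk status bytes, as consumed below: 0x00 means the chunk
	 * was clean, 0xff marks an erased chunk, 0xfe flags an uncorrectable
	 * chunk, and any other value is the number of corrected bitflips.
	 */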
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * The number of the NAND page to be randomized is written
		 * to the GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details, check
		 * section 9.6.6.4 of the i.MX7D Applications Processor
		 * Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}

static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);

	if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) {
		printf("unsupported NAND chip, minimum ecc required %d\n",
		       chip->ecc_strength_ds);
		return -EINVAL;
	}

	if ((!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0) &&
	     mtd->oobsize < 1024) || nand_info->legacy_bch_geometry) {
		dev_warn(mtd->dev, "use legacy bch geometry\n");
		return mxs_nand_legacy_calc_ecc_layout(geo, mtd);
	}

	if (mtd->oobsize > 1024 || chip->ecc_step_ds < mtd->oobsize)
		return mxs_nand_calc_ecc_for_large_oob(geo, mtd);

	return mxs_nand_calc_ecc_layout_by_info(geo, mtd,
						chip->ecc_strength_ds,
						chip->ecc_step_ds);
}

/*
 * At this point, the physical NAND Flash chips have been identified and
 * counted, so we know the physical geometry. This enables us to make some
 * important configuration decisions.
 *
 * The return value of this function propagates directly back to this driver's
 * board_nand_init(). Anything other than zero will cause this driver to
 * tear everything down and declare failure.
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
	       BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
	      << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
	       BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	/* Hook some operations at the MTD level. */
	if (mtd->_read_oob != mxs_nand_hook_read_oob) {
		nand_info->hooked_read_oob = mtd->_read_oob;
		mtd->_read_oob = mxs_nand_hook_read_oob;
	}

	if (mtd->_write_oob != mxs_nand_hook_write_oob) {
		nand_info->hooked_write_oob = mtd->_write_oob;
		mtd->_write_oob = mxs_nand_hook_write_oob;
	}

	if (mtd->_block_markbad != mxs_nand_hook_block_markbad) {
		nand_info->hooked_block_markbad = mtd->_block_markbad;
		mtd->_block_markbad = mxs_nand_hook_block_markbad;
	}

	return 0;
}
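
/*
 * Example of the FLASHLAYOUT packing above (illustrative legacy values:
 * 2KiB page, 64B OOB, four 512B chunks, BCH8, gf_len 13): FLASHLAYOUT0
 * holds NBLOCKS = 4 - 1 = 3, META_SIZE = 10, ECC0 = 8 >> 1 = 4 (the
 * strength is stored halved) and the chunk size scaled by
 * MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; FLASHLAYOUT1 holds the total page
 * size 2048 + 64 = 2112 plus the matching ECCN/chunk-size fields.
 */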

/*
 * Allocate DMA buffers
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;

	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}
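
/*
 * Resulting buffer layout: one MXS_DMA_ALIGNMENT-aligned allocation where
 * bytes [0, NAND_MAX_PAGESIZE) back data_buf and the following
 * NAND_MAX_OOBSIZE bytes back oob_buf, so a page and its OOB can be
 * transferred with a single DMA chain.
 */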

/*
 * Initializes the NFC hardware.
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
			    MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
	     j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

int mxs_nand_init_spl(struct nand_chip *nand)
{
	struct mxs_nand_info *nand_info;
	int err;

	nand_info = malloc(sizeof(struct mxs_nand_info));
	if (!nand_info) {
		printf("MXS NAND: Failed to allocate private data\n");
		return -ENOMEM;
	}
	memset(nand_info, 0, sizeof(struct mxs_nand_info));

	nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
	nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;

	if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m())
		nand_info->max_ecc_strength_supported = 62;
	else
		nand_info->max_ecc_strength_supported = 40;

	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		return err;

	nand_set_controller_data(nand, nand_info);

	nand->options |= NAND_NO_SUBPAGE_WRITE;

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;
	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;

	nand->read_byte = mxs_nand_read_byte;
	nand->read_buf = mxs_nand_read_buf;

	nand->ecc.read_page = mxs_nand_ecc_read_page;

	nand->ecc.mode = NAND_ECC_HW;

	return 0;
}

int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info)
{
	struct mtd_info *mtd;
	struct nand_chip *nand;
	int err;

	nand = &nand_info->chip;
	mtd = nand_to_mtd(nand);
	err = mxs_nand_alloc_buffers(nand_info);
	if (err)
		return err;

	err = mxs_nand_init_dma(nand_info);
	if (err)
		goto err_free_buffers;

	memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout));

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
#endif

	nand_set_controller_data(nand, nand_info);
	nand->options |= NAND_NO_SUBPAGE_WRITE;

	if (nand_info->dev)
		nand->flash_node = dev_of_offset(nand_info->dev);

	nand->cmd_ctrl = mxs_nand_cmd_ctrl;

	nand->dev_ready = mxs_nand_device_ready;
	nand->select_chip = mxs_nand_select_chip;
	nand->block_bad = mxs_nand_block_bad;

	nand->read_byte = mxs_nand_read_byte;

	nand->read_buf = mxs_nand_read_buf;
	nand->write_buf = mxs_nand_write_buf;

	/* First scan to find the device and get the page size. */
	if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL))
		goto err_free_buffers;

	if (mxs_nand_setup_ecc(mtd))
		goto err_free_buffers;

	nand->ecc.read_page = mxs_nand_ecc_read_page;
	nand->ecc.write_page = mxs_nand_ecc_write_page;
	nand->ecc.read_oob = mxs_nand_ecc_read_oob;
	nand->ecc.write_oob = mxs_nand_ecc_write_oob;

	nand->ecc.layout = &fake_ecc_layout;
	nand->ecc.mode = NAND_ECC_HW;
	nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size;
	nand->ecc.strength = nand_info->bch_geometry.ecc_strength;

	/* Second phase scan. */
	err = nand_scan_tail(mtd);
	if (err)
		goto err_free_buffers;

	err = nand_register(0, mtd);
	if (err)
		goto err_free_buffers;

	return 0;

err_free_buffers:
	free(nand_info->data_buf);
	free(nand_info->cmd_buf);

	return err;
}
1426
Stefan Agner150ddbc2018-06-22 18:06:17 +02001427#ifndef CONFIG_NAND_MXS_DT
Stefan Agner404b1102018-06-22 18:06:14 +02001428void board_nand_init(void)
1429{
1430 struct mxs_nand_info *nand_info;
1431
1432 nand_info = malloc(sizeof(struct mxs_nand_info));
1433 if (!nand_info) {
1434 printf("MXS NAND: Failed to allocate private data\n");
1435 return;
1436 }
1437 memset(nand_info, 0, sizeof(struct mxs_nand_info));
1438
1439 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE;
1440 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1441
Stefan Agner4dc98db2018-06-22 18:06:15 +02001442 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */
1443 if (is_mx6sx() || is_mx7())
1444 nand_info->max_ecc_strength_supported = 62;
1445 else
1446 nand_info->max_ecc_strength_supported = 40;
1447
1448#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC
1449 nand_info->use_minimum_ecc = true;
1450#endif
1451
Stefan Agner19f90512018-06-22 18:06:16 +02001452 if (mxs_nand_init_ctrl(nand_info) < 0)
Stefan Agner404b1102018-06-22 18:06:14 +02001453 goto err;
1454
Stefan Agner5883e552018-06-22 17:19:47 +02001455 return;
Stefan Agner404b1102018-06-22 18:06:14 +02001456
1457err:
1458 free(nand_info);
Marek Vasut913a7252011-11-08 23:18:16 +00001459}
Stefan Agner150ddbc2018-06-22 18:06:17 +02001460#endif
Igor Opaniukc55401372019-11-03 16:49:43 +01001461
1462/*
1463 * Read the NAND layout from the BCH registers, for FCB generation.
1464 */
1465void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l)
1466{
1467 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1468 u32 tmp;
1469
1470 tmp = readl(&bch_regs->hw_bch_flash0layout0);
1471 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >>
1472 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1473 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >>
1474 BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1475
1476	l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >>
1477			BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET);
1478	l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >>
1479			BCH_FLASHLAYOUT0_ECC0_OFFSET;
1480
1481	/* DATAN_SIZE, ECCN and the GF bit live in flash0layout1 */
1482	tmp = readl(&bch_regs->hw_bch_flash0layout1);
1481 l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >>
1482 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET);
1483 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >>
1484 BCH_FLASHLAYOUT1_ECCN_OFFSET;
Han Xu33543b52020-05-04 22:08:58 +08001485 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >>
1486 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
Igor Opaniukc55401372019-11-03 16:49:43 +01001487}
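
/*
 * Illustrative sketch, not part of the original driver: an FCB
 * generator can derive the raw page footprint from the layout read
 * above. ECC0/ECCN encode half the ECC strength and the GF bit picks
 * 13 or 14 parity bits per ECC level, so parity is counted in bits
 * and only the final total is byte aligned.
 */
static inline u32 mxs_nand_layout_raw_size(const struct mxs_nand_layout *l)
{
	u32 gf = l->gf_len ? 14 : 13;	/* 0 selects GF13, 1 selects GF14 */
	u32 bits = 8 * l->meta_size +
		   8 * l->data0_size + 2 * l->ecc0 * gf +
		   l->nblocks * (8 * l->datan_size + 2 * l->eccn * gf);

	/* e.g. the 62-bit FCB layout: 256 + 1830 + 7 * 1830 bits = 1862 bytes */
	return bits / 8;
}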
1488
1489/*
1490 * Set BCH to the specific 62-bit ECC layout the ROM bootloader uses to read the FCB.
1491 */
Han Xuafed2a12020-05-06 20:59:19 +08001492void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd)
Igor Opaniukc55401372019-11-03 16:49:43 +01001493{
1494 u32 tmp;
1495 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1496 struct nand_chip *nand = mtd_to_nand(mtd);
1497 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1498
1499	/* the ROM reads this layout with the randomizer enabled */
1500	nand_info->en_randomizer = 1;
1500
1501 mtd->writesize = 1024;
1502 mtd->oobsize = 1862 - 1024;
1503
1504	/* 8 ECC chunks: the NBLOCKS field holds the chunk count minus one */
1505	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1506	/* 32 bytes for metadata */
1507	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1508	/* ECC0 = 0x1F selects ECC level 62; the field encodes strength / 2 */
1509	tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1510	/* data0 chunk size: 0x20 * 4 = 128 bytes */
1511	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1512	/* 0 selects GF13, i.e. 13 parity bits per ECC level */
1513	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1513 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1514
1515	/* total virtual page: 1024 bytes of data + 838 bytes of metadata and parity */
1516	tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1517	/* ECCN = 0x1F selects ECC level 62; the field encodes strength / 2 */
1518	tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1519	/* datan chunk size: 0x20 * 4 = 128 bytes */
1520	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1521	/* 0 selects GF13, i.e. 13 parity bits per ECC level */
1522	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1522 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1523}
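
/*
 * Sanity check on the 1862-byte virtual page above: 62 ECC bits over
 * GF13 cost 62 * 13 = 806 parity bits per 128-byte chunk, so the page
 * works out to 32 + 8 * 128 + (8 * 806) / 8 = 32 + 1024 + 806 = 1862
 * bytes.
 */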
1524
1525/*
Han Xuafed2a12020-05-06 20:59:19 +08001526 * Set BCH to the specific 40-bit ECC layout the ROM bootloader uses to read the FCB.
1527 */
1528void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd)
1529{
1530 u32 tmp;
1531 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1532 struct nand_chip *nand = mtd_to_nand(mtd);
1533 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1534
1535	/* no randomizer in this setting */
1536 nand_info->en_randomizer = 0;
1537
1538 mtd->writesize = 1024;
1539 mtd->oobsize = 1576 - 1024;
1540
1541	/* 8 ECC chunks: the NBLOCKS field holds the chunk count minus one */
1542	tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
1543	/* 32 bytes for metadata */
1544	tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
1545	/* ECC0 = 0x14 selects ECC level 40; the field encodes strength / 2 */
1546	tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET;
1547	/* data0 chunk size: 0x20 * 4 = 128 bytes */
1548	tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET;
1549	/* 0 selects GF13, i.e. 13 parity bits per ECC level */
1550	tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
1550 writel(tmp, &bch_regs->hw_bch_flash0layout0);
1551
1552	/* total virtual page: 1024 bytes of data + 552 bytes of metadata and parity */
1553	tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
1554	/* ECCN = 0x14 selects ECC level 40; the field encodes strength / 2 */
1555	tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET;
1556	/* datan chunk size: 0x20 * 4 = 128 bytes */
1557	tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET;
1558	/* 0 selects GF13, i.e. 13 parity bits per ECC level */
1559	tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
1559 writel(tmp, &bch_regs->hw_bch_flash0layout1);
1560}
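
/*
 * The same arithmetic for the 40-bit layout above: 40 * 13 = 520
 * parity bits per chunk, giving 32 + 8 * 128 + (8 * 520) / 8 = 32 +
 * 1024 + 520 = 1576 bytes, which matches the PAGE_SIZE programmed
 * into flash0layout1.
 */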
1561
1562/*
Igor Opaniukc55401372019-11-03 16:49:43 +01001563 * Restore BCH to normal settings.
1564 */
1565void mxs_nand_mode_normal(struct mtd_info *mtd)
1566{
1567 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
1568 struct nand_chip *nand = mtd_to_nand(mtd);
1569 struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
1570
1571 nand_info->en_randomizer = 0;
1572
1573 mtd->writesize = nand_info->writesize;
1574 mtd->oobsize = nand_info->oobsize;
1575
1576 writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
1577 writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
1578}
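
/*
 * Illustrative usage, a sketch rather than a sequence mandated by this
 * file: an FCB writer (e.g. the nandbcb command) is expected to bracket
 * its raw page accesses with the mode switches so the normal BCH layout
 * is restored afterwards:
 *
 *	mxs_nand_mode_fcb_62bit(mtd);	// or mxs_nand_mode_fcb_40bit()
 *	err = mtd_write(mtd, off, mtd->writesize, &written, fcb_buf);
 *	mxs_nand_mode_normal(mtd);
 */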
1579
1580uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
1581{
1582 struct nand_chip *chip = mtd_to_nand(mtd);
1583 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1584 struct bch_geometry *geo = &nand_info->bch_geometry;
1585
1586 return geo->block_mark_byte_offset;
1587}
1588
1589uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
1590{
1591 struct nand_chip *chip = mtd_to_nand(mtd);
1592 struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
1593 struct bch_geometry *geo = &nand_info->bch_geometry;
1594
1595 return geo->block_mark_bit_offset;
1596}
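
/*
 * Illustrative sketch, an assumed helper that is not part of the
 * driver: the two accessors above locate the factory bad block marker
 * of a raw page inside the bit-packed BCH data area. A caller could
 * recover that byte like this:
 */
static inline u8 mxs_nand_read_raw_bbm(struct mtd_info *mtd, const u8 *page)
{
	u32 byte = mxs_nand_mark_byte_offset(mtd);
	u32 bit = mxs_nand_mark_bit_offset(mtd);
	u8 marker = page[byte] >> bit;

	/* an unaligned mark straddles two data bytes */
	if (bit)
		marker |= page[byte + 1] << (8 - bit);

	return marker;
}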