// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <malloc.h>
#include <mxs_nand.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/types.h>
#include <linux/math64.h>

#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

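/*
 * The shift below differs per SoC because, as far as one can infer from the
 * BCH layout register programming later in this driver (not a documented
 * guarantee), the newer controllers encode the ECC data chunk size in units
 * of 4 bytes rather than single bytes.
 */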
#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

#define MXS_NAND_BCH_TIMEOUT			10000
#define USEC_PER_SEC				1000000
#define NSEC_PER_SEC				1000000000L

#define TO_CYCLES(duration, period)	DIV_ROUND_UP_ULL(duration, period)
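/*
 * Illustrative example (hypothetical numbers): with a 100 MHz GPMI clock the
 * period is 10 ns, so a 25 ns timing parameter needs
 * TO_CYCLES(25, 10) = DIV_ROUND_UP_ULL(25, 10) = 3 cycles. Rounding up keeps
 * the programmed timing at least as long as the requested duration.
 */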

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

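/*
 * The ECC status bytes live at the first 32-bit aligned offset past the
 * metadata: with MXS_NAND_METADATA_SIZE = 10 this evaluates to
 * (10 + 0x3) & ~0x3 = 12.
 */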
static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

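/*
 * Worked example (hypothetical geometry): for a 4096-byte page with 10 bytes
 * of metadata, gf_len = 14, ecc_strength = 16 and 1024-byte chunks, each
 * chunk plus its parity occupies 14 * 16 + 1024 * 8 = 8416 bits. Then
 * i = (32768 - 80) / 8416 = 3 and j = 32688 - 3 * 8416 = 7440 < 8192, so the
 * bad block mark falls inside data chunk 4 and this returns true.
 */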
static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must be equal to chunkn\n");
		return false;
	}

	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8);

	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
	    (geo->gf_len * geo->ecc_strength +
	     geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}

static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the chunk size (C) >= the OOB size (O). */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	int corr, ds_corr;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);
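	/*
	 * Worked example (hypothetical part): a 4096-byte page with a
	 * 224-byte OOB keeps 512-byte chunks (gf_len = 13), giving 8 chunks
	 * and (224 - 10) * 8 / (13 * 8) = 16 bits of correction per chunk;
	 * the block mark computation below then lands at byte 3904, bit 0.
	 */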

	/* Check the ECC strength, just as nand_ecc_is_strong_enough() does. */
	if (chip->ecc_step_ds) {
		corr = mtd->writesize * geo->ecc_strength /
		       geo->ecc_chunkn_size;
		ds_corr = mtd->writesize * chip->ecc_strength_ds /
			  chip->ecc_step_ds;
		if (corr < ds_corr ||
		    geo->ecc_strength < chip->ecc_strength_ds)
			return -EINVAL;
	}

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* Sanity check: the chip must advertise its minimum required ECC. */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* Calculate the maximum ECC strength the platform can support. */
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/*
	 * Search for a supported ECC strength that places the bad block mark
	 * in a data chunk.
	 */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/*
	 * If none of them works, keep using the minimum ECC the NAND chip
	 * requires, but change the ECC page layout.
	 */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* Add an extra ECC chunk for the metadata. */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
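		/*
		 * That is, chunk 0 shrinks to zero data bytes, so its parity
		 * protects only the metadata, and one extra chunk keeps the
		 * full page covered.
		 */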
		/* Check whether the OOB can afford this extra ECC chunk. */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* Calculate which chunk the bad block mark is located in. */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			      geo->ecc_chunkn_size * 8) + 1;
	}

	/* Calculate the number of ECC chunks behind the bad block mark. */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
		 + MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

/*
 * Wait for the BCH complete IRQ and clear the IRQ.
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these function pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in the MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
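	/*
	 * For example (a sketch of typical traffic, not traced from real
	 * hardware): a large-page read queues the 0x00 command byte under CLE
	 * followed by the address cycles under ALE, and the whole
	 * command-plus-address sequence goes out in a single DMA operation
	 * once MTD deasserts both latch enables.
	 */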
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);
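	/*
	 * For instance, with bit_offset = 2 the mark byte is rebuilt from the
	 * top six bits of data_buf[buf_offset] and the low two bits of
	 * data_buf[buf_offset + 1].
	 */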

	dst = oob_buf[0];

	oob_buf[0] = src;

	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/*
 * Read data from NAND.
 */
static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	/* Compile the DMA descriptor - a descriptor that reads data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/*
	 * A DMA descriptor that waits for the command to end and the chip to
	 * become ready.
	 *
	 * I think we actually should *not* be waiting for the chip to become
	 * ready because, after all, we don't care. I think the original code
	 * did that and no one has re-thought it yet.
	 */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	memcpy(buf, nand_info->data_buf, length);

rtn:
	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Write data to NAND.
 */
static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
			       int length)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	if (length > NAND_MAX_PAGESIZE) {
		printf("MXS NAND: DMA buffer too big\n");
		return;
	}

	if (!buf) {
		printf("MXS NAND: DMA buffer is NULL\n");
		return;
	}

	memcpy(nand_info->data_buf, buf, length);

	/* Compile the DMA descriptor - a descriptor that writes data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(length << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->data_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		length;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: DMA write error\n");

	mxs_nand_return_dma_descs(nand_info);
}

/*
 * Read a single byte from NAND.
 */
static uint8_t mxs_nand_read_byte(struct mtd_info *mtd)
{
	uint8_t buf;

	mxs_nand_read_buf(mtd, &buf, 1);

	return buf;
}

static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand,
				 u8 *buf, int chunk, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	unsigned int flip_bits = 0, flip_bits_noecc = 0;
	unsigned int threshold;
	unsigned int base = geo->ecc_chunkn_size * chunk;
	u32 *dma_buf = (u32 *)buf;
	int i;

	threshold = geo->gf_len / 2;
	if (threshold > geo->ecc_strength)
		threshold = geo->ecc_strength;
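	/*
	 * In other words, threshold = min(gf_len / 2, ecc_strength): only a
	 * chunk whose bitflip count the ECC could still have corrected is
	 * considered a candidate for an erased page.
	 */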

	for (i = 0; i < geo->ecc_chunkn_size; i++) {
		flip_bits += hweight8(~buf[base + i]);
		if (flip_bits > threshold)
			return false;
	}

	nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
	nand->read_buf(mtd, buf, mtd->writesize);

	for (i = 0; i < mtd->writesize / 4; i++) {
		flip_bits_noecc += hweight32(~dma_buf[i]);
		if (flip_bits_noecc > threshold)
			return false;
	}

	mtd->ecc_stats.corrected += flip_bits;

	memset(buf, 0xff, mtd->writesize);

	printf("The page(%d) is an erased page(%d,%d,%d,%d).\n",
	       page, chunk, threshold, flip_bits, flip_bits_noecc);

	return true;
}

/*
 * Read a page from NAND.
 */
static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand,
				  uint8_t *buf, int oob_required,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	uint32_t corrected = 0, failed = 0;
	uint8_t *status;
	int i, ret;
	int flag = 0;

	/* Compile the DMA descriptor - wait for ready. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(1 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - enable the BCH block and read. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_READ |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_DECODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize;
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

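	/*
	 * When the randomizer is in use, decode with the same type-2
	 * randomizer and page-derived seed (page % 256) that
	 * mxs_nand_ecc_write_page() used when the page was written.
	 */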
	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - disable the BCH block. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN |
		MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END |
		(3 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA |
		(mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] = 0;

	mxs_dma_desc_append(channel, d);

	/* Compile the DMA descriptor - deassert the NAND lock and interrupt. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM;

	d->cmd.address = 0;

	mxs_dma_desc_append(channel, d);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA read error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH read timeout\n");
		goto rtn;
	}

	mxs_nand_return_dma_descs(nand_info);

	/* Invalidate caches */
	mxs_nand_inval_data_buf(nand_info);

	/* Read DMA completed, now do the mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Loop over status bytes, accumulating ECC status. */
	status = nand_info->oob_buf + mxs_nand_aux_status_offset();
	for (i = 0; i < geo->ecc_chunk_count; i++) {
		if (status[i] == 0x00)
			continue;

		if (status[i] == 0xff) {
			if (!nand_info->en_randomizer &&
			    (is_mx6dqp() || is_mx7() || is_mx6ul() ||
			     is_imx8() || is_imx8m()))
				if (readl(&bch_regs->hw_bch_debug1))
					flag = 1;
			continue;
		}

		if (status[i] == 0xfe) {
			if (mxs_nand_erased_page(mtd, nand,
						 nand_info->data_buf, i, page))
				break;
			failed++;
			continue;
		}

		corrected += status[i];
	}

	/* Propagate ECC status to the owning MTD. */
	mtd->ecc_stats.failed += failed;
	mtd->ecc_stats.corrected += corrected;

	/*
	 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for
	 * details about our policy for delivering the OOB.
	 *
	 * We fill the caller's buffer with set bits, and then copy the block
	 * mark to the caller's buffer. Note that, if block mark swapping was
	 * necessary, it has already been done, so we can rely on the first
	 * byte of the auxiliary buffer to contain the block mark.
	 */
	memset(nand->oob_poi, 0xff, mtd->oobsize);

	nand->oob_poi[0] = nand_info->oob_buf[0];

	memcpy(buf, nand_info->data_buf, mtd->writesize);

	if (flag)
		memset(buf, 0xff, mtd->writesize);
rtn:
	mxs_nand_return_dma_descs(nand_info);

	return ret;
}

/*
 * Write a page to NAND.
 */
static int mxs_nand_ecc_write_page(struct mtd_info *mtd,
				   struct nand_chip *nand, const uint8_t *buf,
				   int oob_required, int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	memcpy(nand_info->data_buf, buf, mtd->writesize);
	memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize);

	/* Handle block mark swapping. */
	mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf);

	/* Compile the DMA descriptor - write data. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END |
		(6 << MXS_DMA_DESC_PIO_WORDS_OFFSET);

	d->cmd.address = 0;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_DATA;
	d->cmd.pio_words[1] = 0;
	d->cmd.pio_words[2] =
		GPMI_ECCCTRL_ENABLE_ECC |
		GPMI_ECCCTRL_ECC_CMD_ENCODE |
		GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE;
	d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize);
	d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf;
	d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf;

	if (nand_info->en_randomizer) {
		d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE |
				       GPMI_ECCCTRL_RANDOMIZER_TYPE2;
		/*
		 * Write the NAND page number to be randomized to the
		 * GPMI_ECCCOUNT register.
		 *
		 * The value is between 0-255. For additional details, check
		 * section 9.6.6.4 of the i.MX7D Applications Processor
		 * Reference Manual.
		 */
		d->cmd.pio_words[3] |= (page % 256) << 16;
	}

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_data_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret) {
		printf("MXS NAND: DMA write error\n");
		goto rtn;
	}

	ret = mxs_nand_wait_for_bch_complete(nand_info);
	if (ret) {
		printf("MXS NAND: BCH write timeout\n");
		goto rtn;
	}

rtn:
	mxs_nand_return_dma_descs(nand_info);
	return 0;
}

/*
 * Read OOB from NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_read_oob(mtd, from, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Write OOB to NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	if (ops->mode == MTD_OPS_RAW)
		nand_info->raw_oob_mode = 1;
	else
		nand_info->raw_oob_mode = 0;

	ret = nand_info->hooked_write_oob(mtd, to, ops);

	nand_info->raw_oob_mode = 0;

	return ret;
}

/*
 * Mark a block bad in NAND.
 *
 * This function is a veneer that replaces the function originally installed by
 * the NAND Flash MTD code.
 */
static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	int ret;

	nand_info->marking_block_bad = 1;

	ret = nand_info->hooked_block_markbad(mtd, ofs);

	nand_info->marking_block_bad = 0;

	return ret;
}

/*
 * There are several places in this driver where we have to handle the OOB and
 * block marks. This is the function where things are the most complicated, so
 * this is where we try to explain it all. All the other places refer back to
 * here.
 *
 * These are the rules, in order of decreasing importance:
 *
 * 1) Nothing the caller does can be allowed to imperil the block mark, so all
 *    write operations take measures to protect it.
 *
 * 2) In read operations, the first byte of the OOB we return must reflect the
 *    true state of the block mark, no matter where that block mark appears in
 *    the physical page.
 *
 * 3) ECC-based read operations return an OOB full of set bits (since we never
 *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
 *    return).
 *
 * 4) "Raw" read operations return a direct view of the physical bytes in the
 *    page, using the conventional definition of which bytes are data and which
 *    are OOB. This gives the caller a way to see the actual, physical bytes
 *    in the page, without the distortions applied by our ECC engine.
 *
 * What we do for this specific read operation depends on whether we're doing
 * a "raw" read, or an ECC-based read.
 *
 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
 * easy. When reading a page, for example, the NAND Flash MTD code calls our
 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
 * ECC-based or raw view of the page is implicit in which function it calls
 * (there is a similar pair of ECC-based/raw functions for writing).
 *
 * Since MTD assumes the OOB is not covered by ECC, there is no pair of
 * ECC-based/raw functions for reading or writing the OOB. The fact that the
 * caller wants an ECC-based or raw view of the page is not propagated down to
 * this driver.
 *
 * Since our OOB *is* covered by ECC, we need this information. So, we hook the
 * ecc.read_oob and ecc.write_oob function pointers in the owning
 * struct mtd_info with our own functions. These hook functions set the
 * raw_oob_mode field so that, when control finally arrives here, we'll know
 * what to do.
 */
static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
				 int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	/*
	 * First, fill in the OOB buffer. If we're doing a raw read, we need to
	 * get the bytes from the physical page. If we're not doing a raw read,
	 * we need to fill the buffer with set bits.
	 */
	if (nand_info->raw_oob_mode) {
		/*
		 * If control arrives here, we're doing a "raw" read. Send the
		 * command to read the conventional OOB and read it.
		 */
		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		nand->read_buf(mtd, nand->oob_poi, mtd->oobsize);
	} else {
		/*
		 * If control arrives here, we're not doing a "raw" read. Fill
		 * the OOB buffer with set bits and correct the block mark.
		 */
		memset(nand->oob_poi, 0xff, mtd->oobsize);

		nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
		mxs_nand_read_buf(mtd, nand->oob_poi, 1);
	}

	return 0;
}

/*
 * Write OOB data to NAND.
 */
static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
				  int page)
{
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	uint8_t block_mark = 0;

	/*
	 * There are fundamental incompatibilities between the i.MX GPMI NFC and
	 * the NAND Flash MTD model that make it essentially impossible to write
	 * the out-of-band bytes.
	 *
	 * We permit *ONE* exception. If the *intent* of writing the OOB is to
	 * mark a block bad, we can do that.
	 */

	if (!nand_info->marking_block_bad) {
		printf("MXS NAND: Writing OOB isn't supported\n");
		return -EIO;
	}

	/* Write the block mark. */
	nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	nand->write_buf(mtd, &block_mark, 1);
	nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	/* Check if it worked. */
	if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL)
		return -EIO;

	return 0;
}

/*
 * Claims all blocks are good.
 *
 * In principle, this function is *only* called when the NAND Flash MTD system
 * isn't allowed to keep an in-memory bad block table, so it is forced to ask
 * the driver for bad block information.
 *
 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so
 * this function is *only* called when we take it away.
 *
 * Thus, this function is only called when we want *all* blocks to look good,
 * so it *always* returns success.
 */
static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
{
	return 0;
}
| 1127 | |
| 1128 | static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo) |
| 1129 | { |
| 1130 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1131 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1132 | struct mxs_nand_info *nand_info = nand_get_controller_data(nand); |
Han Xu | 2ee499e | 2022-03-25 08:36:38 -0500 | [diff] [blame] | 1133 | int err; |
Stefan Agner | ead66eb | 2018-06-22 18:06:18 +0200 | [diff] [blame] | 1134 | |
Ye Li | 9454744 | 2020-05-04 22:08:50 +0800 | [diff] [blame] | 1135 | if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) { |
| 1136 | printf("unsupported NAND chip, minimum ecc required %d\n" |
| 1137 | , chip->ecc_strength_ds); |
| 1138 | return -EINVAL; |
| 1139 | } |
Stefan Agner | ead66eb | 2018-06-22 18:06:18 +0200 | [diff] [blame] | 1140 | |
Han Xu | 2ee499e | 2022-03-25 08:36:38 -0500 | [diff] [blame] | 1141 | /* use the legacy bch setting by default */ |
| 1142 | if ((!nand_info->use_minimum_ecc && mtd->oobsize < 1024) || |
| 1143 | !(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) { |
| 1144 | dev_dbg(mtd->dev, "use legacy bch geometry\n"); |
| 1145 | err = mxs_nand_legacy_calc_ecc_layout(geo, mtd); |
| 1146 | if (!err) |
| 1147 | return 0; |
Ye Li | 9454744 | 2020-05-04 22:08:50 +0800 | [diff] [blame] | 1148 | } |
Stefan Agner | ead66eb | 2018-06-22 18:06:18 +0200 | [diff] [blame] | 1149 | |
Han Xu | 2ee499e | 2022-03-25 08:36:38 -0500 | [diff] [blame] | 1150 | /* for large oob nand */ |
| 1151 | if (mtd->oobsize > 1024) { |
| 1152 | dev_dbg(mtd->dev, "use large oob bch geometry\n"); |
| 1153 | err = mxs_nand_calc_ecc_for_large_oob(geo, mtd); |
| 1154 | if (!err) |
| 1155 | return 0; |
| 1156 | } |
Ye Li | 9454744 | 2020-05-04 22:08:50 +0800 | [diff] [blame] | 1157 | |
Han Xu | 2ee499e | 2022-03-25 08:36:38 -0500 | [diff] [blame] | 1158 | 	/* otherwise use the minimum ecc required by the nand chips */
| 1159 | dev_dbg(mtd->dev, "use minimum ecc bch geometry\n"); |
| 1160 | err = mxs_nand_calc_ecc_layout_by_info(geo, mtd, chip->ecc_strength_ds, |
| 1161 | chip->ecc_step_ds); |
Stefan Agner | ead66eb | 2018-06-22 18:06:18 +0200 | [diff] [blame] | 1162 | |
Han Xu | 2ee499e | 2022-03-25 08:36:38 -0500 | [diff] [blame] | 1163 | if (err) |
| 1164 | 		dev_err(mtd->dev, "none of the bch geometry settings works\n");
| 1165 | |
| 1166 | return err; |
| 1167 | } |
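/*
 * A worked example of the legacy fallback above (illustrative numbers,
 * assuming the legacy layout divides the spare bytes left after the
 * metadata among the ECC chunks): for a 2048+64 page split into four
 * 512-byte chunks with GF(2^13),
 *
 *	ecc_strength = (oobsize - MXS_NAND_METADATA_SIZE) * 8
 *			/ (gf_len * ecc_chunk_count)
 *		     = (64 - 10) * 8 / (13 * 4) = 8 (rounded to even)
 *
 * i.e. the classic ECC8 geometry for small-OOB 2KiB-page chips.
 */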
| 1168 | |
| 1169 | void mxs_nand_dump_geo(struct mtd_info *mtd) |
| 1170 | { |
| 1171 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1172 | struct mxs_nand_info *nand_info = nand_get_controller_data(nand); |
| 1173 | struct bch_geometry *geo = &nand_info->bch_geometry; |
| 1174 | |
| 1175 | 	dev_dbg(mtd->dev, "BCH Geometry:\n"
| 1176 | "GF Length\t\t: %u\n" |
| 1177 | "ECC Strength\t\t: %u\n" |
| 1178 | "ECC for Meta\t\t: %u\n" |
| 1179 | "ECC Chunk0 Size\t\t: %u\n" |
| 1180 | "ECC Chunkn Size\t\t: %u\n" |
| 1181 | "ECC Chunk Count\t\t: %u\n" |
| 1182 | "Block Mark Byte Offset\t: %u\n" |
| 1183 | "Block Mark Bit Offset\t: %u\n", |
| 1184 | geo->gf_len, |
| 1185 | geo->ecc_strength, |
| 1186 | geo->ecc_for_meta, |
| 1187 | geo->ecc_chunk0_size, |
| 1188 | geo->ecc_chunkn_size, |
| 1189 | geo->ecc_chunk_count, |
| 1190 | geo->block_mark_byte_offset, |
| 1191 | geo->block_mark_bit_offset); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1192 | } |
| 1193 | |
| 1194 | /* |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1195 | * At this point, the physical NAND Flash chips have been identified and |
| 1196 | * counted, so we know the physical geometry. This enables us to make some |
| 1197 | * important configuration decisions. |
| 1198 | * |
Robert P. J. Day | 8d56db9 | 2016-07-15 13:44:45 -0400 | [diff] [blame] | 1199 | * The return value of this function propagates directly back to this driver's |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1200 | * board_nand_init(). Anything other than zero will cause this driver to |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1201 | * tear everything down and declare failure. |
| 1202 | */ |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1203 | int mxs_nand_setup_ecc(struct mtd_info *mtd) |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1204 | { |
Scott Wood | 17fed14 | 2016-05-30 13:57:56 -0500 | [diff] [blame] | 1205 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1206 | struct mxs_nand_info *nand_info = nand_get_controller_data(nand); |
Stefan Agner | d0778b3 | 2018-06-22 17:19:49 +0200 | [diff] [blame] | 1207 | struct bch_geometry *geo = &nand_info->bch_geometry; |
Stefan Agner | dc8af6d | 2018-06-22 18:06:12 +0200 | [diff] [blame] | 1208 | struct mxs_bch_regs *bch_regs = nand_info->bch_regs; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1209 | uint32_t tmp; |
Stefan Agner | ead66eb | 2018-06-22 18:06:18 +0200 | [diff] [blame] | 1210 | int ret; |
Stefan Agner | 4d42ac1 | 2018-06-22 17:19:51 +0200 | [diff] [blame] | 1211 | |
Igor Opaniuk | c5540137 | 2019-11-03 16:49:43 +0100 | [diff] [blame] | 1212 | nand_info->en_randomizer = 0; |
| 1213 | nand_info->oobsize = mtd->oobsize; |
| 1214 | nand_info->writesize = mtd->writesize; |
| 1215 | |
Stefan Agner | ead66eb | 2018-06-22 18:06:18 +0200 | [diff] [blame] | 1216 | ret = mxs_nand_set_geometry(mtd, geo); |
Stefan Agner | 4d42ac1 | 2018-06-22 17:19:51 +0200 | [diff] [blame] | 1217 | if (ret) |
| 1218 | return ret; |
| 1219 | |
Han Xu | 2ee499e | 2022-03-25 08:36:38 -0500 | [diff] [blame] | 1220 | mxs_nand_dump_geo(mtd); |
| 1221 | |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1222 | /* Configure BCH and set NFC geometry */ |
Otavio Salvador | cbf0bf2 | 2012-08-13 09:53:12 +0000 | [diff] [blame] | 1223 | mxs_reset_block(&bch_regs->hw_bch_ctrl_reg); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1224 | |
| 1225 | /* Configure layout 0 */ |
Stefan Agner | d0778b3 | 2018-06-22 17:19:49 +0200 | [diff] [blame] | 1226 | tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1227 | tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; |
Stefan Agner | d0778b3 | 2018-06-22 17:19:49 +0200 | [diff] [blame] | 1228 | tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET; |
Ye Li | 9454744 | 2020-05-04 22:08:50 +0800 | [diff] [blame] | 1229 | tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; |
Stefan Agner | d0778b3 | 2018-06-22 17:19:49 +0200 | [diff] [blame] | 1230 | tmp |= (geo->gf_len == 14 ? 1 : 0) << |
Peng Fan | c94f09d | 2015-07-21 16:15:19 +0800 | [diff] [blame] | 1231 | BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1232 | writel(tmp, &bch_regs->hw_bch_flash0layout0); |
Igor Opaniuk | c5540137 | 2019-11-03 16:49:43 +0100 | [diff] [blame] | 1233 | nand_info->bch_flash0layout0 = tmp; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1234 | |
| 1235 | tmp = (mtd->writesize + mtd->oobsize) |
| 1236 | << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; |
Stefan Agner | d0778b3 | 2018-06-22 17:19:49 +0200 | [diff] [blame] | 1237 | tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET; |
Ye Li | 9454744 | 2020-05-04 22:08:50 +0800 | [diff] [blame] | 1238 | tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT; |
Stefan Agner | d0778b3 | 2018-06-22 17:19:49 +0200 | [diff] [blame] | 1239 | tmp |= (geo->gf_len == 14 ? 1 : 0) << |
Peng Fan | c94f09d | 2015-07-21 16:15:19 +0800 | [diff] [blame] | 1240 | BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1241 | writel(tmp, &bch_regs->hw_bch_flash0layout1); |
Igor Opaniuk | c5540137 | 2019-11-03 16:49:43 +0100 | [diff] [blame] | 1242 | nand_info->bch_flash0layout1 = tmp; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1243 | |
Peng Fan | 9e81373 | 2020-05-04 22:08:53 +0800 | [diff] [blame] | 1244 | 	/* Set erase threshold to ecc strength for mx6ul, mx6qp, mx7, imx8 and imx8m */
| 1245 | if (is_mx6dqp() || is_mx7() || |
Peng Fan | 128abf4 | 2020-05-04 22:09:00 +0800 | [diff] [blame] | 1246 | is_mx6ul() || is_imx8() || is_imx8m()) |
Peng Fan | 9e81373 | 2020-05-04 22:08:53 +0800 | [diff] [blame] | 1247 | writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength), |
| 1248 | &bch_regs->hw_bch_mode); |
| 1249 | |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1250 | /* Set *all* chip selects to use layout 0 */ |
| 1251 | writel(0, &bch_regs->hw_bch_layoutselect); |
| 1252 | |
| 1253 | /* Enable BCH complete interrupt */ |
| 1254 | writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set); |
| 1255 | |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1256 | return 0; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1257 | } |
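/*
 * A worked example of the register packing above (hypothetical chip,
 * not taken from this file): a 4096+224 page on i.MX6, using eight
 * 512-byte chunks, 10 metadata bytes, GF(2^13) and ECC16, yields
 *
 *	FLASHLAYOUT0: NBLOCKS    = ecc_chunk_count - 1 = 7
 *	              META_SIZE  = 10
 *	              ECC0       = ecc_strength >> 1   = 8
 *	              DATA0_SIZE = 512 >> 2            = 128
 *	FLASHLAYOUT1: PAGE_SIZE  = 4096 + 224          = 4320
 *	              plus the same ECCN/DATAN/GF fields for the
 *	              remaining chunks.
 */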
| 1258 | |
| 1259 | /* |
| 1260 | * Allocate DMA buffers |
| 1261 | */ |
| 1262 | int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info) |
| 1263 | { |
| 1264 | uint8_t *buf; |
| 1265 | const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE; |
| 1266 | |
Marek Vasut | 1b120e8 | 2012-03-15 18:33:19 +0000 | [diff] [blame] | 1267 | nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT); |
| 1268 | |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1269 | /* DMA buffers */ |
Marek Vasut | 1b120e8 | 2012-03-15 18:33:19 +0000 | [diff] [blame] | 1270 | buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1271 | if (!buf) { |
| 1272 | printf("MXS NAND: Error allocating DMA buffers\n"); |
| 1273 | return -ENOMEM; |
| 1274 | } |
| 1275 | |
Marek Vasut | 1b120e8 | 2012-03-15 18:33:19 +0000 | [diff] [blame] | 1276 | memset(buf, 0, nand_info->data_buf_size); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1277 | |
| 1278 | nand_info->data_buf = buf; |
| 1279 | nand_info->oob_buf = buf + NAND_MAX_PAGESIZE; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1280 | /* Command buffers */ |
| 1281 | nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT, |
| 1282 | MXS_NAND_COMMAND_BUFFER_SIZE); |
| 1283 | if (!nand_info->cmd_buf) { |
| 1284 | free(buf); |
| 1285 | printf("MXS NAND: Error allocating command buffers\n"); |
| 1286 | return -ENOMEM; |
| 1287 | } |
| 1288 | memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE); |
| 1289 | nand_info->cmd_queue_len = 0; |
| 1290 | |
| 1291 | return 0; |
| 1292 | } |
| 1293 | |
| 1294 | /* |
| 1295 |  * Initializes the DMA descriptors and resets the NFC hardware.
| 1296 | */ |
Adam Ford | 6edb91a | 2019-01-12 06:25:48 -0600 | [diff] [blame] | 1297 | static int mxs_nand_init_dma(struct mxs_nand_info *info) |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1298 | { |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1299 | int i = 0, j, ret = 0; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1300 | |
| 1301 | info->desc = malloc(sizeof(struct mxs_dma_desc *) * |
| 1302 | MXS_NAND_DMA_DESCRIPTOR_COUNT); |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1303 | if (!info->desc) { |
| 1304 | ret = -ENOMEM; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1305 | goto err1; |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1306 | } |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1307 | |
| 1308 | /* Allocate the DMA descriptors. */ |
| 1309 | for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) { |
| 1310 | info->desc[i] = mxs_dma_desc_alloc(); |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1311 | if (!info->desc[i]) { |
| 1312 | ret = -ENOMEM; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1313 | goto err2; |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1314 | } |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1315 | } |
| 1316 | |
| 1317 | /* Init the DMA controller. */ |
Fabio Estevam | 1715622 | 2017-06-29 09:33:44 -0300 | [diff] [blame] | 1318 | mxs_dma_init(); |
Marek Vasut | 93541b4 | 2012-04-08 17:34:46 +0000 | [diff] [blame] | 1319 | for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0; |
| 1320 | j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) { |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1321 | ret = mxs_dma_init_channel(j); |
| 1322 | if (ret) |
Marek Vasut | 93541b4 | 2012-04-08 17:34:46 +0000 | [diff] [blame] | 1323 | goto err3; |
| 1324 | } |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1325 | |
| 1326 | /* Reset the GPMI block. */ |
Stefan Agner | dc8af6d | 2018-06-22 18:06:12 +0200 | [diff] [blame] | 1327 | mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg); |
| 1328 | mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1329 | |
| 1330 | /* |
| 1331 | * Choose NAND mode, set IRQ polarity, disable write protection and |
| 1332 | * select BCH ECC. |
| 1333 | */ |
Stefan Agner | dc8af6d | 2018-06-22 18:06:12 +0200 | [diff] [blame] | 1334 | clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1, |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1335 | GPMI_CTRL1_GPMI_MODE, |
| 1336 | GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET | |
| 1337 | GPMI_CTRL1_BCH_MODE); |
| 1338 | |
| 1339 | return 0; |
| 1340 | |
Marek Vasut | 93541b4 | 2012-04-08 17:34:46 +0000 | [diff] [blame] | 1341 | err3: |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1342 | for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--) |
Marek Vasut | 93541b4 | 2012-04-08 17:34:46 +0000 | [diff] [blame] | 1343 | mxs_dma_release(j); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1344 | err2: |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1345 | for (--i; i >= 0; i--) |
| 1346 | mxs_dma_desc_free(info->desc[i]); |
Peng Fan | e37d5a9 | 2016-01-27 10:38:02 +0800 | [diff] [blame] | 1347 | free(info->desc); |
| 1348 | err1: |
| 1349 | if (ret == -ENOMEM) |
| 1350 | printf("MXS NAND: Unable to allocate DMA descriptors\n"); |
| 1351 | return ret; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1352 | } |
| 1353 | |
Michael Trimarchi | fd6e13e | 2022-08-30 16:48:47 +0200 | [diff] [blame] | 1354 | /* |
| 1355 |  * <1> Firstly, we should know what the GPMI-clock means.
| 1356 |  * The GPMI-clock is the internal clock of the gpmi nand controller.
| 1357 |  * If you run the gpmi nand controller at 100MHz, the GPMI-clock's
| 1358 |  * period is 10ns. We refer to this period as the GPMI-clock-period.
| 1359 | * |
| 1360 |  * <2> Secondly, we should know the frequency on the nand chip pins.
| 1361 | * The frequency on the nand chip pins is derived from the GPMI-clock. |
| 1362 | * We can get it from the following equation: |
| 1363 | * |
| 1364 | * F = G / (DS + DH) |
| 1365 | * |
| 1366 | * F : the frequency on the nand chip pins. |
| 1367 | * G : the GPMI clock, such as 100MHz. |
| 1368 | * DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP |
| 1369 | * DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD |
| 1370 | * |
| 1371 | * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz, |
| 1372 |  * the nand EDO (Extended Data Out) timing can be applied.
| 1373 | * The GPMI implements a feedback read strobe to sample the read data. |
| 1374 | * The feedback read strobe can be delayed to support the nand EDO timing |
| 1375 |  * where the read strobe may deassert before the read data is valid, and
| 1376 |  * the read data stays valid for some time after the read strobe.
| 1377 | * |
| 1378 | * The following figure illustrates some aspects of a NAND Flash read: |
| 1379 | * |
| 1380 | * |<---tREA---->| |
| 1381 | * | | |
| 1382 | * | | | |
| 1383 | * |<--tRP-->| | |
| 1384 | * | | | |
| 1385 | * __ ___|__________________________________ |
| 1386 | * RDN \________/ | |
| 1387 | * | |
| 1388 | * /---------\ |
| 1389 | * Read Data --------------< >--------- |
| 1390 | * \---------/ |
| 1391 | * | | |
| 1392 | * |<-D->| |
| 1393 | * FeedbackRDN ________ ____________ |
| 1394 | * \___________/ |
| 1395 | * |
| 1396 | * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY. |
| 1397 | * |
| 1398 | * |
| 1399 | * <4> Now, we begin to describe how to compute the right RDN_DELAY. |
| 1400 | * |
| 1401 | * 4.1) From the aspect of the nand chip pins: |
| 1402 | * Delay = (tREA + C - tRP) {1} |
| 1403 | * |
| 1404 | * tREA : the maximum read access time. |
| 1405 |  * C    : a constant to adjust the delay, 4000ps by default.
| 1406 | * tRP : the read pulse width, which is exactly: |
| 1407 | * tRP = (GPMI-clock-period) * DATA_SETUP |
| 1408 | * |
| 1409 | * 4.2) From the aspect of the GPMI nand controller: |
| 1410 | * Delay = RDN_DELAY * 0.125 * RP {2} |
| 1411 | * |
| 1412 | * RP : the DLL reference period. |
| 1413 |  * if (GPMI-clock-period > DLL_THRESHOLD)
| 1414 | * RP = GPMI-clock-period / 2; |
| 1415 | * else |
| 1416 | * RP = GPMI-clock-period; |
| 1417 | * |
| 1418 |  * Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
| 1419 |  * is greater than DLL_THRESHOLD. In other SoCs, the DLL_THRESHOLD
| 1420 |  * is 16000ps, but in mx6q, we use 12000ps.
| 1421 | * |
| 1422 | * 4.3) since {1} equals {2}, we get: |
| 1423 | * |
| 1424 | * (tREA + 4000 - tRP) * 8 |
| 1425 | * RDN_DELAY = ----------------------- {3} |
| 1426 | * RP |
| 1427 | */ |
| 1428 | static void mxs_compute_timings(struct nand_chip *chip, |
| 1429 | const struct nand_sdr_timings *sdr) |
| 1430 | { |
| 1431 | struct mxs_nand_info *nand_info = nand_get_controller_data(chip); |
| 1432 | unsigned long clk_rate; |
| 1433 | unsigned int dll_wait_time_us; |
| 1434 | unsigned int dll_threshold_ps = nand_info->max_chain_delay; |
| 1435 | unsigned int period_ps, reference_period_ps; |
| 1436 | unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; |
| 1437 | unsigned int tRP_ps; |
| 1438 | bool use_half_period; |
| 1439 | int sample_delay_ps, sample_delay_factor; |
| 1440 | u16 busy_timeout_cycles; |
| 1441 | u8 wrn_dly_sel; |
| 1442 | u32 timing0; |
| 1443 | u32 timing1; |
| 1444 | u32 ctrl1n; |
| 1445 | |
| 1446 | if (sdr->tRC_min >= 30000) { |
| 1447 | /* ONFI non-EDO modes [0-3] */ |
| 1448 | clk_rate = 22000000; |
| 1449 | wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS; |
| 1450 | } else if (sdr->tRC_min >= 25000) { |
| 1451 | /* ONFI EDO mode 4 */ |
| 1452 | clk_rate = 80000000; |
| 1453 | wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; |
| 1454 | 		debug("%s: setting ONFI EDO mode 4\n", __func__);
| 1455 | } else { |
| 1456 | /* ONFI EDO mode 5 */ |
| 1457 | clk_rate = 100000000; |
| 1458 | wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; |
| 1459 | 		debug("%s: setting ONFI EDO mode 5\n", __func__);
| 1460 | } |
| 1461 | |
| 1462 | /* SDR core timings are given in picoseconds */ |
| 1463 | period_ps = div_u64((u64)NSEC_PER_SEC * 1000, clk_rate); |
| 1464 | |
| 1465 | addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); |
| 1466 | data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); |
| 1467 | data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); |
| 1468 | busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); |
| 1469 | |
| 1470 | timing0 = (addr_setup_cycles << GPMI_TIMING0_ADDRESS_SETUP_OFFSET) | |
| 1471 | (data_hold_cycles << GPMI_TIMING0_DATA_HOLD_OFFSET) | |
| 1472 | (data_setup_cycles << GPMI_TIMING0_DATA_SETUP_OFFSET); |
| 1473 | timing1 = (busy_timeout_cycles * 4096) << GPMI_TIMING1_DEVICE_BUSY_TIMEOUT_OFFSET; |
| 1474 | |
| 1475 | /* |
| 1476 | * Derive NFC ideal delay from {3}: |
| 1477 | * |
| 1478 | * (tREA + 4000 - tRP) * 8 |
| 1479 | * RDN_DELAY = ----------------------- |
| 1480 | * RP |
| 1481 | */ |
| 1482 | if (period_ps > dll_threshold_ps) { |
| 1483 | use_half_period = true; |
| 1484 | reference_period_ps = period_ps / 2; |
| 1485 | } else { |
| 1486 | use_half_period = false; |
| 1487 | reference_period_ps = period_ps; |
| 1488 | } |
| 1489 | |
| 1490 | tRP_ps = data_setup_cycles * period_ps; |
| 1491 | sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8; |
| 1492 | if (sample_delay_ps > 0) |
| 1493 | sample_delay_factor = sample_delay_ps / reference_period_ps; |
| 1494 | else |
| 1495 | sample_delay_factor = 0; |
| 1496 | |
| 1497 | ctrl1n = (wrn_dly_sel << GPMI_CTRL1_WRN_DLY_SEL_OFFSET); |
| 1498 | if (sample_delay_factor) |
| 1499 | ctrl1n |= (sample_delay_factor << GPMI_CTRL1_RDN_DELAY_OFFSET) | |
| 1500 | GPMI_CTRL1_DLL_ENABLE | |
| 1501 | (use_half_period ? GPMI_CTRL1_HALF_PERIOD : 0); |
| 1502 | |
| 1503 | writel(timing0, &nand_info->gpmi_regs->hw_gpmi_timing0); |
| 1504 | writel(timing1, &nand_info->gpmi_regs->hw_gpmi_timing1); |
| 1505 | |
| 1506 | /* |
| 1507 | 	 * Clear several CTRL1 fields; the DLL must be disabled when setting
| 1508 | 	 * RDN_DELAY or HALF_PERIOD.
| 1509 | */ |
| 1510 | writel(GPMI_CTRL1_CLEAR_MASK, &nand_info->gpmi_regs->hw_gpmi_ctrl1_clr); |
| 1511 | writel(ctrl1n, &nand_info->gpmi_regs->hw_gpmi_ctrl1_set); |
| 1512 | |
| 1513 | clk_set_rate(nand_info->gpmi_clk, clk_rate); |
| 1514 | |
| 1515 | /* Wait 64 clock cycles before using the GPMI after enabling the DLL */ |
| 1516 | dll_wait_time_us = USEC_PER_SEC / clk_rate * 64; |
| 1517 | if (!dll_wait_time_us) |
| 1518 | dll_wait_time_us = 1; |
| 1519 | |
| 1520 | /* Wait for the DLL to settle. */ |
| 1521 | udelay(dll_wait_time_us); |
| 1522 | } |
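/*
 * A worked instance of equation {3} above, with illustrative numbers:
 * for ONFI EDO mode 5 the code above picks a 100MHz GPMI clock, so
 * GPMI-clock-period = 10000ps. Assuming tREA_max = 16000ps, one
 * DATA_SETUP cycle and a DLL threshold above 10000ps (so HALF_PERIOD
 * stays clear and RP = 10000ps):
 *
 *	tRP       = 1 * 10000ps = 10000ps
 *	RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
 *
 * which is the value that would be programmed into
 * HW_GPMI_CTRL1:RDN_DELAY together with DLL_ENABLE.
 */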
| 1523 | |
| 1524 | static int mxs_nand_setup_interface(struct mtd_info *mtd, int chipnr, |
| 1525 | const struct nand_data_interface *conf) |
| 1526 | { |
| 1527 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1528 | const struct nand_sdr_timings *sdr; |
| 1529 | |
| 1530 | sdr = nand_get_sdr_timings(conf); |
| 1531 | if (IS_ERR(sdr)) |
| 1532 | return PTR_ERR(sdr); |
| 1533 | |
| 1534 | /* Stop here if this call was just a check */ |
| 1535 | if (chipnr < 0) |
| 1536 | return 0; |
| 1537 | |
| 1538 | /* Do the actual derivation of the controller timings */ |
| 1539 | mxs_compute_timings(chip, sdr); |
| 1540 | |
| 1541 | return 0; |
| 1542 | } |
| 1543 | |
Stefan Agner | 7152f34 | 2018-06-22 17:19:46 +0200 | [diff] [blame] | 1544 | int mxs_nand_init_spl(struct nand_chip *nand) |
| 1545 | { |
| 1546 | struct mxs_nand_info *nand_info; |
| 1547 | int err; |
| 1548 | |
| 1549 | nand_info = malloc(sizeof(struct mxs_nand_info)); |
| 1550 | if (!nand_info) { |
| 1551 | printf("MXS NAND: Failed to allocate private data\n"); |
| 1552 | return -ENOMEM; |
| 1553 | } |
| 1554 | memset(nand_info, 0, sizeof(struct mxs_nand_info)); |
| 1555 | |
Stefan Agner | dc8af6d | 2018-06-22 18:06:12 +0200 | [diff] [blame] | 1556 | nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; |
| 1557 | nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; |
Adam Ford | 1021073 | 2019-01-02 20:36:52 -0600 | [diff] [blame] | 1558 | |
Peng Fan | 128abf4 | 2020-05-04 22:09:00 +0800 | [diff] [blame] | 1559 | if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m()) |
Adam Ford | 1021073 | 2019-01-02 20:36:52 -0600 | [diff] [blame] | 1560 | nand_info->max_ecc_strength_supported = 62; |
| 1561 | else |
| 1562 | nand_info->max_ecc_strength_supported = 40; |
| 1563 | |
Ye Li | 61771d2 | 2022-03-31 13:27:47 +0800 | [diff] [blame] | 1564 | if (IS_ENABLED(CONFIG_NAND_MXS_USE_MINIMUM_ECC)) |
| 1565 | nand_info->use_minimum_ecc = true; |
| 1566 | |
Stefan Agner | 7152f34 | 2018-06-22 17:19:46 +0200 | [diff] [blame] | 1567 | err = mxs_nand_alloc_buffers(nand_info); |
| 1568 | if (err) |
| 1569 | return err; |
| 1570 | |
Stefan Agner | 00e6516 | 2018-06-22 18:06:13 +0200 | [diff] [blame] | 1571 | err = mxs_nand_init_dma(nand_info); |
Stefan Agner | 7152f34 | 2018-06-22 17:19:46 +0200 | [diff] [blame] | 1572 | if (err) |
| 1573 | return err; |
| 1574 | |
| 1575 | nand_set_controller_data(nand, nand_info); |
| 1576 | |
| 1577 | nand->options |= NAND_NO_SUBPAGE_WRITE; |
| 1578 | |
| 1579 | nand->cmd_ctrl = mxs_nand_cmd_ctrl; |
| 1580 | nand->dev_ready = mxs_nand_device_ready; |
| 1581 | nand->select_chip = mxs_nand_select_chip; |
Stefan Agner | 7152f34 | 2018-06-22 17:19:46 +0200 | [diff] [blame] | 1582 | |
| 1583 | nand->read_byte = mxs_nand_read_byte; |
| 1584 | nand->read_buf = mxs_nand_read_buf; |
| 1585 | |
| 1586 | nand->ecc.read_page = mxs_nand_ecc_read_page; |
| 1587 | |
| 1588 | nand->ecc.mode = NAND_ECC_HW; |
Stefan Agner | 7152f34 | 2018-06-22 17:19:46 +0200 | [diff] [blame] | 1589 | |
| 1590 | return 0; |
| 1591 | } |
| 1592 | |
Stefan Agner | 19f9051 | 2018-06-22 18:06:16 +0200 | [diff] [blame] | 1593 | int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info) |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1594 | { |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1595 | struct mtd_info *mtd; |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1596 | struct nand_chip *nand; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1597 | int err; |
| 1598 | |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1599 | nand = &nand_info->chip; |
| 1600 | mtd = nand_to_mtd(nand); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1601 | err = mxs_nand_alloc_buffers(nand_info); |
| 1602 | if (err) |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1603 | return err; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1604 | |
Stefan Agner | 00e6516 | 2018-06-22 18:06:13 +0200 | [diff] [blame] | 1605 | err = mxs_nand_init_dma(nand_info); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1606 | if (err) |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1607 | goto err_free_buffers; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1608 | |
| 1609 | memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout)); |
| 1610 | |
Stefan Agner | 95f376f | 2018-06-22 17:19:48 +0200 | [diff] [blame] | 1611 | #ifdef CONFIG_SYS_NAND_USE_FLASH_BBT |
| 1612 | nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; |
| 1613 | #endif |
| 1614 | |
Scott Wood | 17fed14 | 2016-05-30 13:57:56 -0500 | [diff] [blame] | 1615 | nand_set_controller_data(nand, nand_info); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1616 | nand->options |= NAND_NO_SUBPAGE_WRITE; |
| 1617 | |
Stefan Agner | 150ddbc | 2018-06-22 18:06:17 +0200 | [diff] [blame] | 1618 | if (nand_info->dev) |
Patrice Chotard | 33d2cf9 | 2021-09-13 16:25:53 +0200 | [diff] [blame] | 1619 | nand->flash_node = dev_ofnode(nand_info->dev); |
Stefan Agner | 150ddbc | 2018-06-22 18:06:17 +0200 | [diff] [blame] | 1620 | |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1621 | nand->cmd_ctrl = mxs_nand_cmd_ctrl; |
| 1622 | |
| 1623 | nand->dev_ready = mxs_nand_device_ready; |
| 1624 | nand->select_chip = mxs_nand_select_chip; |
| 1625 | nand->block_bad = mxs_nand_block_bad; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1626 | |
| 1627 | nand->read_byte = mxs_nand_read_byte; |
| 1628 | |
| 1629 | nand->read_buf = mxs_nand_read_buf; |
| 1630 | nand->write_buf = mxs_nand_write_buf; |
| 1631 | |
Michael Trimarchi | fd6e13e | 2022-08-30 16:48:47 +0200 | [diff] [blame] | 1632 | if (nand_info->gpmi_clk) |
| 1633 | nand->setup_data_interface = mxs_nand_setup_interface; |
| 1634 | |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1635 | /* first scan to find the device and get the page size */ |
| 1636 | if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL)) |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1637 | goto err_free_buffers; |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1638 | |
| 1639 | if (mxs_nand_setup_ecc(mtd)) |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1640 | goto err_free_buffers; |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1641 | |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1642 | nand->ecc.read_page = mxs_nand_ecc_read_page; |
| 1643 | nand->ecc.write_page = mxs_nand_ecc_write_page; |
| 1644 | nand->ecc.read_oob = mxs_nand_ecc_read_oob; |
| 1645 | nand->ecc.write_oob = mxs_nand_ecc_write_oob; |
| 1646 | |
| 1647 | nand->ecc.layout = &fake_ecc_layout; |
| 1648 | nand->ecc.mode = NAND_ECC_HW; |
Ye Li | 9454744 | 2020-05-04 22:08:50 +0800 | [diff] [blame] | 1649 | nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size; |
Stefan Agner | 72d627d | 2018-06-22 17:19:50 +0200 | [diff] [blame] | 1650 | nand->ecc.strength = nand_info->bch_geometry.ecc_strength; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1651 | |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1652 | /* second phase scan */ |
| 1653 | err = nand_scan_tail(mtd); |
| 1654 | if (err) |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1655 | goto err_free_buffers; |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1656 | |
Michael Trimarchi | dc3da88 | 2022-05-15 11:35:30 +0200 | [diff] [blame] | 1657 | /* Hook some operations at the MTD level. */ |
| 1658 | if (mtd->_read_oob != mxs_nand_hook_read_oob) { |
| 1659 | nand_info->hooked_read_oob = mtd->_read_oob; |
| 1660 | mtd->_read_oob = mxs_nand_hook_read_oob; |
| 1661 | } |
| 1662 | |
| 1663 | if (mtd->_write_oob != mxs_nand_hook_write_oob) { |
| 1664 | nand_info->hooked_write_oob = mtd->_write_oob; |
| 1665 | mtd->_write_oob = mxs_nand_hook_write_oob; |
| 1666 | } |
| 1667 | |
| 1668 | if (mtd->_block_markbad != mxs_nand_hook_block_markbad) { |
| 1669 | nand_info->hooked_block_markbad = mtd->_block_markbad; |
| 1670 | mtd->_block_markbad = mxs_nand_hook_block_markbad; |
| 1671 | } |
| 1672 | |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1673 | err = nand_register(0, mtd); |
| 1674 | if (err) |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1675 | goto err_free_buffers; |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1676 | |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1677 | return 0; |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1678 | |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1679 | err_free_buffers: |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1680 | free(nand_info->data_buf); |
| 1681 | free(nand_info->cmd_buf); |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1682 | |
| 1683 | return err; |
| 1684 | } |
| 1685 | |
Stefan Agner | 150ddbc | 2018-06-22 18:06:17 +0200 | [diff] [blame] | 1686 | #ifndef CONFIG_NAND_MXS_DT |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1687 | void board_nand_init(void) |
| 1688 | { |
| 1689 | struct mxs_nand_info *nand_info; |
| 1690 | |
| 1691 | nand_info = malloc(sizeof(struct mxs_nand_info)); |
| 1692 | if (!nand_info) { |
| 1693 | printf("MXS NAND: Failed to allocate private data\n"); |
| 1694 | return; |
| 1695 | } |
| 1696 | memset(nand_info, 0, sizeof(struct mxs_nand_info)); |
| 1697 | |
| 1698 | nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; |
| 1699 | nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; |
| 1700 | |
Stefan Agner | 4dc98db | 2018-06-22 18:06:15 +0200 | [diff] [blame] | 1701 | /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */ |
| 1702 | if (is_mx6sx() || is_mx7()) |
| 1703 | nand_info->max_ecc_strength_supported = 62; |
| 1704 | else |
| 1705 | nand_info->max_ecc_strength_supported = 40; |
| 1706 | |
| 1707 | #ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC |
| 1708 | nand_info->use_minimum_ecc = true; |
| 1709 | #endif |
| 1710 | |
Stefan Agner | 19f9051 | 2018-06-22 18:06:16 +0200 | [diff] [blame] | 1711 | if (mxs_nand_init_ctrl(nand_info) < 0) |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1712 | goto err; |
| 1713 | |
Stefan Agner | 5883e55 | 2018-06-22 17:19:47 +0200 | [diff] [blame] | 1714 | return; |
Stefan Agner | 404b110 | 2018-06-22 18:06:14 +0200 | [diff] [blame] | 1715 | |
| 1716 | err: |
| 1717 | free(nand_info); |
Marek Vasut | 913a725 | 2011-11-08 23:18:16 +0000 | [diff] [blame] | 1718 | } |
Stefan Agner | 150ddbc | 2018-06-22 18:06:17 +0200 | [diff] [blame] | 1719 | #endif |
Igor Opaniuk | c5540137 | 2019-11-03 16:49:43 +0100 | [diff] [blame] | 1720 | |
| 1721 | /* |
| 1722 | * Read NAND layout for FCB block generation. |
| 1723 | */ |
| 1724 | void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l) |
| 1725 | { |
| 1726 | struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; |
| 1727 | u32 tmp; |
| 1728 | |
| 1729 | tmp = readl(&bch_regs->hw_bch_flash0layout0); |
| 1730 | l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >> |
| 1731 | BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; |
| 1732 | l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >> |
| 1733 | BCH_FLASHLAYOUT0_META_SIZE_OFFSET; |
| 1734 | |
| 1735 | tmp = readl(&bch_regs->hw_bch_flash0layout1); |
| 1736 | l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >> |
| 1737 | BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET); |
| 1738 | l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >> |
| 1739 | BCH_FLASHLAYOUT0_ECC0_OFFSET; |
| 1740 | l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >> |
| 1741 | BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET); |
| 1742 | l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >> |
| 1743 | BCH_FLASHLAYOUT1_ECCN_OFFSET; |
Han Xu | 33543b5 | 2020-05-04 22:08:58 +0800 | [diff] [blame] | 1744 | l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >> |
| 1745 | BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; |
Igor Opaniuk | c5540137 | 2019-11-03 16:49:43 +0100 | [diff] [blame] | 1746 | } |
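/*
 * Minimal usage sketch (hypothetical caller, e.g. FCB-generation
 * code): read back the live BCH geometry and report it.
 *
 *	struct mxs_nand_layout l;
 *
 *	mxs_nand_get_layout(mtd, &l);
 *	printf("FCB layout: %u+1 chunks, meta %u, data0 %u (ecc %u), "
 *	       "datan %u (ecc %u), gf %u\n",
 *	       l.nblocks, l.meta_size, l.data0_size, l.ecc0,
 *	       l.datan_size, l.eccn, l.gf_len);
 */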
| 1747 | |
| 1748 | /* |
| 1749 | * Set BCH to specific layout used by ROM bootloader to read FCB. |
| 1750 | */ |
Han Xu | afed2a1 | 2020-05-06 20:59:19 +0800 | [diff] [blame] | 1751 | void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd) |
Igor Opaniuk | c5540137 | 2019-11-03 16:49:43 +0100 | [diff] [blame] | 1752 | { |
| 1753 | u32 tmp; |
| 1754 | struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; |
| 1755 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1756 | struct mxs_nand_info *nand_info = nand_get_controller_data(nand); |
| 1757 | |
| 1758 | nand_info->en_randomizer = 1; |
| 1759 | |
| 1760 | mtd->writesize = 1024; |
| 1761 | mtd->oobsize = 1862 - 1024; |
| 1762 | |
| 1763 | 	/* 8 ecc chunks */
| 1764 | tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; |
| 1765 | /* 32 bytes for metadata */ |
| 1766 | tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; |
| 1767 | 	/* use 62-bit ECC */
| 1768 | tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET; |
| 1769 | /* 0x20 * 4 bytes of the data0 block */ |
| 1770 | tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET; |
| 1771 | tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; |
| 1772 | writel(tmp, &bch_regs->hw_bch_flash0layout0); |
| 1773 | |
| 1774 | /* 1024 for data + 838 for OOB */ |
| 1775 | tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; |
| 1776 | 	/* use 62-bit ECC */
| 1777 | tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET; |
| 1778 | 	/* 0x20 * 4 bytes of the datan blocks */
| 1779 | tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET; |
| 1780 | tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; |
| 1781 | writel(tmp, &bch_regs->hw_bch_flash0layout1); |
| 1782 | } |
| 1783 | |
| 1784 | /* |
Han Xu | afed2a1 | 2020-05-06 20:59:19 +0800 | [diff] [blame] | 1785 | * Set BCH to specific layout used by ROM bootloader to read FCB. |
| 1786 | */ |
| 1787 | void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd) |
| 1788 | { |
| 1789 | u32 tmp; |
| 1790 | struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; |
| 1791 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1792 | struct mxs_nand_info *nand_info = nand_get_controller_data(nand); |
| 1793 | |
| 1794 | 	/* no randomizer in this setting */
| 1795 | nand_info->en_randomizer = 0; |
| 1796 | |
| 1797 | mtd->writesize = 1024; |
| 1798 | mtd->oobsize = 1576 - 1024; |
| 1799 | |
| 1800 | 	/* 8 ecc chunks */
| 1801 | tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; |
| 1802 | /* 32 bytes for metadata */ |
| 1803 | tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; |
| 1804 | 	/* use 40-bit ECC */
| 1805 | tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET; |
| 1806 | /* 0x20 * 4 bytes of the data0 block */ |
| 1807 | tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET; |
| 1808 | tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; |
| 1809 | writel(tmp, &bch_regs->hw_bch_flash0layout0); |
| 1810 | |
| 1811 | /* 1024 for data + 552 for OOB */ |
| 1812 | tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; |
| 1813 | 	/* use 40-bit ECC */
| 1814 | tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET; |
| 1815 | 	/* 0x20 * 4 bytes of the datan blocks */
| 1816 | tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET; |
| 1817 | tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; |
| 1818 | writel(tmp, &bch_regs->hw_bch_flash0layout1); |
| 1819 | } |
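/*
 * Where the 1862 and 1576 page totals above come from (illustrative
 * arithmetic, assuming each chunk carries strength * gf_len parity
 * bits): with 32 metadata bytes and eight 128-byte chunks,
 *
 *	62-bit ECC: 32 + 8 * (128 + 62 * 13 / 8) = 1862 bytes
 *	40-bit ECC: 32 + 8 * (128 + 40 * 13 / 8) = 1576 bytes
 *
 * i.e. 1024 data bytes plus 838 or 552 bytes of metadata and parity,
 * matching the writesize/oobsize values set in the two functions.
 */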
| 1820 | |
| 1821 | /* |
Igor Opaniuk | c5540137 | 2019-11-03 16:49:43 +0100 | [diff] [blame] | 1822 | * Restore BCH to normal settings. |
| 1823 | */ |
| 1824 | void mxs_nand_mode_normal(struct mtd_info *mtd) |
| 1825 | { |
| 1826 | struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; |
| 1827 | struct nand_chip *nand = mtd_to_nand(mtd); |
| 1828 | struct mxs_nand_info *nand_info = nand_get_controller_data(nand); |
| 1829 | |
| 1830 | nand_info->en_randomizer = 0; |
| 1831 | |
| 1832 | mtd->writesize = nand_info->writesize; |
| 1833 | mtd->oobsize = nand_info->oobsize; |
| 1834 | |
| 1835 | writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0); |
| 1836 | writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1); |
| 1837 | } |
| 1838 | |
| 1839 | uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd) |
| 1840 | { |
| 1841 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1842 | struct mxs_nand_info *nand_info = nand_get_controller_data(chip); |
| 1843 | struct bch_geometry *geo = &nand_info->bch_geometry; |
| 1844 | |
| 1845 | return geo->block_mark_byte_offset; |
| 1846 | } |
| 1847 | |
| 1848 | uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd) |
| 1849 | { |
| 1850 | struct nand_chip *chip = mtd_to_nand(mtd); |
| 1851 | struct mxs_nand_info *nand_info = nand_get_controller_data(chip); |
| 1852 | struct bch_geometry *geo = &nand_info->bch_geometry; |
| 1853 | |
| 1854 | return geo->block_mark_bit_offset; |
| 1855 | } |
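/*
 * Usage sketch for the two accessors above (illustrative): they locate
 * the factory bad block marker inside the raw, ECC-interleaved page,
 * so external code (e.g. FCB generation) can account for it:
 *
 *	u32 byte = mxs_nand_mark_byte_offset(mtd);
 *	u32 bit  = mxs_nand_mark_bit_offset(mtd);
 *	u32 mark_bit_pos = byte * 8 + bit;
 *
 * mark_bit_pos is the absolute bit position of the marker from the
 * start of the page.
 */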