// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Stefan Roese <sr@denx.de>
 *
 * Derived from drivers/mtd/nand/spi/micron.c
 * Copyright (c) 2016-2017 Micron Technology, Inc.
 */

#ifndef __UBOOT__
#include <malloc.h>
#include <linux/device.h>
#include <linux/kernel.h>
#endif
#include <linux/mtd/spinand.h>

#define SPINAND_MFR_GIGADEVICE			0xC8

/* Extended ECC status encodings (status register bits 5:4) for Q4 devices */
#define GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS	(1 << 4)
#define GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS	(3 << 4)

/* Extended ECC status encodings (status register bits 5:4) for Q5 devices */
#define GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS	(1 << 4)
#define GD5FXGQ5XE_STATUS_ECC_4_BITFLIPS	(3 << 4)

/* Status 2 feature register, holds the fine-grained bitflip count (ECCSE) */
#define GD5FXGQXXEXXG_REG_STATUS2		0xf0

/* Q4 devices, QUADIO: Dummy bytes valid for 1 and 2 GBit variants */
static SPINAND_OP_VARIANTS(gd5fxgq4_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
33
/*
 * Q5 devices, QUADIO: Dummy bytes only valid for 1 GBit variants
 * (note the QUADIO op uses 2 dummy bytes here, unlike the Q4 table).
 */
static SPINAND_OP_VARIANTS(gd5f1gq5_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 2, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
		SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
42
/* Program-load (write to cache) op variants, fastest first */
static SPINAND_OP_VARIANTS(write_cache_variants,
		SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
		SPINAND_PROG_LOAD(true, 0, NULL, 0));
46
/* Program-load (update cache, no reset of cache content) op variants */
static SPINAND_OP_VARIANTS(update_cache_variants,
		SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
		SPINAND_PROG_LOAD(false, 0, NULL, 0));
50
Reto Schneiderb97bb612021-02-11 13:05:48 +010051static int gd5fxgqxxexxg_ooblayout_ecc(struct mtd_info *mtd, int section,
Stefan Roese584957a2019-01-24 17:18:19 +010052 struct mtd_oob_region *region)
Stefan Roesecdb295c2018-08-16 18:05:08 +020053{
54 if (section)
55 return -ERANGE;
56
57 region->offset = 64;
58 region->length = 64;
59
60 return 0;
61}
62
Reto Schneiderb97bb612021-02-11 13:05:48 +010063static int gd5fxgqxxexxg_ooblayout_free(struct mtd_info *mtd, int section,
Stefan Roese584957a2019-01-24 17:18:19 +010064 struct mtd_oob_region *region)
Stefan Roesecdb295c2018-08-16 18:05:08 +020065{
66 if (section)
67 return -ERANGE;
68
Stefan Roese584957a2019-01-24 17:18:19 +010069 /* Reserve 1 bytes for the BBM. */
70 region->offset = 1;
71 region->length = 63;
Stefan Roesecdb295c2018-08-16 18:05:08 +020072
73 return 0;
74}
75
/*
 * gd5fxgq4xexxg_ecc_get_status() - decode the ECC status for Q4 devices
 * @spinand: SPI NAND device
 * @status: status register value latched after the page read
 *
 * Return: number of corrected bitflips (possibly an upper-bound estimate),
 * -EBADMSG for an uncorrectable page, -EINVAL for an unknown encoding, or a
 * negative error code if reading the status2 feature register fails.
 */
static int gd5fxgq4xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ4XA_STATUS_ECC_1_7_BITFLIPS:
		/*
		 * Read the status2 register to determine a more fine
		 * grained bit error status.
		 */
		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		/*
		 * 4 ... 7 bits are flipped (1..4 can't be detected, so
		 * report the maximum of 4 in this case).
		 */
		/* bits sorted this way (3...0): ECCS1,ECCS0,ECCSE1,ECCSE0 */
		return ((status & STATUS_ECC_MASK) >> 2) |
			((status2 & STATUS_ECC_MASK) >> 4);

	case GD5FXGQ4XA_STATUS_ECC_8_BITFLIPS:
		/* Maximum correctable error count reached */
		return 8;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	/* Reserved/unexpected status encoding */
	return -EINVAL;
}
117
/*
 * gd5fxgq5xexxg_ecc_get_status() - decode the ECC status for Q5 devices
 * @spinand: SPI NAND device
 * @status: status register value latched after the page read
 *
 * Return: number of corrected bitflips (exact on these parts, read from the
 * ECCSE bits of status2), -EBADMSG for an uncorrectable page, -EINVAL for an
 * unknown encoding, or a negative error code if reading status2 fails.
 */
static int gd5fxgq5xexxg_ecc_get_status(struct spinand_device *spinand,
					u8 status)
{
	u8 status2;
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(GD5FXGQXXEXXG_REG_STATUS2,
						      &status2);
	int ret;

	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;

	case GD5FXGQ5XE_STATUS_ECC_1_4_BITFLIPS:
		/*
		 * Read the status2 register to determine a more fine
		 * grained bit error status.
		 */
		ret = spi_mem_exec_op(spinand->slave, &op);
		if (ret)
			return ret;

		/*
		 * 1 ... 4 bits are flipped (and corrected).
		 */
		/* bits sorted this way (1...0): ECCSE1, ECCSE0 */
		return ((status2 & STATUS_ECC_MASK) >> 4) + 1;

	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;

	default:
		break;
	}

	/* Reserved/unexpected status encoding */
	return -EINVAL;
}
154
/* OOB layout shared by all supported GD5FxGQxxExxG parts */
static const struct mtd_ooblayout_ops gd5fxgqxxexxg_ooblayout = {
	.ecc = gd5fxgqxxexxg_ooblayout_ecc,
	.rfree = gd5fxgqxxexxg_ooblayout_free,
};
159
/* Supported GigaDevice SPI NAND devices, matched by device ID byte */
static const struct spinand_info gigadevice_spinand_table[] = {
	SPINAND_INFO("GD5F1GQ4UExxG", 0xd1,
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(8, 512),
		     SPINAND_INFO_OP_VARIANTS(&gd5fxgq4_read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
				     gd5fxgq4xexxg_ecc_get_status)),
	SPINAND_INFO("GD5F1GQ5UExxG", 0x51,
		     NAND_MEMORG(1, 2048, 128, 64, 1024, 1, 1, 1),
		     NAND_ECCREQ(4, 512),
		     SPINAND_INFO_OP_VARIANTS(&gd5f1gq5_read_cache_variants,
					      &write_cache_variants,
					      &update_cache_variants),
		     0,
		     SPINAND_ECCINFO(&gd5fxgqxxexxg_ooblayout,
				     gd5fxgq5xexxg_ecc_get_status)),
};
180
181static int gigadevice_spinand_detect(struct spinand_device *spinand)
182{
183 u8 *id = spinand->id.data;
184 int ret;
185
186 /*
Stefan Roese584957a2019-01-24 17:18:19 +0100187 * For GD NANDs, There is an address byte needed to shift in before IDs
188 * are read out, so the first byte in raw_id is dummy.
Stefan Roesecdb295c2018-08-16 18:05:08 +0200189 */
190 if (id[1] != SPINAND_MFR_GIGADEVICE)
191 return 0;
192
193 ret = spinand_match_and_init(spinand, gigadevice_spinand_table,
194 ARRAY_SIZE(gigadevice_spinand_table),
195 id[2]);
196 if (ret)
197 return ret;
198
199 return 1;
200}
201
/* Manufacturer operations hooked into the generic SPI NAND core */
static const struct spinand_manufacturer_ops gigadevice_spinand_manuf_ops = {
	.detect = gigadevice_spinand_detect,
};
205
/* Exported manufacturer descriptor, referenced by the SPI NAND core */
const struct spinand_manufacturer gigadevice_spinand_manufacturer = {
	.id = SPINAND_MFR_GIGADEVICE,
	.name = "GigaDevice",
	.ops = &gigadevice_spinand_manuf_ops,
};