/*
 * Copyright (c) 2022-2023, Intel Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

6
7#include <assert.h>
8#include <errno.h>
9#include <stdbool.h>
10#include <string.h>
11
12#include <arch_helpers.h>
13#include <common/debug.h>
14#include <drivers/cadence/cdns_nand.h>
15#include <drivers/delay_timer.h>
16#include <lib/mmio.h>
17#include <lib/utils.h>
18#include <platform_def.h>
19
/* Cached NAND flash device geometry, populated by cdns_nand_update_dev_info() */
static cnf_dev_info_t dev_info;

/* Scratch buffer for staging page-sized DMA transfers (read/write paths) */
static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];
25
26/* Wait for controller to be in idle state */
27static inline void cdns_nand_wait_idle(void)
28{
29 uint32_t reg = 0U;
30
31 do {
32 udelay(CNF_DEF_DELAY_US);
33 reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
34 } while (CNF_GET_CTRL_BUSY(reg) != 0U);
35}
36
37/* Wait for given thread to be in ready state */
38static inline void cdns_nand_wait_thread_ready(uint8_t thread_id)
39{
40 uint32_t reg = 0U;
41
42 do {
43 udelay(CNF_DEF_DELAY_US);
44 reg = mmio_read_32(CNF_CMDREG(TRD_STATUS));
45 reg &= (1U << (uint32_t)thread_id);
46 } while (reg != 0U);
47}
48
49/* Check if the last operation/command in selected thread is completed */
50static int cdns_nand_last_opr_status(uint8_t thread_id)
51{
52 uint8_t nthreads = 0U;
53 uint32_t reg = 0U;
54
55 /* Get number of threads */
56 reg = mmio_read_32(CNF_CTRLPARAM(FEATURE));
57 nthreads = CNF_GET_NTHREADS(reg);
58
59 if (thread_id > nthreads) {
60 ERROR("%s: Invalid thread ID\n", __func__);
61 return -EINVAL;
62 }
63
64 /* Select thread */
65 mmio_write_32(CNF_CMDREG(CMD_STAT_PTR), (uint32_t)thread_id);
66
67 uint32_t err_mask = CNF_ECMD | CNF_EECC | CNF_EDEV | CNF_EDQS | CNF_EFAIL |
68 CNF_EBUS | CNF_EDI | CNF_EPAR | CNF_ECTX | CNF_EPRO;
69
70 do {
71 udelay(CNF_DEF_DELAY_US * 2);
72 reg = mmio_read_32(CNF_CMDREG(CMD_STAT));
73 } while ((reg & CNF_CMPLT) == 0U);
74
75 /* last operation is completed, make sure no other error bits are set */
76 if ((reg & err_mask) == 1U) {
77 ERROR("%s, CMD_STATUS:0x%x\n", __func__, reg);
78 return -EIO;
79 }
80
81 return 0;
82}
83
84/* Set feature command */
85int cdns_nand_set_feature(uint8_t feat_addr, uint8_t feat_val, uint8_t thread_id)
86{
87 /* Wait for thread to be ready */
88 cdns_nand_wait_thread_ready(thread_id);
89
90 /* Set feature address */
91 mmio_write_32(CNF_CMDREG(CMD_REG1), (uint32_t)feat_addr);
92 /* Set feature volume */
93 mmio_write_32(CNF_CMDREG(CMD_REG2), (uint32_t)feat_val);
94
95 /* Set feature command */
96 uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
97
98 reg |= (thread_id << CNF_CMDREG0_TRD);
99 reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
100 reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
101 reg |= (CNF_CT_SET_FEATURE << CNF_CMDREG0_CMD);
102 mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
103
104 return cdns_nand_last_opr_status(thread_id);
105}
106
107/* Reset command to the selected device */
108int cdns_nand_reset(uint8_t thread_id)
109{
110 /* Operation is executed in selected thread */
111 cdns_nand_wait_thread_ready(thread_id);
112
113 /* Select memory */
114 mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));
115
116 /* Issue reset command */
117 uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
118
119 reg |= (thread_id << CNF_CMDREG0_TRD);
120 reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
121 reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
122 reg |= (CNF_CT_RESET_ASYNC << CNF_CMDREG0_CMD);
123 mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
124
125 return cdns_nand_last_opr_status(thread_id);
126}
127
/*
 * Set the operation work mode (SDR or NVDDR) for the controller and PHY.
 * The DLL PHY is held in reset while the timing registers are reprogrammed
 * and released again at the end.
 */
static void cdns_nand_set_opr_mode(uint8_t opr_mode)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Put the DLL PHY into reset (clear RST_N) before changing timings */
	uint32_t reg = mmio_read_32(CNF_MINICTRL(DLL_PHY_CTRL));

	reg &= ~(1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);

	if (opr_mode == CNF_OPR_WORK_MODE_SDR) {
		/* Combo PHY Control Timing Block register settings */
		mmio_write_32(CP_CTB(CTRL_REG), CP_CTRL_REG_SDR);
		mmio_write_32(CP_CTB(TSEL_REG), CP_TSEL_REG_SDR);

		/* Combo PHY DLL register settings */
		mmio_write_32(CP_DLL(DQ_TIMING_REG), CP_DQ_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(DQS_TIMING_REG), CP_DQS_TIMING_REG_SDR);
		mmio_write_32(CP_DLL(GATE_LPBK_CTRL_REG), CP_GATE_LPBK_CTRL_REG_SDR);
		mmio_write_32(CP_DLL(MASTER_CTRL_REG), CP_DLL_MASTER_CTRL_REG_SDR);

		/* Async mode timing settings (fixed hold/pulse counts;
		 * presumably controller clock cycles - confirm against the
		 * controller timing documentation) */
		mmio_write_32(CNF_MINICTRL(ASYNC_TOGGLE_TIMINGS),
				(2 << CNF_ASYNC_TIMINGS_TRH) |
				(4 << CNF_ASYNC_TIMINGS_TRP) |
				(2 << CNF_ASYNC_TIMINGS_TWH) |
				(4 << CNF_ASYNC_TIMINGS_TWP));

		/* Set extended read and write mode */
		reg |= (1 << CNF_DLL_PHY_EXT_RD_MODE);
		reg |= (1 << CNF_DLL_PHY_EXT_WR_MODE);

		/* Set operation work mode in common settings */
		uint32_t data = mmio_read_32(CNF_MINICTRL(CMN_SETTINGS));

		data |= (CNF_OPR_WORK_MODE_SDR << CNF_CMN_SETTINGS_OPR);
		mmio_write_32(CNF_MINICTRL(CMN_SETTINGS), data);

	} else if (opr_mode == CNF_OPR_WORK_MODE_NVDDR) {
		; /* ToDo: add DDR mode settings also once available on SIMICS */
	} else {
		; /* unknown mode: leave timing registers untouched */
	}

	/* Release the DLL PHY from reset */
	reg |= (1 << CNF_DLL_PHY_RST_N);
	mmio_write_32(CNF_MINICTRL(DLL_PHY_CTRL), reg);
}
177
/*
 * Configure the controller's data transfer path: transfer parameters,
 * ECC off, DMA burst size, FIFO prefetch levels and access type.
 */
static void cdns_nand_transfer_config(void)
{
	/* Wait for controller to be in idle state */
	cdns_nand_wait_idle();

	/* Configure data transfer parameters
	 * (meaning of the value 1 not visible here - check TRANS_CFG0
	 * field layout in the controller register map) */
	mmio_write_32(CNF_CTRLCFG(TRANS_CFG0), 1);

	/* ECC is disabled */
	mmio_write_32(CNF_CTRLCFG(ECC_CFG0), 0);

	/* DMA burst select: maximum burst size, OTE bit set */
	mmio_write_32(CNF_CTRLCFG(DMA_SETTINGS),
			(CNF_DMA_BURST_SIZE_MAX << CNF_DMA_SETTINGS_BURST) |
			(1 << CNF_DMA_SETTINGS_OTE));

	/* Enable pre-fetching for 1K */
	mmio_write_32(CNF_CTRLCFG(FIFO_TLEVEL),
			(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_POS) |
			(CNF_DMA_PREFETCH_SIZE << CNF_FIFO_TLEVEL_DMA_SIZE));

	/* Select access type: multi-plane and cache modes both disabled */
	mmio_write_32(CNF_CTRLCFG(MULTIPLANE_CFG), 0);
	mmio_write_32(CNF_CTRLCFG(CACHE_CFG), 0);
}
204
/*
 * Populate the file-scope dev_info struct from the controller's discovery
 * registers and derive block size and total device size.
 *
 * Returns 0 on success, -ENXIO when the device type is unknown.
 */
static int cdns_nand_update_dev_info(void)
{
	uint32_t reg = 0U;

	/* Read the device type and number of LUNs */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_PARAMS0));
	dev_info.type = CNF_GET_DEV_TYPE(reg);
	if (dev_info.type == CNF_DT_UNKNOWN) {
		ERROR("%s: device type unknown\n", __func__);
		return -ENXIO;
	}
	dev_info.nluns = CNF_GET_NLUNS(reg);

	/* Pages per block */
	reg = mmio_read_32(CNF_CTRLCFG(DEV_LAYOUT));
	dev_info.npages_per_block = CNF_GET_NPAGES_PER_BLOCK(reg);

	/* Sector size and last sector size */
	reg = mmio_read_32(CNF_CTRLCFG(TRANS_CFG1));
	dev_info.sector_size = CNF_GET_SCTR_SIZE(reg);
	dev_info.last_sector_size = CNF_GET_LAST_SCTR_SIZE(reg);

	/* Page size and spare size */
	reg = mmio_read_32(CNF_CTRLPARAM(DEV_AREA));
	dev_info.page_size = CNF_GET_PAGE_SIZE(reg);
	dev_info.spare_size = CNF_GET_SPARE_SIZE(reg);

	/* Device blocks per LUN */
	dev_info.nblocks_per_lun = mmio_read_32(CNF_CTRLPARAM(DEV_BLOCKS_PLUN));

	/* Calculate block size and total device size
	 * (total = block size * blocks per LUN * number of LUNs) */
	dev_info.block_size = (dev_info.npages_per_block * dev_info.page_size);
	dev_info.total_size = (dev_info.block_size * dev_info.nblocks_per_lun *
				dev_info.nluns);

	VERBOSE("CNF params: page %d, spare %d, block %d, total %lld\n",
		dev_info.page_size, dev_info.spare_size,
		dev_info.block_size, dev_info.total_size);

	return 0;
}
247
248/* NAND Flash Controller/Host initialization */
249int cdns_nand_host_init(void)
250{
251 uint32_t reg = 0U;
252 int ret = 0;
253
254 do {
255 /* Read controller status register for init complete */
256 reg = mmio_read_32(CNF_CMDREG(CTRL_STATUS));
257 } while (CNF_GET_INIT_COMP(reg) == 0);
258
259 ret = cdns_nand_update_dev_info();
260 if (ret != 0) {
261 return ret;
262 }
263
264 INFO("CNF: device discovery process completed and device type %d\n",
265 dev_info.type);
266
267 /* Enable data integrity, enable CRC and parity */
268 reg = mmio_read_32(CNF_DI(CONTROL));
269 reg |= (1 << CNF_DI_PAR_EN);
270 reg |= (1 << CNF_DI_CRC_EN);
271 mmio_write_32(CNF_DI(CONTROL), reg);
272
273 /* Status polling mode, device control and status register */
274 cdns_nand_wait_idle();
275 reg = mmio_read_32(CNF_CTRLCFG(DEV_STAT));
276 reg = reg & ~1;
277 mmio_write_32(CNF_CTRLCFG(DEV_STAT), reg);
278
279 /* Set operation work mode */
280 cdns_nand_set_opr_mode(CNF_OPR_WORK_MODE_SDR);
281
282 /* Set data transfer configuration parameters */
283 cdns_nand_transfer_config();
284
285 return 0;
286}
287
/*
 * erase: Block erase command.
 *
 * 'offset' appears to be a block index (it is scaled by pages-per-block
 * to form the row address - confirm with callers) and 'size' the number
 * of blocks to erase. Returns the completion status of the operation.
 */
int cdns_nand_erase(uint32_t offset, uint32_t size)
{
	/* Determine the starting block offset i.e row address */
	uint32_t row_address = dev_info.npages_per_block * offset;

	/* Wait for thread to be in ready state */
	cdns_nand_wait_thread_ready(CNF_DEF_TRD);

	/* Set row address */
	mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);

	/* Operation bank number */
	mmio_write_32(CNF_CMDREG(CMD_REG4), (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));

	/* Block erase command */
	uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);

	reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
	reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
	reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
	reg |= (CNF_CT_ERASE << CNF_CMDREG0_CMD);
	/* NOTE(review): the erase count below is shifted into the SAME
	 * field as the command opcode above (CNF_CMDREG0_CMD); for
	 * size > 1 this would corrupt the opcode. Presumably a distinct
	 * count-field shift was intended - verify against the controller
	 * register map. */
	reg |= (((size-1) & 0xFF) << CNF_CMDREG0_CMD);
	mmio_write_32(CNF_CMDREG(CMD_REG0), reg);

	/* Wait for erase operation to complete */
	return cdns_nand_last_opr_status(CNF_DEF_TRD);
}
316
317/* io mtd functions */
318int cdns_nand_init_mtd(unsigned long long *size, unsigned int *erase_size)
319{
320 *size = dev_info.total_size;
321 *erase_size = dev_info.block_size;
322
323 return 0;
324}
325
326/* NAND Flash page read */
327static int cdns_nand_read_page(uint32_t block, uint32_t page, uintptr_t buffer)
328{
329 /* Wait for thread to be ready */
330 cdns_nand_wait_thread_ready(CNF_DEF_TRD);
331
332 /* Select device */
333 mmio_write_32(CNF_CMDREG(CMD_REG4),
334 (CNF_DEF_DEVICE << CNF_CMDREG4_MEM));
335
336 /* Set host memory address for DMA transfers */
337 mmio_write_32(CNF_CMDREG(CMD_REG2), (buffer & 0xFFFF));
338 mmio_write_32(CNF_CMDREG(CMD_REG3), ((buffer >> 32) & 0xFFFF));
339
340 /* Set row address */
341 uint32_t row_address = 0U;
342
343 row_address |= ((page & 0x3F) | (block << 6));
344 mmio_write_32(CNF_CMDREG(CMD_REG1), row_address);
345
346 /* Page read command */
347 uint32_t reg = (CNF_WORK_MODE_PIO << CNF_CMDREG0_CT);
348
349 reg |= (CNF_DEF_TRD << CNF_CMDREG0_TRD);
350 reg |= (CNF_DEF_VOL_ID << CNF_CMDREG0_VOL);
351 reg |= (CNF_INT_DIS << CNF_CMDREG0_INTR);
352 reg |= (CNF_DMA_MASTER_SEL << CNF_CMDREG0_DMA);
353 reg |= (CNF_CT_PAGE_READ << CNF_CMDREG0_CMD);
354 reg |= (((CNF_READ_SINGLE_PAGE-1) & 0xFF) << CNF_CMDREG0_CMD);
355 mmio_write_32(CNF_CMDREG(CMD_REG0), reg);
356
357 /* Wait for read operation to complete */
358 if (cdns_nand_last_opr_status(CNF_DEF_TRD)) {
359 ERROR("%s: Page read failed\n", __func__);
360 return -EIO;
361 }
362
363 return 0;
364}
365
366int cdns_nand_read(unsigned int offset, uintptr_t buffer, size_t length,
367 size_t *out_length)
368{
369 uint32_t block = offset / dev_info.block_size;
370 uint32_t end_block = (offset + length - 1U) / dev_info.block_size;
371 uint32_t page_start = (offset % dev_info.block_size) / dev_info.page_size;
372 uint32_t start_offset = offset % dev_info.page_size;
373 uint32_t nb_pages = dev_info.block_size / dev_info.page_size;
374 uint32_t bytes_read = 0U;
375 uint32_t page = 0U;
376 int result = 0;
377
378 VERBOSE("CNF: block %u-%u, page_start %u, len %zu, offset %u\n",
379 block, end_block, page_start, length, offset);
380
381 if ((offset >= dev_info.total_size) ||
382 (offset + length-1 >= dev_info.total_size) ||
383 (length == 0U)) {
384 ERROR("CNF: Invalid read parameters\n");
385 return -EINVAL;
386 }
387
388 *out_length = 0UL;
389
390 while (block <= end_block) {
391 for (page = page_start; page < nb_pages; page++) {
392 if ((start_offset != 0U) || (length < dev_info.page_size)) {
393 /* Partial page read */
394 result = cdns_nand_read_page(block, page,
395 (uintptr_t)scratch_buff);
396 if (result != 0) {
397 return result;
398 }
399
400 bytes_read = MIN((size_t)(dev_info.page_size - start_offset),
401 length);
402
403 memcpy((uint8_t *)buffer, scratch_buff + start_offset,
404 bytes_read);
405 start_offset = 0U;
406 } else {
407 /* Full page read */
408 result = cdns_nand_read_page(block, page,
409 (uintptr_t)scratch_buff);
410 if (result != 0) {
411 return result;
412 }
413
414 bytes_read = dev_info.page_size;
415 memcpy((uint8_t *)buffer, scratch_buff, bytes_read);
416 }
417
418 length -= bytes_read;
419 buffer += bytes_read;
420 *out_length += bytes_read;
421
422 /* All the bytes have read */
423 if (length == 0U) {
424 break;
425 }
426
427 udelay(CNF_READ_INT_DELAY_US);
428 } /* for */
429
430 page_start = 0U;
431 block++;
432 } /* while */
433
434 return 0;
435}