// SPDX-License-Identifier: GPL-2.0+
/*
 * Cadence NAND flash controller driver
 *
 * Copyright (C) 2019 Cadence
 *
 * Author: Piotr Sroka <piotrs@cadence.com>
 *
 */

#include <cadence-nand.h>
#include <clk.h>
#include <dm.h>
#include <hang.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <reset.h>
#include <wait_bit.h>
#include <dm/device_compat.h>
#include <dm/devres.h>
#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/printk.h>
#include <linux/sizes.h>

static inline struct
cdns_nand_chip *to_cdns_nand_chip(struct nand_chip *chip)
{
	return container_of(chip, struct cdns_nand_chip, chip);
}

static inline struct
cadence_nand_info *to_cadence_nand_info(struct nand_hw_control *controller)
{
	return container_of(controller, struct cadence_nand_info, controller);
}

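/*
 * Check if a buffer can be used for DMA directly: it must be non-NULL,
 * aligned to the controller's DMA data width, and its length must be
 * aligned to DMA_DATA_SIZE_ALIGN.
 */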
static bool
cadence_nand_dma_buf_ok(struct cadence_nand_info *cadence, const void *buf,
			u32 buf_len)
{
	u8 data_dma_width = cadence->caps2.data_dma_width;

	return buf &&
		likely(IS_ALIGNED((uintptr_t)buf, data_dma_width)) &&
		likely(IS_ALIGNED(buf_len, DMA_DATA_SIZE_ALIGN));
}

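/*
 * Poll a controller register until the bits in @mask are cleared
 * (@is_clear true) or set (@is_clear false), or until @timeout_us expires.
 */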
static int cadence_nand_wait_for_value(struct cadence_nand_info *cadence,
				       u32 reg_offset, u32 timeout_us,
				       u32 mask, bool is_clear)
{
	u32 val;
	int ret;

	ret = readl_poll_sleep_timeout(cadence->reg + reg_offset,
				       val, !(val & mask) == is_clear,
				       10, timeout_us);

	if (ret < 0) {
		dev_err(cadence->dev,
			"Timeout while waiting for reg %x with mask %x, is_clear %d\n",
			reg_offset, mask, is_clear);
	}

	return ret;
}

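/* Enable or disable the hardware ECC engine in ECC_CONFIG_0. */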
static int cadence_nand_set_ecc_enable(struct cadence_nand_info *cadence,
				       bool enable)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					TIMEOUT_US,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cadence->reg + ECC_CONFIG_0);

	if (enable)
		reg |= ECC_CONFIG_0_ECC_EN;
	else
		reg &= ~ECC_CONFIG_0_ECC_EN;

	writel_relaxed(reg, cadence->reg + ECC_CONFIG_0);

	return 0;
}

static void cadence_nand_set_ecc_strength(struct cadence_nand_info *cadence,
					  u8 corr_str_idx)
{
	u32 reg;

	if (cadence->curr_corr_str_idx == corr_str_idx)
		return;

	reg = readl_relaxed(cadence->reg + ECC_CONFIG_0);
	reg &= ~ECC_CONFIG_0_CORR_STR;
	reg |= FIELD_PREP(ECC_CONFIG_0_CORR_STR, corr_str_idx);
	writel_relaxed(reg, cadence->reg + ECC_CONFIG_0);

	cadence->curr_corr_str_idx = corr_str_idx;
}

static int cadence_nand_get_ecc_strength_idx(struct cadence_nand_info *cadence,
					     u8 strength)
{
	int i, corr_str_idx = -1;

	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
		if (cadence->ecc_strengths[i] == strength) {
			corr_str_idx = i;
			break;
		}
	}

	return corr_str_idx;
}

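/*
 * Program the marker value that is written in place of the bad block
 * marker when the skip-bytes feature is enabled.
 */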
static int cadence_nand_set_skip_marker_val(struct cadence_nand_info *cadence,
					    u16 marker_value)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					TIMEOUT_US,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cadence->reg + SKIP_BYTES_CONF);
	reg &= ~SKIP_BYTES_MARKER_VALUE;
	reg |= FIELD_PREP(SKIP_BYTES_MARKER_VALUE,
			  marker_value);

	writel_relaxed(reg, cadence->reg + SKIP_BYTES_CONF);

	return 0;
}

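/*
 * Configure the skip-bytes feature: the controller skips @num_of_bytes
 * bytes at @offset_value during ECC processing. In this driver it is
 * used around the bad block marker area.
 */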
static int cadence_nand_set_skip_bytes_conf(struct cadence_nand_info *cadence,
					    u8 num_of_bytes,
					    u32 offset_value,
					    int enable)
{
	u32 reg, skip_bytes_offset;

	if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					TIMEOUT_US,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	if (!enable) {
		num_of_bytes = 0;
		offset_value = 0;
	}

	reg = readl_relaxed(cadence->reg + SKIP_BYTES_CONF);
	reg &= ~SKIP_BYTES_NUM_OF_BYTES;
	reg |= FIELD_PREP(SKIP_BYTES_NUM_OF_BYTES,
			  num_of_bytes);
	skip_bytes_offset = FIELD_PREP(SKIP_BYTES_OFFSET_VALUE,
				       offset_value);

	writel_relaxed(reg, cadence->reg + SKIP_BYTES_CONF);
	writel_relaxed(skip_bytes_offset, cadence->reg + SKIP_BYTES_OFFSET);

	return 0;
}

/* Enable/disable hardware detection of erased data. */
static void cadence_nand_set_erase_detection(struct cadence_nand_info *cadence,
					     bool enable,
					     u8 bitflips_threshold)
{
	u32 reg;

	reg = readl_relaxed(cadence->reg + ECC_CONFIG_0);

	if (enable)
		reg |= ECC_CONFIG_0_ERASE_DET_EN;
	else
		reg &= ~ECC_CONFIG_0_ERASE_DET_EN;

	writel_relaxed(reg, cadence->reg + ECC_CONFIG_0);
	writel_relaxed(bitflips_threshold, cadence->reg + ECC_CONFIG_1);
}

static int cadence_nand_set_access_width16(struct cadence_nand_info *cadence,
					   bool bit_bus16)
{
	u32 reg;

	if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					TIMEOUT_US,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	reg = readl_relaxed(cadence->reg + COMMON_SET);
	if (!bit_bus16)
		reg &= ~COMMON_SET_DEVICE_16BIT;
	else
		reg |= COMMON_SET_DEVICE_16BIT;
	writel_relaxed(reg, cadence->reg + COMMON_SET);

	return 0;
}

static void
cadence_nand_clear_interrupt(struct cadence_nand_info *cadence,
			     struct cadence_nand_irq_status *irq_status)
{
	writel_relaxed(irq_status->status, cadence->reg + INTR_STATUS);
	writel_relaxed(irq_status->trd_status,
		       cadence->reg + TRD_COMP_INT_STATUS);
	writel_relaxed(irq_status->trd_error,
		       cadence->reg + TRD_ERR_INT_STATUS);
}

static void
cadence_nand_read_int_status(struct cadence_nand_info *cadence,
			     struct cadence_nand_irq_status *irq_status)
{
	irq_status->status = readl_relaxed(cadence->reg + INTR_STATUS);
	irq_status->trd_status = readl_relaxed(cadence->reg
					       + TRD_COMP_INT_STATUS);
	irq_status->trd_error = readl_relaxed(cadence->reg
					      + TRD_ERR_INT_STATUS);
}

static u32 irq_detected(struct cadence_nand_info *cadence,
			struct cadence_nand_irq_status *irq_status)
{
	cadence_nand_read_int_status(cadence, irq_status);

	return irq_status->status || irq_status->trd_status ||
		irq_status->trd_error;
}

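/* Clear the IRQ status and IRQ mask cached in the driver context. */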
static void cadence_nand_reset_irq(struct cadence_nand_info *cadence)
{
	memset(&cadence->irq_status, 0, sizeof(cadence->irq_status));
	memset(&cadence->irq_mask, 0, sizeof(cadence->irq_mask));
}

/*
 * This is the interrupt service routine. It handles all interrupts
 * sent to this device.
 */
static irqreturn_t cadence_nand_isr(struct cadence_nand_info *cadence)
{
	struct cadence_nand_irq_status irq_status;
	irqreturn_t result = IRQ_NONE;

	if (irq_detected(cadence, &irq_status)) {
		/* Handle the interrupt. */
		/* First acknowledge it. */
		cadence_nand_clear_interrupt(cadence, &irq_status);
		/* Store the status in the device context for someone to read. */
		cadence->irq_status.status |= irq_status.status;
		cadence->irq_status.trd_status |= irq_status.trd_status;
		cadence->irq_status.trd_error |= irq_status.trd_error;
		/* Tell the OS that we've handled this. */
		result = IRQ_HANDLED;
	}
	return result;
}

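/* Enable the interrupt sources selected in @irq_mask. */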
static void cadence_nand_set_irq_mask(struct cadence_nand_info *cadence,
				      struct cadence_nand_irq_status *irq_mask)
{
	writel_relaxed(INTR_ENABLE_INTR_EN | irq_mask->status,
		       cadence->reg + INTR_ENABLE);

	writel_relaxed(irq_mask->trd_error,
		       cadence->reg + TRD_ERR_INT_STATUS_EN);
}

static void
cadence_nand_wait_for_irq(struct cadence_nand_info *cadence,
			  struct cadence_nand_irq_status *irq_mask,
			  struct cadence_nand_irq_status *irq_status)
{
	irqreturn_t result = IRQ_NONE;
	u32 start = get_timer(0);

	while (get_timer(start) < TIMEOUT_US) {
		result = cadence_nand_isr(cadence);

		if (result == IRQ_HANDLED) {
			*irq_status = cadence->irq_status;
			break;
		}
		udelay(1);
	}

	if (!result) {
		/* Timeout error. */
		dev_err(cadence->dev, "timeout occurred:\n");
		dev_err(cadence->dev, "\tstatus = 0x%x, mask = 0x%x\n",
			irq_status->status, irq_mask->status);
		dev_err(cadence->dev,
			"\ttrd_status = 0x%x, trd_status mask = 0x%x\n",
			irq_status->trd_status, irq_mask->trd_status);
		dev_err(cadence->dev,
			"\ttrd_error = 0x%x, trd_error mask = 0x%x\n",
			irq_status->trd_error, irq_mask->trd_error);
	}
}

/* Execute a generic command on the NAND controller. */
static int cadence_nand_generic_cmd_send(struct cadence_nand_info *cadence,
					 u8 chip_nr,
					 u64 mini_ctrl_cmd)
{
	u32 mini_ctrl_cmd_l, mini_ctrl_cmd_h, reg;

	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_CS, chip_nr);
	mini_ctrl_cmd_l = mini_ctrl_cmd & 0xFFFFFFFF;
	mini_ctrl_cmd_h = mini_ctrl_cmd >> 32;

	if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					TIMEOUT_US,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	cadence_nand_reset_irq(cadence);

	writel_relaxed(mini_ctrl_cmd_l, cadence->reg + CMD_REG2);
	writel_relaxed(mini_ctrl_cmd_h, cadence->reg + CMD_REG3);

	/* Select generic command. */
	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_GEN);
	/* Thread number. */
	reg |= FIELD_PREP(CMD_REG0_TN, 0);

	/* Issue command. */
	writel_relaxed(reg, cadence->reg + CMD_REG0);

	return 0;
}

/* Wait for data on the slave DMA interface. */
static int cadence_nand_wait_on_sdma(struct cadence_nand_info *cadence, u8 *out_sdma_trd,
				     u32 *out_sdma_size)
{
	struct cadence_nand_irq_status irq_mask, irq_status;

	irq_mask.trd_status = 0;
	irq_mask.trd_error = 0;
	irq_mask.status = INTR_STATUS_SDMA_TRIGG
		| INTR_STATUS_SDMA_ERR
		| INTR_STATUS_UNSUPP_CMD;

	cadence_nand_set_irq_mask(cadence, &irq_mask);
	cadence_nand_wait_for_irq(cadence, &irq_mask, &irq_status);
	if (irq_status.status == 0) {
		dev_err(cadence->dev, "Timeout while waiting for SDMA\n");
		return -ETIMEDOUT;
	}

	if (irq_status.status & INTR_STATUS_SDMA_TRIGG) {
		*out_sdma_size = readl_relaxed(cadence->reg + SDMA_SIZE);
		*out_sdma_trd = readl_relaxed(cadence->reg + SDMA_TRD_NUM);
		*out_sdma_trd =
			FIELD_GET(SDMA_TRD_NUM_SDMA_TRD, *out_sdma_trd);
	} else {
		dev_err(cadence->dev, "SDMA error - irq_status %x\n",
			irq_status.status);
		return -EIO;
	}

	return 0;
}

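/* Read the controller features register and cache the capabilities. */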
static void cadence_nand_get_caps(struct cadence_nand_info *cadence)
{
	u32 reg;

	reg = readl_relaxed(cadence->reg + CTRL_FEATURES);

	cadence->caps2.max_banks = 1 << FIELD_GET(CTRL_FEATURES_N_BANKS, reg);

	if (FIELD_GET(CTRL_FEATURES_DMA_DWITH64, reg))
		cadence->caps2.data_dma_width = 8;
	else
		cadence->caps2.data_dma_width = 4;

	if (reg & CTRL_FEATURES_CONTROL_DATA)
		cadence->caps2.data_control_supp = true;

	if (reg & (CTRL_FEATURES_NVDDR_2_3
		   | CTRL_FEATURES_NVDDR))
		cadence->caps2.is_phy_type_dll = true;
}

/* Prepare CDMA descriptor. */
static void
cadence_nand_cdma_desc_prepare(struct cadence_nand_info *cadence,
			       char nf_mem, u32 flash_ptr, dma_addr_t mem_ptr,
			       dma_addr_t ctrl_data_ptr, u16 ctype)
{
	struct cadence_nand_cdma_desc *cdma_desc = cadence->cdma_desc;

	memset(cdma_desc, 0, sizeof(struct cadence_nand_cdma_desc));

	/* Set fields for one descriptor. */
	cdma_desc->flash_pointer = flash_ptr;
	if (cadence->ctrl_rev >= 13)
		cdma_desc->bank = nf_mem;
	else
		cdma_desc->flash_pointer |= (nf_mem << CDMA_CFPTR_MEM_SHIFT);

	cdma_desc->command_flags |= CDMA_CF_DMA_MASTER;
	cdma_desc->command_flags |= CDMA_CF_INT;

	cdma_desc->memory_pointer = mem_ptr;
	cdma_desc->status = 0;
	cdma_desc->sync_flag_pointer = 0;
	cdma_desc->sync_arguments = 0;

	cdma_desc->command_type = ctype;
	cdma_desc->ctrl_data_ptr = ctrl_data_ptr;
}

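/* Translate CDMA descriptor status flags into driver status codes. */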
static u8 cadence_nand_check_desc_error(struct cadence_nand_info *cadence,
					u32 desc_status)
{
	if (desc_status & CDMA_CS_ERP)
		return STAT_ERASED;

	if (desc_status & CDMA_CS_UNCE)
		return STAT_ECC_UNCORR;

	if (desc_status & CDMA_CS_ERR) {
		dev_err(cadence->dev, "CDMA desc error flag detected.\n");
		return STAT_FAIL;
	}

	if (FIELD_GET(CDMA_CS_MAXERR, desc_status))
		return STAT_ECC_CORR;

	return STAT_FAIL;
}

static int cadence_nand_cdma_finish(struct cadence_nand_info *cadence)
{
	struct cadence_nand_cdma_desc *desc_ptr = cadence->cdma_desc;
	u8 status = STAT_BUSY;

	if (desc_ptr->status & CDMA_CS_FAIL) {
		status = cadence_nand_check_desc_error(cadence,
						       desc_ptr->status);
		dev_err(cadence->dev, "CDMA error %x\n", desc_ptr->status);
	} else if (desc_ptr->status & CDMA_CS_COMP) {
		/* Descriptor finished with no errors. */
		if (desc_ptr->command_flags & CDMA_CF_CONT) {
			dev_info(cadence->dev, "DMA unsupported flag is set");
			status = STAT_UNKNOWN;
		} else {
			/* Last descriptor. */
			status = STAT_OK;
		}
	}

	return status;
}

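/*
 * Write the CDMA descriptor address to the command registers and start
 * the selected thread.
 */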
static int cadence_nand_cdma_send(struct cadence_nand_info *cadence,
				  u8 thread)
{
	u32 reg;
	int status;

	/* Wait for thread ready. */
	status = cadence_nand_wait_for_value(cadence, TRD_STATUS,
					     TIMEOUT_US,
					     BIT(thread), true);
	if (status)
		return status;

	cadence_nand_reset_irq(cadence);

	writel_relaxed((u32)cadence->dma_cdma_desc,
		       cadence->reg + CMD_REG2);
	writel_relaxed(0, cadence->reg + CMD_REG3);

	/* Select CDMA mode. */
	reg = FIELD_PREP(CMD_REG0_CT, CMD_REG0_CT_CDMA);
	/* Thread number. */
	reg |= FIELD_PREP(CMD_REG0_TN, thread);
	/* Issue command. */
	writel_relaxed(reg, cadence->reg + CMD_REG0);

	return 0;
}

/* Send a CDMA command and wait for it to finish. */
static u32
cadence_nand_cdma_send_and_wait(struct cadence_nand_info *cadence,
				u8 thread)
{
	struct cadence_nand_irq_status irq_mask, irq_status = {0};
	int status;

	irq_mask.trd_status = BIT(thread);
	irq_mask.trd_error = BIT(thread);
	irq_mask.status = INTR_STATUS_CDMA_TERR;

	cadence_nand_set_irq_mask(cadence, &irq_mask);

	status = cadence_nand_cdma_send(cadence, thread);
	if (status)
		return status;

	cadence_nand_wait_for_irq(cadence, &irq_mask, &irq_status);

	if (irq_status.status == 0 && irq_status.trd_status == 0 &&
	    irq_status.trd_error == 0) {
		dev_err(cadence->dev, "CDMA command timeout\n");
		return -ETIMEDOUT;
	}
	if (irq_status.status & irq_mask.status) {
		dev_err(cadence->dev, "CDMA command failed\n");
		return -EIO;
	}

	return 0;
}

/*
 * ECC size depends on configured ECC strength and on maximum supported
 * ECC step size.
 */
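/*
 * Worked example (sketch): for max_step_size = 1024 and strength = 8,
 * fls(8 * 1024) = 14, so nbytes = DIV_ROUND_UP(14 * 8, 8) = 14 bytes,
 * which is already aligned to 2.
 */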
static int cadence_nand_calc_ecc_bytes(int max_step_size, int strength)
{
	int nbytes = DIV_ROUND_UP(fls(8 * max_step_size) * strength, 8);

	return ALIGN(nbytes, 2);
}

#define CADENCE_NAND_CALC_ECC_BYTES(max_step_size) \
	static int \
	cadence_nand_calc_ecc_bytes_##max_step_size(int step_size, \
						    int strength)\
	{\
		return cadence_nand_calc_ecc_bytes(max_step_size, strength);\
	}

CADENCE_NAND_CALC_ECC_BYTES(256)
CADENCE_NAND_CALC_ECC_BYTES(512)
CADENCE_NAND_CALC_ECC_BYTES(1024)
CADENCE_NAND_CALC_ECC_BYTES(2048)
CADENCE_NAND_CALC_ECC_BYTES(4096)

/* Read BCH capabilities. */
static int cadence_nand_read_bch_caps(struct cadence_nand_info *cadence)
{
	struct nand_ecc_caps *ecc_caps = &cadence->ecc_caps;
	int max_step_size = 0, nstrengths, i;
	u32 reg;

	reg = readl_relaxed(cadence->reg + BCH_CFG_3);
	cadence->bch_metadata_size = FIELD_GET(BCH_CFG_3_METADATA_SIZE, reg);
	if (cadence->bch_metadata_size < 4) {
		dev_err(cadence->dev,
			"Driver needs at least 4 bytes of BCH metadata\n");
		return -EIO;
	}

	reg = readl_relaxed(cadence->reg + BCH_CFG_0);
	cadence->ecc_strengths[0] = FIELD_GET(BCH_CFG_0_CORR_CAP_0, reg);
	cadence->ecc_strengths[1] = FIELD_GET(BCH_CFG_0_CORR_CAP_1, reg);
	cadence->ecc_strengths[2] = FIELD_GET(BCH_CFG_0_CORR_CAP_2, reg);
	cadence->ecc_strengths[3] = FIELD_GET(BCH_CFG_0_CORR_CAP_3, reg);

	reg = readl_relaxed(cadence->reg + BCH_CFG_1);
	cadence->ecc_strengths[4] = FIELD_GET(BCH_CFG_1_CORR_CAP_4, reg);
	cadence->ecc_strengths[5] = FIELD_GET(BCH_CFG_1_CORR_CAP_5, reg);
	cadence->ecc_strengths[6] = FIELD_GET(BCH_CFG_1_CORR_CAP_6, reg);
	cadence->ecc_strengths[7] = FIELD_GET(BCH_CFG_1_CORR_CAP_7, reg);

	reg = readl_relaxed(cadence->reg + BCH_CFG_2);
	cadence->ecc_stepinfos[0].stepsize =
		FIELD_GET(BCH_CFG_2_SECT_0, reg);

	cadence->ecc_stepinfos[1].stepsize =
		FIELD_GET(BCH_CFG_2_SECT_1, reg);

	nstrengths = 0;
	for (i = 0; i < BCH_MAX_NUM_CORR_CAPS; i++) {
		if (cadence->ecc_strengths[i] != 0)
			nstrengths++;
	}

	ecc_caps->nstepinfos = 0;
	for (i = 0; i < BCH_MAX_NUM_SECTOR_SIZES; i++) {
		/* ECC strengths are common for all step infos. */
		cadence->ecc_stepinfos[i].nstrengths = nstrengths;
		cadence->ecc_stepinfos[i].strengths =
			cadence->ecc_strengths;

		if (cadence->ecc_stepinfos[i].stepsize != 0)
			ecc_caps->nstepinfos++;

		if (cadence->ecc_stepinfos[i].stepsize > max_step_size)
			max_step_size = cadence->ecc_stepinfos[i].stepsize;
	}
	ecc_caps->stepinfos = &cadence->ecc_stepinfos[0];

	switch (max_step_size) {
	case 256:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_256;
		break;
	case 512:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_512;
		break;
	case 1024:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_1024;
		break;
	case 2048:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_2048;
		break;
	case 4096:
		ecc_caps->calc_ecc_bytes = &cadence_nand_calc_ecc_bytes_4096;
		break;
	default:
		dev_err(cadence->dev,
			"Unsupported sector size (ECC step size) %d\n",
			max_step_size);
		return -EIO;
	}

	return 0;
}

/* Hardware initialization. */
static int cadence_nand_hw_init(struct cadence_nand_info *cadence)
{
	int status;
	u32 reg;

	status = cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					     TIMEOUT_US,
					     CTRL_STATUS_INIT_COMP, false);
	if (status)
		return status;

	reg = readl_relaxed(cadence->reg + CTRL_VERSION);
	cadence->ctrl_rev = FIELD_GET(CTRL_VERSION_REV, reg);

	dev_info(cadence->dev,
		 "%s: cadence nand controller version reg %x\n",
		 __func__, reg);

	/* Disable cache and multiplane. */
	writel_relaxed(0, cadence->reg + MULTIPLANE_CFG);
	writel_relaxed(0, cadence->reg + CACHE_CFG);

	/* Clear all interrupts. */
	writel_relaxed(0xFFFFFFFF, cadence->reg + INTR_STATUS);

	cadence_nand_get_caps(cadence);
	if (cadence_nand_read_bch_caps(cadence))
		return -EIO;

	/*
	 * Set the I/O width access to 8 bit, because during SW device
	 * discovery the width access is expected to be 8 bit.
	 */
	status = cadence_nand_set_access_width16(cadence, false);

	return status;
}

#define TT_MAIN_OOB_AREAS	2
#define TT_RAW_PAGE		3
#define TT_BBM			4
#define TT_MAIN_OOB_AREA_EXT	5

/* Prepare the size of data to transfer. */
static void
cadence_nand_prepare_data_size(struct mtd_info *mtd,
			       int transfer_type)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	u32 sec_size = 0, offset = 0, sec_cnt = 1;
	u32 last_sec_size = cdns_chip->sector_size;
	u32 data_ctrl_size = 0;
	u32 reg = 0;

	if (cadence->curr_trans_type == transfer_type)
		return;

	switch (transfer_type) {
	case TT_MAIN_OOB_AREA_EXT:
		sec_cnt = cdns_chip->sector_count;
		sec_size = cdns_chip->sector_size;
		data_ctrl_size = cdns_chip->avail_oob_size;
		break;
	case TT_MAIN_OOB_AREAS:
		sec_cnt = cdns_chip->sector_count;
		last_sec_size = cdns_chip->sector_size
			+ cdns_chip->avail_oob_size;
		sec_size = cdns_chip->sector_size;
		break;
	case TT_RAW_PAGE:
		last_sec_size = mtd->writesize + mtd->oobsize;
		break;
	case TT_BBM:
		offset = mtd->writesize + cdns_chip->bbm_offs;
		last_sec_size = 8;
		break;
	}

	reg = 0;
	reg |= FIELD_PREP(TRAN_CFG_0_OFFSET, offset);
	reg |= FIELD_PREP(TRAN_CFG_0_SEC_CNT, sec_cnt);
	writel_relaxed(reg, cadence->reg + TRAN_CFG_0);

	reg = 0;
	reg |= FIELD_PREP(TRAN_CFG_1_LAST_SEC_SIZE, last_sec_size);
	reg |= FIELD_PREP(TRAN_CFG_1_SECTOR_SIZE, sec_size);
	writel_relaxed(reg, cadence->reg + TRAN_CFG_1);

	if (cadence->caps2.data_control_supp) {
		reg = readl_relaxed(cadence->reg + CONTROL_DATA_CTRL);
		reg &= ~CONTROL_DATA_CTRL_SIZE;
		reg |= FIELD_PREP(CONTROL_DATA_CTRL_SIZE, data_ctrl_size);
		writel_relaxed(reg, cadence->reg + CONTROL_DATA_CTRL);
	}

	cadence->curr_trans_type = transfer_type;
}

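/*
 * Execute a read/write CDMA transfer for one page: map the buffers for
 * DMA, prepare the descriptor and run it on the thread matching the
 * chip select.
 */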
static int
cadence_nand_cdma_transfer(struct cadence_nand_info *cadence, u8 chip_nr,
			   int page, void *buf, void *ctrl_dat, u32 buf_size,
			   u32 ctrl_dat_size, enum dma_data_direction dir,
			   bool with_ecc)
{
	dma_addr_t dma_buf, dma_ctrl_dat = 0;
	u8 thread_nr = chip_nr;
	int status;
	u16 ctype;

	if (dir == DMA_FROM_DEVICE)
		ctype = CDMA_CT_RD;
	else
		ctype = CDMA_CT_WR;

	cadence_nand_set_ecc_enable(cadence, with_ecc);

	dma_buf = dma_map_single(buf, buf_size, dir);
	if (dma_mapping_error(cadence->dev, dma_buf)) {
		dev_err(cadence->dev, "Failed to map DMA buffer\n");
		return -EIO;
	}

	if (ctrl_dat && ctrl_dat_size) {
		dma_ctrl_dat = dma_map_single(ctrl_dat,
					      ctrl_dat_size, dir);
		if (dma_mapping_error(cadence->dev, dma_ctrl_dat)) {
			dma_unmap_single(dma_buf,
					 buf_size, dir);
			dev_err(cadence->dev, "Failed to map DMA buffer\n");
			return -EIO;
		}
	}

	cadence_nand_cdma_desc_prepare(cadence, chip_nr, page,
				       dma_buf, dma_ctrl_dat, ctype);

	status = cadence_nand_cdma_send_and_wait(cadence, thread_nr);

	dma_unmap_single(dma_buf,
			 buf_size, dir);

	if (ctrl_dat && ctrl_dat_size)
		dma_unmap_single(dma_ctrl_dat,
				 ctrl_dat_size, dir);
	if (status)
		return status;

	return cadence_nand_cdma_finish(cadence);
}

static void cadence_nand_set_timings(struct cadence_nand_info *cadence,
				     struct cadence_nand_timings *t)
{
	writel_relaxed(t->async_toggle_timings,
		       cadence->reg + ASYNC_TOGGLE_TIMINGS);
	writel_relaxed(t->timings0, cadence->reg + TIMINGS0);
	writel_relaxed(t->timings1, cadence->reg + TIMINGS1);
	writel_relaxed(t->timings2, cadence->reg + TIMINGS2);

	if (cadence->caps2.is_phy_type_dll)
		writel_relaxed(t->dll_phy_ctrl, cadence->reg + DLL_PHY_CTRL);

	writel_relaxed(t->phy_ctrl, cadence->reg + PHY_CTRL);

	if (cadence->caps2.is_phy_type_dll) {
		writel_relaxed(0, cadence->reg + PHY_TSEL);
		writel_relaxed(2, cadence->reg + PHY_DQ_TIMING);
		writel_relaxed(t->phy_dqs_timing,
			       cadence->reg + PHY_DQS_TIMING);
		writel_relaxed(t->phy_gate_lpbk_ctrl,
			       cadence->reg + PHY_GATE_LPBK_CTRL);
		writel_relaxed(PHY_DLL_MASTER_CTRL_BYPASS_MODE,
			       cadence->reg + PHY_DLL_MASTER_CTRL);
		writel_relaxed(0, cadence->reg + PHY_DLL_SLAVE_CTRL);
	}
}

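/*
 * Apply the per-chip timings and ECC settings when switching between
 * chips; this is a no-op if the chip is already selected.
 */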
static int cadence_nand_select_target(struct nand_chip *chip)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (chip == cadence->selected_chip)
		return 0;

	if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					TIMEOUT_US,
					CTRL_STATUS_CTRL_BUSY, true))
		return -ETIMEDOUT;

	cadence_nand_set_timings(cadence, &cdns_chip->timings);

	cadence_nand_set_ecc_strength(cadence,
				      cdns_chip->corr_str_idx);

	cadence_nand_set_erase_detection(cadence, true,
					 chip->ecc.strength);

	cadence->curr_trans_type = -1;
	cadence->selected_chip = chip;

	return 0;
}

static int cadence_nand_erase(struct mtd_info *mtd, int page)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	int status;
	u8 thread_nr = cdns_chip->cs[chip->cur_cs];

	cadence_nand_cdma_desc_prepare(cadence,
				       cdns_chip->cs[chip->cur_cs],
				       page, 0, 0,
				       CDMA_CT_ERASE);
	status = cadence_nand_cdma_send_and_wait(cadence, thread_nr);
	if (status) {
		dev_err(cadence->dev, "erase operation failed\n");
		return -EIO;
	}

	status = cadence_nand_cdma_finish(cadence);
	if (status)
		return status;

	return 0;
}

static int cadence_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, int oobavail)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	int ret;

	/*
	 * If .size and .strength are already set (usually by DT),
	 * check if they are supported by this controller.
	 */
	if (chip->ecc.size && chip->ecc.strength)
		return nand_check_ecc_caps(chip, &cadence->ecc_caps, oobavail);

	/*
	 * We want .size and .strength closest to the chip's requirement
	 * unless NAND_ECC_MAXIMIZE is requested.
	 */
	if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) {
		ret = nand_match_ecc_req(chip, &cadence->ecc_caps, oobavail);
		if (!ret)
			return 0;
	}

	/* Max ECC strength is the last thing we can do. */
	return nand_maximize_ecc(chip, &cadence->ecc_caps, oobavail);
}

static int cadence_nand_read_bbm(struct mtd_info *mtd, struct nand_chip *chip, int page, u8 *buf)
{
	int status;
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	cadence_nand_prepare_data_size(mtd, TT_BBM);

	cadence_nand_set_skip_bytes_conf(cadence, 0, 0, 0);

	/*
	 * Read only the bad block marker from the offset
	 * defined by the memory manufacturer.
	 */
	status = cadence_nand_cdma_transfer(cadence,
					    cdns_chip->cs[chip->cur_cs],
					    page, cadence->buf, NULL,
					    mtd->oobsize,
					    0, DMA_FROM_DEVICE, false);
	if (status) {
		dev_err(cadence->dev, "read BBM failed\n");
		return -EIO;
	}

	memcpy(buf + cdns_chip->bbm_offs, cadence->buf, cdns_chip->bbm_len);

	return 0;
}

static int cadence_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				   const u8 *buf, int oob_required, int page)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	int status;
	u16 marker_val = 0xFFFF;

	status = cadence_nand_select_target(chip);
	if (status)
		return status;

	cadence_nand_set_skip_bytes_conf(cadence, cdns_chip->bbm_len,
					 mtd->writesize
					 + cdns_chip->bbm_offs,
					 1);

	if (oob_required) {
		marker_val = *(u16 *)(chip->oob_poi
				      + cdns_chip->bbm_offs);
	} else {
		/* Set OOB data to 0xFF. */
		memset(cadence->buf + mtd->writesize, 0xFF,
		       cdns_chip->avail_oob_size);
	}

	cadence_nand_set_skip_marker_val(cadence, marker_val);

	cadence_nand_prepare_data_size(mtd, TT_MAIN_OOB_AREA_EXT);

	if (cadence_nand_dma_buf_ok(cadence, buf, mtd->writesize) &&
	    cadence->caps2.data_control_supp) {
		u8 *oob;

		if (oob_required)
			oob = chip->oob_poi;
		else
			oob = cadence->buf + mtd->writesize;

		status = cadence_nand_cdma_transfer(cadence,
						    cdns_chip->cs[chip->cur_cs],
						    page, (void *)buf, oob,
						    mtd->writesize,
						    cdns_chip->avail_oob_size,
						    DMA_TO_DEVICE, true);
		if (status) {
			dev_err(cadence->dev, "write page failed\n");
			return -EIO;
		}

		return 0;
	}

	if (oob_required) {
		/* Transfer the data to the OOB area. */
		memcpy(cadence->buf + mtd->writesize, chip->oob_poi,
		       cdns_chip->avail_oob_size);
	}

	memcpy(cadence->buf, buf, mtd->writesize);

	cadence_nand_prepare_data_size(mtd, TT_MAIN_OOB_AREAS);

	return cadence_nand_cdma_transfer(cadence,
					  cdns_chip->cs[chip->cur_cs],
					  page, cadence->buf, NULL,
					  mtd->writesize
					  + cdns_chip->avail_oob_size,
					  0, DMA_TO_DEVICE, true);
}

static int cadence_nand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				  int page)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);

	memset(cadence->buf, 0xFF, mtd->writesize);

	return cadence_nand_write_page(mtd, chip, cadence->buf, 1, page);
}

static int cadence_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				       const u8 *buf, int oob_required, int page)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	int writesize = mtd->writesize;
	int oobsize = mtd->oobsize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = cadence->buf;
	int oob_skip = cdns_chip->bbm_len;
	size_t size = writesize + oobsize;
	int i, pos, len;
	int status;

	status = cadence_nand_select_target(chip);
	if (status)
		return status;

	/*
	 * Fill the buffer with 0xff first, except for a full page transfer.
	 * This simplifies the logic.
	 */
	if (!buf || !oob_required)
		memset(tmp_buf, 0xff, size);

	cadence_nand_set_skip_bytes_conf(cadence, 0, 0, 0);

	/* Arrange the buffer for syndrome payload/ECC layout. */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, buf, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(tmp_buf + writesize + oob_skip, buf,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		const u8 *oob = chip->oob_poi;
		u32 oob_data_offset = (cdns_chip->sector_count - 1) *
			(cdns_chip->sector_size + chip->ecc.bytes)
			+ cdns_chip->sector_size + oob_skip;

		/* BBM at the beginning of the OOB area. */
		memcpy(tmp_buf + writesize, oob, oob_skip);

		/* OOB free. */
		memcpy(tmp_buf + oob_data_offset, oob,
		       cdns_chip->avail_oob_size);
		oob += cdns_chip->avail_oob_size;

		/* OOB ECC. */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			if (i == (ecc_steps - 1))
				pos += cdns_chip->avail_oob_size;

			len = ecc_bytes;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(tmp_buf + pos, oob, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(tmp_buf + writesize + oob_skip, oob,
				       len);
				oob += len;
			}
		}
	}

	cadence_nand_prepare_data_size(mtd, TT_RAW_PAGE);

	return cadence_nand_cdma_transfer(cadence,
					  cdns_chip->cs[chip->cur_cs],
					  page, cadence->buf, NULL,
					  mtd->writesize +
					  mtd->oobsize,
					  0, DMA_TO_DEVICE, false);
}

static int cadence_nand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				      int page)
{
	return cadence_nand_write_page_raw(mtd, chip, NULL, true, page);
}

static int cadence_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				  u8 *buf, int oob_required, int page)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	int status;
	int ecc_err_count = 0;

	status = cadence_nand_select_target(chip);
	if (status)
		return status;

	cadence_nand_set_skip_bytes_conf(cadence, cdns_chip->bbm_len,
					 mtd->writesize
					 + cdns_chip->bbm_offs, 1);

	/*
	 * If the data buffer can be accessed by DMA and the data_control
	 * feature is supported, then transfer data and OOB directly.
	 */
	if (cadence_nand_dma_buf_ok(cadence, buf, mtd->writesize) &&
	    cadence->caps2.data_control_supp) {
		u8 *oob;

		if (oob_required)
			oob = chip->oob_poi;
		else
			oob = cadence->buf + mtd->writesize;

		cadence_nand_prepare_data_size(mtd, TT_MAIN_OOB_AREA_EXT);
		status = cadence_nand_cdma_transfer(cadence,
						    cdns_chip->cs[chip->cur_cs],
						    page, buf, oob,
						    mtd->writesize,
						    cdns_chip->avail_oob_size,
						    DMA_FROM_DEVICE, true);
	/* Otherwise use the bounce buffer. */
	} else {
		cadence_nand_prepare_data_size(mtd, TT_MAIN_OOB_AREAS);
		status = cadence_nand_cdma_transfer(cadence,
						    cdns_chip->cs[chip->cur_cs],
						    page, cadence->buf,
						    NULL, mtd->writesize
						    + cdns_chip->avail_oob_size,
						    0, DMA_FROM_DEVICE, true);

		memcpy(buf, cadence->buf, mtd->writesize);
		if (oob_required)
			memcpy(chip->oob_poi,
			       cadence->buf + mtd->writesize,
			       mtd->oobsize);
	}

	switch (status) {
	case STAT_ECC_UNCORR:
		mtd->ecc_stats.failed++;
		ecc_err_count++;
		break;
	case STAT_ECC_CORR:
		ecc_err_count = FIELD_GET(CDMA_CS_MAXERR,
					  cadence->cdma_desc->status);
		mtd->ecc_stats.corrected += ecc_err_count;
		break;
	case STAT_ERASED:
	case STAT_OK:
		break;
	default:
		dev_err(cadence->dev, "read page failed\n");
		return -EIO;
	}

	if (oob_required)
		if (cadence_nand_read_bbm(mtd, chip, page, chip->oob_poi))
			return -EIO;

	return ecc_err_count;
}

static int cadence_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				 int page)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);

	return cadence_nand_read_page(mtd, chip, cadence->buf, 1, page);
}

static int cadence_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				      u8 *buf, int oob_required, int page)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	int oob_skip = cdns_chip->bbm_len;
	int writesize = mtd->writesize;
	int ecc_steps = chip->ecc.steps;
	int ecc_size = chip->ecc.size;
	int ecc_bytes = chip->ecc.bytes;
	void *tmp_buf = cadence->buf;
	int i, pos, len;
	int status;

	status = cadence_nand_select_target(chip);
	if (status)
		return status;

	cadence_nand_set_skip_bytes_conf(cadence, 0, 0, 0);

	cadence_nand_prepare_data_size(mtd, TT_RAW_PAGE);
	status = cadence_nand_cdma_transfer(cadence,
					    cdns_chip->cs[chip->cur_cs],
					    page, cadence->buf, NULL,
					    mtd->writesize
					    + mtd->oobsize,
					    0, DMA_FROM_DEVICE, false);

	switch (status) {
	case STAT_ERASED:
	case STAT_OK:
		break;
	default:
		dev_err(cadence->dev, "read raw page failed\n");
		return -EIO;
	}

	/* Arrange the buffer for syndrome payload/ECC layout. */
	if (buf) {
		for (i = 0; i < ecc_steps; i++) {
			pos = i * (ecc_size + ecc_bytes);
			len = ecc_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(buf, tmp_buf + pos, len);
			buf += len;
			if (len < ecc_size) {
				len = ecc_size - len;
				memcpy(buf, tmp_buf + writesize + oob_skip,
				       len);
				buf += len;
			}
		}
	}

	if (oob_required) {
		u8 *oob = chip->oob_poi;
		u32 oob_data_offset = (cdns_chip->sector_count - 1) *
			(cdns_chip->sector_size + chip->ecc.bytes)
			+ cdns_chip->sector_size + oob_skip;

		/* OOB free. */
		memcpy(oob, tmp_buf + oob_data_offset,
		       cdns_chip->avail_oob_size);

		/* BBM at the beginning of the OOB area. */
		memcpy(oob, tmp_buf + writesize, oob_skip);

		oob += cdns_chip->avail_oob_size;

		/* OOB ECC. */
		for (i = 0; i < ecc_steps; i++) {
			pos = ecc_size + i * (ecc_size + ecc_bytes);
			len = ecc_bytes;

			if (i == (ecc_steps - 1))
				pos += cdns_chip->avail_oob_size;

			if (pos >= writesize)
				pos += oob_skip;
			else if (pos + len > writesize)
				len = writesize - pos;

			memcpy(oob, tmp_buf + pos, len);
			oob += len;
			if (len < ecc_bytes) {
				len = ecc_bytes - len;
				memcpy(oob, tmp_buf + writesize + oob_skip,
				       len);
				oob += len;
			}
		}
	}
	return 0;
}

static int cadence_nand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
				     int page)
{
	return cadence_nand_read_page_raw(mtd, chip, NULL, true, page);
}

static void cadence_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	u8 thread_nr = 0;
	u32 sdma_size;
	int status;
	int len_in_words = len >> 2;

	/* Wait until the slave DMA interface is ready for data transfer. */
	status = cadence_nand_wait_on_sdma(cadence, &thread_nr, &sdma_size);
	if (status) {
		pr_err("Wait on sdma failed:%x\n", status);
		hang();
	}

	if (!cadence->caps1->has_dma) {
		readsq(cadence->io.virt, buf, len_in_words);

		if (sdma_size > len) {
			memcpy(cadence->buf, buf + (len_in_words << 2),
			       len - (len_in_words << 2));
			readsl(cadence->io.virt, cadence->buf,
			       sdma_size / 4 - len_in_words);
		}
	}
}

static void cadence_nand_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	u8 thread_nr = 0;
	u32 sdma_size;
	int status;
	int len_in_words = len >> 2;

	/* Wait until the slave DMA interface is ready for data transfer. */
	status = cadence_nand_wait_on_sdma(cadence, &thread_nr, &sdma_size);
	if (status) {
		pr_err("Wait on sdma failed:%x\n", status);
		hang();
	}

	if (!cadence->caps1->has_dma) {
		writesq(cadence->io.virt, buf, len_in_words);

		if (sdma_size > len) {
			memcpy(cadence->buf, buf + (len_in_words << 2),
			       len - (len_in_words << 2));
			writesl(cadence->io.virt, cadence->buf,
				sdma_size / 4 - len_in_words);
		}
	}
}

static int cadence_nand_cmd_opcode(struct nand_chip *chip, unsigned int op_id)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	u64 mini_ctrl_cmd = 0;
	int ret;

	mini_ctrl_cmd |= GCMD_LAY_TWB;
	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR, GCMD_LAY_INSTR_CMD);
	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_CMD, op_id);

	ret = cadence_nand_generic_cmd_send(cadence,
					    cdns_chip->cs[chip->cur_cs],
					    mini_ctrl_cmd);

	if (ret)
		dev_err(cadence->dev, "send cmd %x failed\n",
			op_id);

	return ret;
}

static int cadence_nand_cmd_address(struct nand_chip *chip,
				    unsigned int naddrs, const u8 *addrs)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	u64 address = 0;
	u64 mini_ctrl_cmd = 0;
	int ret;
	int i;

	mini_ctrl_cmd |= GCMD_LAY_TWB;

	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
				    GCMD_LAY_INSTR_ADDR);

	for (i = 0; i < naddrs; i++)
		address |= (u64)addrs[i] << (8 * i);

	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR,
				    address);
	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INPUT_ADDR_SIZE,
				    naddrs - 1);

	ret = cadence_nand_generic_cmd_send(cadence,
					    cdns_chip->cs[chip->cur_cs],
					    mini_ctrl_cmd);

	if (ret)
		pr_err("send address %llx failed\n", address);

	return ret;
}

static int cadence_nand_cmd_data(struct nand_chip *chip,
				 unsigned int len, u8 mode)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	u64 mini_ctrl_cmd = 0;
	int ret;

	mini_ctrl_cmd |= GCMD_LAY_TWB;
	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAY_INSTR,
				    GCMD_LAY_INSTR_DATA);

	if (mode)
		mini_ctrl_cmd |= FIELD_PREP(GCMD_DIR, GCMD_DIR_WRITE);

	mini_ctrl_cmd |= FIELD_PREP(GCMD_SECT_CNT, 1);
	mini_ctrl_cmd |= FIELD_PREP(GCMD_LAST_SIZE, len);

	ret = cadence_nand_generic_cmd_send(cadence,
					    cdns_chip->cs[chip->cur_cs],
					    mini_ctrl_cmd);

	if (ret) {
		pr_err("send generic data cmd failed\n");
		return ret;
	}

	return ret;
}

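/* Wait until the ready/busy status of the selected chip reports ready. */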
static int cadence_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
	int status;

	status = cadence_nand_wait_for_value(cadence, RBN_SETINGS,
					     TIMEOUT_US,
					     BIT(cdns_chip->cs[chip->cur_cs]),
					     false);
	return status;
}

static int cadence_nand_ooblayout_free(struct mtd_info *mtd, int section,
				       struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (section)
		return -ERANGE;

	oobregion->offset = cdns_chip->bbm_len;
	oobregion->length = cdns_chip->avail_oob_size
		- cdns_chip->bbm_len;

	return 0;
}

static int cadence_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);

	if (section)
		return -ERANGE;

	oobregion->offset = cdns_chip->avail_oob_size;
	oobregion->length = chip->ecc.total;

	return 0;
}

static const struct mtd_ooblayout_ops cadence_nand_ooblayout_ops = {
	.rfree = cadence_nand_ooblayout_free,
	.ecc = cadence_nand_ooblayout_ecc,
};

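/*
 * Convert a timing value into a number of clock cycles. The result is
 * the cycle count minus one, since the timing registers are programmed
 * with count - 1 (callers use (cnt + 1) * clk_period).
 */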
static int calc_cycl(u32 timing, u32 clock)
{
	if (timing == 0 || clock == 0)
		return 0;

	if ((timing % clock) > 0)
		return timing / clock;
	else
		return timing / clock - 1;
}

/* Calculate the maximum data valid window. */
static inline u32 calc_tdvw_max(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
				u32 board_delay_skew_min, u32 ext_mode)
{
	if (ext_mode == 0)
		clk_period /= 2;

	return (trp_cnt + 1) * clk_period + trhoh_min +
		board_delay_skew_min;
}

/* Calculate the data valid window. */
static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min,
			    u32 trea_max, u32 ext_mode)
{
	if (ext_mode == 0)
		clk_period /= 2;

	return (trp_cnt + 1) * clk_period + trhoh_min - trea_max;
}

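
/*
 * Worked example (sketch, assuming all values in picoseconds): with
 * trp_cnt = 1, clk_period = 20000, tRHOH_min = 15000, tREA_max = 25000
 * and ext_mode = 1, the data valid window is
 * (1 + 1) * 20000 + 15000 - 25000 = 30000 ps.
 */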

static inline int of_get_child_count(const ofnode node)
{
	return fdtdec_get_child_count(gd->fdt_blob, ofnode_to_offset(node));
}

static int cadence_setup_data_interface(struct mtd_info *mtd, int chipnr,
					const struct nand_data_interface *conf)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
	struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(mtd_to_nand(mtd));
	const struct nand_sdr_timings *sdr;
	struct cadence_nand_timings *t = &cdns_chip->timings;
	u32 reg;
	u32 board_delay = cadence->board_delay;
	u32 clk_period = DIV_ROUND_DOWN_ULL(1000000000000ULL,
					    cadence->nf_clk_rate);
	u32 tceh_cnt, tcs_cnt, tadl_cnt, tccs_cnt;
	u32 tfeat_cnt, trhz_cnt, tvdly_cnt;
	u32 trhw_cnt, twb_cnt, twh_cnt = 0, twhr_cnt;
	u32 twp_cnt = 0, trp_cnt = 0, trh_cnt = 0;
	u32 if_skew = cadence->caps1->if_skew;
	u32 board_delay_skew_min = board_delay - if_skew;
	u32 board_delay_skew_max = board_delay + if_skew;
	u32 dqs_sampl_res, phony_dqs_mod;
	u32 tdvw, tdvw_min, tdvw_max;
	u32 ext_rd_mode, ext_wr_mode;
	u32 dll_phy_dqs_timing = 0, phony_dqs_timing = 0, rd_del_sel = 0;
	u32 sampling_point;

	sdr = nand_get_sdr_timings(conf);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	memset(t, 0, sizeof(*t));
	/* Sampling point calculation. */
	if (cadence->caps2.is_phy_type_dll)
		phony_dqs_mod = 2;
	else
		phony_dqs_mod = 1;

	dqs_sampl_res = clk_period / phony_dqs_mod;

	tdvw_min = sdr->tREA_max + board_delay_skew_max;
	/*
	 * The idea of these calculations is to get optimum values for
	 * the tRP and tRH timings. If it is NOT possible to sample data
	 * with optimal tRP/tRH settings, the parameters will be extended.
	 * If clk_period is 50ns (the lowest value) this condition is met
	 * for SDR timing modes 1, 2, 3, 4 and 5.
	 * If clk_period is 20ns the condition is met only for SDR timing
	 * mode 5.
	 */
	if (sdr->tRC_min <= clk_period &&
	    sdr->tRP_min <= (clk_period / 2) &&
	    sdr->tREH_min <= (clk_period / 2)) {
		/* Performance mode. */
		ext_rd_mode = 0;
		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
				 sdr->tREA_max, ext_rd_mode);
		tdvw_max = calc_tdvw_max(trp_cnt, clk_period, sdr->tRHOH_min,
					 board_delay_skew_min,
					 ext_rd_mode);
		/*
		 * Check if the data valid window and sampling point can be
		 * found and are not on the edge (i.e. we have hold margin).
		 * If not, extend the tRP timing.
		 */
		if (tdvw > 0) {
			if (tdvw_max <= tdvw_min ||
			    (tdvw_max % dqs_sampl_res) == 0) {
				/*
				 * No valid sampling point, so the RE pulse
				 * needs to be widened by half a clock cycle.
				 */
				ext_rd_mode = 1;
			}
		} else {
			/*
			 * There is no valid window to sample the data,
			 * so tRP needs to be widened.
			 * Very safe calculations are performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
			ext_rd_mode = 1;
		}

	} else {
		/* Extended read mode. */
		u32 trh;

		ext_rd_mode = 1;
		trp_cnt = calc_cycl(sdr->tRP_min, clk_period);
		trh = sdr->tRC_min - ((trp_cnt + 1) * clk_period);
		if (sdr->tREH_min >= trh)
			trh_cnt = calc_cycl(sdr->tREH_min, clk_period);
		else
			trh_cnt = calc_cycl(trh, clk_period);

		tdvw = calc_tdvw(trp_cnt, clk_period, sdr->tRHOH_min,
				 sdr->tREA_max, ext_rd_mode);
		/*
		 * Check if the data valid window and sampling point can be
		 * found, or, if it is at the edge, check if the previous one
		 * is valid; if not, extend the tRP timing.
		 */
		if (tdvw > 0) {
			tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
						 sdr->tRHOH_min,
						 board_delay_skew_min,
						 ext_rd_mode);

			if ((((tdvw_max / dqs_sampl_res)
			      * dqs_sampl_res) <= tdvw_min) ||
			    (((tdvw_max % dqs_sampl_res) == 0) &&
			     (((tdvw_max / dqs_sampl_res - 1)
			       * dqs_sampl_res) <= tdvw_min))) {
				/*
				 * The data valid window width is lower than
				 * the sampling resolution and does not hit
				 * any sampling point. To be sure a sampling
				 * point will be found, the RE low pulse width
				 * is extended by one clock cycle.
				 */
				trp_cnt = trp_cnt + 1;
			}
		} else {
			/*
			 * There is no valid window to sample the data,
			 * so tRP needs to be widened.
			 * Very safe calculations are performed here.
			 */
			trp_cnt = (sdr->tREA_max + board_delay_skew_max
				   + dqs_sampl_res) / clk_period;
		}
	}

	tdvw_max = calc_tdvw_max(trp_cnt, clk_period,
				 sdr->tRHOH_min,
				 board_delay_skew_min, ext_rd_mode);

	if (sdr->tWC_min <= clk_period &&
	    (sdr->tWP_min + if_skew) <= (clk_period / 2) &&
	    (sdr->tWH_min + if_skew) <= (clk_period / 2)) {
		ext_wr_mode = 0;
	} else {
		u32 twh;

		ext_wr_mode = 1;
		twp_cnt = calc_cycl(sdr->tWP_min + if_skew, clk_period);
		if ((twp_cnt + 1) * clk_period < (sdr->tALS_min + if_skew))
			twp_cnt = calc_cycl(sdr->tALS_min + if_skew,
					    clk_period);

		twh = (sdr->tWC_min - (twp_cnt + 1) * clk_period);
		if (sdr->tWH_min >= twh)
			twh = sdr->tWH_min;

		twh_cnt = calc_cycl(twh + if_skew, clk_period);
	}

	reg = FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRH, trh_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TRP, trp_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWH, twh_cnt);
	reg |= FIELD_PREP(ASYNC_TOGGLE_TIMINGS_TWP, twp_cnt);
	t->async_toggle_timings = reg;
	dev_dbg(cadence->dev, "ASYNC_TOGGLE_TIMINGS_SDR\t%x\n", reg);

	tadl_cnt = calc_cycl((sdr->tADL_min + if_skew), clk_period);
	tccs_cnt = calc_cycl((sdr->tCCS_min + if_skew), clk_period);
	twhr_cnt = calc_cycl((sdr->tWHR_min + if_skew), clk_period);
	trhw_cnt = calc_cycl((sdr->tRHW_min + if_skew), clk_period);
	reg = FIELD_PREP(TIMINGS0_TADL, tadl_cnt);

	/*
	 * If the timing exceeds the delay field in the timing register,
	 * use the maximum value.
	 */
	if (FIELD_FIT(TIMINGS0_TCCS, tccs_cnt))
		reg |= FIELD_PREP(TIMINGS0_TCCS, tccs_cnt);
	else
		reg |= TIMINGS0_TCCS;

	reg |= FIELD_PREP(TIMINGS0_TWHR, twhr_cnt);
	reg |= FIELD_PREP(TIMINGS0_TRHW, trhw_cnt);
	t->timings0 = reg;
	dev_dbg(cadence->dev, "TIMINGS0_SDR\t%x\n", reg);

	/* The following is related to a single signal, so skew is not needed. */
	trhz_cnt = calc_cycl(sdr->tRHZ_max, clk_period);
	trhz_cnt = trhz_cnt + 1;
	twb_cnt = calc_cycl((sdr->tWB_max + board_delay), clk_period);
	/*
	 * Because of the two-stage syncflop the value must be increased by 3;
	 * the first value is related to the sync, the second value is related
	 * to the output interface delay.
	 */
	twb_cnt = twb_cnt + 3 + 5;
	/*
	 * The following is related to the WE edge of the random data input
	 * sequence, so skew is not needed.
	 */
	tvdly_cnt = calc_cycl(500000 + if_skew, clk_period);
	reg = FIELD_PREP(TIMINGS1_TRHZ, trhz_cnt);
	reg |= FIELD_PREP(TIMINGS1_TWB, twb_cnt);
	reg |= FIELD_PREP(TIMINGS1_TVDLY, tvdly_cnt);
	t->timings1 = reg;
	dev_dbg(cadence->dev, "TIMINGS1_SDR\t%x\n", reg);

	tfeat_cnt = calc_cycl(sdr->tFEAT_max, clk_period);
	if (tfeat_cnt < twb_cnt)
		tfeat_cnt = twb_cnt;

	tceh_cnt = calc_cycl(sdr->tCEH_min, clk_period);
	tcs_cnt = calc_cycl((sdr->tCS_min + if_skew), clk_period);

	reg = FIELD_PREP(TIMINGS2_TFEAT, tfeat_cnt);
	reg |= FIELD_PREP(TIMINGS2_CS_HOLD_TIME, tceh_cnt);
	reg |= FIELD_PREP(TIMINGS2_CS_SETUP_TIME, tcs_cnt);
	t->timings2 = reg;
	dev_dbg(cadence->dev, "TIMINGS2_SDR\t%x\n", reg);

	if (cadence->caps2.is_phy_type_dll) {
		reg = DLL_PHY_CTRL_DLL_RST_N;
		if (ext_wr_mode)
			reg |= DLL_PHY_CTRL_EXTENDED_WR_MODE;
		if (ext_rd_mode)
			reg |= DLL_PHY_CTRL_EXTENDED_RD_MODE;

		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_HIGH_WAIT_CNT, 7);
		reg |= FIELD_PREP(DLL_PHY_CTRL_RS_IDLE_CNT, 7);
		t->dll_phy_ctrl = reg;
		dev_dbg(cadence->dev, "DLL_PHY_CTRL_SDR\t%x\n", reg);
	}

	/* Sampling point calculation. */
	if ((tdvw_max % dqs_sampl_res) > 0)
		sampling_point = tdvw_max / dqs_sampl_res;
	else
		sampling_point = (tdvw_max / dqs_sampl_res - 1);

	if (sampling_point * dqs_sampl_res > tdvw_min) {
		dll_phy_dqs_timing =
			FIELD_PREP(PHY_DQS_TIMING_DQS_SEL_OE_END, 4);
		dll_phy_dqs_timing |= PHY_DQS_TIMING_USE_PHONY_DQS;
		phony_dqs_timing = sampling_point / phony_dqs_mod;

		if ((sampling_point % 2) > 0) {
			dll_phy_dqs_timing |= PHY_DQS_TIMING_PHONY_DQS_SEL;
			if ((tdvw_max % dqs_sampl_res) == 0)
				/*
				 * Calculation for a sampling point at the
				 * edge of the data window when it is an odd
				 * number.
				 */
				phony_dqs_timing = (tdvw_max / dqs_sampl_res)
					/ phony_dqs_mod - 1;

			if (!cadence->caps2.is_phy_type_dll)
				phony_dqs_timing--;

		} else {
			phony_dqs_timing--;
		}
		rd_del_sel = phony_dqs_timing + 3;
	} else {
		dev_warn(cadence->dev,
			 "ERROR: cannot find a valid sampling point\n");
	}

	reg = FIELD_PREP(PHY_CTRL_PHONY_DQS, phony_dqs_timing);
	if (cadence->caps2.is_phy_type_dll)
		reg |= PHY_CTRL_SDR_DQS;
	t->phy_ctrl = reg;
	dev_dbg(cadence->dev, "PHY_CTRL_REG_SDR\t%x\n", reg);

	if (cadence->caps2.is_phy_type_dll) {
		dev_dbg(cadence->dev, "PHY_TSEL_REG_SDR\t%x\n", 0);
		dev_dbg(cadence->dev, "PHY_DQ_TIMING_REG_SDR\t%x\n", 2);
		dev_dbg(cadence->dev, "PHY_DQS_TIMING_REG_SDR\t%x\n",
			dll_phy_dqs_timing);
		t->phy_dqs_timing = dll_phy_dqs_timing;

		reg = FIELD_PREP(PHY_GATE_LPBK_CTRL_RDS, rd_del_sel);
		dev_dbg(cadence->dev, "PHY_GATE_LPBK_CTRL_REG_SDR\t%x\n",
			reg);
		t->phy_gate_lpbk_ctrl = reg;

		dev_dbg(cadence->dev, "PHY_DLL_MASTER_CTRL_REG_SDR\t%lx\n",
			PHY_DLL_MASTER_CTRL_BYPASS_MODE);
		dev_dbg(cadence->dev, "PHY_DLL_SLAVE_CTRL_REG_SDR\t%x\n", 0);
	}
	return 0;
}

1828static int cadence_nand_attach_chip(struct nand_chip *chip)
1829{
1830 struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
1831 struct cdns_nand_chip *cdns_chip = to_cdns_nand_chip(chip);
1832 static struct nand_ecclayout nand_oob;
1833 u32 ecc_size;
1834 struct mtd_info *mtd = nand_to_mtd(chip);
1835 int ret;
1836
1837 if (chip->options & NAND_BUSWIDTH_16) {
1838 ret = cadence_nand_set_access_width16(cadence, true);
1839 if (ret)
1840 return ret;
1841 }
1842
1843 chip->bbt_options |= NAND_BBT_USE_FLASH;
1844 chip->bbt_options |= NAND_BBT_NO_OOB;
1845 chip->ecc.mode = NAND_ECC_HW_SYNDROME;
1846
1847 chip->options |= NAND_NO_SUBPAGE_WRITE;
1848
1849 cdns_chip->bbm_offs = chip->badblockpos;
1850 cdns_chip->bbm_offs &= ~0x01;
1851 /* this value should be even number */
1852 cdns_chip->bbm_len = 2;
1853
1854 ret = cadence_ecc_setup(mtd, chip, mtd->oobsize - cdns_chip->bbm_len);
1855 if (ret) {
1856 dev_err(cadence->dev, "ECC configuration failed\n");
1857 return ret;
1858 }
1859
1860 dev_dbg(cadence->dev,
1861 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1862 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1863
1864 /* Error correction configuration. */
1865 cdns_chip->sector_size = chip->ecc.size;
1866 cdns_chip->sector_count = mtd->writesize / cdns_chip->sector_size;
1867 ecc_size = cdns_chip->sector_count * chip->ecc.bytes;
1868
1869 cdns_chip->avail_oob_size = mtd->oobsize - ecc_size;
1870
1871 if (cdns_chip->avail_oob_size > cadence->bch_metadata_size)
1872 cdns_chip->avail_oob_size = cadence->bch_metadata_size;
1873
1874 if ((cdns_chip->avail_oob_size + cdns_chip->bbm_len + ecc_size)
1875 > mtd->oobsize)
1876 cdns_chip->avail_oob_size -= 4;
1877
1878 ret = cadence_nand_get_ecc_strength_idx(cadence, chip->ecc.strength);
1879 if (ret < 0)
1880 return -EINVAL;
1881
1882 cdns_chip->corr_str_idx = (u8)ret;
1883
1884 if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
1885 TIMEOUT_US,
1886 CTRL_STATUS_CTRL_BUSY, true))
1887 return -ETIMEDOUT;
1888
1889 cadence_nand_set_ecc_strength(cadence,
1890 cdns_chip->corr_str_idx);
1891
1892 cadence_nand_set_erase_detection(cadence, true,
1893 chip->ecc.strength);
1894
1895 dev_dbg(cadence->dev,
1896 "chosen ECC settings: step=%d, strength=%d, bytes=%d\n",
1897 chip->ecc.size, chip->ecc.strength, chip->ecc.bytes);
1898
1899 /* Override the default read operations. */
1900 chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS;
1901 chip->ecc.read_page = cadence_nand_read_page;
1902 chip->ecc.read_page_raw = cadence_nand_read_page_raw;
1903 chip->ecc.write_page = cadence_nand_write_page;
1904 chip->ecc.write_page_raw = cadence_nand_write_page_raw;
1905 chip->ecc.read_oob = cadence_nand_read_oob;
1906 chip->ecc.write_oob = cadence_nand_write_oob;
1907 chip->ecc.read_oob_raw = cadence_nand_read_oob_raw;
1908 chip->ecc.write_oob_raw = cadence_nand_write_oob_raw;
1909 chip->erase = cadence_nand_erase;
1910
1911 if ((mtd->writesize + mtd->oobsize) > cadence->buf_size)
1912 cadence->buf_size = mtd->writesize + mtd->oobsize;
1913
1914 mtd_set_ooblayout(mtd, &cadence_nand_ooblayout_ops);
1915
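	/* Provide a minimal legacy ecclayout; only the ECC byte count is filled in. */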
1916 nand_oob.eccbytes = cdns_chip->chip.ecc.bytes;
1917 cdns_chip->chip.ecc.layout = &nand_oob;
1918
1919 return 0;
1920}
1921
1922/* Dummy implementation: we don't support multiple chips */
1923static void cadence_nand_select_chip(struct mtd_info *mtd, int chipnr)
1924{
1925 switch (chipnr) {
1926 case -1:
1927 case 0:
1928 break;
1929
1930 default:
1931 WARN_ON(chipnr);
1932 }
1933}
1934
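/* Dummy implementation: the raw command interface is not used by this driver */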
1935static void cadence_nand_cmdfunc(struct mtd_info *mtd, unsigned int command,
1936 int offset_in_page, int page)
1937{
1938}
1939
1940static int cadence_nand_dev_ready(struct mtd_info *mtd)
1941{
1942 struct nand_chip *chip = mtd_to_nand(mtd);
1943 struct cadence_nand_info *cadence = to_cadence_nand_info(chip->controller);
1944
	/* dev_ready semantics: return nonzero once the controller is idle. */
	if (cadence_nand_wait_for_value(cadence, CTRL_STATUS,
					TIMEOUT_US,
					CTRL_STATUS_CTRL_BUSY, true))
		return 0;

	return 1;
1951}
1952
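/* Dummy implementation: single-byte reads are not used by this driver */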
1953static u8 cadence_nand_read_byte(struct mtd_info *mtd)
1954{
1955 return 0;
1956}
1957
1958static void cadence_nand_write_byte(struct mtd_info *mtd, u8 byte)
1959{
1960 cadence_nand_write_buf(mtd, &byte, 1);
1961}
1962
1963static int cadence_nand_chip_init(struct cadence_nand_info *cadence, ofnode node)
1964{
1965 struct cdns_nand_chip *cdns_chip;
1966 struct nand_chip *chip;
1967 struct mtd_info *mtd;
1968 int ret, i;
1969 int nsels;
1970 u32 cs;
1971
1972 if (!ofnode_get_property(node, "reg", &nsels))
1973 return -ENODEV;
1974
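	/* The "reg" property length in bytes gives the number of chip selects. */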
1975 nsels /= sizeof(u32);
1976 if (nsels <= 0) {
1977 dev_err(cadence->dev, "invalid reg property size %d\n", nsels);
1978 return -EINVAL;
1979 }
1980
1981 cdns_chip = devm_kzalloc(cadence->dev, sizeof(*cdns_chip) +
1982 (nsels * sizeof(u8)), GFP_KERNEL);
1983 if (!cdns_chip)
		return -ENOMEM;
1985
1986 cdns_chip->nsels = nsels;
1987 for (i = 0; i < nsels; i++) {
1988 /* Retrieve CS id. */
1989 ret = ofnode_read_u32_index(node, "reg", i, &cs);
1990 if (ret) {
1991 dev_err(cadence->dev,
1992 "could not retrieve reg property: %d\n",
1993 ret);
1994 goto free_buf;
1995 }
1996
1997 if (cs >= cadence->caps2.max_banks) {
1998 dev_err(cadence->dev,
1999 "invalid reg value: %u (max CS = %d)\n",
2000 cs, cadence->caps2.max_banks);
2001 ret = -EINVAL;
2002 goto free_buf;
2003 }
2004
2005 if (test_and_set_bit(cs, &cadence->assigned_cs)) {
2006 dev_err(cadence->dev,
2007 "CS %d already assigned\n", cs);
2008 ret = -EINVAL;
2009 goto free_buf;
2010 }
2011
2012 cdns_chip->cs[i] = cs;
2013 }
2014
2015 chip = &cdns_chip->chip;
2016 chip->controller = &cadence->controller;
2017 nand_set_flash_node(chip, node);
2018 mtd = nand_to_mtd(chip);
2019 mtd->dev->parent = cadence->dev;
2020
2021 chip->options |= NAND_BUSWIDTH_AUTO;
2022 chip->select_chip = cadence_nand_select_chip;
2023 chip->cmdfunc = cadence_nand_cmdfunc;
2024 chip->dev_ready = cadence_nand_dev_ready;
2025 chip->read_byte = cadence_nand_read_byte;
2026 chip->write_byte = cadence_nand_write_byte;
2027 chip->waitfunc = cadence_nand_waitfunc;
2028 chip->read_buf = cadence_nand_read_buf;
2029 chip->write_buf = cadence_nand_write_buf;
2030 chip->setup_data_interface = cadence_setup_data_interface;
2031
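	/*
	 * Identify the chip first so that ECC can be configured from the
	 * detected geometry before nand_scan_tail() finalizes the MTD.
	 */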
2032 ret = nand_scan_ident(mtd, 1, NULL);
2033 if (ret) {
2034 dev_err(cadence->dev, "Chip identification failure\n");
2035 goto free_buf;
2036 }
2037
2038 ret = cadence_nand_attach_chip(chip);
2039 if (ret) {
		dev_err(cadence->dev, "Failed to attach chip: %d\n", ret);
2041 goto free_buf;
2042 }
2043
2044 ret = nand_scan_tail(mtd);
2045 if (ret) {
		dev_err(cadence->dev, "could not scan the NAND chip\n");
2047 goto free_buf;
2048 }
2049
2050 ret = nand_register(0, mtd);
2051 if (ret) {
2052 dev_err(cadence->dev, "Failed to register MTD: %d\n", ret);
2053 goto free_buf;
2054 }
2055
2056 return 0;
2057
2058free_buf:
2059 devm_kfree(cadence->dev, cdns_chip);
2060 return ret;
2061}
2062
2063static int cadence_nand_chips_init(struct cadence_nand_info *cadence)
2064{
2065 struct udevice *dev = cadence->dev;
2066 ofnode node = dev_ofnode(dev);
2067 ofnode nand_node;
2068 int max_cs = cadence->caps2.max_banks;
2069 int nchips, ret;
2070
	nchips = ofnode_get_child_count(node);
2072
2073 if (nchips > max_cs) {
2074 dev_err(cadence->dev,
2075 "too many NAND chips: %d (max = %d CS)\n",
2076 nchips, max_cs);
2077 return -EINVAL;
2078 }
2079
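	/* Each subnode of the controller node describes one NAND chip. */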
2080 ofnode_for_each_subnode(nand_node, node) {
2081 ret = cadence_nand_chip_init(cadence, nand_node);
2082 if (ret)
2083 return ret;
2084 }
2085
2086 return 0;
2087}
2088
2089static int cadence_nand_init(struct cadence_nand_info *cadence)
2090{
2091 int ret;
2092
2093 cadence->cdma_desc = dma_alloc_coherent(sizeof(*cadence->cdma_desc),
2094 (unsigned long *)&cadence->dma_cdma_desc);
2095 if (!cadence->cdma_desc)
2096 return -ENOMEM;
2097
2098 cadence->buf_size = SZ_16K;
2099 cadence->buf = kmalloc(cadence->buf_size, GFP_KERNEL);
2100 if (!cadence->buf) {
2101 ret = -ENOMEM;
2102 goto free_buf_desc;
2103 }
2104
	/* Hardware initialization */
2106 ret = cadence_nand_hw_init(cadence);
2107 if (ret)
2108 goto free_buf;
2109
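	/*
	 * Invalidate the cached correction-strength index so the first
	 * cadence_nand_set_ecc_strength() call always programs the hardware.
	 */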
2110 cadence->curr_corr_str_idx = 0xFF;
2111
2112 ret = cadence_nand_chips_init(cadence);
2113 if (ret) {
		dev_err(cadence->dev, "Failed to initialize NAND chips: %d\n",
			ret);
2116 goto free_buf;
2117 }
2118
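	/*
	 * Re-allocate the bounce buffer: buf_size may have grown while
	 * attaching chips, to fit the largest page plus OOB.
	 */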
2119 kfree(cadence->buf);
2120 cadence->buf = kzalloc(cadence->buf_size, GFP_KERNEL);
2121 if (!cadence->buf) {
2122 ret = -ENOMEM;
2123 goto free_buf_desc;
2124 }
2125
2126 return 0;
2127
2128free_buf:
2129 kfree(cadence->buf);
2130
2131free_buf_desc:
2132 dma_free_coherent(cadence->cdma_desc);
2133
2134 return ret;
2135}
2136
2137static const struct cadence_nand_dt_devdata cadence_nand_default = {
2138 .if_skew = 0,
2139 .has_dma = 0,
2140};
2141
2142static const struct udevice_id cadence_nand_dt_ids[] = {
2143 {
2144 .compatible = "cdns,nand",
2145 .data = (unsigned long)&cadence_nand_default
2146 }, {}
2147};
2148
2149static int cadence_nand_dt_probe(struct udevice *dev)
2150{
2151 struct cadence_nand_info *cadence = dev_get_priv(dev);
2153 const struct cadence_nand_dt_devdata *devdata;
2154 struct resource res;
2155 int ret;
2156 u32 val;
2157
	if (!dev) {
		pr_err("Device pointer is NULL\n");
		return -EINVAL;
	}
2162
	devdata = (const struct cadence_nand_dt_devdata *)dev_get_driver_data(dev);
2165
2166 cadence->caps1 = devdata;
2167 cadence->dev = dev;
2168
2169 ret = clk_get_by_index(dev, 0, &cadence->clk);
2170 if (ret)
2171 return ret;
2172
2173 ret = clk_enable(&cadence->clk);
2174 if (ret && ret != -ENOSYS && ret != -ENOMEM) {
2175 dev_err(dev, "failed to enable clock\n");
2176 return ret;
2177 }
2178 cadence->nf_clk_rate = clk_get_rate(&cadence->clk);
2179
2180 ret = reset_get_by_index(dev, 1, &cadence->softphy_reset);
2181 if (ret) {
2182 if (ret != -ENOMEM)
2183 dev_warn(dev, "Can't get softphy_reset: %d\n", ret);
2184 } else {
2185 reset_deassert(&cadence->softphy_reset);
2186 }
2187
2188 ret = reset_get_by_index(dev, 0, &cadence->nand_reset);
2189 if (ret) {
2190 if (ret != -ENOMEM)
2191 dev_warn(dev, "Can't get nand_reset: %d\n", ret);
2192 } else {
2193 reset_deassert(&cadence->nand_reset);
2194 }
2195
2196 ret = dev_read_resource_byname(dev, "reg", &res);
2197 if (ret)
2198 return ret;
2199 cadence->reg = devm_ioremap(dev, res.start, resource_size(&res));
2200
2201 ret = dev_read_resource_byname(dev, "sdma", &res);
2202 if (ret)
2203 return ret;
2204 cadence->io.dma = res.start;
2205 cadence->io.virt = devm_ioremap(dev, res.start, resource_size(&res));
2206
2207 ret = ofnode_read_u32(dev_ofnode(dev->parent),
2208 "cdns,board-delay-ps", &val);
2209 if (ret) {
2210 val = 4830;
2211 dev_info(cadence->dev,
2212 "missing cdns,board-delay-ps property, %d was set\n",
2213 val);
2214 }
2215 cadence->board_delay = val;
2216
2217 ret = cadence_nand_init(cadence);
2218 if (ret)
2219 return ret;
2220
2221 return 0;
2222}
2223
2224U_BOOT_DRIVER(cadence_nand_dt) = {
2225 .name = "cadence-nand-dt",
2226 .id = UCLASS_MTD,
2227 .of_match = cadence_nand_dt_ids,
2228 .probe = cadence_nand_dt_probe,
2229 .priv_auto = sizeof(struct cadence_nand_info),
2230};
2231
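/*
 * Trigger the driver-model probe; -ENODEV simply means no matching
 * node was found in the device tree.
 */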
2232void board_nand_init(void)
2233{
2234 struct udevice *dev;
2235 int ret;
2236
2237 ret = uclass_get_device_by_driver(UCLASS_MTD,
2238 DM_DRIVER_GET(cadence_nand_dt),
2239 &dev);
2240 if (ret && ret != -ENODEV)
2241 pr_err("Failed to initialize Cadence NAND controller. (error %d)\n",
2242 ret);
2243}