// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/of_access.h>
#include <malloc.h>
#include <memalign.h>
#include <nand.h>
#include <pci.h>
#include <time.h>
#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/libfdt.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand_bch.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/rawnand.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <asm/types.h>
#include <asm/dma-mapping.h>
#include <asm/arch/clock.h>
#include "octeontx_bch.h"

/*
 * The NDF_CMD queue takes commands between 16 and 128 bits wide.
 * All commands must be 16-bit aligned and are little endian.
 * WAIT_STATUS commands must be 64-bit aligned.
 * Commands are selected by the 4-bit opcode.
 *
 * Available Commands:
 *
 * 16 Bit:
 *   NOP
 *   WAIT
 *   BUS_ACQ, BUS_REL
 *   CHIP_EN, CHIP_DIS
 *
 * 32 Bit:
 *   CLE_CMD
 *   RD_CMD, RD_EDO_CMD
 *   WR_CMD
 *
 * 64 Bit:
 *   SET_TM_PAR
 *
 * 96 Bit:
 *   ALE_CMD
 *
 * 128 Bit:
 *   WAIT_STATUS, WAIT_STATUS_ALE
 */

/* NDF Register offsets */
#define NDF_CMD			0x0
#define NDF_MISC		0x8
#define NDF_ECC_CNT		0x10
#define NDF_DRBELL		0x30
#define NDF_ST_REG		0x38	/* status */
#define NDF_INT			0x40
#define NDF_INT_W1S		0x48
#define NDF_DMA_CFG		0x50
#define NDF_DMA_ADR		0x58
#define NDF_INT_ENA_W1C		0x60
#define NDF_INT_ENA_W1S		0x68

/* NDF command opcodes */
#define NDF_OP_NOP		0x0
#define NDF_OP_SET_TM_PAR	0x1
#define NDF_OP_WAIT		0x2
#define NDF_OP_CHIP_EN_DIS	0x3
#define NDF_OP_CLE_CMD		0x4
#define NDF_OP_ALE_CMD		0x5
#define NDF_OP_WR_CMD		0x8
#define NDF_OP_RD_CMD		0x9
#define NDF_OP_RD_EDO_CMD	0xa
#define NDF_OP_WAIT_STATUS	0xb	/* same opcode for WAIT_STATUS_ALE */
#define NDF_OP_BUS_ACQ_REL	0xf

#define NDF_BUS_ACQUIRE		1
#define NDF_BUS_RELEASE		0

#define DBGX_EDSCR(X)		(0x87A008000088 + (X) * 0x80000)

struct ndf_nop_cmd {
	u16 opcode:4;
	u16 nop:12;
};

struct ndf_wait_cmd {
	u16 opcode:4;
	u16 r_b:1;		/* wait for one cycle or PBUS_WAIT deassert */
	u16:3;
	u16 wlen:3;		/* timing parameter select */
	u16:5;
};

struct ndf_bus_cmd {
	u16 opcode:4;
	u16 direction:4;	/* 1 = acquire, 0 = release */
	u16:8;
};

struct ndf_chip_cmd {
	u16 opcode:4;
	u16 chip:3;		/* select chip, 0 = disable */
	u16 enable:1;		/* 1 = enable, 0 = disable */
	u16 bus_width:2;	/* 10 = 16 bit, 01 = 8 bit */
	u16:6;
};

struct ndf_cle_cmd {
	u32 opcode:4;
	u32:4;
	u32 cmd_data:8;		/* command sent to the PBUS AD pins */
	u32 clen1:3;		/* time between PBUS CLE and WE asserts */
	u32 clen2:3;		/* time WE remains asserted */
	u32 clen3:3;		/* time between WE deassert and CLE */
	u32:7;
};

/* RD_EDO_CMD uses the same layout as RD_CMD */
struct ndf_rd_cmd {
	u32 opcode:4;
	u32 data:16;		/* data bytes */
	u32 rlen1:3;
	u32 rlen2:3;
	u32 rlen3:3;
	u32 rlen4:3;
};

struct ndf_wr_cmd {
	u32 opcode:4;
	u32 data:16;		/* data bytes */
	u32:4;
	u32 wlen1:3;
	u32 wlen2:3;
	u32:3;
};

struct ndf_set_tm_par_cmd {
	u64 opcode:4;
	u64 tim_mult:4;		/* multiplier for the seven parameters */
	u64 tm_par1:8;		/* --> Following are the 7 timing parameters that */
	u64 tm_par2:8;		/*     specify the number of coprocessor cycles.  */
	u64 tm_par3:8;		/*     A value of zero means one cycle.           */
	u64 tm_par4:8;		/*     All values are scaled by tim_mult          */
	u64 tm_par5:8;		/*     using tim_par * (2 ^ tim_mult).            */
	u64 tm_par6:8;
	u64 tm_par7:8;
};

struct ndf_ale_cmd {
	u32 opcode:4;
	u32:4;
	u32 adr_byte_num:4;	/* number of address bytes to be sent */
	u32:4;
	u32 alen1:3;
	u32 alen2:3;
	u32 alen3:3;
	u32 alen4:3;
	u32:4;
	u8 adr_byt1;
	u8 adr_byt2;
	u8 adr_byt3;
	u8 adr_byt4;
	u8 adr_byt5;
	u8 adr_byt6;
	u8 adr_byt7;
	u8 adr_byt8;
};

struct ndf_wait_status_cmd {
	u32 opcode:4;
	u32:4;
	u32 data:8;		/** data */
	u32 clen1:3;
	u32 clen2:3;
	u32 clen3:3;
	u32:8;
	/** set to 5 to select WAIT_STATUS_ALE command */
	u32 ale_ind:8;
	/** ALE only: number of address bytes to be sent */
	u32 adr_byte_num:4;
	u32:4;
	u32 alen1:3;		/* ALE only */
	u32 alen2:3;		/* ALE only */
	u32 alen3:3;		/* ALE only */
	u32 alen4:3;		/* ALE only */
	u32:4;
	u8 adr_byt[4];		/* ALE only */
	u32 nine:4;		/* set to 9 */
	u32 and_mask:8;
	u32 comp_byte:8;
	u32 rlen1:3;
	u32 rlen2:3;
	u32 rlen3:3;
	u32 rlen4:3;
};

union ndf_cmd {
	u64 val[2];
	union {
		struct ndf_nop_cmd nop;
		struct ndf_wait_cmd wait;
		struct ndf_bus_cmd bus_acq_rel;
		struct ndf_chip_cmd chip_en_dis;
		struct ndf_cle_cmd cle_cmd;
		struct ndf_rd_cmd rd_cmd;
		struct ndf_wr_cmd wr_cmd;
		struct ndf_set_tm_par_cmd set_tm_par;
		struct ndf_ale_cmd ale_cmd;
		struct ndf_wait_status_cmd wait_status;
	} u;
};

/** Disable multi-bit error hangs */
#define NDF_MISC_MB_DIS		BIT_ULL(27)
/** High watermark for NBR FIFO or load/store operations */
#define NDF_MISC_NBR_HWM	GENMASK_ULL(26, 24)
/** Wait input filter count */
#define NDF_MISC_WAIT_CNT	GENMASK_ULL(23, 18)
/** Unfilled NDF_CMD queue bytes */
#define NDF_MISC_FR_BYTE	GENMASK_ULL(17, 7)
/** Set by HW when it reads the last 8 bytes of NDF_CMD */
#define NDF_MISC_RD_DONE	BIT_ULL(6)
/** Set by HW when it reads. SW read of NDF_CMD clears it */
#define NDF_MISC_RD_VAL		BIT_ULL(5)
/** Let HW read NDF_CMD queue. Cleared on SW NDF_CMD write */
#define NDF_MISC_RD_CMD		BIT_ULL(4)
/** Boot disable */
#define NDF_MISC_BT_DIS		BIT_ULL(2)
/** Stop command execution after completing command queue */
#define NDF_MISC_EX_DIS		BIT_ULL(1)
/** Reset fifo */
#define NDF_MISC_RST_FF		BIT_ULL(0)

/** DMA engine enable */
#define NDF_DMA_CFG_EN		BIT_ULL(63)
/** Read or write */
#define NDF_DMA_CFG_RW		BIT_ULL(62)
/** Terminates DMA and clears enable bit */
#define NDF_DMA_CFG_CLR		BIT_ULL(61)
/** 32-bit swap enable */
#define NDF_DMA_CFG_SWAP32	BIT_ULL(59)
/** 16-bit swap enable */
#define NDF_DMA_CFG_SWAP16	BIT_ULL(58)
/** 8-bit swap enable */
#define NDF_DMA_CFG_SWAP8	BIT_ULL(57)
/** Endian mode */
#define NDF_DMA_CFG_CMD_BE	BIT_ULL(56)
/** Number of 64 bit transfers */
#define NDF_DMA_CFG_SIZE	GENMASK_ULL(55, 36)

/** Command execution status idle */
#define NDF_ST_REG_EXE_IDLE	BIT_ULL(15)
/** Command execution SM states */
#define NDF_ST_REG_EXE_SM	GENMASK_ULL(14, 11)
/** DMA and load SM states */
#define NDF_ST_REG_BT_SM	GENMASK_ULL(10, 7)
/** Queue read-back SM bad state */
#define NDF_ST_REG_RD_FF_BAD	BIT_ULL(6)
/** Queue read-back SM states */
#define NDF_ST_REG_RD_FF	GENMASK_ULL(5, 4)
/** Main SM is in a bad state */
#define NDF_ST_REG_MAIN_BAD	BIT_ULL(3)
/** Main SM states */
#define NDF_ST_REG_MAIN_SM	GENMASK_ULL(2, 0)

#define MAX_NAND_NAME_LEN	64
#if (defined(NAND_MAX_PAGESIZE) && (NAND_MAX_PAGESIZE > 4096)) || \
	!defined(NAND_MAX_PAGESIZE)
# undef NAND_MAX_PAGESIZE
# define NAND_MAX_PAGESIZE	4096
#endif
#if (defined(NAND_MAX_OOBSIZE) && (NAND_MAX_OOBSIZE > 256)) || \
	!defined(NAND_MAX_OOBSIZE)
# undef NAND_MAX_OOBSIZE
# define NAND_MAX_OOBSIZE	256
#endif

#define OCTEONTX_NAND_DRIVER_NAME	"octeontx_nand"

#define NDF_TIMEOUT		1000	/** Timeout in ms */
#define USEC_PER_SEC		1000000	/** Linux compatibility */
#ifndef NAND_MAX_CHIPS
# define NAND_MAX_CHIPS		8	/** Linux compatibility */
#endif

struct octeontx_nand_chip {
	struct list_head node;
	struct nand_chip nand;
	struct ndf_set_tm_par_cmd timings;
	int cs;
	int selected_page;
	int iface_mode;
	int row_bytes;
	int col_bytes;
	bool oob_only;
	bool iface_set;
};

struct octeontx_nand_buf {
	u8 *dmabuf;
	dma_addr_t dmaaddr;
	int dmabuflen;
	int data_len;
	int data_index;
};

/** NAND flash controller (NDF) related information */
struct octeontx_nfc {
	struct nand_hw_control controller;
	struct udevice *dev;
	void __iomem *base;
	struct list_head chips;
	int selected_chip;	/* Currently selected NAND chip number */

	/*
	 * Status is separate from octeontx_nand_buf because
	 * it can be used in parallel and during init.
	 */
	u8 *stat;
	dma_addr_t stat_addr;
	bool use_status;

	struct octeontx_nand_buf buf;
	union bch_resp *bch_resp;
	dma_addr_t bch_rhandle;

	/* BCH of all-0xff, so erased pages read as error-free */
	unsigned char *eccmask;
};

/* settable timings - 0..7 select timing of alen1..4/clen1..3/etc */
enum tm_idx {
	t0,				/* fixed at 4<<mult cycles */
	t1, t2, t3, t4, t5, t6, t7,	/* settable per ONFI-timing mode */
};

struct octeontx_probe_device {
	struct list_head list;
	struct udevice *dev;
};

static struct bch_vf *bch_vf;
/** Deferred devices due to BCH not being ready */
static LIST_HEAD(octeontx_pci_nand_deferred_devices);

/** default parameters used for probing chips */
#define MAX_ONFI_MODE	5

static int default_onfi_timing;
static int slew_ns = 2;		/* default timing padding */
static int def_ecc_size = 512;	/* 1024 best for sw_bch, <= 4095 for hw_bch */
static int default_width = 1;	/* 8 bit */
static int default_page_size = 2048;
static struct ndf_set_tm_par_cmd default_timing_parms;

/** Port from Linux */
#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us)	\
({									\
	ulong __start = get_timer(0);					\
	void *__addr = (addr);						\
	const ulong __timeout_ms = timeout_us / 1000;			\
	do {								\
		(val) = readq(__addr);					\
		if (cond)						\
			break;						\
		if (timeout_us && get_timer(__start) > __timeout_ms) {	\
			(val) = readq(__addr);				\
			break;						\
		}							\
		if (delay_us)						\
			udelay(delay_us);				\
	} while (1);							\
	(cond) ? 0 : -ETIMEDOUT;					\
})

/** Ported from Linux 4.9.0 include/linux/of.h for compatibility */
static inline int of_get_child_count(const ofnode node)
{
	return fdtdec_get_child_count(gd->fdt_blob, ofnode_to_offset(node));
}

/**
 * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
 */
static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section || !ecc->total)
		return -ERANGE;

	oobregion->length = ecc->total;
	oobregion->offset = mtd->oobsize - oobregion->length;

	return 0;
}

/**
 * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
 */
static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
				  struct mtd_oob_region *oobregion)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (section)
		return -ERANGE;

	oobregion->length = mtd->oobsize - ecc->total - 2;
	oobregion->offset = 2;

	return 0;
}

static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.rfree = nand_ooblayout_free_lp,
};

static inline struct octeontx_nand_chip *to_otx_nand(struct nand_chip *nand)
{
	return container_of(nand, struct octeontx_nand_chip, nand);
}

static inline struct octeontx_nfc *to_otx_nfc(struct nand_hw_control *ctrl)
{
	return container_of(ctrl, struct octeontx_nfc, controller);
}

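/*
 * Lay out the ECC bytes in the OOB area the same way nand_ooblayout_lp_ops
 * does: bytes 0-1 stay reserved for the bad-block marker, the free region
 * follows at offset 2, and all ECC bytes sit at the tail of the OOB.
 */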
static int octeontx_nand_calc_ecc_layout(struct nand_chip *nand)
{
	struct nand_ecclayout *layout = nand->ecc.layout;
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	struct mtd_info *mtd = &nand->mtd;
	int oobsize = mtd->oobsize;
	int i;
	bool layout_alloc = false;

	if (!layout) {
		layout = devm_kzalloc(tn->dev, sizeof(*layout), GFP_KERNEL);
		if (!layout)
			return -ENOMEM;
		nand->ecc.layout = layout;
		layout_alloc = true;
	}
	layout->eccbytes = nand->ecc.steps * nand->ecc.bytes;
	/* Reserve 2 bytes for bad block marker */
	if (layout->eccbytes + 2 > oobsize) {
		pr_err("No suitable oob scheme available for oobsize %d eccbytes %u\n",
		       oobsize, layout->eccbytes);
		goto fail;
	}
	/* put ecc bytes at oob tail */
	for (i = 0; i < layout->eccbytes; i++)
		layout->eccpos[i] = oobsize - layout->eccbytes + i;
	layout->oobfree[0].offset = 2;
	layout->oobfree[0].length = oobsize - 2 - layout->eccbytes;
	nand->ecc.layout = layout;
	return 0;

fail:
	if (layout_alloc)
		kfree(layout);
	return -1;
}

/*
 * Read a single byte from the temporary buffer. Used after READID
 * to get the NAND information and for STATUS.
 */
static u8 octeontx_nand_read_byte(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);

	if (tn->use_status) {
		tn->use_status = false;
		return *tn->stat;
	}

	if (tn->buf.data_index < tn->buf.data_len)
		return tn->buf.dmabuf[tn->buf.data_index++];

	dev_err(tn->dev, "No data to read, idx: 0x%x, len: 0x%x\n",
		tn->buf.data_index, tn->buf.data_len);

	return 0xff;
}

/*
 * Read a number of pending bytes from the temporary buffer. Used
 * to get page and OOB data.
 */
static void octeontx_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);

	if (len > tn->buf.data_len - tn->buf.data_index) {
		dev_err(tn->dev, "Not enough data for read of %d bytes\n", len);
		return;
	}

	memcpy(buf, tn->buf.dmabuf + tn->buf.data_index, len);
	tn->buf.data_index += len;
}

static void octeontx_nand_write_buf(struct mtd_info *mtd,
				    const u8 *buf, int len)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);

	memcpy(tn->buf.dmabuf + tn->buf.data_len, buf, len);
	tn->buf.data_len += len;
}

/* Overwrite default function to avoid sync abort on chip = -1. */
static void octeontx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}

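/*
 * Convert a timing value in picoseconds to coprocessor-clock ticks, padding
 * with slew_ns and rounding up, then biasing down by one because the
 * hardware applies (tm_parX + 1) << tim_mult cycles.
 *
 * Worked example with assumed (illustrative, not datasheet) values:
 * psec = 25000 (25 ns), slew_ns = 2 and a 100 MHz coprocessor clock give
 * ns = 25 + 2 = 27, 27 * 100 = 2700, DIV_ROUND_UP(2700, 1000) = 3 ticks,
 * so 2 is returned and the controller replays 2 + 1 = 3 cycles.
 */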
static inline int timing_to_cycle(u32 psec, unsigned long clock)
{
	unsigned int ns;
	int ticks;

	ns = DIV_ROUND_UP(psec, 1000);
	ns += slew_ns;

	/* no rounding needed since clock is multiple of 1MHz */
	clock /= 1000000;
	ns *= clock;

	ticks = DIV_ROUND_UP(ns, 1000);

	/* actual delay is (tm_parX+1)<<tim_mult */
	if (ticks)
		ticks--;

	return ticks;
}

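/*
 * Translate ONFI SDR timings (picoseconds) into the seven NDF timing
 * parameters carried by a SET_TM_PAR command. tim_mult is bumped at the
 * end, so every parameter gains a margin of one power of two.
 */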
static void set_timings(struct octeontx_nand_chip *chip,
			struct ndf_set_tm_par_cmd *tp,
			const struct nand_sdr_timings *timings,
			unsigned long sclk)
{
	/* scaled coprocessor-cycle values */
	u32 s_wh, s_cls, s_clh, s_rp, s_wb, s_wc;

	tp->tim_mult = 0;
	s_wh = timing_to_cycle(timings->tWH_min, sclk);
	s_cls = timing_to_cycle(timings->tCLS_min, sclk);
	s_clh = timing_to_cycle(timings->tCLH_min, sclk);
	s_rp = timing_to_cycle(timings->tRP_min, sclk);
	s_wb = timing_to_cycle(timings->tWB_max, sclk);
	s_wc = timing_to_cycle(timings->tWC_min, sclk);

	tp->tm_par1 = s_wh;
	tp->tm_par2 = s_clh;
	tp->tm_par3 = s_rp + 1;
	tp->tm_par4 = s_cls - s_wh;
	tp->tm_par5 = s_wc - s_wh + 1;
	tp->tm_par6 = s_wb;
	tp->tm_par7 = 0;
	tp->tim_mult++;	/* overcompensate for bad math */

	/* TODO: comment parameter re-use */

	pr_debug("%s: tim_par: mult: %d p1: %d p2: %d p3: %d\n",
		 __func__, tp->tim_mult, tp->tm_par1, tp->tm_par2, tp->tm_par3);
	pr_debug("    p4: %d p5: %d p6: %d p7: %d\n",
		 tp->tm_par4, tp->tm_par5, tp->tm_par6, tp->tm_par7);
}

static int set_default_timings(struct octeontx_nfc *tn,
			       const struct nand_sdr_timings *timings)
{
	unsigned long sclk = octeontx_get_io_clock();

	set_timings(NULL, &default_timing_parms, timings, sclk);
	return 0;
}

static int octeontx_nfc_chip_set_timings(struct octeontx_nand_chip *chip,
					 const struct nand_sdr_timings *timings)
{
	unsigned long sclk = octeontx_get_io_clock();

	set_timings(chip, &chip->timings, timings, sclk);
	return 0;
}

/* How many bytes are free in the NDF_CMD queue? */
static int ndf_cmd_queue_free(struct octeontx_nfc *tn)
{
	u64 ndf_misc;

	ndf_misc = readq(tn->base + NDF_MISC);
	return FIELD_GET(NDF_MISC_FR_BYTE, ndf_misc);
}

/* Submit a command to the NAND command queue. */
static int ndf_submit(struct octeontx_nfc *tn, union ndf_cmd *cmd)
{
	int opcode = cmd->val[0] & 0xf;

	switch (opcode) {
	/* All these commands fit in one 64bit word */
	case NDF_OP_NOP:
	case NDF_OP_SET_TM_PAR:
	case NDF_OP_WAIT:
	case NDF_OP_CHIP_EN_DIS:
	case NDF_OP_CLE_CMD:
	case NDF_OP_WR_CMD:
	case NDF_OP_RD_CMD:
	case NDF_OP_RD_EDO_CMD:
	case NDF_OP_BUS_ACQ_REL:
		if (ndf_cmd_queue_free(tn) < 8)
			goto full;
		writeq(cmd->val[0], tn->base + NDF_CMD);
		break;
	case NDF_OP_ALE_CMD:
		/* ALE commands take either one or two 64bit words */
		if (cmd->u.ale_cmd.adr_byte_num < 5) {
			if (ndf_cmd_queue_free(tn) < 8)
				goto full;
			writeq(cmd->val[0], tn->base + NDF_CMD);
		} else {
			if (ndf_cmd_queue_free(tn) < 16)
				goto full;
			writeq(cmd->val[0], tn->base + NDF_CMD);
			writeq(cmd->val[1], tn->base + NDF_CMD);
		}
		break;
	case NDF_OP_WAIT_STATUS: /* Wait status commands take two 64bit words */
		if (ndf_cmd_queue_free(tn) < 16)
			goto full;
		writeq(cmd->val[0], tn->base + NDF_CMD);
		writeq(cmd->val[1], tn->base + NDF_CMD);
		break;
	default:
		dev_err(tn->dev, "%s: unknown command: %u\n", __func__, opcode);
		return -EINVAL;
	}
	return 0;

full:
	dev_err(tn->dev, "%s: no space left in command queue\n", __func__);
	return -ENOMEM;
}

/**
 * Wait for the ready/busy signal. First wait for busy to be valid,
 * then wait for busy to de-assert.
 */
static int ndf_build_wait_busy(struct octeontx_nfc *tn)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.wait.opcode = NDF_OP_WAIT;
	cmd.u.wait.r_b = 1;
	cmd.u.wait.wlen = t6;

	if (ndf_submit(tn, &cmd))
		return -ENOMEM;
	return 0;
}

static bool ndf_dma_done(struct octeontx_nfc *tn)
{
	u64 dma_cfg;

	/* Enable bit should be clear after a transfer */
	dma_cfg = readq(tn->base + NDF_DMA_CFG);
	if (!(dma_cfg & NDF_DMA_CFG_EN))
		return true;

	return false;
}

static int ndf_wait(struct octeontx_nfc *tn)
{
	ulong start = get_timer(0);
	bool done;

	while (!(done = ndf_dma_done(tn)) && get_timer(start) < NDF_TIMEOUT)
		;

	if (!done) {
		dev_err(tn->dev, "%s: timeout error\n", __func__);
		return -ETIMEDOUT;
	}
	return 0;
}

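/*
 * Wait up to 100 ms for the command-execution state machine to go idle and
 * for any DMA kicked off by the queue to finish; returns 0 or -ETIMEDOUT.
 */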
static int ndf_wait_idle(struct octeontx_nfc *tn)
{
	u64 val;
	u64 dval = 0;
	int rc;
	int pause = 100;
	u64 tot_us = USEC_PER_SEC / 10;

	rc = readq_poll_timeout(tn->base + NDF_ST_REG,
				val, val & NDF_ST_REG_EXE_IDLE, pause, tot_us);
	if (!rc)
		rc = readq_poll_timeout(tn->base + NDF_DMA_CFG,
					dval, !(dval & NDF_DMA_CFG_EN),
					pause, tot_us);

	return rc;
}

/** Issue set timing parameters */
static int ndf_queue_cmd_timing(struct octeontx_nfc *tn,
				struct ndf_set_tm_par_cmd *timings)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.set_tm_par.opcode = NDF_OP_SET_TM_PAR;
	cmd.u.set_tm_par.tim_mult = timings->tim_mult;
	cmd.u.set_tm_par.tm_par1 = timings->tm_par1;
	cmd.u.set_tm_par.tm_par2 = timings->tm_par2;
	cmd.u.set_tm_par.tm_par3 = timings->tm_par3;
	cmd.u.set_tm_par.tm_par4 = timings->tm_par4;
	cmd.u.set_tm_par.tm_par5 = timings->tm_par5;
	cmd.u.set_tm_par.tm_par6 = timings->tm_par6;
	cmd.u.set_tm_par.tm_par7 = timings->tm_par7;
	return ndf_submit(tn, &cmd);
}

/** Issue bus acquire or release */
static int ndf_queue_cmd_bus(struct octeontx_nfc *tn, int direction)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.bus_acq_rel.opcode = NDF_OP_BUS_ACQ_REL;
	cmd.u.bus_acq_rel.direction = direction;
	return ndf_submit(tn, &cmd);
}

/* Issue chip select or deselect */
static int ndf_queue_cmd_chip(struct octeontx_nfc *tn, int enable, int chip,
			      int width)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.chip_en_dis.opcode = NDF_OP_CHIP_EN_DIS;
	cmd.u.chip_en_dis.chip = chip;
	cmd.u.chip_en_dis.enable = enable;
	cmd.u.chip_en_dis.bus_width = width;
	return ndf_submit(tn, &cmd);
}

static int ndf_queue_cmd_wait(struct octeontx_nfc *tn, int t_delay)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.wait.opcode = NDF_OP_WAIT;
	cmd.u.wait.wlen = t_delay;
	return ndf_submit(tn, &cmd);
}

static int ndf_queue_cmd_cle(struct octeontx_nfc *tn, int command)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.cle_cmd.opcode = NDF_OP_CLE_CMD;
	cmd.u.cle_cmd.cmd_data = command;
	cmd.u.cle_cmd.clen1 = t4;
	cmd.u.cle_cmd.clen2 = t1;
	cmd.u.cle_cmd.clen3 = t2;
	return ndf_submit(tn, &cmd);
}

static int ndf_queue_cmd_ale(struct octeontx_nfc *tn, int addr_bytes,
			     struct nand_chip *nand, u64 page,
			     u32 col, int page_size)
{
	struct octeontx_nand_chip *octeontx_nand = (nand) ?
						to_otx_nand(nand) : NULL;
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.ale_cmd.opcode = NDF_OP_ALE_CMD;
	cmd.u.ale_cmd.adr_byte_num = addr_bytes;

	/* set column bit for OOB area, assume OOB follows page */
	if (octeontx_nand && octeontx_nand->oob_only)
		col += page_size;

	/* page is u64 for this generality, even if cmdfunc() passes int */
	switch (addr_bytes) {
	/* 4-8 bytes: page, then 2-byte col */
	case 8:
		cmd.u.ale_cmd.adr_byt8 = (page >> 40) & 0xff;
		fallthrough;
	case 7:
		cmd.u.ale_cmd.adr_byt7 = (page >> 32) & 0xff;
		fallthrough;
	case 6:
		cmd.u.ale_cmd.adr_byt6 = (page >> 24) & 0xff;
		fallthrough;
	case 5:
		cmd.u.ale_cmd.adr_byt5 = (page >> 16) & 0xff;
		fallthrough;
	case 4:
		cmd.u.ale_cmd.adr_byt4 = (page >> 8) & 0xff;
		cmd.u.ale_cmd.adr_byt3 = page & 0xff;
		cmd.u.ale_cmd.adr_byt2 = (col >> 8) & 0xff;
		cmd.u.ale_cmd.adr_byt1 = col & 0xff;
		break;
	/* 1-3 bytes: just the page address */
	case 3:
		cmd.u.ale_cmd.adr_byt3 = (page >> 16) & 0xff;
		fallthrough;
	case 2:
		cmd.u.ale_cmd.adr_byt2 = (page >> 8) & 0xff;
		fallthrough;
	case 1:
		cmd.u.ale_cmd.adr_byt1 = page & 0xff;
		break;
	default:
		break;
	}

	cmd.u.ale_cmd.alen1 = t3;
	cmd.u.ale_cmd.alen2 = t1;
	cmd.u.ale_cmd.alen3 = t5;
	cmd.u.ale_cmd.alen4 = t2;
	return ndf_submit(tn, &cmd);
}

static int ndf_queue_cmd_write(struct octeontx_nfc *tn, int len)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.wr_cmd.opcode = NDF_OP_WR_CMD;
	cmd.u.wr_cmd.data = len;
	cmd.u.wr_cmd.wlen1 = t3;
	cmd.u.wr_cmd.wlen2 = t1;
	return ndf_submit(tn, &cmd);
}

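/*
 * Queue the common preamble of every flash operation: timing parameters,
 * bus acquire, chip enable, the first command byte (CLE), optional address
 * cycles (ALE) and an optional second command byte. This also runs before
 * chip probing has finished, hence the fallback to the default timings,
 * page size and bus width when no chip is active yet.
 */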
static int ndf_build_pre_cmd(struct octeontx_nfc *tn, int cmd1,
			     int addr_bytes, u64 page, u32 col, int cmd2)
{
	struct nand_chip *nand = tn->controller.active;
	struct octeontx_nand_chip *octeontx_nand;
	struct ndf_set_tm_par_cmd *timings;
	int width, page_size, rc;

	/* Also called before chip probing is finished */
	if (!nand) {
		timings = &default_timing_parms;
		page_size = default_page_size;
		width = default_width;
	} else {
		octeontx_nand = to_otx_nand(nand);
		timings = &octeontx_nand->timings;
		page_size = nand->mtd.writesize;
		if (nand->options & NAND_BUSWIDTH_16)
			width = 2;
		else
			width = 1;
	}
	rc = ndf_queue_cmd_timing(tn, timings);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_bus(tn, NDF_BUS_ACQUIRE);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_chip(tn, 1, tn->selected_chip, width);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_wait(tn, t1);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_cle(tn, cmd1);
	if (rc)
		return rc;

	if (addr_bytes) {
		rc = ndf_build_wait_busy(tn);
		if (rc)
			return rc;

		rc = ndf_queue_cmd_ale(tn, addr_bytes, nand,
				       page, col, page_size);
		if (rc)
			return rc;
	}

	/* CLE 2 */
	if (cmd2) {
		rc = ndf_build_wait_busy(tn);
		if (rc)
			return rc;

		rc = ndf_queue_cmd_cle(tn, cmd2);
		if (rc)
			return rc;
	}
	return 0;
}

static int ndf_build_post_cmd(struct octeontx_nfc *tn, int hold_time)
{
	int rc;

	/* Deselect chip */
	rc = ndf_queue_cmd_chip(tn, 0, 0, 0);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_wait(tn, t2);
	if (rc)
		return rc;

	/* Release bus */
	rc = ndf_queue_cmd_bus(tn, 0);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_wait(tn, hold_time);
	if (rc)
		return rc;

	/*
	 * Last action is ringing the doorbell with number of bus
	 * acquire-releases cycles (currently 1).
	 */
	writeq(1, tn->base + NDF_DRBELL);
	return 0;
}

/* Setup the NAND DMA engine for a transfer. */
static void ndf_setup_dma(struct octeontx_nfc *tn, int is_write,
			  dma_addr_t bus_addr, int len)
{
	u64 dma_cfg;

	dma_cfg = FIELD_PREP(NDF_DMA_CFG_RW, is_write) |
		  FIELD_PREP(NDF_DMA_CFG_SIZE, (len >> 3) - 1);
	dma_cfg |= NDF_DMA_CFG_EN;
	writeq(bus_addr, tn->base + NDF_DMA_ADR);
	writeq(dma_cfg, tn->base + NDF_DMA_CFG);
}

static int octeontx_nand_reset(struct octeontx_nfc *tn)
{
	int rc;

	rc = ndf_build_pre_cmd(tn, NAND_CMD_RESET, 0, 0, 0, 0);
	if (rc)
		return rc;

	rc = ndf_build_wait_busy(tn);
	if (rc)
		return rc;

	rc = ndf_build_post_cmd(tn, t2);
	if (rc)
		return rc;

	return 0;
}

static int ndf_read(struct octeontx_nfc *tn, int cmd1, int addr_bytes,
		    u64 page, u32 col, int cmd2, int len)
{
	dma_addr_t bus_addr = tn->use_status ? tn->stat_addr : tn->buf.dmaaddr;
	struct nand_chip *nand = tn->controller.active;
	int timing_mode, bytes, rc;
	union ndf_cmd cmd;
	u64 start, end;

	pr_debug("%s(%p, 0x%x, 0x%x, 0x%llx, 0x%x, 0x%x, 0x%x)\n", __func__,
		 tn, cmd1, addr_bytes, page, col, cmd2, len);
	if (!nand)
		timing_mode = default_onfi_timing;
	else
		timing_mode = nand->onfi_timing_mode_default;

	/* Build the command and address cycles */
	rc = ndf_build_pre_cmd(tn, cmd1, addr_bytes, page, col, cmd2);
	if (rc) {
		dev_err(tn->dev, "Build pre command failed\n");
		return rc;
	}

	/* This waits for some time, then waits for busy to be de-asserted. */
	rc = ndf_build_wait_busy(tn);
	if (rc) {
		dev_err(tn->dev, "Wait timeout\n");
		return rc;
	}

	memset(&cmd, 0, sizeof(cmd));

	if (timing_mode < 4)
		cmd.u.rd_cmd.opcode = NDF_OP_RD_CMD;
	else
		cmd.u.rd_cmd.opcode = NDF_OP_RD_EDO_CMD;

	cmd.u.rd_cmd.data = len;
	cmd.u.rd_cmd.rlen1 = t7;
	cmd.u.rd_cmd.rlen2 = t3;
	cmd.u.rd_cmd.rlen3 = t1;
	cmd.u.rd_cmd.rlen4 = t7;
	rc = ndf_submit(tn, &cmd);
	if (rc) {
		dev_err(tn->dev, "Error submitting command\n");
		return rc;
	}

	start = (u64)bus_addr;
	ndf_setup_dma(tn, 0, bus_addr, len);

	rc = ndf_build_post_cmd(tn, t2);
	if (rc) {
		dev_err(tn->dev, "Build post command failed\n");
		return rc;
	}

	/* Wait for the DMA to complete */
	rc = ndf_wait(tn);
	if (rc) {
		dev_err(tn->dev, "DMA timed out\n");
		return rc;
	}

	end = readq(tn->base + NDF_DMA_ADR);
	bytes = end - start;

	/* Make sure NDF is really done */
	rc = ndf_wait_idle(tn);
	if (rc) {
		dev_err(tn->dev, "poll idle failed\n");
		return rc;
	}

	pr_debug("%s: Read %d bytes\n", __func__, bytes);
	return bytes;
}

static int octeontx_nand_get_features(struct mtd_info *mtd,
				      struct nand_chip *chip, int feature_addr,
				      u8 *subfeature_para)
{
	struct nand_chip *nand = chip;
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	int len = 8;
	int rc;

	pr_debug("%s: feature addr: 0x%x\n", __func__, feature_addr);
	memset(tn->buf.dmabuf, 0xff, len);
	tn->buf.data_index = 0;
	tn->buf.data_len = 0;
	rc = ndf_read(tn, NAND_CMD_GET_FEATURES, 1, feature_addr, 0, 0, len);
	if (rc)
		return rc;

	memcpy(subfeature_para, tn->buf.dmabuf, ONFI_SUBFEATURE_PARAM_LEN);

	return 0;
}

static int octeontx_nand_set_features(struct mtd_info *mtd,
				      struct nand_chip *chip, int feature_addr,
				      u8 *subfeature_para)
{
	struct nand_chip *nand = chip;
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	const int len = ONFI_SUBFEATURE_PARAM_LEN;
	int rc;

	rc = ndf_build_pre_cmd(tn, NAND_CMD_SET_FEATURES,
			       1, feature_addr, 0, 0);
	if (rc)
		return rc;

	memcpy(tn->buf.dmabuf, subfeature_para, len);
	memset(tn->buf.dmabuf + len, 0, 8 - len);

	ndf_setup_dma(tn, 1, tn->buf.dmaaddr, 8);

	rc = ndf_queue_cmd_write(tn, 8);
	if (rc)
		return rc;

	rc = ndf_build_wait_busy(tn);
	if (rc)
		return rc;

	rc = ndf_build_post_cmd(tn, t2);
	if (rc)
		return rc;

	return 0;
}

/*
 * Read a page from NAND. If the buffer has room, the out of band
 * data will be included.
 */
static int ndf_page_read(struct octeontx_nfc *tn, u64 page, int col, int len)
{
	struct nand_chip *nand = tn->controller.active;
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	int addr_bytes = chip->row_bytes + chip->col_bytes;

	debug("%s(%p, 0x%llx, 0x%x, 0x%x) active: %p\n", __func__,
	      tn, page, col, len, tn->controller.active);
	memset(tn->buf.dmabuf, 0xff, len);
	return ndf_read(tn, NAND_CMD_READ0, addr_bytes,
			page, col, NAND_CMD_READSTART, len);
}

/* Erase a NAND block */
static int ndf_block_erase(struct octeontx_nfc *tn, u64 page_addr)
{
	struct nand_chip *nand = tn->controller.active;
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	int addr_bytes = chip->row_bytes;
	int rc;

	rc = ndf_build_pre_cmd(tn, NAND_CMD_ERASE1, addr_bytes,
			       page_addr, 0, NAND_CMD_ERASE2);
	if (rc)
		return rc;

	/* Wait for R_B to signal erase is complete */
	rc = ndf_build_wait_busy(tn);
	if (rc)
		return rc;

	rc = ndf_build_post_cmd(tn, t2);
	if (rc)
		return rc;

	/* Wait until the command queue is idle */
	return ndf_wait_idle(tn);
}

/*
 * Write a page (or less) to NAND.
 */
static int ndf_page_write(struct octeontx_nfc *tn, int page)
{
	int len, rc;
	struct nand_chip *nand = tn->controller.active;
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	int addr_bytes = chip->row_bytes + chip->col_bytes;

	len = tn->buf.data_len - tn->buf.data_index;
	chip->oob_only = (tn->buf.data_index >= nand->mtd.writesize);
	WARN_ON_ONCE(len & 0x7);

	ndf_setup_dma(tn, 1, tn->buf.dmaaddr + tn->buf.data_index, len);
	rc = ndf_build_pre_cmd(tn, NAND_CMD_SEQIN, addr_bytes, page, 0, 0);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_write(tn, len);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_cle(tn, NAND_CMD_PAGEPROG);
	if (rc)
		return rc;

	/* Wait for R_B to signal program is complete */
	rc = ndf_build_wait_busy(tn);
	if (rc)
		return rc;

	rc = ndf_build_post_cmd(tn, t2);
	if (rc)
		return rc;

	/* Wait for the DMA to complete */
	rc = ndf_wait(tn);
	if (rc)
		return rc;

	/* Data transfer is done but NDF is not, it is waiting for R/B# */
	return ndf_wait_idle(tn);
}

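/*
 * Dispatch a raw NAND command from the MTD layer into NDF queue sequences.
 * Data returned by READID/READ0/READOOB/PARAM lands in the driver's DMA
 * buffer and is handed out later through read_byte()/read_buf().
 */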
static void octeontx_nand_cmdfunc(struct mtd_info *mtd, unsigned int command,
				  int column, int page_addr)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nand_chip *octeontx_nand = to_otx_nand(nand);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	int rc;

	tn->selected_chip = octeontx_nand->cs;
	if (tn->selected_chip < 0 || tn->selected_chip >= NAND_MAX_CHIPS) {
		dev_err(tn->dev, "invalid chip select\n");
		return;
	}

	tn->use_status = false;

	pr_debug("%s(%p, 0x%x, 0x%x, 0x%x) cs: %d\n", __func__, mtd, command,
		 column, page_addr, tn->selected_chip);
	switch (command) {
	case NAND_CMD_READID:
		tn->buf.data_index = 0;
		octeontx_nand->oob_only = false;
		rc = ndf_read(tn, command, 1, column, 0, 0, 8);
		if (rc < 0)
			dev_err(tn->dev, "READID failed with %d\n", rc);
		else
			tn->buf.data_len = rc;
		break;

	case NAND_CMD_READOOB:
		octeontx_nand->oob_only = true;
		tn->buf.data_index = 0;
		tn->buf.data_len = 0;
		rc = ndf_page_read(tn, page_addr, column, mtd->oobsize);
		if (rc < mtd->oobsize)
			dev_err(tn->dev, "READOOB failed with %d\n", rc);
		else
			tn->buf.data_len = rc;
		break;

	case NAND_CMD_READ0:
		octeontx_nand->oob_only = false;
		tn->buf.data_index = 0;
		tn->buf.data_len = 0;
		rc = ndf_page_read(tn, page_addr, column,
				   mtd->writesize + mtd->oobsize);

		if (rc < mtd->writesize + mtd->oobsize)
			dev_err(tn->dev, "READ0 failed with %d\n", rc);
		else
			tn->buf.data_len = rc;
		break;

	case NAND_CMD_STATUS:
		/* used in oob/not states */
		tn->use_status = true;
		rc = ndf_read(tn, command, 0, 0, 0, 0, 8);
		if (rc < 0)
			dev_err(tn->dev, "STATUS failed with %d\n", rc);
		break;

	case NAND_CMD_RESET:
		/* used in oob/not states */
		rc = octeontx_nand_reset(tn);
		if (rc < 0)
			dev_err(tn->dev, "RESET failed with %d\n", rc);
		break;

	case NAND_CMD_PARAM:
		octeontx_nand->oob_only = false;
		tn->buf.data_index = 0;
		rc = ndf_read(tn, command, 1, 0, 0, 0,
			      min(tn->buf.dmabuflen, 3 * 512));
		if (rc < 0)
			dev_err(tn->dev, "PARAM failed with %d\n", rc);
		else
			tn->buf.data_len = rc;
		break;

	case NAND_CMD_RNDOUT:
		tn->buf.data_index = column;
		break;

	case NAND_CMD_ERASE1:
		if (ndf_block_erase(tn, page_addr))
			dev_err(tn->dev, "ERASE1 failed\n");
		break;

	case NAND_CMD_ERASE2:
		/*
		 * We do all erase processing in the first command, so ignore
		 * this one.
		 */
		break;

	case NAND_CMD_SEQIN:
		octeontx_nand->oob_only = (column >= mtd->writesize);
		tn->buf.data_index = column;
		tn->buf.data_len = column;

		octeontx_nand->selected_page = page_addr;
		break;

	case NAND_CMD_PAGEPROG:
		rc = ndf_page_write(tn, octeontx_nand->selected_page);
		if (rc)
			dev_err(tn->dev, "PAGEPROG failed with %d\n", rc);
		break;

	case NAND_CMD_SET_FEATURES:
		octeontx_nand->oob_only = false;
		/* assume tn->buf.data_len == 4 of data has been set there */
		rc = octeontx_nand_set_features(mtd, nand,
						page_addr, tn->buf.dmabuf);
		if (rc)
			dev_err(tn->dev, "SET_FEATURES failed with %d\n", rc);
		break;

	case NAND_CMD_GET_FEATURES:
		octeontx_nand->oob_only = false;
		rc = octeontx_nand_get_features(mtd, nand,
						page_addr, tn->buf.dmabuf);
		if (!rc) {
			tn->buf.data_index = 0;
			tn->buf.data_len = 4;
		} else {
			dev_err(tn->dev, "GET_FEATURES failed with %d\n", rc);
		}
		break;

	default:
		WARN_ON_ONCE(1);
		dev_err(tn->dev, "unhandled nand cmd: %x\n", command);
	}
}

static int octeontx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
{
	struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
	int ret;

	ret = ndf_wait_idle(tn);
	return (ret < 0) ? -EIO : 0;
}

/* check compatibility with ONFI timing mode#N, and optionally apply */
/* TODO: Implement chipnr support? */
static int octeontx_nand_setup_dat_intf(struct mtd_info *mtd, int chipnr,
					const struct nand_data_interface *conf)
{
	static const bool check_only;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	static u64 t_wc_n[MAX_ONFI_MODE + 2];	/* cache a mode signature */
	int mode;	/* deduced mode number, for reporting and restricting */
	int rc;

	/*
	 * Cache timing modes for reporting, and reducing needless change.
	 *
	 * Challenge: caller does not pass ONFI mode#, but reporting the mode
	 * and restricting to a maximum, or a list, are useful for diagnosing
	 * new hardware. So use tWC_min, distinct and monotonic across modes,
	 * to discover the requested/accepted mode number.
	 */
	for (mode = MAX_ONFI_MODE; mode >= 0 && !t_wc_n[0]; mode--) {
		const struct nand_sdr_timings *t;

		t = onfi_async_timing_mode_to_sdr_timings(mode);
		if (!t)
			continue;
		t_wc_n[mode] = t->tWC_min;
	}

	if (!conf) {
		rc = -EINVAL;
	} else if (check_only) {
		rc = 0;
	} else if (nand->data_interface &&
		   chip->iface_set && chip->iface_mode == mode) {
		/*
		 * Cases:
		 * - called from nand_reset, which clears DDR timing
		 *   mode back to SDR. BUT if we're already in SDR,
		 *   timing mode persists over resets.
		 *   While the mtd/nand layer only supports SDR,
		 *   this is always safe. And this driver only supports SDR.
		 *
		 * - called from post-power-event nand_reset (maybe
		 *   NFC+flash power down, or system hibernate).
		 *   Address this when CONFIG_PM support is added.
		 */
		rc = 0;
	} else {
		rc = octeontx_nfc_chip_set_timings(chip, &conf->timings.sdr);
		if (!rc) {
			chip->iface_mode = mode;
			chip->iface_set = true;
		}
	}
	return rc;
}

static void octeontx_bch_reset(void)
{
}

/*
 * Given a page, calculate the ECC code
 *
 * mtd:     MTD block structure
 * ihandle: DMA handle of the input data
 * code:    buffer to hold the ECC data
 *
 * Returns the number of hardware-corrected errors on success,
 * or -1 on failure
 */
static int octeontx_nand_bch_calculate_ecc_internal(struct mtd_info *mtd,
						    dma_addr_t ihandle,
						    u8 *code)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	int rc;
	int i;
	static u8 *ecc_buffer;
	static int ecc_size;
	static unsigned long ecc_handle;
	union bch_resp *r = tn->bch_resp;

	if (!ecc_buffer || ecc_size < nand->ecc.size) {
		ecc_size = nand->ecc.size;
		ecc_buffer = dma_alloc_coherent(ecc_size,
						(unsigned long *)&ecc_handle);
	}

	memset(ecc_buffer, 0, nand->ecc.bytes);

	r->u16 = 0;
	__iowmb(); /* flush done=0 before making request */

	rc = octeontx_bch_encode(bch_vf, ihandle, nand->ecc.size,
				 nand->ecc.strength,
				 (dma_addr_t)ecc_handle, tn->bch_rhandle);

	if (!rc) {
		octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);
	} else {
		dev_err(tn->dev, "octeontx_bch_encode failed\n");
		return -1;
	}

	if (!r->s.done || r->s.uncorrectable) {
		dev_err(tn->dev,
			"%s timeout, done:%d uncorr:%d corr:%d erased:%d\n",
			__func__, r->s.done, r->s.uncorrectable,
			r->s.num_errors, r->s.erased);
		octeontx_bch_reset();
		return -1;
	}

	memcpy(code, ecc_buffer, nand->ecc.bytes);

	for (i = 0; i < nand->ecc.bytes; i++)
		code[i] ^= tn->eccmask[i];

	return tn->bch_resp->s.num_errors;
}

/*
 * Given a page, calculate the ECC code
 *
 * mtd:      MTD block structure
 * dat:      raw data to calculate ECC over
 * ecc_code: buffer for ECC
 */
static int octeontx_nand_bch_calculate(struct mtd_info *mtd,
				       const u8 *dat, u8 *ecc_code)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	dma_addr_t handle = dma_map_single((u8 *)dat,
					   nand->ecc.size, DMA_TO_DEVICE);
	int ret;

	ret = octeontx_nand_bch_calculate_ecc_internal(mtd, handle,
						       (void *)ecc_code);

	return ret;
}

/*
 * Detect and correct multi-bit ECC for a page
 *
 * mtd:      MTD block structure
 * dat:      raw data read from the chip
 * read_ecc: ECC bytes read from the chip
 * isnull:   unused
 *
 * Returns number of bits corrected or -1 if unrecoverable
 */
static int octeontx_nand_bch_correct(struct mtd_info *mtd, u_char *dat,
				     u_char *read_ecc, u_char *isnull)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	int i = nand->ecc.size + nand->ecc.bytes;
	static u8 *data_buffer;
	static dma_addr_t ihandle;
	static int buffer_size;
	dma_addr_t ohandle;
	union bch_resp *r = tn->bch_resp;
	int rc;

	if (i > buffer_size) {
		if (buffer_size)
			free(data_buffer);
		data_buffer = dma_alloc_coherent(i,
						 (unsigned long *)&ihandle);
		if (!data_buffer) {
			dev_err(tn->dev,
				"%s: Could not allocate %d bytes for buffer\n",
				__func__, i);
			goto error;
		}
		buffer_size = i;
	}

	memcpy(data_buffer, dat, nand->ecc.size);
	memcpy(data_buffer + nand->ecc.size, read_ecc, nand->ecc.bytes);

	for (i = 0; i < nand->ecc.bytes; i++)
		data_buffer[nand->ecc.size + i] ^= tn->eccmask[i];

	r->u16 = 0;
	__iowmb(); /* flush done=0 before making request */

	ohandle = dma_map_single(dat, nand->ecc.size, DMA_FROM_DEVICE);
	rc = octeontx_bch_decode(bch_vf, ihandle, nand->ecc.size,
				 nand->ecc.strength, ohandle, tn->bch_rhandle);

	if (!rc)
		octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);

	if (rc) {
		dev_err(tn->dev, "octeontx_bch_decode failed\n");
		goto error;
	}

	if (!r->s.done) {
		dev_err(tn->dev, "Error: BCH engine timeout\n");
		octeontx_bch_reset();
		goto error;
	}

	if (r->s.erased) {
		debug("Info: BCH block is erased\n");
		return 0;
	}

	if (r->s.uncorrectable) {
		debug("Cannot correct NAND block, response: 0x%x\n",
		      r->u16);
		goto error;
	}

	return r->s.num_errors;

error:
	debug("Error performing bch correction\n");
	return -1;
}

void octeontx_nand_bch_hwctl(struct mtd_info *mtd, int mode)
{
	/* Do nothing. */
}

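/*
 * Read a page with hardware BCH correction. cmdfunc(NAND_CMD_READ0) has
 * already pulled page plus OOB into the DMA buffer; extract the ECC bytes
 * from the OOB, correct each ecc.size chunk in place, then copy the
 * repaired data to the caller's buffer. Returns the maximum number of
 * bitflips seen in any one chunk.
 */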
static int octeontx_nand_hw_bch_read_page(struct mtd_info *mtd,
					  struct nand_chip *chip, u8 *buf,
					  int oob_required, int page)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	u8 *p;
	u8 *ecc_code = chip->buffers->ecccode;
	unsigned int max_bitflips = 0;

	/* chip->read_buf() insists on sequential order, we do OOB first */
	memcpy(chip->oob_poi, tn->buf.dmabuf + mtd->writesize, mtd->oobsize);

	/* Use private buffer as input for ECC correction */
	p = tn->buf.dmabuf;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		debug("Correcting block offset %lx, ecc offset %x\n",
		      p - buf, i);
		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);

		if (stat < 0) {
			mtd->ecc_stats.failed++;
			debug("Cannot correct NAND page %d\n", page);
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Copy corrected data to caller's buffer now */
	memcpy(buf, tn->buf.dmabuf, mtd->writesize);

	return max_bitflips;
}

static int octeontx_nand_hw_bch_write_page(struct mtd_info *mtd,
					   struct nand_chip *chip,
					   const u8 *buf, int oob_required,
					   int page)
{
	struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const u8 *p;
	u8 *ecc_calc = chip->buffers->ecccalc;

	debug("%s(buf?%p, oob%d p%x)\n",
	      __func__, buf, oob_required, page);
	for (i = 0; i < chip->ecc.total; i++)
		ecc_calc[i] = 0xFF;

	/* Copy the page data from caller's buffers to private buffer */
	chip->write_buf(mtd, buf, mtd->writesize);
	/* Use private data as source for ECC calculation */
	p = tn->buf.dmabuf;

	/* Hardware ECC calculation */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int ret;

		ret = chip->ecc.calculate(mtd, p, &ecc_calc[i]);

		if (ret < 0)
			debug("calculate(mtd, p?%p, &ecc_calc[%d]?%p) returned %d\n",
			      p, i, &ecc_calc[i], ret);

		debug("block offset %lx, ecc offset %x\n", p - buf, i);
	}

	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* Store resulting OOB into private buffer, will be sent to HW */
	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

/**
 * octeontx_nand_write_page_raw - [INTERN] raw page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 * @oob_required: must write chip->oob_poi to OOB
 * @page: page number to write
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
static int octeontx_nand_write_page_raw(struct mtd_info *mtd,
					struct nand_chip *chip,
					const u8 *buf, int oob_required,
					int page)
{
	chip->write_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);

	return 0;
}

/**
 * octeontx_nand_write_oob_std - [REPLACEABLE] the most common OOB data write
 *                               function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @page: page number to write
 */
static int octeontx_nand_write_oob_std(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       int page)
{
	int status = 0;
	const u8 *buf = chip->oob_poi;
	int length = mtd->oobsize;

	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
	chip->write_buf(mtd, buf, length);
	/* Send command to program the OOB data */
	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);

	status = chip->waitfunc(mtd, chip);

	return status & NAND_STATUS_FAIL ? -EIO : 0;
}

/**
 * octeontx_nand_read_page_raw - [INTERN] read raw page data without ecc
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 * @oob_required: caller requires OOB data read to chip->oob_poi
 * @page: page number to read
 *
 * Not for syndrome calculating ECC controllers, which use a special oob layout.
 */
static int octeontx_nand_read_page_raw(struct mtd_info *mtd,
				       struct nand_chip *chip,
				       u8 *buf, int oob_required, int page)
{
	chip->read_buf(mtd, buf, mtd->writesize);
	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

static int octeontx_nand_read_oob_std(struct mtd_info *mtd,
				      struct nand_chip *chip,
				      int page)
{
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}

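/*
 * Pick the strongest entry of strengths[] that the spare area can hold:
 * correcting t bits per chunk costs 15 syndrome bits each, i.e.
 * DIV_ROUND_UP(15 * t, 8) OOB bytes, plus 2 bytes for the bad-block marker.
 */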
static int octeontx_nand_calc_bch_ecc_strength(struct nand_chip *nand)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	int nsteps = mtd->writesize / ecc->size;
	int oobchunk = mtd->oobsize / nsteps;

	/* ecc->strength determines ecc_level and OOB's ecc_bytes. */
	const u8 strengths[] = {4, 8, 16, 24, 32, 40, 48, 56, 60, 64};
	/* first set the desired ecc_level to match strengths[] */
	int index = ARRAY_SIZE(strengths) - 1;
	int need;

	while (index > 0 && !(ecc->options & NAND_ECC_MAXIMIZE) &&
	       strengths[index - 1] >= ecc->strength)
		index--;

	/* then step down until the ECC bytes fit the per-step OOB budget */
	do {
		need = DIV_ROUND_UP(15 * strengths[index], 8);
		if (need <= oobchunk - 2)
			break;
	} while (index-- > 0);

	debug("%s: steps ds: %d, strength ds: %d\n", __func__,
	      nand->ecc_step_ds, nand->ecc_strength_ds);
	ecc->strength = strengths[index];
	ecc->bytes = need;
	debug("%s: strength: %d, bytes: %d\n", __func__, ecc->strength,
	      ecc->bytes);

	if (!tn->eccmask)
		tn->eccmask = devm_kzalloc(tn->dev, ecc->bytes, GFP_KERNEL);
	if (!tn->eccmask)
		return -ENOMEM;

	return 0;
}

/*
 * Sample the BCH signature of an erased (all 0xff) page,
 * to XOR into all page traffic, so erased pages have no ECC errors.
 */
static int octeontx_bch_save_empty_eccmask(struct nand_chip *nand)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	unsigned int eccsize = nand->ecc.size;
	unsigned int eccbytes = nand->ecc.bytes;
	u8 erased_ecc[eccbytes];
	unsigned long erased_handle;
	unsigned char *erased_page = dma_alloc_coherent(eccsize,
							&erased_handle);
	int i;
	int rc = 0;

	if (!erased_page)
		return -ENOMEM;

	memset(erased_page, 0xff, eccsize);
	memset(erased_ecc, 0, eccbytes);

	rc = octeontx_nand_bch_calculate_ecc_internal(mtd,
						      (dma_addr_t)erased_handle,
						      erased_ecc);

	free(erased_page);

	for (i = 0; i < eccbytes; i++)
		tn->eccmask[i] = erased_ecc[i] ^ 0xff;

	return rc;
}

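/*
 * Size the ECC geometry for a freshly identified chip: derive the step
 * count and step size from the page size (or the chip's ONFI
 * recommendations), then hook up the hardware-BCH page/OOB accessors when
 * the BCH engine is available.
 */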
static void octeontx_nfc_chip_sizing(struct nand_chip *nand)
{
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;

	chip->row_bytes = nand->onfi_params.addr_cycles & 0xf;
	chip->col_bytes = nand->onfi_params.addr_cycles >> 4;
	debug("%s(%p) row bytes: %d, col bytes: %d, ecc mode: %d\n",
	      __func__, nand, chip->row_bytes, chip->col_bytes, ecc->mode);

	/*
	 * HW_BCH using OcteonTX BCH engine, or SOFT_BCH laid out in
	 * HW_BCH-compatible fashion, depending on devtree advice
	 * and kernel config.
	 * BCH/NFC hardware capable of subpage ops, not implemented.
	 */
	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
	nand->options |= NAND_NO_SUBPAGE_WRITE;
	debug("%s: start steps: %d, size: %d, bytes: %d\n",
	      __func__, ecc->steps, ecc->size, ecc->bytes);
	debug("%s: step ds: %d, strength ds: %d\n", __func__,
	      nand->ecc_step_ds, nand->ecc_strength_ds);

	if (ecc->mode != NAND_ECC_NONE) {
		int nsteps = ecc->steps ? ecc->steps : 1;

		if (ecc->size && ecc->size != mtd->writesize)
			nsteps = mtd->writesize / ecc->size;
		else if (mtd->writesize > def_ecc_size &&
			 !(mtd->writesize & (def_ecc_size - 1)))
			nsteps = mtd->writesize / def_ecc_size;
		ecc->steps = nsteps;
		ecc->size = mtd->writesize / nsteps;
		ecc->bytes = mtd->oobsize / nsteps;

		if (nand->ecc_strength_ds)
			ecc->strength = nand->ecc_strength_ds;
		if (nand->ecc_step_ds)
			ecc->size = nand->ecc_step_ds;
		/*
		 * no subpage ops, but set subpage-shift to match ecc->steps
		 * so mtd_nandbiterrs tests appropriate boundaries
		 */
		if (!mtd->subpage_sft && !(ecc->steps & (ecc->steps - 1)))
			mtd->subpage_sft = fls(ecc->steps) - 1;

		if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
			debug("%s: ecc mode: %d\n", __func__, ecc->mode);
			if (ecc->mode != NAND_ECC_SOFT &&
			    !octeontx_nand_calc_bch_ecc_strength(nand)) {
				struct octeontx_nfc *tn =
					to_otx_nfc(nand->controller);

				debug("Using hardware BCH engine support\n");
				ecc->mode = NAND_ECC_HW_SYNDROME;
				ecc->read_page = octeontx_nand_hw_bch_read_page;
				ecc->write_page =
					octeontx_nand_hw_bch_write_page;
				ecc->read_page_raw =
					octeontx_nand_read_page_raw;
				ecc->write_page_raw =
					octeontx_nand_write_page_raw;
				ecc->read_oob = octeontx_nand_read_oob_std;
				ecc->write_oob = octeontx_nand_write_oob_std;

				ecc->calculate = octeontx_nand_bch_calculate;
				ecc->correct = octeontx_nand_bch_correct;
				ecc->hwctl = octeontx_nand_bch_hwctl;

				debug("NAND chip %d using hw_bch\n",
				      tn->selected_chip);
				debug("  %d bytes ECC per %d byte block\n",
				      ecc->bytes, ecc->size);
				debug("  for %d bits of correction per block\n",
				      ecc->strength);
				octeontx_nand_calc_ecc_layout(nand);
				octeontx_bch_save_empty_eccmask(nand);
			}
		}
	}
}

static int octeontx_nfc_chip_init(struct octeontx_nfc *tn, struct udevice *dev,
				  ofnode node)
{
	struct octeontx_nand_chip *chip;
	struct nand_chip *nand;
	struct mtd_info *mtd;
	int ret;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	debug("%s: Getting chip select\n", __func__);
	ret = ofnode_read_s32(node, "reg", &chip->cs);
	if (ret) {
		dev_err(dev, "could not retrieve reg property: %d\n", ret);
		return ret;
	}

	if (chip->cs >= NAND_MAX_CHIPS) {
		dev_err(dev, "invalid reg value: %u (max CS = 7)\n", chip->cs);
		return -EINVAL;
	}
	debug("%s: chip select: %d\n", __func__, chip->cs);
	nand = &chip->nand;
	nand->controller = &tn->controller;
	if (!tn->controller.active)
		tn->controller.active = nand;

	debug("%s: Setting flash node\n", __func__);
	nand_set_flash_node(nand, node);

	nand->options = 0;
	nand->select_chip = octeontx_nand_select_chip;
	nand->cmdfunc = octeontx_nand_cmdfunc;
	nand->waitfunc = octeontx_nand_waitfunc;
	nand->read_byte = octeontx_nand_read_byte;
	nand->read_buf = octeontx_nand_read_buf;
	nand->write_buf = octeontx_nand_write_buf;
	nand->onfi_set_features = octeontx_nand_set_features;
	nand->onfi_get_features = octeontx_nand_get_features;
	nand->setup_data_interface = octeontx_nand_setup_dat_intf;

	mtd = nand_to_mtd(nand);
	debug("%s: mtd: %p\n", __func__, mtd);
	mtd->dev->parent = dev;

	debug("%s: NDF_MISC: 0x%llx\n", __func__,
	      readq(tn->base + NDF_MISC));

	/* TODO: support more than one chip */
	debug("%s: Scanning identification\n", __func__);
	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	debug("%s: Sizing chip\n", __func__);
	octeontx_nfc_chip_sizing(nand);

	debug("%s: Scanning tail\n", __func__);
	ret = nand_scan_tail(mtd);
	if (ret) {
		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
		return ret;
	}

	debug("%s: Registering mtd\n", __func__);
	ret = nand_register(0, mtd);

	debug("%s: Adding tail\n", __func__);
	list_add_tail(&chip->node, &tn->chips);
	return 0;
}

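/* Walk the controller's DT node and probe one child node per chip select */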
1996static int octeontx_nfc_chips_init(struct octeontx_nfc *tn)
1997{
1998 struct udevice *dev = tn->dev;
Simon Glassa7ece582020-12-19 10:40:14 -07001999 ofnode node = dev_ofnode(dev);
Suneel Garapati9de7d2b2020-08-26 14:37:22 +02002000 ofnode nand_node;
2001 int nr_chips = of_get_child_count(node);
2002 int ret;
2003
2004 debug("%s: node: %s\n", __func__, ofnode_get_name(node));
2005 debug("%s: %d chips\n", __func__, nr_chips);
2006 if (nr_chips > NAND_MAX_CHIPS) {
2007 dev_err(dev, "too many NAND chips: %d\n", nr_chips);
2008 return -EINVAL;
2009 }
2010
2011 if (!nr_chips) {
2012 debug("no DT NAND chips found\n");
2013 return -ENODEV;
2014 }
2015
	pr_info("%s: scanning %d chip DT nodes\n", __func__, nr_chips);

	ofnode_for_each_subnode(nand_node, node) {
		debug("%s: Calling octeontx_nfc_chip_init(%p, %s, %ld)\n",
		      __func__, tn, dev->name, nand_node.of_offset);
		ret = octeontx_nfc_chip_init(tn, dev, nand_node);
		if (ret)
			return ret;
	}
	return 0;
}

/* Reset NFC and initialize registers. */
static int octeontx_nfc_init(struct octeontx_nfc *tn)
{
	const struct nand_sdr_timings *timings;
	u64 ndf_misc;
	int rc;

	/* Initialize values and reset the FIFO */
	ndf_misc = readq(tn->base + NDF_MISC);

	ndf_misc &= ~NDF_MISC_EX_DIS;
	ndf_misc |= (NDF_MISC_BT_DIS | NDF_MISC_RST_FF);
	writeq(ndf_misc, tn->base + NDF_MISC);
	debug("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));

	/* Bring the FIFO out of reset */
	ndf_misc &= ~(NDF_MISC_RST_FF);

	/* Use the maximum number of coprocessor cycles for glitch filtering */
	ndf_misc |= FIELD_PREP(NDF_MISC_WAIT_CNT, 0x3f);

	writeq(ndf_misc, tn->base + NDF_MISC);

	/*
	 * Set timing parameters to ONFI mode 0 for probing; mode 0 is the
	 * slowest, universally supported timing set, so it is safe before
	 * the chip has been identified. Faster modes are negotiated later
	 * via the setup_data_interface hook.
	 */
	timings = onfi_async_timing_mode_to_sdr_timings(0);
	if (IS_ERR(timings))
		return PTR_ERR(timings);
	rc = set_default_timings(tn, timings);
	if (rc)
		return rc;

	return 0;
}

static int octeontx_pci_nand_probe(struct udevice *dev)
{
	struct octeontx_nfc *tn = dev_get_priv(dev);
	int ret;
	static bool probe_done;

	debug("%s(%s) tn: %p\n", __func__, dev->name, tn);
	if (probe_done)
		return 0;

	if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
		bch_vf = octeontx_bch_getv();
		if (!bch_vf) {
			struct octeontx_probe_device *probe_dev;

			debug("%s: bch not yet initialized\n", __func__);
			probe_dev = calloc(1, sizeof(*probe_dev));
			if (!probe_dev) {
				printf("%s: Out of memory\n", __func__);
				return -ENOMEM;
			}
			probe_dev->dev = dev;
			INIT_LIST_HEAD(&probe_dev->list);
			list_add_tail(&probe_dev->list,
				      &octeontx_pci_nand_deferred_devices);
			debug("%s: Deferring probe until after BCH initialization\n",
			      __func__);
			return 0;
		}
	}

	tn->dev = dev;
	INIT_LIST_HEAD(&tn->chips);

	tn->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, 0, 0,
				  PCI_REGION_TYPE, PCI_REGION_MEM);
	if (!tn->base) {
		ret = -EINVAL;
		goto fail;
	}
	debug("%s: bar at %p\n", __func__, tn->base);
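	/*
	 * One scratch DMA buffer, sized for the largest supported page
	 * plus its out-of-band area.
	 */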
	tn->buf.dmabuflen = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
	tn->buf.dmabuf = dma_alloc_coherent(tn->buf.dmabuflen,
					    (unsigned long *)&tn->buf.dmaaddr);
	if (!tn->buf.dmabuf) {
		ret = -ENOMEM;
		debug("%s: Could not allocate DMA buffer\n", __func__);
		goto fail;
	}

	/* one hw-bch response, for one outstanding transaction */
	tn->bch_resp = dma_alloc_coherent(sizeof(*tn->bch_resp),
					  (unsigned long *)&tn->bch_rhandle);

	tn->stat = dma_alloc_coherent(8, (unsigned long *)&tn->stat_addr);
	if (!tn->stat || !tn->bch_resp) {
		debug("%s: Could not allocate bch status or response\n",
		      __func__);
		ret = -ENOMEM;
		goto fail;
	}

	debug("%s: Calling octeontx_nfc_init()\n", __func__);
	ret = octeontx_nfc_init(tn);
	if (ret) {
		dev_err(dev, "failed to initialize NFC: %d\n", ret);
		goto fail;
	}
	debug("%s: Initializing chips\n", __func__);
	ret = octeontx_nfc_chips_init(tn);
	debug("%s: init chips ret: %d\n", __func__, ret);
	if (ret) {
		if (ret != -ENODEV)
			dev_err(dev, "failed to init nand chips\n");
		goto fail;
	}
	/* Guard against a second full probe via the deferred-probe path */
	probe_done = true;
	dev_info(dev, "probed\n");
	return 0;

fail:
	return ret;
}

int octeontx_pci_nand_disable(struct udevice *dev)
{
	struct octeontx_nfc *tn = dev_get_priv(dev);
	u64 dma_cfg;
	u64 ndf_misc;

	debug("%s: Disabling NAND device %s\n", __func__, dev->name);
	dma_cfg = readq(tn->base + NDF_DMA_CFG);
	dma_cfg &= ~NDF_DMA_CFG_EN;
	dma_cfg |= NDF_DMA_CFG_CLR;
	writeq(dma_cfg, tn->base + NDF_DMA_CFG);

	/* Disable execution and put FIFO in reset mode */
	ndf_misc = readq(tn->base + NDF_MISC);
	ndf_misc |= NDF_MISC_EX_DIS | NDF_MISC_RST_FF;
	writeq(ndf_misc, tn->base + NDF_MISC);
	ndf_misc &= ~NDF_MISC_RST_FF;
	writeq(ndf_misc, tn->base + NDF_MISC);
	debug("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));
	/* Mask all interrupt enables and clear any pending interrupts */
	writeq(~0ull, tn->base + NDF_INT_ENA_W1C);
	writeq(~0ull, tn->base + NDF_INT);
	debug("%s: NDF_ST_REG: 0x%llx\n", __func__,
	      readq(tn->base + NDF_ST_REG));
	return 0;
}

/**
 * Since it's possible (and even likely) that the NAND device will be probed
 * before the BCH device has been probed, we may need to defer the probing.
 *
 * In this case, the initial probe returns success but the actual probing
 * is deferred until the BCH VF has been probed.
 *
 * Return: 0 for success, otherwise error
 */
int octeontx_pci_nand_deferred_probe(void)
{
	int rc = 0;
	struct octeontx_probe_device *pdev;

	debug("%s: Performing deferred probing\n", __func__);
	list_for_each_entry(pdev, &octeontx_pci_nand_deferred_devices, list) {
		debug("%s: Probing %s\n", __func__, pdev->dev->name);
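		/*
		 * The earlier, deferred return from probe left this device
		 * marked activated; clear the flag so device_probe() runs
		 * the full initialization this time.
		 */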
		dev_bic_flags(pdev->dev, DM_FLAG_ACTIVATED);
		rc = device_probe(pdev->dev);
		if (rc && rc != -ENODEV) {
			printf("%s: Error %d with deferred probe of %s\n",
			       __func__, rc, pdev->dev->name);
			break;
		}
	}
	return rc;
}
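
/*
 * A minimal sketch of the expected caller (assumption: the BCH VF driver
 * invokes this once its own probe completes, releasing the deferred probes):
 *
 *	rc = octeontx_pci_nand_deferred_probe();
 *	if (rc)
 *		printf("deferred NAND probe failed: %d\n", rc);
 */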

static const struct pci_device_id octeontx_nfc_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, 0xA04F) },
	{}
};

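/*
 * No platform data to translate: configuration comes from the PCI device
 * and the per-chip subnodes parsed at probe time.
 */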
static int octeontx_nand_of_to_plat(struct udevice *dev)
{
	return 0;
}

static const struct udevice_id octeontx_nand_ids[] = {
	{ .compatible = "cavium,cn8130-nand" },
	{ },
};

U_BOOT_DRIVER(octeontx_pci_nand) = {
	.name = OCTEONTX_NAND_DRIVER_NAME,
	.id = UCLASS_MTD,
	.of_match = of_match_ptr(octeontx_nand_ids),
	.of_to_plat = octeontx_nand_of_to_plat,
	.probe = octeontx_pci_nand_probe,
	.priv_auto = sizeof(struct octeontx_nfc),
	.remove = octeontx_pci_nand_disable,
	.flags = DM_FLAG_OS_PREPARE,
};

U_BOOT_PCI_DEVICE(octeontx_pci_nand, octeontx_nfc_pci_id_table);

void board_nand_init(void)
{
	struct udevice *dev;
	int ret;

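	/*
	 * Probe the BCH PF and VF before the NAND controller itself so
	 * that the hardware ECC engine, when configured, is ready first.
	 */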
	if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
		ret = uclass_get_device_by_driver(UCLASS_MISC,
						  DM_DRIVER_GET(octeontx_pci_bchpf),
						  &dev);
		if (ret && ret != -ENODEV) {
			pr_err("Failed to initialize OcteonTX BCH PF controller. (error %d)\n",
			       ret);
		}
		ret = uclass_get_device_by_driver(UCLASS_MISC,
						  DM_DRIVER_GET(octeontx_pci_bchvf),
						  &dev);
		if (ret && ret != -ENODEV) {
			pr_err("Failed to initialize OcteonTX BCH VF controller. (error %d)\n",
			       ret);
		}
	}

	ret = uclass_get_device_by_driver(UCLASS_MTD,
					  DM_DRIVER_GET(octeontx_pci_nand),
					  &dev);
	if (ret && ret != -ENODEV)
		pr_err("Failed to initialize OcteonTX NAND controller. (error %d)\n",
		       ret);
}