blob: e0ccc7b0d90dbab6b673fd6ce0352d76207a0b8a [file] [log] [blame]
Suneel Garapati9de7d2b2020-08-26 14:37:22 +02001// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018 Marvell International Ltd.
4 */
5
6#include <dm.h>
7#include <dm/device-internal.h>
8#include <dm/devres.h>
9#include <dm/of_access.h>
10#include <malloc.h>
11#include <memalign.h>
12#include <nand.h>
13#include <pci.h>
14#include <time.h>
15#include <linux/bitfield.h>
16#include <linux/ctype.h>
17#include <linux/dma-mapping.h>
18#include <linux/delay.h>
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/ioport.h>
22#include <linux/libfdt.h>
23#include <linux/mtd/mtd.h>
24#include <linux/mtd/nand_bch.h>
25#include <linux/mtd/nand_ecc.h>
Simon Glass3ba929a2020-10-30 21:38:53 -060026#include <asm/global_data.h>
Suneel Garapati9de7d2b2020-08-26 14:37:22 +020027#include <asm/io.h>
28#include <asm/types.h>
29#include <asm/dma-mapping.h>
30#include <asm/arch/clock.h>
31#include "octeontx_bch.h"
32
33#ifdef DEBUG
34# undef CONFIG_LOGLEVEL
35# define CONFIG_LOGLEVEL 8
36#endif
37
38/*
39 * The NDF_CMD queue takes commands between 16 - 128 bit.
40 * All commands must be 16 bit aligned and are little endian.
41 * WAIT_STATUS commands must be 64 bit aligned.
42 * Commands are selected by the 4 bit opcode.
43 *
44 * Available Commands:
45 *
46 * 16 Bit:
47 * NOP
48 * WAIT
49 * BUS_ACQ, BUS_REL
50 * CHIP_EN, CHIP_DIS
51 *
52 * 32 Bit:
53 * CLE_CMD
54 * RD_CMD, RD_EDO_CMD
55 * WR_CMD
56 *
57 * 64 Bit:
58 * SET_TM_PAR
59 *
60 * 96 Bit:
61 * ALE_CMD
62 *
63 * 128 Bit:
64 * WAIT_STATUS, WAIT_STATUS_ALE
65 */
66
67/* NDF Register offsets */
68#define NDF_CMD 0x0
69#define NDF_MISC 0x8
70#define NDF_ECC_CNT 0x10
71#define NDF_DRBELL 0x30
72#define NDF_ST_REG 0x38 /* status */
73#define NDF_INT 0x40
74#define NDF_INT_W1S 0x48
75#define NDF_DMA_CFG 0x50
76#define NDF_DMA_ADR 0x58
77#define NDF_INT_ENA_W1C 0x60
78#define NDF_INT_ENA_W1S 0x68
79
80/* NDF command opcodes */
81#define NDF_OP_NOP 0x0
82#define NDF_OP_SET_TM_PAR 0x1
83#define NDF_OP_WAIT 0x2
84#define NDF_OP_CHIP_EN_DIS 0x3
85#define NDF_OP_CLE_CMD 0x4
86#define NDF_OP_ALE_CMD 0x5
87#define NDF_OP_WR_CMD 0x8
88#define NDF_OP_RD_CMD 0x9
89#define NDF_OP_RD_EDO_CMD 0xa
90#define NDF_OP_WAIT_STATUS 0xb /* same opcode for WAIT_STATUS_ALE */
91#define NDF_OP_BUS_ACQ_REL 0xf
92
93#define NDF_BUS_ACQUIRE 1
94#define NDF_BUS_RELEASE 0
95
96#define DBGX_EDSCR(X) (0x87A008000088 + (X) * 0x80000)
97
98struct ndf_nop_cmd {
99 u16 opcode: 4;
100 u16 nop: 12;
101};
102
/* 16-bit WAIT command: delay for a timing interval or for ready/busy */
struct ndf_wait_cmd {
	u16 opcode:4;	/* NDF_OP_WAIT */
	u16 r_b:1;	/* wait for one cycle or PBUS_WAIT deassert */
	u16:3;
	u16 wlen:3;	/* timing parameter select */
	u16:5;
};
110
/* 16-bit BUS_ACQ/BUS_REL command: take or give up the boot bus */
struct ndf_bus_cmd {
	u16 opcode:4;	/* NDF_OP_BUS_ACQ_REL */
	u16 direction:4; /* 1 = acquire, 0 = release */
	u16:8;
};
116
/* 16-bit CHIP_EN/CHIP_DIS command: drive one chip select */
struct ndf_chip_cmd {
	u16 opcode:4;	/* NDF_OP_CHIP_EN_DIS */
	u16 chip:3;	/* select chip, 0 = disable */
	u16 enable:1;	/* 1 = enable, 0 = disable */
	u16 bus_width:2; /* 10 = 16 bit, 01 = 8 bit */
	u16:6;
};
124
/* 32-bit CLE command: latch a NAND command byte with CLE timing */
struct ndf_cle_cmd {
	u32 opcode:4;	/* NDF_OP_CLE_CMD */
	u32:4;
	u32 cmd_data:8;	/* command sent to the PBUS AD pins */
	u32 clen1:3;	/* time between PBUS CLE and WE asserts */
	u32 clen2:3;	/* time WE remains asserted */
	u32 clen3:3;	/* time between WE deassert and CLE */
	u32:7;
};
134
/* RD_EDO_CMD uses the same layout as RD_CMD */
struct ndf_rd_cmd {
	u32 opcode:4;	/* NDF_OP_RD_CMD or NDF_OP_RD_EDO_CMD */
	u32 data:16;	/* number of data bytes to transfer */
	u32 rlen1:3;	/* read-cycle timing parameter selects */
	u32 rlen2:3;
	u32 rlen3:3;
	u32 rlen4:3;
};
144
/* 32-bit WR command: stream data bytes out with write-cycle timing */
struct ndf_wr_cmd {
	u32 opcode:4;	/* NDF_OP_WR_CMD */
	u32 data:16;	/* number of data bytes to transfer */
	u32:4;
	u32 wlen1:3;	/* write-cycle timing parameter selects */
	u32 wlen2:3;
	u32:3;
};
153
/* 64-bit SET_TM_PAR command: program the seven timing parameters */
struct ndf_set_tm_par_cmd {
	u64 opcode:4;	/* NDF_OP_SET_TM_PAR */
	u64 tim_mult:4;	/* multiplier for the seven parameters */
	u64 tm_par1:8;	/* --> Following are the 7 timing parameters that */
	u64 tm_par2:8;	/*     specify the number of coprocessor cycles.  */
	u64 tm_par3:8;	/*     A value of zero means one cycle.           */
	u64 tm_par4:8;	/*     All values are scaled by tim_mult          */
	u64 tm_par5:8;	/*     using tim_par * (2 ^ tim_mult).            */
	u64 tm_par6:8;
	u64 tm_par7:8;
};
165
/* 96-bit ALE command: latch up to 8 address bytes with ALE timing */
struct ndf_ale_cmd {
	u32 opcode:4;	/* NDF_OP_ALE_CMD */
	u32:4;
	u32 adr_byte_num:4; /* number of address bytes to be sent */
	u32:4;
	u32 alen1:3;	/* address-cycle timing parameter selects */
	u32 alen2:3;
	u32 alen3:3;
	u32 alen4:3;
	u32:4;
	/* address bytes, least significant first */
	u8 adr_byt1;
	u8 adr_byt2;
	u8 adr_byt3;
	u8 adr_byt4;
	u8 adr_byt5;
	u8 adr_byt6;
	u8 adr_byt7;
	u8 adr_byt8;
};
185
/* 128-bit WAIT_STATUS / WAIT_STATUS_ALE command: poll a status byte */
struct ndf_wait_status_cmd {
	u32 opcode:4;	/* NDF_OP_WAIT_STATUS */
	u32:4;
	u32 data:8;	/** data (status command byte, e.g. 0x70) */
	u32 clen1:3;	/* CLE timing parameter selects */
	u32 clen2:3;
	u32 clen3:3;
	u32:8;
	/** set to 5 to select WAIT_STATUS_ALE command */
	u32 ale_ind:8;
	/** ALE only: number of address bytes to be sent */
	u32 adr_byte_num:4;
	u32:4;
	u32 alen1:3;	/* ALE only */
	u32 alen2:3;	/* ALE only */
	u32 alen3:3;	/* ALE only */
	u32 alen4:3;	/* ALE only */
	u32:4;
	u8 adr_byt[4];	/* ALE only */
	u32 nine:4;	/* set to 9 */
	u32 and_mask:8;	/* mask ANDed with the status byte read back */
	u32 comp_byte:8; /* value the masked status is compared against */
	u32 rlen1:3;	/* read-cycle timing parameter selects */
	u32 rlen2:3;
	u32 rlen3:3;
	u32 rlen4:3;
};
213
/* One queue entry: raw 128-bit view plus every per-opcode layout */
union ndf_cmd {
	u64 val[2];	/* raw words as written to NDF_CMD */
	union {
		struct ndf_nop_cmd nop;
		struct ndf_wait_cmd wait;
		struct ndf_bus_cmd bus_acq_rel;
		struct ndf_chip_cmd chip_en_dis;
		struct ndf_cle_cmd cle_cmd;
		struct ndf_rd_cmd rd_cmd;
		struct ndf_wr_cmd wr_cmd;
		struct ndf_set_tm_par_cmd set_tm_par;
		struct ndf_ale_cmd ale_cmd;
		struct ndf_wait_status_cmd wait_status;
	} u;
};
229
230/** Disable multi-bit error hangs */
231#define NDF_MISC_MB_DIS BIT_ULL(27)
232/** High watermark for NBR FIFO or load/store operations */
233#define NDF_MISC_NBR_HWM GENMASK_ULL(26, 24)
234/** Wait input filter count */
235#define NDF_MISC_WAIT_CNT GENMASK_ULL(23, 18)
236/** Unfilled NFD_CMD queue bytes */
237#define NDF_MISC_FR_BYTE GENMASK_ULL(17, 7)
238/** Set by HW when it reads the last 8 bytes of NDF_CMD */
239#define NDF_MISC_RD_DONE BIT_ULL(6)
240/** Set by HW when it reads. SW read of NDF_CMD clears it */
241#define NDF_MISC_RD_VAL BIT_ULL(5)
242/** Let HW read NDF_CMD queue. Cleared on SW NDF_CMD write */
243#define NDF_MISC_RD_CMD BIT_ULL(4)
244/** Boot disable */
245#define NDF_MISC_BT_DIS BIT_ULL(2)
246/** Stop command execution after completing command queue */
247#define NDF_MISC_EX_DIS BIT_ULL(1)
248/** Reset fifo */
249#define NDF_MISC_RST_FF BIT_ULL(0)
250
251/** DMA engine enable */
252#define NDF_DMA_CFG_EN BIT_ULL(63)
253/** Read or write */
254#define NDF_DMA_CFG_RW BIT_ULL(62)
255/** Terminates DMA and clears enable bit */
256#define NDF_DMA_CFG_CLR BIT_ULL(61)
257/** 32-bit swap enable */
258#define NDF_DMA_CFG_SWAP32 BIT_ULL(59)
259/** 16-bit swap enable */
260#define NDF_DMA_CFG_SWAP16 BIT_ULL(58)
261/** 8-bit swap enable */
262#define NDF_DMA_CFG_SWAP8 BIT_ULL(57)
263/** Endian mode */
264#define NDF_DMA_CFG_CMD_BE BIT_ULL(56)
265/** Number of 64 bit transfers */
266#define NDF_DMA_CFG_SIZE GENMASK_ULL(55, 36)
267
268/** Command execution status idle */
269#define NDF_ST_REG_EXE_IDLE BIT_ULL(15)
270/** Command execution SM states */
271#define NDF_ST_REG_EXE_SM GENMASK_ULL(14, 11)
272/** DMA and load SM states */
273#define NDF_ST_REG_BT_SM GENMASK_ULL(10, 7)
274/** Queue read-back SM bad state */
275#define NDF_ST_REG_RD_FF_BAD BIT_ULL(6)
276/** Queue read-back SM states */
277#define NDF_ST_REG_RD_FF GENMASK_ULL(5, 4)
278/** Main SM is in a bad state */
279#define NDF_ST_REG_MAIN_BAD BIT_ULL(3)
280/** Main SM states */
281#define NDF_ST_REG_MAIN_SM GENMASK_ULL(2, 0)
282
283#define MAX_NAND_NAME_LEN 64
284#if (defined(NAND_MAX_PAGESIZE) && (NAND_MAX_PAGESIZE > 4096)) || \
285 !defined(NAND_MAX_PAGESIZE)
286# undef NAND_MAX_PAGESIZE
287# define NAND_MAX_PAGESIZE 4096
288#endif
289#if (defined(NAND_MAX_OOBSIZE) && (NAND_MAX_OOBSIZE > 256)) || \
290 !defined(NAND_MAX_OOBSIZE)
291# undef NAND_MAX_OOBSIZE
292# define NAND_MAX_OOBSIZE 256
293#endif
294
295#define OCTEONTX_NAND_DRIVER_NAME "octeontx_nand"
296
297#define NDF_TIMEOUT 1000 /** Timeout in ms */
298#define USEC_PER_SEC 1000000 /** Linux compatibility */
299#ifndef NAND_MAX_CHIPS
300# define NAND_MAX_CHIPS 8 /** Linux compatibility */
301#endif
302
/** Per-chip driver state for one NAND device behind the NDF */
struct octeontx_nand_chip {
	struct list_head node;	/* entry in octeontx_nfc->chips */
	struct nand_chip nand;	/* generic MTD NAND chip */
	struct ndf_set_tm_par_cmd timings; /* cached SET_TM_PAR values */
	int cs;			/* chip select this chip answers to */
	int selected_page;
	int iface_mode;
	int row_bytes;		/* address bytes used for the row (page) */
	int col_bytes;		/* address bytes used for the column */
	bool oob_only;		/* next access addresses the OOB area only */
	bool iface_set;
};
315
/** Bounce buffer shared between the DMA engine and the MTD callbacks */
struct octeontx_nand_buf {
	u8 *dmabuf;		/* CPU view of the buffer */
	dma_addr_t dmaaddr;	/* bus address given to the DMA engine */
	int dmabuflen;		/* total capacity in bytes */
	int data_len;		/* bytes currently valid / staged */
	int data_index;		/* read cursor into dmabuf */
};
323
324/** NAND flash controller (NDF) related information */
/** NAND flash controller (NDF) related information */
struct octeontx_nfc {
	struct nand_hw_control controller; /* shared MTD controller lock */
	struct udevice *dev;	/* owning DM device */
	void __iomem *base;	/* NDF register block */
	struct list_head chips;	/* all octeontx_nand_chip instances */
	int selected_chip;      /* Currently selected NAND chip number */

	/*
	 * Status is separate from octeontx_nand_buf because
	 * it can be used in parallel and during init.
	 */
	u8 *stat;		/* 8-byte DMA target for STATUS reads */
	dma_addr_t stat_addr;
	bool use_status;	/* next read_byte() returns *stat */

	struct octeontx_nand_buf buf;
	union bch_resp *bch_resp; /* hardware BCH response area */
	dma_addr_t bch_rhandle;

	/* BCH of all-0xff, so erased pages read as error-free */
	unsigned char *eccmask;
};
347
348/* settable timings - 0..7 select timing of alen1..4/clen1..3/etc */
/* settable timings - 0..7 select timing of alen1..4/clen1..3/etc */
enum tm_idx {
	t0,	/* fixed at 4<<mult cycles */
	t1, t2, t3, t4, t5, t6, t7,	/* settable per ONFI-timing mode */
};
353
/** Deferred-probe record: device queued until the BCH engine is ready */
struct octeontx_probe_device {
	struct list_head list;	/* entry in octeontx_pci_nand_deferred_devices */
	struct udevice *dev;	/* device whose probe was deferred */
};
358
/* Handle to the BCH virtual function used for hardware ECC (may be NULL) */
static struct bch_vf *bch_vf;
/** Deferred devices due to BCH not being ready */
LIST_HEAD(octeontx_pci_nand_deferred_devices);

/** default parameters used for probing chips */
#define MAX_ONFI_MODE	5

static int default_onfi_timing;
static int slew_ns = 2;		/* default timing padding */
static int def_ecc_size = 512;	/* 1024 best for sw_bch, <= 4095 for hw_bch */
static int default_width = 1;	/* 8 bit */
static int default_page_size = 2048;
/* timings used before a chip has been identified */
static struct ndf_set_tm_par_cmd default_timing_parms;
372
/**
 * Poll a 64-bit register until a condition holds or a timeout expires.
 * Ported from Linux, rebased on U-Boot's millisecond timer.
 *
 * @param addr		register to poll with readq()
 * @param val		lvalue receiving the last value read
 * @param cond		expression (normally over @val) that ends the poll
 * @param delay_us	delay between polls in us (0 = tight loop)
 * @param timeout_us	timeout in us (0 = poll forever)
 *
 * Evaluates to 0 if @cond became true, -ETIMEDOUT otherwise.
 */
#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us)	\
({									\
	ulong __start = get_timer(0);					\
	void *__addr = (addr);						\
	/* parenthesize args so expression operands keep precedence */	\
	const ulong __timeout_ms = (timeout_us) / 1000;			\
	do {								\
		(val) = readq(__addr);					\
		if (cond)						\
			break;						\
		if ((timeout_us) &&					\
		    get_timer(__start) > __timeout_ms) {		\
			/* final read so caller sees the last state */	\
			(val) = readq(__addr);				\
			break;						\
		}							\
		if (delay_us)						\
			udelay(delay_us);				\
	} while (1);							\
	(cond) ? 0 : -ETIMEDOUT;					\
})
392
/** Ported from Linux 4.9.0 include/linux/of.h for compatibility */
/* Count the direct subnodes of @node in the global device tree blob */
static inline int of_get_child_count(const ofnode node)
{
	return fdtdec_get_child_count(gd->fdt_blob, ofnode_to_offset(node));
}
398
399/**
400 * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
401 */
402static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
403 struct mtd_oob_region *oobregion)
404{
405 struct nand_chip *chip = mtd_to_nand(mtd);
406 struct nand_ecc_ctrl *ecc = &chip->ecc;
407
408 if (section || !ecc->total)
409 return -ERANGE;
410
411 oobregion->length = ecc->total;
412 oobregion->offset = mtd->oobsize - oobregion->length;
413
414 return 0;
415}
416
417/**
418 * Linux compatibility from Linux 4.9.0 drivers/mtd/nand/nand_base.c
419 */
420static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
421 struct mtd_oob_region *oobregion)
422{
423 struct nand_chip *chip = mtd_to_nand(mtd);
424 struct nand_ecc_ctrl *ecc = &chip->ecc;
425
426 if (section)
427 return -ERANGE;
428
429 oobregion->length = mtd->oobsize - ecc->total - 2;
430 oobregion->offset = 2;
431
432 return 0;
433}
434
/* Large-page OOB layout: ECC at the tail, free space after the BBM */
static const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
	.ecc = nand_ooblayout_ecc_lp,
	.rfree = nand_ooblayout_free_lp,
};
439
/* Map a generic nand_chip to the enclosing driver-private chip */
static inline struct octeontx_nand_chip *to_otx_nand(struct nand_chip *nand)
{
	return container_of(nand, struct octeontx_nand_chip, nand);
}
444
/* Map the shared controller structure to the enclosing NFC state */
static inline struct octeontx_nfc *to_otx_nfc(struct nand_hw_control *ctrl)
{
	return container_of(ctrl, struct octeontx_nfc, controller);
}
449
450static int octeontx_nand_calc_ecc_layout(struct nand_chip *nand)
451{
452 struct nand_ecclayout *layout = nand->ecc.layout;
453 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
454 struct mtd_info *mtd = &nand->mtd;
455 int oobsize = mtd->oobsize;
456 int i;
457 bool layout_alloc = false;
458
459 if (!layout) {
460 layout = devm_kzalloc(tn->dev, sizeof(*layout), GFP_KERNEL);
461 if (!layout)
462 return -ENOMEM;
463 nand->ecc.layout = layout;
464 layout_alloc = true;
465 }
466 layout->eccbytes = nand->ecc.steps * nand->ecc.bytes;
467 /* Reserve 2 bytes for bad block marker */
468 if (layout->eccbytes + 2 > oobsize) {
469 pr_err("No suitable oob scheme available for oobsize %d eccbytes %u\n",
470 oobsize, layout->eccbytes);
471 goto fail;
472 }
473 /* put ecc bytes at oob tail */
474 for (i = 0; i < layout->eccbytes; i++)
475 layout->eccpos[i] = oobsize - layout->eccbytes + i;
476 layout->oobfree[0].offset = 2;
477 layout->oobfree[0].length = oobsize - 2 - layout->eccbytes;
478 nand->ecc.layout = layout;
479 return 0;
480
481fail:
482 if (layout_alloc)
483 kfree(layout);
484 return -1;
485}
486
487/*
488 * Read a single byte from the temporary buffer. Used after READID
489 * to get the NAND information and for STATUS.
490 */
491static u8 octeontx_nand_read_byte(struct mtd_info *mtd)
492{
493 struct nand_chip *nand = mtd_to_nand(mtd);
494 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
495
496 if (tn->use_status) {
497 tn->use_status = false;
498 return *tn->stat;
499 }
500
501 if (tn->buf.data_index < tn->buf.data_len)
502 return tn->buf.dmabuf[tn->buf.data_index++];
503
504 dev_err(tn->dev, "No data to read, idx: 0x%x, len: 0x%x\n",
505 tn->buf.data_index, tn->buf.data_len);
506
507 return 0xff;
508}
509
510/*
511 * Read a number of pending bytes from the temporary buffer. Used
512 * to get page and OOB data.
513 */
514static void octeontx_nand_read_buf(struct mtd_info *mtd, u8 *buf, int len)
515{
516 struct nand_chip *nand = mtd_to_nand(mtd);
517 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
518
519 if (len > tn->buf.data_len - tn->buf.data_index) {
520 dev_err(tn->dev, "Not enough data for read of %d bytes\n", len);
521 return;
522 }
523
524 memcpy(buf, tn->buf.dmabuf + tn->buf.data_index, len);
525 tn->buf.data_index += len;
526}
527
528static void octeontx_nand_write_buf(struct mtd_info *mtd,
529 const u8 *buf, int len)
530{
531 struct nand_chip *nand = mtd_to_nand(mtd);
532 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
533
534 memcpy(tn->buf.dmabuf + tn->buf.data_len, buf, len);
535 tn->buf.data_len += len;
536}
537
/* Overwrite default function to avoid sync abort on chip = -1. */
/* Chip select is driven per command via ndf_queue_cmd_chip() instead. */
static void octeontx_nand_select_chip(struct mtd_info *mtd, int chip)
{
}
542
543static inline int timing_to_cycle(u32 psec, unsigned long clock)
544{
545 unsigned int ns;
546 int ticks;
547
548 ns = DIV_ROUND_UP(psec, 1000);
549 ns += slew_ns;
550
551 /* no rounding needed since clock is multiple of 1MHz */
552 clock /= 1000000;
553 ns *= clock;
554
555 ticks = DIV_ROUND_UP(ns, 1000);
556
557 /* actual delay is (tm_parX+1)<<tim_mult */
558 if (ticks)
559 ticks--;
560
561 return ticks;
562}
563
/*
 * Fill a SET_TM_PAR command from an ONFI SDR timing set.
 *
 * @chip:	driver chip (currently unused here; may be NULL)
 * @tp:		timing command to populate
 * @timings:	ONFI SDR timing values (picoseconds)
 * @sclk:	coprocessor/IO clock in Hz
 */
static void set_timings(struct octeontx_nand_chip *chip,
			struct ndf_set_tm_par_cmd *tp,
			const struct nand_sdr_timings *timings,
			unsigned long sclk)
{
	/* scaled coprocessor-cycle values */
	u32 s_wh, s_cls, s_clh, s_rp, s_wb, s_wc;

	tp->tim_mult = 0;
	s_wh = timing_to_cycle(timings->tWH_min, sclk);
	s_cls = timing_to_cycle(timings->tCLS_min, sclk);
	s_clh = timing_to_cycle(timings->tCLH_min, sclk);
	s_rp = timing_to_cycle(timings->tRP_min, sclk);
	s_wb = timing_to_cycle(timings->tWB_max, sclk);
	s_wc = timing_to_cycle(timings->tWC_min, sclk);

	/* each tm_par is reused for several related bus phases */
	tp->tm_par1 = s_wh;
	tp->tm_par2 = s_clh;
	tp->tm_par3 = s_rp + 1;
	tp->tm_par4 = s_cls - s_wh;
	tp->tm_par5 = s_wc - s_wh + 1;
	tp->tm_par6 = s_wb;
	tp->tm_par7 = 0;
	tp->tim_mult++;	/* overcompensate for bad math */

	/* TODO: comment parameter re-use */

	pr_debug("%s: tim_par: mult: %d p1: %d p2: %d p3: %d\n",
		 __func__, tp->tim_mult, tp->tm_par1, tp->tm_par2, tp->tm_par3);
	pr_debug("  p4: %d p5: %d p6: %d p7: %d\n",
		 tp->tm_par4, tp->tm_par5, tp->tm_par6, tp->tm_par7);
}
596
/* Derive the pre-identification timing set used before chips probe */
static int set_default_timings(struct octeontx_nfc *tn,
			       const struct nand_sdr_timings *timings)
{
	unsigned long sclk = octeontx_get_io_clock();

	set_timings(NULL, &default_timing_parms, timings, sclk);
	return 0;
}
605
606static int octeontx_nfc_chip_set_timings(struct octeontx_nand_chip *chip,
607 const struct nand_sdr_timings *timings)
608{
609 /*struct octeontx_nfc *tn = to_otx_nfc(chip->nand.controller);*/
610 unsigned long sclk = octeontx_get_io_clock();
611
612 set_timings(chip, &chip->timings, timings, sclk);
613 return 0;
614}
615
616/* How many bytes are free in the NFD_CMD queue? */
617static int ndf_cmd_queue_free(struct octeontx_nfc *tn)
618{
619 u64 ndf_misc;
620
621 ndf_misc = readq(tn->base + NDF_MISC);
622 return FIELD_GET(NDF_MISC_FR_BYTE, ndf_misc);
623}
624
625/* Submit a command to the NAND command queue. */
626static int ndf_submit(struct octeontx_nfc *tn, union ndf_cmd *cmd)
627{
628 int opcode = cmd->val[0] & 0xf;
629
630 switch (opcode) {
631 /* All these commands fit in one 64bit word */
632 case NDF_OP_NOP:
633 case NDF_OP_SET_TM_PAR:
634 case NDF_OP_WAIT:
635 case NDF_OP_CHIP_EN_DIS:
636 case NDF_OP_CLE_CMD:
637 case NDF_OP_WR_CMD:
638 case NDF_OP_RD_CMD:
639 case NDF_OP_RD_EDO_CMD:
640 case NDF_OP_BUS_ACQ_REL:
641 if (ndf_cmd_queue_free(tn) < 8)
642 goto full;
643 writeq(cmd->val[0], tn->base + NDF_CMD);
644 break;
645 case NDF_OP_ALE_CMD:
646 /* ALE commands take either one or two 64bit words */
647 if (cmd->u.ale_cmd.adr_byte_num < 5) {
648 if (ndf_cmd_queue_free(tn) < 8)
649 goto full;
650 writeq(cmd->val[0], tn->base + NDF_CMD);
651 } else {
652 if (ndf_cmd_queue_free(tn) < 16)
653 goto full;
654 writeq(cmd->val[0], tn->base + NDF_CMD);
655 writeq(cmd->val[1], tn->base + NDF_CMD);
656 }
657 break;
658 case NDF_OP_WAIT_STATUS: /* Wait status commands take two 64bit words */
659 if (ndf_cmd_queue_free(tn) < 16)
660 goto full;
661 writeq(cmd->val[0], tn->base + NDF_CMD);
662 writeq(cmd->val[1], tn->base + NDF_CMD);
663 break;
664 default:
665 dev_err(tn->dev, "%s: unknown command: %u\n", __func__, opcode);
666 return -EINVAL;
667 }
668 return 0;
669
670full:
671 dev_err(tn->dev, "%s: no space left in command queue\n", __func__);
672 return -ENOMEM;
673}
674
675/**
676 * Wait for the ready/busy signal. First wait for busy to be valid,
677 * then wait for busy to de-assert.
678 */
679static int ndf_build_wait_busy(struct octeontx_nfc *tn)
680{
681 union ndf_cmd cmd;
682
683 memset(&cmd, 0, sizeof(cmd));
684 cmd.u.wait.opcode = NDF_OP_WAIT;
685 cmd.u.wait.r_b = 1;
686 cmd.u.wait.wlen = t6;
687
688 if (ndf_submit(tn, &cmd))
689 return -ENOMEM;
690 return 0;
691}
692
693static bool ndf_dma_done(struct octeontx_nfc *tn)
694{
695 u64 dma_cfg;
696
697 /* Enable bit should be clear after a transfer */
698 dma_cfg = readq(tn->base + NDF_DMA_CFG);
699 if (!(dma_cfg & NDF_DMA_CFG_EN))
700 return true;
701
702 return false;
703}
704
705static int ndf_wait(struct octeontx_nfc *tn)
706{
707 ulong start = get_timer(0);
708 bool done;
709
710 while (!(done = ndf_dma_done(tn)) && get_timer(start) < NDF_TIMEOUT)
711 ;
712
713 if (!done) {
714 dev_err(tn->dev, "%s: timeout error\n", __func__);
715 return -ETIMEDOUT;
716 }
717 return 0;
718}
719
/*
 * Wait (up to 100ms each) for the command execution unit to go idle
 * and then for the DMA engine to drop its enable bit.
 *
 * Return: 0 when both are idle, -ETIMEDOUT otherwise.
 */
static int ndf_wait_idle(struct octeontx_nfc *tn)
{
	u64 val;
	u64 dval = 0;
	int rc;
	int pause = 100;		/* poll interval, us */
	u64 tot_us = USEC_PER_SEC / 10;	/* 100ms budget per register */

	rc = readq_poll_timeout(tn->base + NDF_ST_REG,
				val, val & NDF_ST_REG_EXE_IDLE, pause, tot_us);
	if (!rc)
		rc = readq_poll_timeout(tn->base + NDF_DMA_CFG,
					dval, !(dval & NDF_DMA_CFG_EN),
					pause, tot_us);

	return rc;
}
737
/** Issue set timing parameters */
/* Queue a SET_TM_PAR command that loads all seven timing values */
static int ndf_queue_cmd_timing(struct octeontx_nfc *tn,
				struct ndf_set_tm_par_cmd *timings)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.set_tm_par.opcode = NDF_OP_SET_TM_PAR;
	cmd.u.set_tm_par.tim_mult = timings->tim_mult;
	cmd.u.set_tm_par.tm_par1 = timings->tm_par1;
	cmd.u.set_tm_par.tm_par2 = timings->tm_par2;
	cmd.u.set_tm_par.tm_par3 = timings->tm_par3;
	cmd.u.set_tm_par.tm_par4 = timings->tm_par4;
	cmd.u.set_tm_par.tm_par5 = timings->tm_par5;
	cmd.u.set_tm_par.tm_par6 = timings->tm_par6;
	cmd.u.set_tm_par.tm_par7 = timings->tm_par7;
	return ndf_submit(tn, &cmd);
}
756
/** Issue bus acquire or release */
/* @direction: NDF_BUS_ACQUIRE (1) or NDF_BUS_RELEASE (0) */
static int ndf_queue_cmd_bus(struct octeontx_nfc *tn, int direction)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.bus_acq_rel.opcode = NDF_OP_BUS_ACQ_REL;
	cmd.u.bus_acq_rel.direction = direction;
	return ndf_submit(tn, &cmd);
}
767
/* Issue chip select or deselect */
/*
 * @enable: 1 to assert the chip select, 0 to deassert
 * @chip:   chip select number
 * @width:  bus width field (1 = 8 bit, 2 = 16 bit)
 */
static int ndf_queue_cmd_chip(struct octeontx_nfc *tn, int enable, int chip,
			      int width)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.chip_en_dis.opcode = NDF_OP_CHIP_EN_DIS;
	cmd.u.chip_en_dis.chip = chip;
	cmd.u.chip_en_dis.enable = enable;
	cmd.u.chip_en_dis.bus_width = width;
	return ndf_submit(tn, &cmd);
}
781
/* Queue a fixed delay selected by timing parameter index @t_delay */
static int ndf_queue_cmd_wait(struct octeontx_nfc *tn, int t_delay)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.wait.opcode = NDF_OP_WAIT;
	cmd.u.wait.wlen = t_delay;
	return ndf_submit(tn, &cmd);
}
791
/* Queue a CLE cycle that latches NAND command byte @command */
static int ndf_queue_cmd_cle(struct octeontx_nfc *tn, int command)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.cle_cmd.opcode = NDF_OP_CLE_CMD;
	cmd.u.cle_cmd.cmd_data = command;
	/* CLE setup/pulse/hold use the t4/t1/t2 timing parameters */
	cmd.u.cle_cmd.clen1 = t4;
	cmd.u.cle_cmd.clen2 = t1;
	cmd.u.cle_cmd.clen3 = t2;
	return ndf_submit(tn, &cmd);
}
804
/*
 * Queue an ALE cycle that sends @addr_bytes address bytes.
 *
 * For 4-8 address bytes the layout is 2 column bytes followed by the
 * row (page) bytes; for 1-3 bytes only the page address is sent.
 * When the chip is in OOB-only mode the column is offset past the
 * page data, assuming OOB immediately follows the page.
 */
static int ndf_queue_cmd_ale(struct octeontx_nfc *tn, int addr_bytes,
			     struct nand_chip *nand, u64 page,
			     u32 col, int page_size)
{
	struct octeontx_nand_chip *octeontx_nand = (nand) ?
						to_otx_nand(nand) : NULL;
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.ale_cmd.opcode = NDF_OP_ALE_CMD;
	cmd.u.ale_cmd.adr_byte_num = addr_bytes;

	/* set column bit for OOB area, assume OOB follows page */
	if (octeontx_nand && octeontx_nand->oob_only)
		col += page_size;

	/* page is u64 for this generality, even if cmdfunc() passes int */
	switch (addr_bytes) {
	/* 4-8 bytes: page, then 2-byte col */
	case 8:
		cmd.u.ale_cmd.adr_byt8 = (page >> 40) & 0xff;
		fallthrough;
	case 7:
		cmd.u.ale_cmd.adr_byt7 = (page >> 32) & 0xff;
		fallthrough;
	case 6:
		cmd.u.ale_cmd.adr_byt6 = (page >> 24) & 0xff;
		fallthrough;
	case 5:
		cmd.u.ale_cmd.adr_byt5 = (page >> 16) & 0xff;
		fallthrough;
	case 4:
		cmd.u.ale_cmd.adr_byt4 = (page >> 8) & 0xff;
		cmd.u.ale_cmd.adr_byt3 = page & 0xff;
		cmd.u.ale_cmd.adr_byt2 = (col >> 8) & 0xff;
		cmd.u.ale_cmd.adr_byt1 =  col & 0xff;
		break;
	/* 1-3 bytes: just the page address */
	case 3:
		cmd.u.ale_cmd.adr_byt3 = (page >> 16) & 0xff;
		fallthrough;
	case 2:
		cmd.u.ale_cmd.adr_byt2 = (page >> 8) & 0xff;
		fallthrough;
	case 1:
		cmd.u.ale_cmd.adr_byt1 = page & 0xff;
		break;
	default:
		break;
	}

	cmd.u.ale_cmd.alen1 = t3;
	cmd.u.ale_cmd.alen2 = t1;
	cmd.u.ale_cmd.alen3 = t5;
	cmd.u.ale_cmd.alen4 = t2;
	return ndf_submit(tn, &cmd);
}
862
/* Queue a WR command that clocks @len bytes from the DMA engine */
static int ndf_queue_cmd_write(struct octeontx_nfc *tn, int len)
{
	union ndf_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.u.wr_cmd.opcode = NDF_OP_WR_CMD;
	cmd.u.wr_cmd.data = len;
	cmd.u.wr_cmd.wlen1 = t3;
	cmd.u.wr_cmd.wlen2 = t1;
	return ndf_submit(tn, &cmd);
}
874
/*
 * Queue the common preamble for a NAND operation: load timings,
 * acquire the bus, assert chip select, then issue CLE @cmd1, the
 * optional address cycles, and the optional second CLE @cmd2.
 *
 * Runs with defaults when no chip is active yet (early probing).
 * Returns 0 on success or the first queueing error.
 */
static int ndf_build_pre_cmd(struct octeontx_nfc *tn, int cmd1,
			     int addr_bytes, u64 page, u32 col, int cmd2)
{
	struct nand_chip *nand = tn->controller.active;
	struct octeontx_nand_chip *octeontx_nand;
	struct ndf_set_tm_par_cmd *timings;
	int width, page_size, rc;

	/* Also called before chip probing is finished */
	if (!nand) {
		timings = &default_timing_parms;
		page_size = default_page_size;
		width = default_width;
	} else {
		octeontx_nand = to_otx_nand(nand);
		timings = &octeontx_nand->timings;
		page_size = nand->mtd.writesize;
		if (nand->options & NAND_BUSWIDTH_16)
			width = 2;
		else
			width = 1;
	}
	rc = ndf_queue_cmd_timing(tn, timings);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_bus(tn, NDF_BUS_ACQUIRE);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_chip(tn, 1, tn->selected_chip, width);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_wait(tn, t1);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_cle(tn, cmd1);
	if (rc)
		return rc;

	if (addr_bytes) {
		rc = ndf_build_wait_busy(tn);
		if (rc)
			return rc;

		rc = ndf_queue_cmd_ale(tn, addr_bytes, nand,
				       page, col, page_size);
		if (rc)
			return rc;
	}

	/* CLE 2 */
	if (cmd2) {
		rc = ndf_build_wait_busy(tn);
		if (rc)
			return rc;

		rc = ndf_queue_cmd_cle(tn, cmd2);
		if (rc)
			return rc;
	}
	return 0;
}
940
941static int ndf_build_post_cmd(struct octeontx_nfc *tn, int hold_time)
942{
943 int rc;
944
945 /* Deselect chip */
946 rc = ndf_queue_cmd_chip(tn, 0, 0, 0);
947 if (rc)
948 return rc;
949
950 rc = ndf_queue_cmd_wait(tn, t2);
951 if (rc)
952 return rc;
953
954 /* Release bus */
955 rc = ndf_queue_cmd_bus(tn, 0);
956 if (rc)
957 return rc;
958
959 rc = ndf_queue_cmd_wait(tn, hold_time);
960 if (rc)
961 return rc;
962
963 /*
964 * Last action is ringing the doorbell with number of bus
965 * acquire-releases cycles (currently 1).
966 */
967 writeq(1, tn->base + NDF_DRBELL);
968 return 0;
969}
970
/* Setup the NAND DMA engine for a transfer. */
/*
 * @is_write:	1 for memory->NAND, 0 for NAND->memory
 * @bus_addr:	DMA bus address of the buffer
 * @len:	transfer length in bytes; the SIZE field is programmed in
 *		64-bit words, so @len is expected to be 8-byte aligned
 */
static void ndf_setup_dma(struct octeontx_nfc *tn, int is_write,
			  dma_addr_t bus_addr, int len)
{
	u64 dma_cfg;

	dma_cfg = FIELD_PREP(NDF_DMA_CFG_RW, is_write) |
		  FIELD_PREP(NDF_DMA_CFG_SIZE, (len >> 3) - 1);
	dma_cfg |= NDF_DMA_CFG_EN;
	writeq(bus_addr, tn->base + NDF_DMA_ADR);
	writeq(dma_cfg, tn->base + NDF_DMA_CFG);
}
983
984static int octeontx_nand_reset(struct octeontx_nfc *tn)
985{
986 int rc;
987
988 rc = ndf_build_pre_cmd(tn, NAND_CMD_RESET, 0, 0, 0, 0);
989 if (rc)
990 return rc;
991
992 rc = ndf_build_wait_busy(tn);
993 if (rc)
994 return rc;
995
996 rc = ndf_build_post_cmd(tn, t2);
997 if (rc)
998 return rc;
999
1000 return 0;
1001}
1002
/*
 * Execute a full read sequence: preamble (@cmd1 / address / @cmd2),
 * wait for ready, RD (or RD_EDO for ONFI timing modes >= 4) of @len
 * bytes into the DMA buffer, then the epilogue.
 *
 * The destination is the status buffer when use_status is set,
 * otherwise the shared bounce buffer.
 *
 * Return: number of bytes actually transferred (from the DMA address
 * delta) on success, negative error code on failure.
 */
static int ndf_read(struct octeontx_nfc *tn, int cmd1, int addr_bytes,
		    u64 page, u32 col, int cmd2, int len)
{
	dma_addr_t bus_addr = tn->use_status ? tn->stat_addr : tn->buf.dmaaddr;
	struct nand_chip *nand = tn->controller.active;
	int timing_mode, bytes, rc;
	union ndf_cmd cmd;
	u64 start, end;

	pr_debug("%s(%p, 0x%x, 0x%x, 0x%llx, 0x%x, 0x%x, 0x%x)\n", __func__,
		 tn, cmd1, addr_bytes, page, col, cmd2, len);
	if (!nand)
		timing_mode = default_onfi_timing;
	else
		timing_mode = nand->onfi_timing_mode_default;

	/* Build the command and address cycles */
	rc = ndf_build_pre_cmd(tn, cmd1, addr_bytes, page, col, cmd2);
	if (rc) {
		dev_err(tn->dev, "Build pre command failed\n");
		return rc;
	}

	/* This waits for some time, then waits for busy to be de-asserted. */
	rc = ndf_build_wait_busy(tn);
	if (rc) {
		dev_err(tn->dev, "Wait timeout\n");
		return rc;
	}

	memset(&cmd, 0, sizeof(cmd));

	/* EDO read cycles are required for the faster ONFI modes */
	if (timing_mode < 4)
		cmd.u.rd_cmd.opcode = NDF_OP_RD_CMD;
	else
		cmd.u.rd_cmd.opcode = NDF_OP_RD_EDO_CMD;

	cmd.u.rd_cmd.data = len;
	cmd.u.rd_cmd.rlen1 = t7;
	cmd.u.rd_cmd.rlen2 = t3;
	cmd.u.rd_cmd.rlen3 = t1;
	cmd.u.rd_cmd.rlen4 = t7;
	rc = ndf_submit(tn, &cmd);
	if (rc) {
		dev_err(tn->dev, "Error submitting command\n");
		return rc;
	}

	/* Remember the start address so progress can be measured below */
	start = (u64)bus_addr;
	ndf_setup_dma(tn, 0, bus_addr, len);

	rc = ndf_build_post_cmd(tn, t2);
	if (rc) {
		dev_err(tn->dev, "Build post command failed\n");
		return rc;
	}

	/* Wait for the DMA to complete */
	rc = ndf_wait(tn);
	if (rc) {
		dev_err(tn->dev, "DMA timed out\n");
		return rc;
	}

	/* The DMA address advances as data lands; the delta is the count */
	end = readq(tn->base + NDF_DMA_ADR);
	bytes = end - start;

	/* Make sure NDF is really done */
	rc = ndf_wait_idle(tn);
	if (rc) {
		dev_err(tn->dev, "poll idle failed\n");
		return rc;
	}

	pr_debug("%s: Read %d bytes\n", __func__, bytes);
	return bytes;
}
1080
1081static int octeontx_nand_get_features(struct mtd_info *mtd,
1082 struct nand_chip *chip, int feature_addr,
1083 u8 *subfeature_para)
1084{
1085 struct nand_chip *nand = chip;
1086 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
1087 int len = 8;
1088 int rc;
1089
1090 pr_debug("%s: feature addr: 0x%x\n", __func__, feature_addr);
1091 memset(tn->buf.dmabuf, 0xff, len);
1092 tn->buf.data_index = 0;
1093 tn->buf.data_len = 0;
1094 rc = ndf_read(tn, NAND_CMD_GET_FEATURES, 1, feature_addr, 0, 0, len);
1095 if (rc)
1096 return rc;
1097
1098 memcpy(subfeature_para, tn->buf.dmabuf, ONFI_SUBFEATURE_PARAM_LEN);
1099
1100 return 0;
1101}
1102
/*
 * Write an ONFI feature parameter block: stage the parameter bytes
 * (zero-padded to the 8-byte DMA granule), issue SET_FEATURES plus the
 * one-byte feature address, DMA the data out and wait for ready.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int octeontx_nand_set_features(struct mtd_info *mtd,
				      struct nand_chip *chip, int feature_addr,
				      u8 *subfeature_para)
{
	struct nand_chip *nand = chip;
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	const int len = ONFI_SUBFEATURE_PARAM_LEN;
	int rc;

	rc = ndf_build_pre_cmd(tn, NAND_CMD_SET_FEATURES,
			       1, feature_addr, 0, 0);
	if (rc)
		return rc;

	memcpy(tn->buf.dmabuf, subfeature_para, len);
	memset(tn->buf.dmabuf + len, 0, 8 - len);

	ndf_setup_dma(tn, 1, tn->buf.dmaaddr, 8);

	rc = ndf_queue_cmd_write(tn, 8);
	if (rc)
		return rc;

	rc = ndf_build_wait_busy(tn);
	if (rc)
		return rc;

	rc = ndf_build_post_cmd(tn, t2);
	if (rc)
		return rc;

	return 0;
}
1136
1137/*
1138 * Read a page from NAND. If the buffer has room, the out of band
1139 * data will be included.
1140 */
1141static int ndf_page_read(struct octeontx_nfc *tn, u64 page, int col, int len)
1142{
1143 debug("%s(%p, 0x%llx, 0x%x, 0x%x) active: %p\n", __func__,
1144 tn, page, col, len, tn->controller.active);
1145 struct nand_chip *nand = tn->controller.active;
1146 struct octeontx_nand_chip *chip = to_otx_nand(nand);
1147 int addr_bytes = chip->row_bytes + chip->col_bytes;
1148
1149 memset(tn->buf.dmabuf, 0xff, len);
1150 return ndf_read(tn, NAND_CMD_READ0, addr_bytes,
1151 page, col, NAND_CMD_READSTART, len);
1152}
1153
/* Erase a NAND block */
/*
 * Issue ERASE1 + row address + ERASE2 for the block containing
 * @page_addr, wait for R_B, and drain the command queue.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ndf_block_erase(struct octeontx_nfc *tn, u64 page_addr)
{
	struct nand_chip *nand = tn->controller.active;
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	int addr_bytes = chip->row_bytes;	/* erase takes row bytes only */
	int rc;

	rc = ndf_build_pre_cmd(tn, NAND_CMD_ERASE1, addr_bytes,
			       page_addr, 0, NAND_CMD_ERASE2);
	if (rc)
		return rc;

	/* Wait for R_B to signal erase is complete  */
	rc = ndf_build_wait_busy(tn);
	if (rc)
		return rc;

	rc = ndf_build_post_cmd(tn, t2);
	if (rc)
		return rc;

	/* Wait until the command queue is idle */
	return ndf_wait_idle(tn);
}
1179
/*
 * Write a page (or less) to NAND.
 *
 * Flushes whatever octeontx_nand_write_buf() staged (from data_index
 * to data_len) via SEQIN + address + DMA write + PAGEPROG, then waits
 * for the program to complete.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int ndf_page_write(struct octeontx_nfc *tn, int page)
{
	int len, rc;
	struct nand_chip *nand = tn->controller.active;
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	int addr_bytes = chip->row_bytes + chip->col_bytes;

	len = tn->buf.data_len - tn->buf.data_index;
	/* Writes that start past the page data only touch the OOB area */
	chip->oob_only = (tn->buf.data_index >= nand->mtd.writesize);
	WARN_ON_ONCE(len & 0x7);	/* DMA length must be 8-byte aligned */

	ndf_setup_dma(tn, 1, tn->buf.dmaaddr + tn->buf.data_index, len);
	rc = ndf_build_pre_cmd(tn, NAND_CMD_SEQIN, addr_bytes, page, 0, 0);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_write(tn, len);
	if (rc)
		return rc;

	rc = ndf_queue_cmd_cle(tn, NAND_CMD_PAGEPROG);
	if (rc)
		return rc;

	/* Wait for R_B to signal program is complete  */
	rc = ndf_build_wait_busy(tn);
	if (rc)
		return rc;

	rc = ndf_build_post_cmd(tn, t2);
	if (rc)
		return rc;

	/* Wait for the DMA to complete */
	rc = ndf_wait(tn);
	if (rc)
		return rc;

	/* Data transfer is done but NDF is not, it is waiting for R/B# */
	return ndf_wait_idle(tn);
}
1224
1225static void octeontx_nand_cmdfunc(struct mtd_info *mtd, unsigned int command,
1226 int column, int page_addr)
1227{
1228 struct nand_chip *nand = mtd_to_nand(mtd);
1229 struct octeontx_nand_chip *octeontx_nand = to_otx_nand(nand);
1230 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
1231 int rc;
1232
1233 tn->selected_chip = octeontx_nand->cs;
1234 if (tn->selected_chip < 0 || tn->selected_chip >= NAND_MAX_CHIPS) {
1235 dev_err(tn->dev, "invalid chip select\n");
1236 return;
1237 }
1238
1239 tn->use_status = false;
1240
1241 pr_debug("%s(%p, 0x%x, 0x%x, 0x%x) cs: %d\n", __func__, mtd, command,
1242 column, page_addr, tn->selected_chip);
1243 switch (command) {
1244 case NAND_CMD_READID:
1245 tn->buf.data_index = 0;
1246 octeontx_nand->oob_only = false;
1247 rc = ndf_read(tn, command, 1, column, 0, 0, 8);
1248 if (rc < 0)
1249 dev_err(tn->dev, "READID failed with %d\n", rc);
1250 else
1251 tn->buf.data_len = rc;
1252 break;
1253
1254 case NAND_CMD_READOOB:
1255 octeontx_nand->oob_only = true;
1256 tn->buf.data_index = 0;
1257 tn->buf.data_len = 0;
1258 rc = ndf_page_read(tn, page_addr, column, mtd->oobsize);
1259 if (rc < mtd->oobsize)
1260 dev_err(tn->dev, "READOOB failed with %d\n",
1261 tn->buf.data_len);
1262 else
1263 tn->buf.data_len = rc;
1264 break;
1265
1266 case NAND_CMD_READ0:
1267 octeontx_nand->oob_only = false;
1268 tn->buf.data_index = 0;
1269 tn->buf.data_len = 0;
1270 rc = ndf_page_read(tn, page_addr, column,
1271 mtd->writesize + mtd->oobsize);
1272
1273 if (rc < mtd->writesize + mtd->oobsize)
1274 dev_err(tn->dev, "READ0 failed with %d\n", rc);
1275 else
1276 tn->buf.data_len = rc;
1277 break;
1278
1279 case NAND_CMD_STATUS:
1280 /* used in oob/not states */
1281 tn->use_status = true;
1282 rc = ndf_read(tn, command, 0, 0, 0, 0, 8);
1283 if (rc < 0)
1284 dev_err(tn->dev, "STATUS failed with %d\n", rc);
1285 break;
1286
1287 case NAND_CMD_RESET:
1288 /* used in oob/not states */
1289 rc = octeontx_nand_reset(tn);
1290 if (rc < 0)
1291 dev_err(tn->dev, "RESET failed with %d\n", rc);
1292 break;
1293
1294 case NAND_CMD_PARAM:
1295 octeontx_nand->oob_only = false;
1296 tn->buf.data_index = 0;
1297 rc = ndf_read(tn, command, 1, 0, 0, 0,
1298 min(tn->buf.dmabuflen, 3 * 512));
1299 if (rc < 0)
1300 dev_err(tn->dev, "PARAM failed with %d\n", rc);
1301 else
1302 tn->buf.data_len = rc;
1303 break;
1304
1305 case NAND_CMD_RNDOUT:
1306 tn->buf.data_index = column;
1307 break;
1308
1309 case NAND_CMD_ERASE1:
1310 if (ndf_block_erase(tn, page_addr))
1311 dev_err(tn->dev, "ERASE1 failed\n");
1312 break;
1313
1314 case NAND_CMD_ERASE2:
1315 /* We do all erase processing in the first command, so ignore
1316 * this one.
1317 */
1318 break;
1319
1320 case NAND_CMD_SEQIN:
1321 octeontx_nand->oob_only = (column >= mtd->writesize);
1322 tn->buf.data_index = column;
1323 tn->buf.data_len = column;
1324
1325 octeontx_nand->selected_page = page_addr;
1326 break;
1327
1328 case NAND_CMD_PAGEPROG:
1329 rc = ndf_page_write(tn, octeontx_nand->selected_page);
1330 if (rc)
1331 dev_err(tn->dev, "PAGEPROG failed with %d\n", rc);
1332 break;
1333
1334 case NAND_CMD_SET_FEATURES:
1335 octeontx_nand->oob_only = false;
1336 /* assume tn->buf.data_len == 4 of data has been set there */
1337 rc = octeontx_nand_set_features(mtd, nand,
1338 page_addr, tn->buf.dmabuf);
1339 if (rc)
1340 dev_err(tn->dev, "SET_FEATURES failed with %d\n", rc);
1341 break;
1342
1343 case NAND_CMD_GET_FEATURES:
1344 octeontx_nand->oob_only = false;
1345 rc = octeontx_nand_get_features(mtd, nand,
1346 page_addr, tn->buf.dmabuf);
1347 if (!rc) {
1348 tn->buf.data_index = 0;
1349 tn->buf.data_len = 4;
1350 } else {
1351 dev_err(tn->dev, "GET_FEATURES failed with %d\n", rc);
1352 }
1353 break;
1354
1355 default:
1356 WARN_ON_ONCE(1);
1357 dev_err(tn->dev, "unhandled nand cmd: %x\n", command);
1358 }
1359}
1360
1361static int octeontx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
1362{
1363 struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
1364 int ret;
1365
1366 ret = ndf_wait_idle(tn);
1367 return (ret < 0) ? -EIO : 0;
1368}
1369
/* check compatibility with ONFI timing mode#N, and optionally apply */
/* TODO: Implement chipnr support? */
static int octeontx_nand_setup_dat_intf(struct mtd_info *mtd, int chipnr,
					const struct nand_data_interface *conf)
{
	static const bool check_only;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	static u64 t_wc_n[MAX_ONFI_MODE + 2]; /* cache a mode signature */
	int mode; /* deduced mode number, for reporting and restricting */
	int rc;

	/*
	 * Cache timing modes for reporting, and reducing needless change.
	 *
	 * Challenge: caller does not pass ONFI mode#, but reporting the mode
	 * and restricting to a maximum, or a list, are useful for diagnosing
	 * new hardware. So use tWC_min, distinct and monotonic across modes,
	 * to discover the requested/accepted mode number
	 */
	/*
	 * NOTE(review): this loop only populates the t_wc_n[] cache; nothing
	 * below ever compares conf->timings against t_wc_n[].  After the
	 * loop, 'mode' is -1 (cache filled on this call) or MAX_ONFI_MODE
	 * (cache already filled), so the "deduced mode" described above is
	 * never actually derived from 'conf' -- confirm against upstream.
	 */
	for (mode = MAX_ONFI_MODE; mode >= 0 && !t_wc_n[0]; mode--) {
		const struct nand_sdr_timings *t;

		t = onfi_async_timing_mode_to_sdr_timings(mode);
		if (!t)
			continue;
		t_wc_n[mode] = t->tWC_min;
	}

	if (!conf) {
		rc = -EINVAL;
	} else if (check_only) {
		/* check_only is always false; branch kept for parity with
		 * the kernel's setup_data_interface contract
		 */
		rc = 0;
	} else if (nand->data_interface &&
		   chip->iface_set && chip->iface_mode == mode) {
		/*
		 * Cases:
		 * - called from nand_reset, which clears DDR timing
		 *   mode back to SDR. BUT if we're already in SDR,
		 *   timing mode persists over resets.
		 *   While mtd/nand layer only supports SDR,
		 *   this is always safe. And this driver only supports SDR.
		 *
		 * - called from post-power-event nand_reset (maybe
		 *   NFC+flash power down, or system hibernate.
		 *   Address this when CONFIG_PM support added
		 */
		rc = 0;
	} else {
		/* Program the controller timing registers from 'conf' */
		rc = octeontx_nfc_chip_set_timings(chip, &conf->timings.sdr);
		if (!rc) {
			chip->iface_mode = mode;
			chip->iface_set = true;
		}
	}
	return rc;
}
1427
/*
 * Hook called when the BCH engine times out.  No recovery procedure is
 * implemented; callers merely report the error and give up on the page.
 */
static void octeontx_bch_reset(void)
{
}
1431
1432/*
1433 * Given a page, calculate the ECC code
1434 *
1435 * chip: Pointer to NAND chip data structure
1436 * buf: Buffer to calculate ECC on
1437 * code: Buffer to hold ECC data
1438 *
1439 * Return 0 on success or -1 on failure
1440 */
1441static int octeontx_nand_bch_calculate_ecc_internal(struct mtd_info *mtd,
1442 dma_addr_t ihandle,
1443 u8 *code)
1444{
1445 struct nand_chip *nand = mtd_to_nand(mtd);
1446 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
1447 int rc;
1448 int i;
1449 static u8 *ecc_buffer;
1450 static int ecc_size;
1451 static unsigned long ecc_handle;
1452 union bch_resp *r = tn->bch_resp;
1453
1454 if (!ecc_buffer || ecc_size < nand->ecc.size) {
1455 ecc_size = nand->ecc.size;
1456 ecc_buffer = dma_alloc_coherent(ecc_size,
1457 (unsigned long *)&ecc_handle);
1458 }
1459
1460 memset(ecc_buffer, 0, nand->ecc.bytes);
1461
1462 r->u16 = 0;
1463 __iowmb(); /* flush done=0 before making request */
1464
1465 rc = octeontx_bch_encode(bch_vf, ihandle, nand->ecc.size,
1466 nand->ecc.strength,
1467 (dma_addr_t)ecc_handle, tn->bch_rhandle);
1468
1469 if (!rc) {
1470 octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);
1471 } else {
1472 dev_err(tn->dev, "octeontx_bch_encode failed\n");
1473 return -1;
1474 }
1475
1476 if (!r->s.done || r->s.uncorrectable) {
1477 dev_err(tn->dev,
1478 "%s timeout, done:%d uncorr:%d corr:%d erased:%d\n",
1479 __func__, r->s.done, r->s.uncorrectable,
1480 r->s.num_errors, r->s.erased);
1481 octeontx_bch_reset();
1482 return -1;
1483 }
1484
1485 memcpy(code, ecc_buffer, nand->ecc.bytes);
1486
1487 for (i = 0; i < nand->ecc.bytes; i++)
1488 code[i] ^= tn->eccmask[i];
1489
1490 return tn->bch_resp->s.num_errors;
1491}
1492
1493/*
1494 * Given a page, calculate the ECC code
1495 *
1496 * mtd: MTD block structure
1497 * dat: raw data (unused)
1498 * ecc_code: buffer for ECC
1499 */
1500static int octeontx_nand_bch_calculate(struct mtd_info *mtd,
1501 const u8 *dat, u8 *ecc_code)
1502{
1503 struct nand_chip *nand = mtd_to_nand(mtd);
1504 dma_addr_t handle = dma_map_single((u8 *)dat,
1505 nand->ecc.size, DMA_TO_DEVICE);
1506 int ret;
1507
1508 ret = octeontx_nand_bch_calculate_ecc_internal(mtd, handle,
1509 (void *)ecc_code);
1510
1511 return ret;
1512}
1513
1514/*
1515 * Detect and correct multi-bit ECC for a page
1516 *
1517 * mtd: MTD block structure
1518 * dat: raw data read from the chip
1519 * read_ecc: ECC from the chip (unused)
1520 * isnull: unused
1521 *
1522 * Returns number of bits corrected or -1 if unrecoverable
1523 */
1524static int octeontx_nand_bch_correct(struct mtd_info *mtd, u_char *dat,
1525 u_char *read_ecc, u_char *isnull)
1526{
1527 struct nand_chip *nand = mtd_to_nand(mtd);
1528 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
1529 int i = nand->ecc.size + nand->ecc.bytes;
1530 static u8 *data_buffer;
1531 static dma_addr_t ihandle;
1532 static int buffer_size;
1533 dma_addr_t ohandle;
1534 union bch_resp *r = tn->bch_resp;
1535 int rc;
1536
1537 if (i > buffer_size) {
1538 if (buffer_size)
1539 free(data_buffer);
1540 data_buffer = dma_alloc_coherent(i,
1541 (unsigned long *)&ihandle);
1542 if (!data_buffer) {
1543 dev_err(tn->dev,
1544 "%s: Could not allocate %d bytes for buffer\n",
1545 __func__, i);
1546 goto error;
1547 }
1548 buffer_size = i;
1549 }
1550
1551 memcpy(data_buffer, dat, nand->ecc.size);
1552 memcpy(data_buffer + nand->ecc.size, read_ecc, nand->ecc.bytes);
1553
1554 for (i = 0; i < nand->ecc.bytes; i++)
1555 data_buffer[nand->ecc.size + i] ^= tn->eccmask[i];
1556
1557 r->u16 = 0;
1558 __iowmb(); /* flush done=0 before making request */
1559
1560 ohandle = dma_map_single(dat, nand->ecc.size, DMA_FROM_DEVICE);
1561 rc = octeontx_bch_decode(bch_vf, ihandle, nand->ecc.size,
1562 nand->ecc.strength, ohandle, tn->bch_rhandle);
1563
1564 if (!rc)
1565 octeontx_bch_wait(bch_vf, r, tn->bch_rhandle);
1566
1567 if (rc) {
1568 dev_err(tn->dev, "octeontx_bch_decode failed\n");
1569 goto error;
1570 }
1571
1572 if (!r->s.done) {
1573 dev_err(tn->dev, "Error: BCH engine timeout\n");
1574 octeontx_bch_reset();
1575 goto error;
1576 }
1577
1578 if (r->s.erased) {
1579 debug("Info: BCH block is erased\n");
1580 return 0;
1581 }
1582
1583 if (r->s.uncorrectable) {
1584 debug("Cannot correct NAND block, response: 0x%x\n",
1585 r->u16);
1586 goto error;
1587 }
1588
1589 return r->s.num_errors;
1590
1591error:
1592 debug("Error performing bch correction\n");
1593 return -1;
1594}
1595
/* ECC hwctl hook: the external BCH engine needs no per-operation setup */
void octeontx_nand_bch_hwctl(struct mtd_info *mtd, int mode)
{
	/* Do nothing. */
}
1600
1601static int octeontx_nand_hw_bch_read_page(struct mtd_info *mtd,
1602 struct nand_chip *chip, u8 *buf,
1603 int oob_required, int page)
1604{
1605 struct nand_chip *nand = mtd_to_nand(mtd);
1606 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
1607 int i, eccsize = chip->ecc.size, ret;
1608 int eccbytes = chip->ecc.bytes;
1609 int eccsteps = chip->ecc.steps;
1610 u8 *p;
1611 u8 *ecc_code = chip->buffers->ecccode;
1612 unsigned int max_bitflips = 0;
1613
1614 /* chip->read_buf() insists on sequential order, we do OOB first */
1615 memcpy(chip->oob_poi, tn->buf.dmabuf + mtd->writesize, mtd->oobsize);
1616
1617 /* Use private buffer as input for ECC correction */
1618 p = tn->buf.dmabuf;
1619
1620 ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1621 chip->ecc.total);
1622 if (ret)
1623 return ret;
1624
1625 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1626 int stat;
1627
1628 debug("Correcting block offset %lx, ecc offset %x\n",
1629 p - buf, i);
1630 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1631
1632 if (stat < 0) {
1633 mtd->ecc_stats.failed++;
1634 debug("Cannot correct NAND page %d\n", page);
1635 } else {
1636 mtd->ecc_stats.corrected += stat;
1637 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1638 }
1639 }
1640
1641 /* Copy corrected data to caller's buffer now */
1642 memcpy(buf, tn->buf.dmabuf, mtd->writesize);
1643
1644 return max_bitflips;
1645}
1646
1647static int octeontx_nand_hw_bch_write_page(struct mtd_info *mtd,
1648 struct nand_chip *chip,
1649 const u8 *buf, int oob_required,
1650 int page)
1651{
1652 struct octeontx_nfc *tn = to_otx_nfc(chip->controller);
1653 int i, eccsize = chip->ecc.size, ret;
1654 int eccbytes = chip->ecc.bytes;
1655 int eccsteps = chip->ecc.steps;
1656 const u8 *p;
1657 u8 *ecc_calc = chip->buffers->ecccalc;
1658
1659 debug("%s(buf?%p, oob%d p%x)\n",
1660 __func__, buf, oob_required, page);
1661 for (i = 0; i < chip->ecc.total; i++)
1662 ecc_calc[i] = 0xFF;
1663
1664 /* Copy the page data from caller's buffers to private buffer */
1665 chip->write_buf(mtd, buf, mtd->writesize);
1666 /* Use private date as source for ECC calculation */
1667 p = tn->buf.dmabuf;
1668
1669 /* Hardware ECC calculation */
1670 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1671 int ret;
1672
1673 ret = chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1674
1675 if (ret < 0)
1676 debug("calculate(mtd, p?%p, &ecc_calc[%d]?%p) returned %d\n",
1677 p, i, &ecc_calc[i], ret);
1678
1679 debug("block offset %lx, ecc offset %x\n", p - buf, i);
1680 }
1681
1682 ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1683 chip->ecc.total);
1684 if (ret)
1685 return ret;
1686
1687 /* Store resulting OOB into private buffer, will be sent to HW */
1688 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1689
1690 return 0;
1691}
1692
1693/**
1694 * nand_write_page_raw - [INTERN] raw page write function
1695 * @mtd: mtd info structure
1696 * @chip: nand chip info structure
1697 * @buf: data buffer
1698 * @oob_required: must write chip->oob_poi to OOB
1699 * @page: page number to write
1700 *
1701 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1702 */
1703static int octeontx_nand_write_page_raw(struct mtd_info *mtd,
1704 struct nand_chip *chip,
1705 const u8 *buf, int oob_required,
1706 int page)
1707{
1708 chip->write_buf(mtd, buf, mtd->writesize);
1709 if (oob_required)
1710 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1711
1712 return 0;
1713}
1714
1715/**
1716 * octeontx_nand_write_oob_std - [REPLACEABLE] the most common OOB data write
1717 * function
1718 * @mtd: mtd info structure
1719 * @chip: nand chip info structure
1720 * @page: page number to write
1721 */
1722static int octeontx_nand_write_oob_std(struct mtd_info *mtd,
1723 struct nand_chip *chip,
1724 int page)
1725{
1726 int status = 0;
1727 const u8 *buf = chip->oob_poi;
1728 int length = mtd->oobsize;
1729
1730 chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
1731 chip->write_buf(mtd, buf, length);
1732 /* Send command to program the OOB data */
1733 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1734
1735 status = chip->waitfunc(mtd, chip);
1736
1737 return status & NAND_STATUS_FAIL ? -EIO : 0;
1738}
1739
1740/**
1741 * octeontx_nand_read_page_raw - [INTERN] read raw page data without ecc
1742 * @mtd: mtd info structure
1743 * @chip: nand chip info structure
1744 * @buf: buffer to store read data
1745 * @oob_required: caller requires OOB data read to chip->oob_poi
1746 * @page: page number to read
1747 *
1748 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1749 */
1750static int octeontx_nand_read_page_raw(struct mtd_info *mtd,
1751 struct nand_chip *chip,
1752 u8 *buf, int oob_required, int page)
1753{
1754 chip->read_buf(mtd, buf, mtd->writesize);
1755 if (oob_required)
1756 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1757 return 0;
1758}
1759
/* Read only the OOB area of @page into chip->oob_poi */
static int octeontx_nand_read_oob_std(struct mtd_info *mtd,
				      struct nand_chip *chip,
				      int page)

{
	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
	return 0;
}
1769
1770static int octeontx_nand_calc_bch_ecc_strength(struct nand_chip *nand)
1771{
1772 struct mtd_info *mtd = nand_to_mtd(nand);
1773 struct nand_ecc_ctrl *ecc = &nand->ecc;
1774 struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
1775 int nsteps = mtd->writesize / ecc->size;
1776 int oobchunk = mtd->oobsize / nsteps;
1777
1778 /* ecc->strength determines ecc_level and OOB's ecc_bytes. */
1779 const u8 strengths[] = {4, 8, 16, 24, 32, 40, 48, 56, 60, 64};
1780 /* first set the desired ecc_level to match strengths[] */
1781 int index = ARRAY_SIZE(strengths) - 1;
1782 int need;
1783
1784 while (index > 0 && !(ecc->options & NAND_ECC_MAXIMIZE) &&
1785 strengths[index - 1] >= ecc->strength)
1786 index--;
1787
1788 do {
1789 need = DIV_ROUND_UP(15 * strengths[index], 8);
1790 if (need <= oobchunk - 2)
1791 break;
1792 } while (index > 0);
1793
1794 debug("%s: steps ds: %d, strength ds: %d\n", __func__,
1795 nand->ecc_step_ds, nand->ecc_strength_ds);
1796 ecc->strength = strengths[index];
1797 ecc->bytes = need;
1798 debug("%s: strength: %d, bytes: %d\n", __func__, ecc->strength,
1799 ecc->bytes);
1800
1801 if (!tn->eccmask)
1802 tn->eccmask = devm_kzalloc(tn->dev, ecc->bytes, GFP_KERNEL);
1803 if (!tn->eccmask)
1804 return -ENOMEM;
1805
1806 return 0;
1807}
1808
/* sample the BCH signature of an erased (all 0xff) page,
 * to XOR into all page traffic, so erased pages have no ECC errors
 */
static int octeontx_bch_save_empty_eccmask(struct nand_chip *nand)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct octeontx_nfc *tn = to_otx_nfc(nand->controller);
	unsigned int eccsize = nand->ecc.size;
	unsigned int eccbytes = nand->ecc.bytes;
	u8 erased_ecc[eccbytes];	/* VLA, one ECC step's worth */
	unsigned long erased_handle;
	unsigned char *erased_page = dma_alloc_coherent(eccsize,
							&erased_handle);
	int i;
	int rc = 0;

	if (!erased_page)
		return -ENOMEM;

	/* All-ones is the erased-flash pattern */
	memset(erased_page, 0xff, eccsize);
	memset(erased_ecc, 0, eccbytes);

	rc = octeontx_nand_bch_calculate_ecc_internal(mtd,
						      (dma_addr_t)erased_handle,
						      erased_ecc);

	/* NOTE(review): releases dma_alloc_coherent() memory with free();
	 * valid only if U-Boot's dma_alloc_coherent is malloc-backed on
	 * this platform -- confirm
	 */
	free(erased_page);

	/* Mask is built even when rc indicates failure; the eccmask then
	 * holds 0xff-complemented garbage -- callers ignore rc today
	 */
	for (i = 0; i < eccbytes; i++)
		tn->eccmask[i] = erased_ecc[i] ^ 0xff;

	return rc;
}
1842
/*
 * Derive the chip's addressing geometry and ECC layout after
 * nand_scan_ident(), and install the hardware-BCH page operations when
 * CONFIG_NAND_OCTEONTX_HW_ECC is enabled and a strength can be chosen.
 */
static void octeontx_nfc_chip_sizing(struct nand_chip *nand)
{
	struct octeontx_nand_chip *chip = to_otx_nand(nand);
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct nand_ecc_ctrl *ecc = &nand->ecc;

	/* ONFI packs row (low nibble) and column (high nibble) cycles */
	chip->row_bytes = nand->onfi_params.addr_cycles & 0xf;
	chip->col_bytes = nand->onfi_params.addr_cycles >> 4;
	debug("%s(%p) row bytes: %d, col bytes: %d, ecc mode: %d\n",
	      __func__, nand, chip->row_bytes, chip->col_bytes, ecc->mode);

	/*
	 * HW_BCH using OcteonTX BCH engine, or SOFT_BCH laid out in
	 * HW_BCH-compatible fashion, depending on devtree advice
	 * and kernel config.
	 * BCH/NFC hardware capable of subpage ops, not implemented.
	 */
	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
	nand->options |= NAND_NO_SUBPAGE_WRITE;
	debug("%s: start steps: %d, size: %d, bytes: %d\n",
	      __func__, ecc->steps, ecc->size, ecc->bytes);
	debug("%s: step ds: %d, strength ds: %d\n", __func__,
	      nand->ecc_step_ds, nand->ecc_strength_ds);

	if (ecc->mode != NAND_ECC_NONE) {
		int nsteps = ecc->steps ? ecc->steps : 1;

		/* Prefer an explicit step size; fall back to the default
		 * when the page divides evenly by it
		 */
		if (ecc->size && ecc->size != mtd->writesize)
			nsteps = mtd->writesize / ecc->size;
		else if (mtd->writesize > def_ecc_size &&
			 !(mtd->writesize & (def_ecc_size - 1)))
			nsteps = mtd->writesize / def_ecc_size;
		ecc->steps = nsteps;
		ecc->size = mtd->writesize / nsteps;
		ecc->bytes = mtd->oobsize / nsteps;

		/* Chip datasheet values (from ONFI) win when present */
		if (nand->ecc_strength_ds)
			ecc->strength = nand->ecc_strength_ds;
		if (nand->ecc_step_ds)
			ecc->size = nand->ecc_step_ds;
		/*
		 * no subpage ops, but set subpage-shift to match ecc->steps
		 * so mtd_nandbiterrs tests appropriate boundaries
		 */
		if (!mtd->subpage_sft && !(ecc->steps & (ecc->steps - 1)))
			mtd->subpage_sft = fls(ecc->steps) - 1;

		if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
			debug("%s: ecc mode: %d\n", __func__, ecc->mode);
			/* Only take over when soft ECC wasn't forced and a
			 * fitting BCH strength exists
			 */
			if (ecc->mode != NAND_ECC_SOFT &&
			    !octeontx_nand_calc_bch_ecc_strength(nand)) {
				struct octeontx_nfc *tn =
					to_otx_nfc(nand->controller);

				debug("Using hardware BCH engine support\n");
				ecc->mode = NAND_ECC_HW_SYNDROME;
				ecc->read_page = octeontx_nand_hw_bch_read_page;
				ecc->write_page =
					octeontx_nand_hw_bch_write_page;
				ecc->read_page_raw =
					octeontx_nand_read_page_raw;
				ecc->write_page_raw =
					octeontx_nand_write_page_raw;
				ecc->read_oob = octeontx_nand_read_oob_std;
				ecc->write_oob = octeontx_nand_write_oob_std;

				ecc->calculate = octeontx_nand_bch_calculate;
				ecc->correct = octeontx_nand_bch_correct;
				ecc->hwctl = octeontx_nand_bch_hwctl;

				debug("NAND chip %d using hw_bch\n",
				      tn->selected_chip);
				debug(" %d bytes ECC per %d byte block\n",
				      ecc->bytes, ecc->size);
				debug(" for %d bits of correction per block.",
				      ecc->strength);
				octeontx_nand_calc_ecc_layout(nand);
				octeontx_bch_save_empty_eccmask(nand);
			}
		}
	}
}
1925
1926static int octeontx_nfc_chip_init(struct octeontx_nfc *tn, struct udevice *dev,
1927 ofnode node)
1928{
1929 struct octeontx_nand_chip *chip;
1930 struct nand_chip *nand;
1931 struct mtd_info *mtd;
1932 int ret;
1933
1934 chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
1935 if (!chip)
1936 return -ENOMEM;
1937
1938 debug("%s: Getting chip select\n", __func__);
1939 ret = ofnode_read_s32(node, "reg", &chip->cs);
1940 if (ret) {
1941 dev_err(dev, "could not retrieve reg property: %d\n", ret);
1942 return ret;
1943 }
1944
1945 if (chip->cs >= NAND_MAX_CHIPS) {
1946 dev_err(dev, "invalid reg value: %u (max CS = 7)\n", chip->cs);
1947 return -EINVAL;
1948 }
1949 debug("%s: chip select: %d\n", __func__, chip->cs);
1950 nand = &chip->nand;
1951 nand->controller = &tn->controller;
1952 if (!tn->controller.active)
1953 tn->controller.active = nand;
1954
1955 debug("%s: Setting flash node\n", __func__);
1956 nand_set_flash_node(nand, node);
1957
1958 nand->options = 0;
1959 nand->select_chip = octeontx_nand_select_chip;
1960 nand->cmdfunc = octeontx_nand_cmdfunc;
1961 nand->waitfunc = octeontx_nand_waitfunc;
1962 nand->read_byte = octeontx_nand_read_byte;
1963 nand->read_buf = octeontx_nand_read_buf;
1964 nand->write_buf = octeontx_nand_write_buf;
1965 nand->onfi_set_features = octeontx_nand_set_features;
1966 nand->onfi_get_features = octeontx_nand_get_features;
1967 nand->setup_data_interface = octeontx_nand_setup_dat_intf;
1968
1969 mtd = nand_to_mtd(nand);
1970 debug("%s: mtd: %p\n", __func__, mtd);
1971 mtd->dev->parent = dev;
1972
1973 debug("%s: NDF_MISC: 0x%llx\n", __func__,
1974 readq(tn->base + NDF_MISC));
1975
1976 /* TODO: support more then 1 chip */
1977 debug("%s: Scanning identification\n", __func__);
1978 ret = nand_scan_ident(mtd, 1, NULL);
1979 if (ret)
1980 return ret;
1981
1982 debug("%s: Sizing chip\n", __func__);
1983 octeontx_nfc_chip_sizing(nand);
1984
1985 debug("%s: Scanning tail\n", __func__);
1986 ret = nand_scan_tail(mtd);
1987 if (ret) {
1988 dev_err(dev, "nand_scan_tail failed: %d\n", ret);
1989 return ret;
1990 }
1991
1992 debug("%s: Registering mtd\n", __func__);
1993 ret = nand_register(0, mtd);
1994
1995 debug("%s: Adding tail\n", __func__);
1996 list_add_tail(&chip->node, &tn->chips);
1997 return 0;
1998}
1999
2000static int octeontx_nfc_chips_init(struct octeontx_nfc *tn)
2001{
2002 struct udevice *dev = tn->dev;
Simon Glassa7ece582020-12-19 10:40:14 -07002003 ofnode node = dev_ofnode(dev);
Suneel Garapati9de7d2b2020-08-26 14:37:22 +02002004 ofnode nand_node;
2005 int nr_chips = of_get_child_count(node);
2006 int ret;
2007
2008 debug("%s: node: %s\n", __func__, ofnode_get_name(node));
2009 debug("%s: %d chips\n", __func__, nr_chips);
2010 if (nr_chips > NAND_MAX_CHIPS) {
2011 dev_err(dev, "too many NAND chips: %d\n", nr_chips);
2012 return -EINVAL;
2013 }
2014
2015 if (!nr_chips) {
2016 debug("no DT NAND chips found\n");
2017 return -ENODEV;
2018 }
2019
2020 pr_info("%s: scanning %d chips DTs\n", __func__, nr_chips);
2021
2022 ofnode_for_each_subnode(nand_node, node) {
2023 debug("%s: Calling octeontx_nfc_chip_init(%p, %s, %ld)\n",
2024 __func__, tn, dev->name, nand_node.of_offset);
2025 ret = octeontx_nfc_chip_init(tn, dev, nand_node);
2026 if (ret)
2027 return ret;
2028 }
2029 return 0;
2030}
2031
2032/* Reset NFC and initialize registers. */
2033static int octeontx_nfc_init(struct octeontx_nfc *tn)
2034{
2035 const struct nand_sdr_timings *timings;
2036 u64 ndf_misc;
2037 int rc;
2038
2039 /* Initialize values and reset the fifo */
2040 ndf_misc = readq(tn->base + NDF_MISC);
2041
2042 ndf_misc &= ~NDF_MISC_EX_DIS;
2043 ndf_misc |= (NDF_MISC_BT_DIS | NDF_MISC_RST_FF);
2044 writeq(ndf_misc, tn->base + NDF_MISC);
2045 debug("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));
2046
2047 /* Bring the fifo out of reset */
2048 ndf_misc &= ~(NDF_MISC_RST_FF);
2049
2050 /* Maximum of co-processor cycles for glitch filtering */
2051 ndf_misc |= FIELD_PREP(NDF_MISC_WAIT_CNT, 0x3f);
2052
2053 writeq(ndf_misc, tn->base + NDF_MISC);
2054
2055 /* Set timing parameters to onfi mode 0 for probing */
2056 timings = onfi_async_timing_mode_to_sdr_timings(0);
2057 if (IS_ERR(timings))
2058 return PTR_ERR(timings);
2059 rc = set_default_timings(tn, timings);
2060 if (rc)
2061 return rc;
2062
2063 return 0;
2064}
2065
/*
 * Probe the PCI NAND controller.  If hardware ECC is enabled and the BCH
 * VF is not up yet, the device is queued on a deferred list and probed
 * again later from octeontx_pci_nand_deferred_probe().
 */
static int octeontx_pci_nand_probe(struct udevice *dev)
{
	struct octeontx_nfc *tn = dev_get_priv(dev);
	int ret;
	/* NOTE(review): probe_done is checked below but never set to true
	 * anywhere, so this guard appears to be dead code -- confirm intent
	 */
	static bool probe_done;

	debug("%s(%s) tn: %p\n", __func__, dev->name, tn);
	if (probe_done)
		return 0;

	if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
		bch_vf = octeontx_bch_getv();
		if (!bch_vf) {
			struct octeontx_probe_device *probe_dev;

			debug("%s: bch not yet initialized\n", __func__);
			probe_dev = calloc(sizeof(*probe_dev), 1);
			if (!probe_dev) {
				printf("%s: Out of memory\n", __func__);
				return -ENOMEM;
			}
			probe_dev->dev = dev;
			INIT_LIST_HEAD(&probe_dev->list);
			list_add_tail(&probe_dev->list,
				      &octeontx_pci_nand_deferred_devices);
			debug("%s: Defering probe until after BCH initialization\n",
			      __func__);
			return 0;
		}
	}

	tn->dev = dev;
	INIT_LIST_HEAD(&tn->chips);

	tn->base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM);
	if (!tn->base) {
		ret = -EINVAL;
		goto release;
	}
	debug("%s: bar at %p\n", __func__, tn->base);
	/* One bounce buffer big enough for a max page plus its OOB */
	tn->buf.dmabuflen = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;
	tn->buf.dmabuf = dma_alloc_coherent(tn->buf.dmabuflen,
					    (unsigned long *)&tn->buf.dmaaddr);
	if (!tn->buf.dmabuf) {
		ret = -ENOMEM;
		debug("%s: Could not allocate DMA buffer\n", __func__);
		goto unclk;
	}

	/* one hw-bch response, for one outstanding transaction */
	tn->bch_resp = dma_alloc_coherent(sizeof(*tn->bch_resp),
					  (unsigned long *)&tn->bch_rhandle);

	tn->stat = dma_alloc_coherent(8, (unsigned long *)&tn->stat_addr);
	if (!tn->stat || !tn->bch_resp) {
		debug("%s: Could not allocate bch status or response\n",
		      __func__);
		ret = -ENOMEM;
		goto unclk;
	}

	debug("%s: Calling octeontx_nfc_init()\n", __func__);
	octeontx_nfc_init(tn);
	debug("%s: Initializing chips\n", __func__);
	ret = octeontx_nfc_chips_init(tn);
	debug("%s: init chips ret: %d\n", __func__, ret);
	if (ret) {
		if (ret != -ENODEV)
			dev_err(dev, "failed to init nand chips\n");
		goto unclk;
	}
	dev_info(dev, "probed\n");
	return 0;

	/* NOTE(review): both labels fall through and free nothing, so the
	 * DMA allocations above leak on the error paths -- confirm whether
	 * that is acceptable for a boot-time probe failure
	 */
unclk:
release:
	return ret;
}
2144
/*
 * Quiesce the NAND controller before OS handoff (driver .remove with
 * DM_FLAG_OS_PREPARE): stop DMA, disable command execution, pulse the
 * FIFO reset and clear all interrupt state.
 */
int octeontx_pci_nand_disable(struct udevice *dev)
{
	struct octeontx_nfc *tn = dev_get_priv(dev);
	u64 dma_cfg;
	u64 ndf_misc;

	debug("%s: Disabling NAND device %s\n", __func__, dev->name);
	/* Stop and clear the DMA engine first so no transfer is in flight */
	dma_cfg = readq(tn->base + NDF_DMA_CFG);
	dma_cfg &= ~NDF_DMA_CFG_EN;
	dma_cfg |= NDF_DMA_CFG_CLR;
	writeq(dma_cfg, tn->base + NDF_DMA_CFG);

	/* Disable execution and put FIFO in reset mode */
	ndf_misc = readq(tn->base + NDF_MISC);
	ndf_misc |= NDF_MISC_EX_DIS | NDF_MISC_RST_FF;
	writeq(ndf_misc, tn->base + NDF_MISC);
	/* Release the FIFO reset again (execution stays disabled) */
	ndf_misc &= ~NDF_MISC_RST_FF;
	writeq(ndf_misc, tn->base + NDF_MISC);
#ifdef DEBUG
	printf("%s: NDF_MISC: 0x%llx\n", __func__, readq(tn->base + NDF_MISC));
#endif
	/* Clear any interrupts and enable bits */
	writeq(~0ull, tn->base + NDF_INT_ENA_W1C);
	writeq(~0ull, tn->base + NDF_INT);
	debug("%s: NDF_ST_REG: 0x%llx\n", __func__,
	      readq(tn->base + NDF_ST_REG));
	return 0;
}
2173
2174/**
2175 * Since it's possible (and even likely) that the NAND device will be probed
2176 * before the BCH device has been probed, we may need to defer the probing.
2177 *
2178 * In this case, the initial probe returns success but the actual probing
2179 * is deferred until the BCH VF has been probed.
2180 *
2181 * @return 0 for success, otherwise error
2182 */
2183int octeontx_pci_nand_deferred_probe(void)
2184{
2185 int rc = 0;
2186 struct octeontx_probe_device *pdev;
2187
2188 debug("%s: Performing deferred probing\n", __func__);
2189 list_for_each_entry(pdev, &octeontx_pci_nand_deferred_devices, list) {
2190 debug("%s: Probing %s\n", __func__, pdev->dev->name);
Simon Glass6211d762020-12-19 10:40:10 -07002191 dev_get_flags(pdev->dev) &= ~DM_FLAG_ACTIVATED;
Suneel Garapati9de7d2b2020-08-26 14:37:22 +02002192 rc = device_probe(pdev->dev);
2193 if (rc && rc != -ENODEV) {
2194 printf("%s: Error %d with deferred probe of %s\n",
2195 __func__, rc, pdev->dev->name);
2196 break;
2197 }
2198 }
2199 return rc;
2200}
2201
/* PCI match table: Cavium/OcteonTX NAND flash controller function */
static const struct pci_device_id octeontx_nfc_pci_id_table[] = {
	{ PCI_VDEVICE(CAVIUM, 0xA04F) },
	{}
};
2206
Simon Glassaad29ae2020-12-03 16:55:21 -07002207static int octeontx_nand_of_to_plat(struct udevice *dev)
Suneel Garapati9de7d2b2020-08-26 14:37:22 +02002208{
2209 return 0;
2210}
2211
/* Device-tree compatible strings handled by this driver */
static const struct udevice_id octeontx_nand_ids[] = {
	{ .compatible = "cavium,cn8130-nand" },
	{ },
};
2216
/*
 * Driver-model binding.  DM_FLAG_OS_PREPARE runs .remove (controller
 * quiesce) before booting an OS.
 */
U_BOOT_DRIVER(octeontx_pci_nand) = {
	.name = OCTEONTX_NAND_DRIVER_NAME,
	.id = UCLASS_MTD,
	.of_match = of_match_ptr(octeontx_nand_ids),
	.of_to_plat = octeontx_nand_of_to_plat,
	.probe = octeontx_pci_nand_probe,
	.priv_auto = sizeof(struct octeontx_nfc),
	.remove = octeontx_pci_nand_disable,
	.flags = DM_FLAG_OS_PREPARE,
};

/* Also bindable directly from PCI enumeration */
U_BOOT_PCI_DEVICE(octeontx_pci_nand, octeontx_nfc_pci_id_table);
2229
/*
 * Board hook: force-probe the BCH PF/VF (when HW ECC is enabled) and the
 * NAND controller so they are available at boot.  -ENODEV (no device in
 * the tree) is treated as non-fatal throughout.
 */
void board_nand_init(void)
{
	struct udevice *dev;
	int ret;

	if (IS_ENABLED(CONFIG_NAND_OCTEONTX_HW_ECC)) {
		/* BCH PF must come up before the VF can be used */
		ret = uclass_get_device_by_driver(UCLASS_MISC,
						  DM_DRIVER_GET(octeontx_pci_bchpf),
						  &dev);
		if (ret && ret != -ENODEV) {
			pr_err("Failed to initialize OcteonTX BCH PF controller. (error %d)\n",
			       ret);
		}
		ret = uclass_get_device_by_driver(UCLASS_MISC,
						  DM_DRIVER_GET(octeontx_pci_bchvf),
						  &dev);
		if (ret && ret != -ENODEV) {
			pr_err("Failed to initialize OcteonTX BCH VF controller. (error %d)\n",
			       ret);
		}
	}

	ret = uclass_get_device_by_driver(UCLASS_MTD,
					  DM_DRIVER_GET(octeontx_pci_nand),
					  &dev);
	if (ret && ret != -ENODEV)
		pr_err("Failed to initialize OcteonTX NAND controller. (error %d)\n",
		       ret);
}
2258}