// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>

#include "mtk-snand.h"
#include "mtk-snand-os.h"

struct mtk_snand_of_id {
        enum mtk_snand_soc soc;
};

struct mtk_snand_mtd {
        struct mtk_snand_plat_dev pdev;

        struct clk *nfi_clk;
        struct clk *pad_clk;
        struct clk *ecc_clk;

        void __iomem *nfi_regs;
        void __iomem *ecc_regs;

        int irq;

        bool quad_spi;
        enum mtk_snand_soc soc;

        struct mtd_info mtd;
        struct mtk_snand *snf;
        struct mtk_snand_chip_info cinfo;
        uint8_t *page_cache;
        struct mutex lock;
};

#define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)

static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
        u64 start_addr, end_addr;
        int ret = 0;

        /* Do not allow erase past end of device */
        if ((instr->addr + instr->len) > mtd->size) {
                dev_err(msm->pdev.dev,
                        "attempt to erase beyond end of device\n");
                return -EINVAL;
        }

        /* Expand the range to eraseblock boundaries */
        start_addr = instr->addr & (~mtd->erasesize_mask);
        end_addr = instr->addr + instr->len;
        if (end_addr & mtd->erasesize_mask) {
                end_addr = (end_addr + mtd->erasesize_mask) &
                           (~mtd->erasesize_mask);
        }

        mutex_lock(&msm->lock);

        while (start_addr < end_addr) {
                if (mtk_snand_block_isbad(msm->snf, start_addr)) {
                        instr->fail_addr = start_addr;
                        ret = -EIO;
                        break;
                }

                ret = mtk_snand_erase_block(msm->snf, start_addr);
                if (ret) {
                        instr->fail_addr = start_addr;
                        break;
                }

                start_addr += mtd->erasesize;
        }

        mutex_unlock(&msm->lock);

        return ret;
}

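/*
 * Worked example of the range rounding above (numbers are illustrative):
 * with a 128 KiB eraseblock (erasesize_mask = 0x1ffff), a request of
 * addr = 0x21000, len = 0x40000 is expanded to [0x20000, 0x80000), i.e.
 * the blocks at 0x20000, 0x40000 and 0x60000 are erased.
 */
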
static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
                                   struct mtd_oob_ops *ops)
{
        struct mtd_info *mtd = &msm->mtd;
        size_t len, ooblen, maxooblen, chklen;
        uint32_t col, ooboffs;
        uint8_t *datcache, *oobcache;
        bool ecc_failed = false, raw = ops->mode == MTD_OPS_RAW;
        int ret, max_bitflips = 0;

        col = addr & mtd->writesize_mask;
        addr &= ~mtd->writesize_mask;
        maxooblen = mtd_oobavail(mtd, ops);
        ooboffs = ops->ooboffs;
        ooblen = ops->ooblen;
        len = ops->len;

        datcache = len ? msm->page_cache : NULL;
        oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

        ops->oobretlen = 0;
        ops->retlen = 0;

        while (len || ooblen) {
                if (ops->mode == MTD_OPS_AUTO_OOB)
                        ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
                                datcache, oobcache, maxooblen, NULL, raw);
                else
                        ret = mtk_snand_read_page(msm->snf, addr, datcache,
                                oobcache, raw);

                if (ret < 0 && ret != -EBADMSG)
                        return ret;

                if (ret == -EBADMSG) {
                        mtd->ecc_stats.failed++;
                        ecc_failed = true;
                } else {
                        mtd->ecc_stats.corrected += ret;
                        max_bitflips = max_t(int, ret, max_bitflips);
                }

                if (len) {
                        /* Move data */
                        chklen = mtd->writesize - col;
                        if (chklen > len)
                                chklen = len;

                        memcpy(ops->datbuf + ops->retlen, datcache + col,
                               chklen);
                        len -= chklen;
                        col = 0; /* all subsequent pages are read from column 0 */
                        ops->retlen += chklen;
                }

                if (ooblen) {
                        /* Move oob */
                        chklen = maxooblen - ooboffs;
                        if (chklen > ooblen)
                                chklen = ooblen;

                        memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
                               chklen);
                        ooblen -= chklen;
                        ooboffs = 0; /* all subsequent pages start at oob offset 0 */
                        ops->oobretlen += chklen;
                }

                addr += mtd->writesize;
        }

        return ecc_failed ? -EBADMSG : max_bitflips;
}

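/*
 * Note on the return convention above: following the MTD convention for
 * _read_oob, an uncorrectable read returns -EBADMSG, while a successful
 * read returns the maximum number of bitflips corrected in any one page.
 * The MTD core compares that value against mtd->bitflip_threshold to
 * decide whether to report -EUCLEAN to the caller.
 */
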
static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
                                  struct mtd_oob_ops *ops)
{
        struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
        uint32_t maxooblen;
        int ret;

        if (!ops->oobbuf && !ops->datbuf) {
                if (ops->ooblen || ops->len)
                        return -EINVAL;

                return 0;
        }

        switch (ops->mode) {
        case MTD_OPS_PLACE_OOB:
        case MTD_OPS_AUTO_OOB:
        case MTD_OPS_RAW:
                break;
        default:
                dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
                return -EINVAL;
        }

        maxooblen = mtd_oobavail(mtd, ops);

        /* Do not allow read past end of device */
        if (ops->datbuf && (from + ops->len) > mtd->size) {
                dev_err(msm->pdev.dev,
                        "attempt to read beyond end of device\n");
                return -EINVAL;
        }

        if (unlikely(ops->ooboffs >= maxooblen)) {
                dev_err(msm->pdev.dev, "attempt to start read outside oob\n");
                return -EINVAL;
        }

        if (unlikely(from >= mtd->size ||
            ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
            (from >> mtd->writesize_shift)) * maxooblen)) {
                dev_err(msm->pdev.dev,
                        "attempt to read beyond end of device\n");
                return -EINVAL;
        }

        mutex_lock(&msm->lock);
        ret = mtk_snand_mtd_read_data(msm, from, ops);
        mutex_unlock(&msm->lock);

        return ret;
}

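/*
 * A minimal caller-side sketch (hypothetical, not part of this driver):
 * reading one page of data plus its free OOB bytes through the MTD core.
 *
 *        struct mtd_oob_ops ops = {
 *                .mode   = MTD_OPS_AUTO_OOB,
 *                .len    = mtd->writesize,
 *                .datbuf = data_buf,
 *                .ooblen = mtd->oobavail,
 *                .oobbuf = oob_buf,
 *        };
 *        int err = mtd_read_oob(mtd, page_addr, &ops);
 *
 * data_buf, oob_buf and page_addr are assumed to be provided by the
 * caller; the MTD core routes this call to mtk_snand_mtd_read_oob() above.
 */
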
static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
                                    struct mtd_oob_ops *ops)
{
        struct mtd_info *mtd = &msm->mtd;
        size_t len, ooblen, maxooblen, chklen, oobwrlen;
        uint32_t col, ooboffs;
        uint8_t *datcache, *oobcache;
        bool raw = ops->mode == MTD_OPS_RAW;
        int ret;

        col = addr & mtd->writesize_mask;
        addr &= ~mtd->writesize_mask;
        maxooblen = mtd_oobavail(mtd, ops);
        ooboffs = ops->ooboffs;
        ooblen = ops->ooblen;
        len = ops->len;

        datcache = len ? msm->page_cache : NULL;
        oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

        ops->oobretlen = 0;
        ops->retlen = 0;

        while (len || ooblen) {
                if (len) {
                        /* Move data, padding untouched bytes with 0xff */
                        chklen = mtd->writesize - col;
                        if (chklen > len)
                                chklen = len;

                        memset(datcache, 0xff, col);
                        memcpy(datcache + col, ops->datbuf + ops->retlen,
                               chklen);
                        memset(datcache + col + chklen, 0xff,
                               mtd->writesize - col - chklen);
                        len -= chklen;
                        col = 0; /* all subsequent pages are written from column 0 */
                        ops->retlen += chklen;
                }

                oobwrlen = 0;
                if (ooblen) {
                        /* Move oob, padding untouched bytes with 0xff */
                        chklen = maxooblen - ooboffs;
                        if (chklen > ooblen)
                                chklen = ooblen;

                        memset(oobcache, 0xff, ooboffs);
                        memcpy(oobcache + ooboffs,
                               ops->oobbuf + ops->oobretlen, chklen);
                        memset(oobcache + ooboffs + chklen, 0xff,
                               mtd->oobsize - ooboffs - chklen);
                        oobwrlen = chklen + ooboffs;
                        ooblen -= chklen;
                        ooboffs = 0; /* all subsequent pages start at oob offset 0 */
                        ops->oobretlen += chklen;
                }

                if (ops->mode == MTD_OPS_AUTO_OOB)
                        ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
                                datcache, oobcache, oobwrlen, NULL, raw);
                else
                        ret = mtk_snand_write_page(msm->snf, addr, datcache,
                                oobcache, raw);

                if (ret)
                        return ret;

                addr += mtd->writesize;
        }

        return 0;
}

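/*
 * Worked example of the 0xff padding above (numbers are illustrative):
 * writing 16 bytes at column 100 of a 2048-byte page fills datcache as
 * bytes [0, 100) = 0xff, [100, 116) = caller data, [116, 2048) = 0xff.
 * 0xff corresponds to the erased state of NAND cells, so the untouched
 * parts of the page are not disturbed by the program operation.
 */
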
static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
                                   struct mtd_oob_ops *ops)
{
        struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
        uint32_t maxooblen;
        int ret;

        if (!ops->oobbuf && !ops->datbuf) {
                if (ops->ooblen || ops->len)
                        return -EINVAL;

                return 0;
        }

        switch (ops->mode) {
        case MTD_OPS_PLACE_OOB:
        case MTD_OPS_AUTO_OOB:
        case MTD_OPS_RAW:
                break;
        default:
                dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
                return -EINVAL;
        }

        maxooblen = mtd_oobavail(mtd, ops);

        /* Do not allow write past end of device */
        if (ops->datbuf && (to + ops->len) > mtd->size) {
                dev_err(msm->pdev.dev,
                        "attempt to write beyond end of device\n");
                return -EINVAL;
        }

        if (unlikely(ops->ooboffs >= maxooblen)) {
                dev_err(msm->pdev.dev,
                        "attempt to start write outside oob\n");
                return -EINVAL;
        }

        if (unlikely(to >= mtd->size ||
            ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
            (to >> mtd->writesize_shift)) * maxooblen)) {
                dev_err(msm->pdev.dev,
                        "attempt to write beyond end of device\n");
                return -EINVAL;
        }

        mutex_lock(&msm->lock);
        ret = mtk_snand_mtd_write_data(msm, to, ops);
        mutex_unlock(&msm->lock);

        return ret;
}

static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
        struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
        int ret;

        mutex_lock(&msm->lock);
        ret = mtk_snand_block_isbad(msm->snf, offs);
        mutex_unlock(&msm->lock);

        return ret;
}

static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
        struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
        int ret;

        mutex_lock(&msm->lock);
        ret = mtk_snand_block_markbad(msm->snf, offs);
        mutex_unlock(&msm->lock);

        return ret;
}

static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
                                   struct mtd_oob_region *oobecc)
{
        struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

        if (section)
                return -ERANGE;

        oobecc->offset = msm->cinfo.fdm_size * msm->cinfo.num_sectors;
        oobecc->length = mtd->oobsize - oobecc->offset;

        return 0;
}

static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
                                    struct mtd_oob_region *oobfree)
{
        struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

        if (section >= msm->cinfo.num_sectors)
                return -ERANGE;

        /* Byte 0 of each sector's FDM area is reserved (bad block marker) */
        oobfree->length = msm->cinfo.fdm_size - 1;
        oobfree->offset = section * msm->cinfo.fdm_size + 1;

        return 0;
}

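/*
 * Worked layout example (numbers are illustrative): for a 2048+64 page
 * with num_sectors = 4 and fdm_size = 8, the free regions reported above
 * are OOB bytes 1-7, 9-15, 17-23 and 25-31 (7 bytes per sector, 28 in
 * total, matching mtd->oobavail as set in probe), while the ECC region
 * spans bytes 32-63.
 */
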
static irqreturn_t mtk_snand_irq(int irq, void *id)
{
        struct mtk_snand_mtd *msm = id;
        int ret;

        ret = mtk_snand_irq_process(msm->snf);
        if (ret > 0)
                return IRQ_HANDLED;

        return IRQ_NONE;
}

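/*
 * Returning IRQ_NONE when mtk_snand_irq_process() reports nothing pending
 * tells the kernel this device did not raise the interrupt, which keeps
 * the spurious-interrupt accounting correct should the line be shared.
 */
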
static int mtk_snand_enable_clk(struct mtk_snand_mtd *msm)
{
        int ret;

        ret = clk_prepare_enable(msm->nfi_clk);
        if (ret) {
                dev_err(msm->pdev.dev, "unable to enable nfi clk\n");
                return ret;
        }

        ret = clk_prepare_enable(msm->pad_clk);
        if (ret) {
                dev_err(msm->pdev.dev, "unable to enable pad clk\n");
                clk_disable_unprepare(msm->nfi_clk);
                return ret;
        }

        ret = clk_prepare_enable(msm->ecc_clk);
        if (ret) {
                dev_err(msm->pdev.dev, "unable to enable ecc clk\n");
                clk_disable_unprepare(msm->nfi_clk);
                clk_disable_unprepare(msm->pad_clk);
                return ret;
        }

        return 0;
}

static void mtk_snand_disable_clk(struct mtk_snand_mtd *msm)
{
        clk_disable_unprepare(msm->nfi_clk);
        clk_disable_unprepare(msm->pad_clk);
        clk_disable_unprepare(msm->ecc_clk);
}

static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
        .ecc = mtk_snand_ooblayout_ecc,
        .free = mtk_snand_ooblayout_free,
};

static struct mtk_snand_of_id mt7622_soc_id = { .soc = SNAND_SOC_MT7622 };
static struct mtk_snand_of_id mt7629_soc_id = { .soc = SNAND_SOC_MT7629 };
static struct mtk_snand_of_id mt7986_soc_id = { .soc = SNAND_SOC_MT7986 };

static const struct of_device_id mtk_snand_ids[] = {
        { .compatible = "mediatek,mt7622-snand", .data = &mt7622_soc_id },
        { .compatible = "mediatek,mt7629-snand", .data = &mt7629_soc_id },
        { .compatible = "mediatek,mt7986-snand", .data = &mt7986_soc_id },
        { },
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);

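/*
 * Sketch of a matching device tree node (hypothetical values; the exact
 * register addresses, interrupt and clock phandles are board- and
 * SoC-specific). The reg-names, clock-names and the "mediatek,quad-spi"
 * property correspond to what the probe function below looks up:
 *
 *        snand: snand@1100d000 {
 *                compatible = "mediatek,mt7622-snand";
 *                reg = <0 0x1100d000 0 0x1000>, <0 0x1100e000 0 0x1000>;
 *                reg-names = "nfi", "ecc";
 *                interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
 *                clocks = <&pericfg CLK_PERI_NFI_PD>,
 *                         <&pericfg CLK_PERI_SNFI_PD>,
 *                         <&pericfg CLK_PERI_NFIECC_PD>;
 *                clock-names = "nfi_clk", "pad_clk", "ecc_clk";
 *                mediatek,quad-spi;
 *        };
 */
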
static int mtk_snand_probe(struct platform_device *pdev)
{
        struct mtk_snand_platdata mtk_snand_pdata = {};
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *of_soc_id;
        const struct mtk_snand_of_id *soc_id;
        struct mtk_snand_mtd *msm;
        struct mtd_info *mtd;
        struct resource *r;
        uint32_t size;
        int ret;

        of_soc_id = of_match_node(mtk_snand_ids, np);
        if (!of_soc_id)
                return -EINVAL;

        soc_id = of_soc_id->data;

        msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
        if (!msm)
                return -ENOMEM;

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
        msm->nfi_regs = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(msm->nfi_regs)) {
                ret = PTR_ERR(msm->nfi_regs);
                goto errout1;
        }

        r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
        msm->ecc_regs = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(msm->ecc_regs)) {
                ret = PTR_ERR(msm->ecc_regs);
                goto errout1;
        }

        msm->pdev.dev = &pdev->dev;
        msm->quad_spi = of_property_read_bool(np, "mediatek,quad-spi");
        msm->soc = soc_id->soc;

        msm->nfi_clk = devm_clk_get(msm->pdev.dev, "nfi_clk");
        if (IS_ERR(msm->nfi_clk)) {
                ret = PTR_ERR(msm->nfi_clk);
                dev_err(msm->pdev.dev, "unable to get nfi_clk, err = %d\n",
                        ret);
                goto errout1;
        }

        msm->ecc_clk = devm_clk_get(msm->pdev.dev, "ecc_clk");
        if (IS_ERR(msm->ecc_clk)) {
                ret = PTR_ERR(msm->ecc_clk);
                dev_err(msm->pdev.dev, "unable to get ecc_clk, err = %d\n",
                        ret);
                goto errout1;
        }

        msm->pad_clk = devm_clk_get(msm->pdev.dev, "pad_clk");
        if (IS_ERR(msm->pad_clk)) {
                ret = PTR_ERR(msm->pad_clk);
                dev_err(msm->pdev.dev, "unable to get pad_clk, err = %d\n",
                        ret);
                goto errout1;
        }

        ret = mtk_snand_enable_clk(msm);
        if (ret)
                goto errout1;

        /* Probe SPI-NAND Flash */
        mtk_snand_pdata.soc = msm->soc;
        mtk_snand_pdata.quad_spi = msm->quad_spi;
        mtk_snand_pdata.nfi_base = msm->nfi_regs;
        mtk_snand_pdata.ecc_base = msm->ecc_regs;

        ret = mtk_snand_init(&msm->pdev, &mtk_snand_pdata, &msm->snf);
        if (ret)
                goto errout_clk;

        msm->irq = platform_get_irq(pdev, 0);
        if (msm->irq >= 0) {
                ret = devm_request_irq(msm->pdev.dev, msm->irq, mtk_snand_irq,
                                       0x0, "mtk-snand", msm);
                if (ret) {
                        dev_err(msm->pdev.dev, "failed to request snfi irq\n");
                        goto errout2;
                }

                ret = dma_set_mask(msm->pdev.dev, DMA_BIT_MASK(32));
                if (ret) {
                        dev_err(msm->pdev.dev, "failed to set dma mask\n");
                        goto errout3;
                }
        }

        mtk_snand_get_chip_info(msm->snf, &msm->cinfo);

        size = msm->cinfo.pagesize + msm->cinfo.sparesize;
        msm->page_cache = devm_kmalloc(msm->pdev.dev, size, GFP_KERNEL);
        if (!msm->page_cache) {
                dev_err(msm->pdev.dev, "failed to allocate page cache\n");
                ret = -ENOMEM;
                goto errout3;
        }

        mutex_init(&msm->lock);

        dev_info(msm->pdev.dev,
                 "chip is %s, size %lluMB, page size %u, oob size %u\n",
                 msm->cinfo.model, msm->cinfo.chipsize >> 20,
                 msm->cinfo.pagesize, msm->cinfo.sparesize);

        /* Initialize mtd for SPI-NAND */
        mtd = &msm->mtd;

        mtd->owner = THIS_MODULE;
        mtd->dev.parent = &pdev->dev;
        mtd->type = MTD_NANDFLASH;
        mtd->flags = MTD_CAP_NANDFLASH;

        mtd_set_of_node(mtd, np);

        mtd->size = msm->cinfo.chipsize;
        mtd->erasesize = msm->cinfo.blocksize;
        mtd->writesize = msm->cinfo.pagesize;
        mtd->writebufsize = mtd->writesize;
        mtd->oobsize = msm->cinfo.sparesize;
        mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);

        mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
        mtd->writesize_shift = ffs(mtd->writesize) - 1;
        mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
        mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

        mtd->ooblayout = &mtk_snand_ooblayout;

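        /*
         * bitflip_threshold is the per-page bitflip count at which the MTD
         * core starts returning -EUCLEAN ("data moved/rewrite advised") to
         * upper layers. 3/4 of the ECC strength is a common heuristic:
         * e.g. with 12-bit-per-sector ECC, reads that correct 9 or more
         * bitflips in a page are flagged as unclean.
         */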
        mtd->ecc_strength = msm->cinfo.ecc_strength;
        mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
        mtd->ecc_step_size = msm->cinfo.sector_size;

        mtd->_erase = mtk_snand_mtd_erase;
        mtd->_read_oob = mtk_snand_mtd_read_oob;
        mtd->_write_oob = mtk_snand_mtd_write_oob;
        mtd->_block_isbad = mtk_snand_mtd_block_isbad;
        mtd->_block_markbad = mtk_snand_mtd_block_markbad;

        ret = mtd_device_register(mtd, NULL, 0);
        if (ret) {
                dev_err(msm->pdev.dev, "failed to register mtd partition\n");
                goto errout4;
        }

        platform_set_drvdata(pdev, msm);

        return 0;

errout4:
        devm_kfree(msm->pdev.dev, msm->page_cache);

errout3:
        if (msm->irq >= 0)
                devm_free_irq(msm->pdev.dev, msm->irq, msm);

errout2:
        mtk_snand_cleanup(msm->snf);

errout_clk:
        /* Undo mtk_snand_enable_clk() for failures after clocks were enabled */
        mtk_snand_disable_clk(msm);

errout1:
        devm_kfree(msm->pdev.dev, msm);

        platform_set_drvdata(pdev, NULL);

        return ret;
}

static int mtk_snand_remove(struct platform_device *pdev)
{
        struct mtk_snand_mtd *msm = platform_get_drvdata(pdev);
        struct mtd_info *mtd = &msm->mtd;
        int ret;

        ret = mtd_device_unregister(mtd);
        if (ret)
                return ret;

        mtk_snand_cleanup(msm->snf);

        if (msm->irq >= 0)
                devm_free_irq(msm->pdev.dev, msm->irq, msm);

        mtk_snand_disable_clk(msm);

        devm_kfree(msm->pdev.dev, msm->page_cache);
        devm_kfree(msm->pdev.dev, msm);

        platform_set_drvdata(pdev, NULL);

        return 0;
}

static struct platform_driver mtk_snand_driver = {
        .probe = mtk_snand_probe,
        .remove = mtk_snand_remove,
        .driver = {
                .name = "mtk-snand",
                .of_match_table = mtk_snand_ids,
        },
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");