// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>

#include "mtk-snand.h"
#include "mtk-snand-os.h"

struct mtk_snand_of_id {
	enum mtk_snand_soc soc;
};

struct mtk_snand_mtd {
	struct mtk_snand_plat_dev pdev;

	struct clk *nfi_clk;
	struct clk *pad_clk;
	struct clk *ecc_clk;

	void __iomem *nfi_regs;
	void __iomem *ecc_regs;

	int irq;

	bool quad_spi;
	enum mtk_snand_soc soc;

	struct mtd_info mtd;
	struct mtk_snand *snf;
	struct mtk_snand_chip_info cinfo;
	uint8_t *page_cache;
	struct mutex lock;
};

#define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)

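/*
 * Erase handler. The [addr, addr + len) range is expanded outward to
 * eraseblock boundaries; each block is checked against the bad block
 * marker first, and a bad block or a failed erase records
 * instr->fail_addr and stops. A minimal caller-side sketch (values are
 * illustrative only):
 *
 *	struct erase_info ei = {
 *		.addr = 0,			// first eraseblock
 *		.len  = mtd->erasesize,		// one block
 *	};
 *	err = mtd_erase(mtd, &ei);		// dispatches to this handler
 */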
static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	u64 start_addr, end_addr;
	int ret = 0;

	/* Do not allow erase past end of device */
	if ((instr->addr + instr->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to erase beyond end of device\n");
		return -EINVAL;
	}

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;
	if (end_addr & mtd->erasesize_mask) {
		end_addr = (end_addr + mtd->erasesize_mask) &
			   (~mtd->erasesize_mask);
	}

	mutex_lock(&msm->lock);

	while (start_addr < end_addr) {
		if (mtk_snand_block_isbad(msm->snf, start_addr)) {
			instr->fail_addr = start_addr;
			ret = -EIO;
			break;
		}

		ret = mtk_snand_erase_block(msm->snf, start_addr);
		if (ret) {
			instr->fail_addr = start_addr;
			break;
		}

		start_addr += mtd->erasesize;
	}

	mutex_unlock(&msm->lock);

	return ret;
}

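/*
 * Bounce-buffered read loop shared by all OOB modes. Each iteration
 * reads one full page into msm->page_cache (data, then OOB) and copies
 * the requested spans out. 'col' is the byte offset into the first page
 * and 'ooboffs' the offset into the first page's OOB area; both drop to
 * zero for every subsequent page.
 */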
static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
				   struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool raw = ops->mode == MTD_OPS_RAW;
	int ret;

	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
				datcache, oobcache, maxooblen, NULL, raw);
		else
			ret = mtk_snand_read_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret < 0)
			return ret;

		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % mtd->writesize */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen */
			ops->oobretlen += chklen;
		}

		addr += mtd->writesize;
	}

	return 0;
}

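/*
 * mtd->_read_oob entry point. Requests are validated against the chip
 * geometry before taking the lock: data must not run past the device,
 * the OOB start offset must lie within the per-page available OOB, and
 * the total OOB length must fit in the pages remaining from 'from'.
 * All three MTD OOB modes are accepted: MTD_OPS_PLACE_OOB and
 * MTD_OPS_RAW address the spare area as-is (RAW additionally bypasses
 * ECC), while MTD_OPS_AUTO_OOB packs only the free bytes described by
 * the ooblayout.
 */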
static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	uint32_t maxooblen;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;
	default:
		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
		return -EINVAL;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow read past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to read beyond end of device\n");
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		dev_err(msm->pdev.dev, "attempt to start read outside oob\n");
		return -EINVAL;
	}

	if (unlikely(from >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (from >> mtd->writesize_shift)) * maxooblen)) {
		dev_err(msm->pdev.dev,
			"attempt to read beyond end of device\n");
		return -EINVAL;
	}

	mutex_lock(&msm->lock);
	ret = mtk_snand_mtd_read_data(msm, from, ops);
	mutex_unlock(&msm->lock);

	return ret;
}

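/*
 * Write-side counterpart of the read loop above. Partial pages are
 * padded with 0xff before programming: NAND programming can only pull
 * bits from 1 to 0, so 0xff filler leaves the untouched bytes of the
 * page in their erased state.
 */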
static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
				    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen, oobwrlen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool raw = ops->mode == MTD_OPS_RAW;
	int ret;

	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       mtd->writesize - col - chklen);
			len -= chklen;
			col = 0; /* (col + chklen) % mtd->writesize */
			ops->retlen += chklen;
		}

		oobwrlen = 0;
		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       mtd->oobsize - ooboffs - chklen);
			oobwrlen = chklen + ooboffs;
			ooblen -= chklen;
			ooboffs = 0; /* (ooboffs + chklen) % maxooblen */
			ops->oobretlen += chklen;
		}

		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
				datcache, oobcache, oobwrlen, NULL, raw);
		else
			ret = mtk_snand_write_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret)
			return ret;

		addr += mtd->writesize;
	}

	return 0;
}

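/*
 * mtd->_write_oob entry point, with bounds checks mirroring the read
 * path. A minimal caller-side sketch writing one page plus auto-placed
 * OOB bytes (buffer names are illustrative only):
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.datbuf	= data_buf,
 *		.ooblen	= 8,
 *		.oobbuf	= oob_buf,
 *	};
 *	err = mtd_write_oob(mtd, page_addr, &ops);
 */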
static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	uint32_t maxooblen;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;
	default:
		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
		return -EINVAL;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to write beyond end of device\n");
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		dev_err(msm->pdev.dev,
			"attempt to start write outside oob\n");
		return -EINVAL;
	}

	if (unlikely(to >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (to >> mtd->writesize_shift)) * maxooblen)) {
		dev_err(msm->pdev.dev,
			"attempt to write beyond end of device\n");
		return -EINVAL;
	}

	mutex_lock(&msm->lock);
	ret = mtk_snand_mtd_write_data(msm, to, ops);
	mutex_unlock(&msm->lock);

	return ret;
}

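/*
 * Bad-block status and marking are delegated to the mtk-snand library;
 * the wrappers below only serialize access with the same mutex used by
 * the I/O paths.
 */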
static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	int ret;

	mutex_lock(&msm->lock);
	ret = mtk_snand_block_isbad(msm->snf, offs);
	mutex_unlock(&msm->lock);

	return ret;
}

static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	int ret;

	mutex_lock(&msm->lock);
	ret = mtk_snand_block_markbad(msm->snf, offs);
	mutex_unlock(&msm->lock);

	return ret;
}

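/*
 * OOB layout reported to the MTD core. The controller keeps the
 * per-sector FDM (spare) bytes first, followed by the ECC parity bytes.
 * Byte 0 of each sector's FDM region is excluded from the free area, as
 * it conventionally holds the bad block marker, so each of the
 * num_sectors free sections exposes fdm_size - 1 usable bytes.
 */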
static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobecc)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

	if (section)
		return -ERANGE;

	oobecc->offset = msm->cinfo.fdm_size * msm->cinfo.num_sectors;
	oobecc->length = mtd->oobsize - oobecc->offset;

	return 0;
}

static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobfree)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

	if (section >= msm->cinfo.num_sectors)
		return -ERANGE;

	oobfree->length = msm->cinfo.fdm_size - 1;
	oobfree->offset = section * msm->cinfo.fdm_size + 1;

	return 0;
}

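/*
 * Interrupt handler: the mtk-snand library inspects and acknowledges
 * the controller status; a positive return means the interrupt was
 * ours.
 */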
static irqreturn_t mtk_snand_irq(int irq, void *id)
{
	struct mtk_snand_mtd *msm = id;
	int ret;

	ret = mtk_snand_irq_process(msm->snf);
	if (ret > 0)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

static int mtk_snand_enable_clk(struct mtk_snand_mtd *msm)
{
	int ret;

	ret = clk_prepare_enable(msm->nfi_clk);
	if (ret) {
		dev_err(msm->pdev.dev, "unable to enable nfi clk\n");
		return ret;
	}

	ret = clk_prepare_enable(msm->pad_clk);
	if (ret) {
		dev_err(msm->pdev.dev, "unable to enable pad clk\n");
		clk_disable_unprepare(msm->nfi_clk);
		return ret;
	}

	ret = clk_prepare_enable(msm->ecc_clk);
	if (ret) {
		dev_err(msm->pdev.dev, "unable to enable ecc clk\n");
		clk_disable_unprepare(msm->nfi_clk);
		clk_disable_unprepare(msm->pad_clk);
		return ret;
	}

	return 0;
}

static void mtk_snand_disable_clk(struct mtk_snand_mtd *msm)
{
	clk_disable_unprepare(msm->nfi_clk);
	clk_disable_unprepare(msm->pad_clk);
	clk_disable_unprepare(msm->ecc_clk);
}

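/*
 * OOB layout callbacks consulted by the MTD core (e.g. via
 * mtd_ooblayout_free() and the MTD_OPS_AUTO_OOB paths).
 */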
static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
	.ecc = mtk_snand_ooblayout_ecc,
	.free = mtk_snand_ooblayout_free,
};

static struct mtk_snand_of_id mt7622_soc_id = { .soc = SNAND_SOC_MT7622 };
static struct mtk_snand_of_id mt7629_soc_id = { .soc = SNAND_SOC_MT7629 };
static struct mtk_snand_of_id mt7986_soc_id = { .soc = SNAND_SOC_MT7986 };

static const struct of_device_id mtk_snand_ids[] = {
	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_soc_id },
	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_soc_id },
	{ .compatible = "mediatek,mt7986-snand", .data = &mt7986_soc_id },
	{ },
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);

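/*
 * Probe sequence: match the SoC, map the "nfi" and "ecc" register
 * ranges, acquire the three clocks, initialize the mtk-snand library,
 * optionally wire up the interrupt and a 32-bit DMA mask, then size a
 * page cache from the detected chip geometry and register the MTD.
 *
 * A device tree node matching what this driver looks up might resemble
 * the following sketch (unit addresses, register sizes, interrupt and
 * clock phandles are invented for illustration; consult the actual SoC
 * binding):
 *
 *	snand: snand@1100d000 {
 *		compatible = "mediatek,mt7622-snand";
 *		reg = <0 0x1100d000 0 0x1000>,
 *		      <0 0x1100e000 0 0x1000>;
 *		reg-names = "nfi", "ecc";
 *		interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
 *		clocks = <&pericfg CLK_PERI_NFI_PD>,
 *			 <&pericfg CLK_PERI_SNFI_PD>,
 *			 <&pericfg CLK_PERI_NFIECC_PD>;
 *		clock-names = "nfi_clk", "pad_clk", "ecc_clk";
 *		mediatek,quad-spi;
 *	};
 */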
static int mtk_snand_probe(struct platform_device *pdev)
{
	struct mtk_snand_platdata mtk_snand_pdata = {};
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_soc_id;
	const struct mtk_snand_of_id *soc_id;
	struct mtk_snand_mtd *msm;
	struct mtd_info *mtd;
	struct resource *r;
	uint32_t size;
	int ret;

	of_soc_id = of_match_node(mtk_snand_ids, np);
	if (!of_soc_id)
		return -EINVAL;

	soc_id = of_soc_id->data;

	msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
	if (!msm)
		return -ENOMEM;

	/*
	 * Set the device pointer before anything that can fail: the error
	 * path below dereferences msm->pdev.dev, which would otherwise
	 * still be NULL if an ioremap failed.
	 */
	msm->pdev.dev = &pdev->dev;
	msm->quad_spi = of_property_read_bool(np, "mediatek,quad-spi");
	msm->soc = soc_id->soc;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
	msm->nfi_regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(msm->nfi_regs)) {
		ret = PTR_ERR(msm->nfi_regs);
		goto errout1;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
	msm->ecc_regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(msm->ecc_regs)) {
		ret = PTR_ERR(msm->ecc_regs);
		goto errout1;
	}

	msm->nfi_clk = devm_clk_get(msm->pdev.dev, "nfi_clk");
	if (IS_ERR(msm->nfi_clk)) {
		ret = PTR_ERR(msm->nfi_clk);
		dev_err(msm->pdev.dev, "unable to get nfi_clk, err = %d\n",
			ret);
		goto errout1;
	}

	msm->ecc_clk = devm_clk_get(msm->pdev.dev, "ecc_clk");
	if (IS_ERR(msm->ecc_clk)) {
		ret = PTR_ERR(msm->ecc_clk);
		dev_err(msm->pdev.dev, "unable to get ecc_clk, err = %d\n",
			ret);
		goto errout1;
	}

	msm->pad_clk = devm_clk_get(msm->pdev.dev, "pad_clk");
	if (IS_ERR(msm->pad_clk)) {
		ret = PTR_ERR(msm->pad_clk);
		dev_err(msm->pdev.dev, "unable to get pad_clk, err = %d\n",
			ret);
		goto errout1;
	}

	ret = mtk_snand_enable_clk(msm);
	if (ret)
		goto errout1;

	/* Probe SPI-NAND flash */
	mtk_snand_pdata.soc = msm->soc;
	mtk_snand_pdata.quad_spi = msm->quad_spi;
	mtk_snand_pdata.nfi_base = msm->nfi_regs;
	mtk_snand_pdata.ecc_base = msm->ecc_regs;

	ret = mtk_snand_init(&msm->pdev, &mtk_snand_pdata, &msm->snf);
	if (ret)
		goto errout_clk;

	msm->irq = platform_get_irq(pdev, 0);
	if (msm->irq >= 0) {
		ret = devm_request_irq(msm->pdev.dev, msm->irq, mtk_snand_irq,
				       0x0, "mtk-snand", msm);
		if (ret) {
			dev_err(msm->pdev.dev, "failed to request snfi irq\n");
			goto errout2;
		}

		ret = dma_set_mask(msm->pdev.dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(msm->pdev.dev, "failed to set dma mask\n");
			goto errout3;
		}
	}

	mtk_snand_get_chip_info(msm->snf, &msm->cinfo);

	size = msm->cinfo.pagesize + msm->cinfo.sparesize;
	msm->page_cache = devm_kmalloc(msm->pdev.dev, size, GFP_KERNEL);
	if (!msm->page_cache) {
		dev_err(msm->pdev.dev, "failed to allocate page cache\n");
		ret = -ENOMEM;
		goto errout3;
	}

	mutex_init(&msm->lock);

	dev_info(msm->pdev.dev,
		 "chip is %s, size %lluMB, page size %u, oob size %u\n",
		 msm->cinfo.model, msm->cinfo.chipsize >> 20,
		 msm->cinfo.pagesize, msm->cinfo.sparesize);

	/* Initialize mtd for SPI-NAND */
	mtd = &msm->mtd;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;

	mtd_set_of_node(mtd, np);

	mtd->size = msm->cinfo.chipsize;
	mtd->erasesize = msm->cinfo.blocksize;
	mtd->writesize = msm->cinfo.pagesize;
	mtd->writebufsize = mtd->writesize;
	mtd->oobsize = msm->cinfo.sparesize;
	mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);

	mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	mtd->writesize_shift = ffs(mtd->writesize) - 1;
	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	mtd->ooblayout = &mtk_snand_ooblayout;

	mtd->ecc_strength = msm->cinfo.ecc_strength;
	mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
	mtd->ecc_step_size = msm->cinfo.sector_size;

	mtd->_erase = mtk_snand_mtd_erase;
	mtd->_read_oob = mtk_snand_mtd_read_oob;
	mtd->_write_oob = mtk_snand_mtd_write_oob;
	mtd->_block_isbad = mtk_snand_mtd_block_isbad;
	mtd->_block_markbad = mtk_snand_mtd_block_markbad;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(msm->pdev.dev, "failed to register mtd device\n");
		goto errout4;
	}

	platform_set_drvdata(pdev, msm);

	return 0;

errout4:
	devm_kfree(msm->pdev.dev, msm->page_cache);

errout3:
	if (msm->irq >= 0)
		devm_free_irq(msm->pdev.dev, msm->irq, msm);

errout2:
	mtk_snand_cleanup(msm->snf);

errout_clk:
	/* Disable clocks for any failure after mtk_snand_enable_clk() */
	mtk_snand_disable_clk(msm);

errout1:
	devm_kfree(msm->pdev.dev, msm);

	platform_set_drvdata(pdev, NULL);

	return ret;
}

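/*
 * Teardown mirrors probe in reverse. mtd_device_unregister() may fail,
 * e.g. while a partition is still in use, in which case removal is
 * aborted with the device left registered.
 */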
static int mtk_snand_remove(struct platform_device *pdev)
{
	struct mtk_snand_mtd *msm = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &msm->mtd;
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	mtk_snand_cleanup(msm->snf);

	if (msm->irq >= 0)
		devm_free_irq(msm->pdev.dev, msm->irq, msm);

	mtk_snand_disable_clk(msm);

	devm_kfree(msm->pdev.dev, msm->page_cache);
	devm_kfree(msm->pdev.dev, msm);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver mtk_snand_driver = {
	.probe = mtk_snand_probe,
	.remove = mtk_snand_remove,
	.driver = {
		.name = "mtk-snand",
		.of_match_table = mtk_snand_ids,
	},
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");