blob: 09dc34d09330d0290ea12aabc4a425d9f8854b16 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */
7
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/init.h>
11#include <linux/device.h>
12#include <linux/mutex.h>
13#include <linux/clk.h>
14#include <linux/slab.h>
15#include <linux/interrupt.h>
16#include <linux/dma-mapping.h>
17#include <linux/wait.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/partitions.h>
20#include <linux/of_platform.h>
21
22#include "mtk-snand.h"
23#include "mtk-snand-os.h"
24
25struct mtk_snand_of_id {
26 enum mtk_snand_soc soc;
developer4a752952021-07-14 15:59:06 +080027 bool en_ecc_clk;
28 bool en_nfi_hclk;
developerfd40db22021-04-29 10:08:25 +080029};
30
31struct mtk_snand_mtd {
32 struct mtk_snand_plat_dev pdev;
developer4a752952021-07-14 15:59:06 +080033 struct mtk_snand_of_id *soc_id;
developerfd40db22021-04-29 10:08:25 +080034
35 struct clk *nfi_clk;
36 struct clk *pad_clk;
37 struct clk *ecc_clk;
developer4a752952021-07-14 15:59:06 +080038 struct clk *nfi_hclk;
developerfd40db22021-04-29 10:08:25 +080039
40 void __iomem *nfi_regs;
41 void __iomem *ecc_regs;
42
43 int irq;
44
45 bool quad_spi;
46 enum mtk_snand_soc soc;
47
48 struct mtd_info mtd;
49 struct mtk_snand *snf;
50 struct mtk_snand_chip_info cinfo;
51 uint8_t *page_cache;
52 struct mutex lock;
53};
54
55#define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)
56
57static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
58{
59 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
60 u64 start_addr, end_addr;
61 int ret;
62
63 /* Do not allow write past end of device */
64 if ((instr->addr + instr->len) > mtd->size) {
65 dev_err(msm->pdev.dev,
66 "attempt to erase beyond end of device\n");
67 return -EINVAL;
68 }
69
70 start_addr = instr->addr & (~mtd->erasesize_mask);
71 end_addr = instr->addr + instr->len;
72 if (end_addr & mtd->erasesize_mask) {
73 end_addr = (end_addr + mtd->erasesize_mask) &
74 (~mtd->erasesize_mask);
75 }
76
77 mutex_lock(&msm->lock);
78
79 while (start_addr < end_addr) {
80 if (mtk_snand_block_isbad(msm->snf, start_addr)) {
81 instr->fail_addr = start_addr;
82 ret = -EIO;
83 break;
84 }
85
86 ret = mtk_snand_erase_block(msm->snf, start_addr);
87 if (ret) {
88 instr->fail_addr = start_addr;
89 break;
90 }
91
92 start_addr += mtd->erasesize;
93 }
94
95 mutex_unlock(&msm->lock);
96
97 return ret;
98}
99
100static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
101 struct mtd_oob_ops *ops)
102{
103 struct mtd_info *mtd = &msm->mtd;
104 size_t len, ooblen, maxooblen, chklen;
105 uint32_t col, ooboffs;
106 uint8_t *datcache, *oobcache;
developer02fcbaa2021-05-11 11:18:11 +0800107 bool ecc_failed = false, raw = ops->mode == MTD_OPS_RAW ? true : false;
108 int ret, max_bitflips = 0;
developerfd40db22021-04-29 10:08:25 +0800109
110 col = addr & mtd->writesize_mask;
111 addr &= ~mtd->writesize_mask;
112 maxooblen = mtd_oobavail(mtd, ops);
113 ooboffs = ops->ooboffs;
114 ooblen = ops->ooblen;
115 len = ops->len;
116
117 datcache = len ? msm->page_cache : NULL;
118 oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;
119
120 ops->oobretlen = 0;
121 ops->retlen = 0;
122
123 while (len || ooblen) {
124 if (ops->mode == MTD_OPS_AUTO_OOB)
125 ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
126 datcache, oobcache, maxooblen, NULL, raw);
127 else
128 ret = mtk_snand_read_page(msm->snf, addr, datcache,
129 oobcache, raw);
130
developer02fcbaa2021-05-11 11:18:11 +0800131 if (ret < 0 && ret != -EBADMSG)
developerfd40db22021-04-29 10:08:25 +0800132 return ret;
133
developer02fcbaa2021-05-11 11:18:11 +0800134 if (ret == -EBADMSG) {
135 mtd->ecc_stats.failed++;
136 ecc_failed = true;
137 } else {
138 mtd->ecc_stats.corrected += ret;
139 max_bitflips = max_t(int, ret, max_bitflips);
140 }
141
developerfd40db22021-04-29 10:08:25 +0800142 if (len) {
143 /* Move data */
144 chklen = mtd->writesize - col;
145 if (chklen > len)
146 chklen = len;
147
148 memcpy(ops->datbuf + ops->retlen, datcache + col,
149 chklen);
150 len -= chklen;
151 col = 0; /* (col + chklen) % */
152 ops->retlen += chklen;
153 }
154
155 if (ooblen) {
156 /* Move oob */
157 chklen = maxooblen - ooboffs;
158 if (chklen > ooblen)
159 chklen = ooblen;
160
161 memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
162 chklen);
163 ooblen -= chklen;
164 ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
165 ops->oobretlen += chklen;
166 }
167
168 addr += mtd->writesize;
169 }
170
developer02fcbaa2021-05-11 11:18:11 +0800171 return ecc_failed ? -EBADMSG : max_bitflips;
developerfd40db22021-04-29 10:08:25 +0800172}
173
174static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
175 struct mtd_oob_ops *ops)
176{
177 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
178 uint32_t maxooblen;
179 int ret;
180
181 if (!ops->oobbuf && !ops->datbuf) {
182 if (ops->ooblen || ops->len)
183 return -EINVAL;
184
185 return 0;
186 }
187
188 switch (ops->mode) {
189 case MTD_OPS_PLACE_OOB:
190 case MTD_OPS_AUTO_OOB:
191 case MTD_OPS_RAW:
192 break;
193 default:
194 dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
195 return -EINVAL;
196 }
197
198 maxooblen = mtd_oobavail(mtd, ops);
199
200 /* Do not allow read past end of device */
201 if (ops->datbuf && (from + ops->len) > mtd->size) {
202 dev_err(msm->pdev.dev,
203 "attempt to read beyond end of device\n");
204 return -EINVAL;
205 }
206
207 if (unlikely(ops->ooboffs >= maxooblen)) {
208 dev_err(msm->pdev.dev, "attempt to start read outside oob\n");
209 return -EINVAL;
210 }
211
212 if (unlikely(from >= mtd->size ||
213 ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
214 (from >> mtd->writesize_shift)) * maxooblen)) {
215 dev_err(msm->pdev.dev,
216 "attempt to read beyond end of device\n");
217 return -EINVAL;
218 }
219
220 mutex_lock(&msm->lock);
221 ret = mtk_snand_mtd_read_data(msm, from, ops);
222 mutex_unlock(&msm->lock);
223
224 return ret;
225}
226
227static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
228 struct mtd_oob_ops *ops)
229{
230 struct mtd_info *mtd = &msm->mtd;
231 size_t len, ooblen, maxooblen, chklen, oobwrlen;
232 uint32_t col, ooboffs;
233 uint8_t *datcache, *oobcache;
234 bool raw = ops->mode == MTD_OPS_RAW ? true : false;
235 int ret;
236
237 col = addr & mtd->writesize_mask;
238 addr &= ~mtd->writesize_mask;
239 maxooblen = mtd_oobavail(mtd, ops);
240 ooboffs = ops->ooboffs;
241 ooblen = ops->ooblen;
242 len = ops->len;
243
244 datcache = len ? msm->page_cache : NULL;
245 oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;
246
247 ops->oobretlen = 0;
248 ops->retlen = 0;
249
250 while (len || ooblen) {
251 if (len) {
252 /* Move data */
253 chklen = mtd->writesize - col;
254 if (chklen > len)
255 chklen = len;
256
257 memset(datcache, 0xff, col);
258 memcpy(datcache + col, ops->datbuf + ops->retlen,
259 chklen);
260 memset(datcache + col + chklen, 0xff,
261 mtd->writesize - col - chklen);
262 len -= chklen;
263 col = 0; /* (col + chklen) % */
264 ops->retlen += chklen;
265 }
266
267 oobwrlen = 0;
268 if (ooblen) {
269 /* Move oob */
270 chklen = maxooblen - ooboffs;
271 if (chklen > ooblen)
272 chklen = ooblen;
273
274 memset(oobcache, 0xff, ooboffs);
275 memcpy(oobcache + ooboffs,
276 ops->oobbuf + ops->oobretlen, chklen);
277 memset(oobcache + ooboffs + chklen, 0xff,
278 mtd->oobsize - ooboffs - chklen);
279 oobwrlen = chklen + ooboffs;
280 ooblen -= chklen;
281 ooboffs = 0; /* (ooboffs + chklen) % maxooblen; */
282 ops->oobretlen += chklen;
283 }
284
285 if (ops->mode == MTD_OPS_AUTO_OOB)
286 ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
287 datcache, oobcache, oobwrlen, NULL, raw);
288 else
289 ret = mtk_snand_write_page(msm->snf, addr, datcache,
290 oobcache, raw);
291
292 if (ret)
293 return ret;
294
295 addr += mtd->writesize;
296 }
297
298 return 0;
299}
300
301static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
302 struct mtd_oob_ops *ops)
303{
304 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
305 uint32_t maxooblen;
306 int ret;
307
308 if (!ops->oobbuf && !ops->datbuf) {
309 if (ops->ooblen || ops->len)
310 return -EINVAL;
311
312 return 0;
313 }
314
315 switch (ops->mode) {
316 case MTD_OPS_PLACE_OOB:
317 case MTD_OPS_AUTO_OOB:
318 case MTD_OPS_RAW:
319 break;
320 default:
321 dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
322 return -EINVAL;
323 }
324
325 maxooblen = mtd_oobavail(mtd, ops);
326
327 /* Do not allow write past end of device */
328 if (ops->datbuf && (to + ops->len) > mtd->size) {
329 dev_err(msm->pdev.dev,
330 "attempt to write beyond end of device\n");
331 return -EINVAL;
332 }
333
334 if (unlikely(ops->ooboffs >= maxooblen)) {
335 dev_err(msm->pdev.dev,
336 "attempt to start write outside oob\n");
337 return -EINVAL;
338 }
339
340 if (unlikely(to >= mtd->size ||
341 ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
342 (to >> mtd->writesize_shift)) * maxooblen)) {
343 dev_err(msm->pdev.dev,
344 "attempt to write beyond end of device\n");
345 return -EINVAL;
346 }
347
348 mutex_lock(&msm->lock);
349 ret = mtk_snand_mtd_write_data(msm, to, ops);
350 mutex_unlock(&msm->lock);
351
352 return ret;
353}
354
355static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
356{
357 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
358 int ret;
359
360 mutex_lock(&msm->lock);
361 ret = mtk_snand_block_isbad(msm->snf, offs);
362 mutex_unlock(&msm->lock);
363
364 return ret;
365}
366
367static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
368{
369 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
370 int ret;
371
372 mutex_lock(&msm->lock);
373 ret = mtk_snand_block_markbad(msm->snf, offs);
374 mutex_unlock(&msm->lock);
375
376 return ret;
377}
378
379static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
380 struct mtd_oob_region *oobecc)
381{
382 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
383
384 if (section)
385 return -ERANGE;
386
387 oobecc->offset = msm->cinfo.fdm_size * msm->cinfo.num_sectors;
388 oobecc->length = mtd->oobsize - oobecc->offset;
389
390 return 0;
391}
392
393static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
394 struct mtd_oob_region *oobfree)
395{
396 struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
397
398 if (section >= msm->cinfo.num_sectors)
399 return -ERANGE;
400
401 oobfree->length = msm->cinfo.fdm_size - 1;
402 oobfree->offset = section * msm->cinfo.fdm_size + 1;
403
404 return 0;
405}
406
407static irqreturn_t mtk_snand_irq(int irq, void *id)
408{
409 struct mtk_snand_mtd *msm = id;
410 int ret;
411
412 ret = mtk_snand_irq_process(msm->snf);
413 if (ret > 0)
414 return IRQ_HANDLED;
415
416 return IRQ_NONE;
417}
418
419static int mtk_snand_enable_clk(struct mtk_snand_mtd *msm)
420{
developer4a752952021-07-14 15:59:06 +0800421 struct mtk_snand_of_id *soc_id = msm->soc_id;
developerfd40db22021-04-29 10:08:25 +0800422 int ret;
423
424 ret = clk_prepare_enable(msm->nfi_clk);
425 if (ret) {
426 dev_err(msm->pdev.dev, "unable to enable nfi clk\n");
427 return ret;
428 }
429
430 ret = clk_prepare_enable(msm->pad_clk);
431 if (ret) {
432 dev_err(msm->pdev.dev, "unable to enable pad clk\n");
433 clk_disable_unprepare(msm->nfi_clk);
434 return ret;
435 }
436
developer4a752952021-07-14 15:59:06 +0800437 if (soc_id->en_ecc_clk) {
438 ret = clk_prepare_enable(msm->ecc_clk);
439 if (ret) {
440 dev_err(msm->pdev.dev, "unable to enable ecc clk\n");
441 clk_disable_unprepare(msm->nfi_clk);
442 clk_disable_unprepare(msm->pad_clk);
443 return ret;
444 }
developerfd40db22021-04-29 10:08:25 +0800445 }
446
developer4a752952021-07-14 15:59:06 +0800447 if (soc_id->en_nfi_hclk) {
448 ret = clk_prepare_enable(msm->nfi_hclk);
449 if (ret) {
450 dev_err(msm->pdev.dev, "unable to enable nfi hclk\n");
451 clk_disable_unprepare(msm->nfi_clk);
452 clk_disable_unprepare(msm->pad_clk);
453 if (soc_id->en_ecc_clk)
454 clk_disable_unprepare(msm->ecc_clk);
455 return ret;
456 }
457 }
458
developerfd40db22021-04-29 10:08:25 +0800459 return 0;
460}
461
462static void mtk_snand_disable_clk(struct mtk_snand_mtd *msm)
463{
developer4a752952021-07-14 15:59:06 +0800464 struct mtk_snand_of_id *soc_id = msm->soc_id;
465
developerfd40db22021-04-29 10:08:25 +0800466 clk_disable_unprepare(msm->nfi_clk);
467 clk_disable_unprepare(msm->pad_clk);
developer4a752952021-07-14 15:59:06 +0800468 if (soc_id->en_ecc_clk)
469 clk_disable_unprepare(msm->ecc_clk);
470 if (soc_id->en_nfi_hclk)
471 clk_disable_unprepare(msm->nfi_hclk);
developerfd40db22021-04-29 10:08:25 +0800472}
473
474static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
475 .ecc = mtk_snand_ooblayout_ecc,
476 .free = mtk_snand_ooblayout_free,
477};
478
developer4a752952021-07-14 15:59:06 +0800479static struct mtk_snand_of_id mt7622_soc_id = {
480 .soc = SNAND_SOC_MT7622,
481 .en_ecc_clk = true,
482 .en_nfi_hclk = false
483};
484
485static struct mtk_snand_of_id mt7629_soc_id = {
486 .soc = SNAND_SOC_MT7629,
487 .en_ecc_clk = true,
488 .en_nfi_hclk = false
489};
490
491static struct mtk_snand_of_id mt7986_soc_id = {
492 .soc = SNAND_SOC_MT7986,
493 .en_ecc_clk = false,
494 .en_nfi_hclk = true
495};
developerfd40db22021-04-29 10:08:25 +0800496
developerdfd23622022-11-25 19:02:20 +0800497static struct mtk_snand_of_id mt7988_soc_id = {
498 .soc = SNAND_SOC_MT7988,
499 .en_ecc_clk = false,
500 .en_nfi_hclk = false
501};
502
developerfd40db22021-04-29 10:08:25 +0800503static const struct of_device_id mtk_snand_ids[] = {
504 { .compatible = "mediatek,mt7622-snand", .data = &mt7622_soc_id },
505 { .compatible = "mediatek,mt7629-snand", .data = &mt7629_soc_id },
506 { .compatible = "mediatek,mt7986-snand", .data = &mt7986_soc_id },
developerdfd23622022-11-25 19:02:20 +0800507 { .compatible = "mediatek,mt7988-snand", .data = &mt7988_soc_id },
developerfd40db22021-04-29 10:08:25 +0800508 { },
509};
510
511MODULE_DEVICE_TABLE(of, mtk_snand_ids);
512
513static int mtk_snand_probe(struct platform_device *pdev)
514{
515 struct mtk_snand_platdata mtk_snand_pdata = {};
516 struct device_node *np = pdev->dev.of_node;
517 const struct of_device_id *of_soc_id;
developerfd40db22021-04-29 10:08:25 +0800518 struct mtk_snand_mtd *msm;
519 struct mtd_info *mtd;
520 struct resource *r;
521 uint32_t size;
522 int ret;
523
524 of_soc_id = of_match_node(mtk_snand_ids, np);
525 if (!of_soc_id)
526 return -EINVAL;
527
developerfd40db22021-04-29 10:08:25 +0800528 msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
529 if (!msm)
530 return -ENOMEM;
531
developer4a752952021-07-14 15:59:06 +0800532 msm->soc_id = of_soc_id->data;
533
developerfd40db22021-04-29 10:08:25 +0800534 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
535 msm->nfi_regs = devm_ioremap_resource(&pdev->dev, r);
536 if (IS_ERR(msm->nfi_regs)) {
537 ret = PTR_ERR(msm->nfi_regs);
538 goto errout1;
539 }
540
541 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
542 msm->ecc_regs = devm_ioremap_resource(&pdev->dev, r);
543 if (IS_ERR(msm->ecc_regs)) {
544 ret = PTR_ERR(msm->ecc_regs);
545 goto errout1;
546 }
547
548 msm->pdev.dev = &pdev->dev;
549 msm->quad_spi = of_property_read_bool(np, "mediatek,quad-spi");
developer4a752952021-07-14 15:59:06 +0800550 msm->soc = msm->soc_id->soc;
developerfd40db22021-04-29 10:08:25 +0800551
552 msm->nfi_clk = devm_clk_get(msm->pdev.dev, "nfi_clk");
553 if (IS_ERR(msm->nfi_clk)) {
554 ret = PTR_ERR(msm->nfi_clk);
developer4a752952021-07-14 15:59:06 +0800555 dev_err(msm->pdev.dev,
556 "unable to get nfi_clk, err = %d\n", ret);
developerfd40db22021-04-29 10:08:25 +0800557 goto errout1;
558 }
559
developer4a752952021-07-14 15:59:06 +0800560 if (msm->soc_id->en_ecc_clk) {
561 msm->ecc_clk = devm_clk_get(msm->pdev.dev, "ecc_clk");
562 if (IS_ERR(msm->ecc_clk)) {
563 ret = PTR_ERR(msm->ecc_clk);
564 dev_err(msm->pdev.dev,
565 "unable to get ecc_clk, err = %d\n", ret);
566 goto errout1;
567 }
developerfd40db22021-04-29 10:08:25 +0800568 }
569
570 msm->pad_clk = devm_clk_get(msm->pdev.dev, "pad_clk");
571 if (IS_ERR(msm->pad_clk)) {
572 ret = PTR_ERR(msm->pad_clk);
developer4a752952021-07-14 15:59:06 +0800573 dev_err(msm->pdev.dev,
574 "unable to get pad_clk, err = %d\n", ret);
developerfd40db22021-04-29 10:08:25 +0800575 goto errout1;
576 }
577
developer4a752952021-07-14 15:59:06 +0800578 if (msm->soc_id->en_nfi_hclk) {
579 msm->nfi_hclk = devm_clk_get(msm->pdev.dev, "nfi_hclk");
580 if (IS_ERR(msm->nfi_hclk)) {
581 ret = PTR_ERR(msm->nfi_hclk);
582 dev_err(msm->pdev.dev,
583 "unable to get nfi_hclk, err = %d\n", ret);
584 goto errout1;
585 }
586 }
587
developerfd40db22021-04-29 10:08:25 +0800588 ret = mtk_snand_enable_clk(msm);
589 if (ret)
590 goto errout1;
591
592 /* Probe SPI-NAND Flash */
593 mtk_snand_pdata.soc = msm->soc;
594 mtk_snand_pdata.quad_spi = msm->quad_spi;
595 mtk_snand_pdata.nfi_base = msm->nfi_regs;
596 mtk_snand_pdata.ecc_base = msm->ecc_regs;
597
598 ret = mtk_snand_init(&msm->pdev, &mtk_snand_pdata, &msm->snf);
599 if (ret)
600 goto errout1;
601
602 msm->irq = platform_get_irq(pdev, 0);
603 if (msm->irq >= 0) {
604 ret = devm_request_irq(msm->pdev.dev, msm->irq, mtk_snand_irq,
605 0x0, "mtk-snand", msm);
606 if (ret) {
607 dev_err(msm->pdev.dev, "failed to request snfi irq\n");
608 goto errout2;
609 }
610
611 ret = dma_set_mask(msm->pdev.dev, DMA_BIT_MASK(32));
612 if (ret) {
613 dev_err(msm->pdev.dev, "failed to set dma mask\n");
614 goto errout3;
615 }
616 }
617
618 mtk_snand_get_chip_info(msm->snf, &msm->cinfo);
619
620 size = msm->cinfo.pagesize + msm->cinfo.sparesize;
621 msm->page_cache = devm_kmalloc(msm->pdev.dev, size, GFP_KERNEL);
622 if (!msm->page_cache) {
623 dev_err(msm->pdev.dev, "failed to allocate page cache\n");
624 ret = -ENOMEM;
625 goto errout3;
626 }
627
628 mutex_init(&msm->lock);
629
630 dev_info(msm->pdev.dev,
631 "chip is %s, size %lluMB, page size %u, oob size %u\n",
632 msm->cinfo.model, msm->cinfo.chipsize >> 20,
633 msm->cinfo.pagesize, msm->cinfo.sparesize);
634
635 /* Initialize mtd for SPI-NAND */
636 mtd = &msm->mtd;
637
638 mtd->owner = THIS_MODULE;
639 mtd->dev.parent = &pdev->dev;
640 mtd->type = MTD_NANDFLASH;
641 mtd->flags = MTD_CAP_NANDFLASH;
642
643 mtd_set_of_node(mtd, np);
644
645 mtd->size = msm->cinfo.chipsize;
646 mtd->erasesize = msm->cinfo.blocksize;
647 mtd->writesize = msm->cinfo.pagesize;
648 mtd->writebufsize = mtd->writesize;
649 mtd->oobsize = msm->cinfo.sparesize;
650 mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);
651
652 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
653 mtd->writesize_shift = ffs(mtd->writesize) - 1;
654 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
655 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
656
657 mtd->ooblayout = &mtk_snand_ooblayout;
658
developer4da1bed2021-05-08 17:30:37 +0800659 mtd->ecc_strength = msm->cinfo.ecc_strength;
developerfd40db22021-04-29 10:08:25 +0800660 mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
661 mtd->ecc_step_size = msm->cinfo.sector_size;
662
663 mtd->_erase = mtk_snand_mtd_erase;
664 mtd->_read_oob = mtk_snand_mtd_read_oob;
665 mtd->_write_oob = mtk_snand_mtd_write_oob;
666 mtd->_block_isbad = mtk_snand_mtd_block_isbad;
667 mtd->_block_markbad = mtk_snand_mtd_block_markbad;
668
669 ret = mtd_device_register(mtd, NULL, 0);
670 if (ret) {
671 dev_err(msm->pdev.dev, "failed to register mtd partition\n");
672 goto errout4;
673 }
674
675 platform_set_drvdata(pdev, msm);
676
677 return 0;
678
679errout4:
680 devm_kfree(msm->pdev.dev, msm->page_cache);
681
682errout3:
683 if (msm->irq >= 0)
684 devm_free_irq(msm->pdev.dev, msm->irq, msm);
685
686errout2:
687 mtk_snand_cleanup(msm->snf);
688
689errout1:
690 devm_kfree(msm->pdev.dev, msm);
691
692 platform_set_drvdata(pdev, NULL);
693
694 return ret;
695}
696
697static int mtk_snand_remove(struct platform_device *pdev)
698{
699 struct mtk_snand_mtd *msm = platform_get_drvdata(pdev);
700 struct mtd_info *mtd = &msm->mtd;
701 int ret;
702
703 ret = mtd_device_unregister(mtd);
704 if (ret)
705 return ret;
706
707 mtk_snand_cleanup(msm->snf);
708
709 if (msm->irq >= 0)
710 devm_free_irq(msm->pdev.dev, msm->irq, msm);
711
712 mtk_snand_disable_clk(msm);
713
714 devm_kfree(msm->pdev.dev, msm->page_cache);
715 devm_kfree(msm->pdev.dev, msm);
716
717 platform_set_drvdata(pdev, NULL);
718
719 return 0;
720}
721
722static struct platform_driver mtk_snand_driver = {
723 .probe = mtk_snand_probe,
724 .remove = mtk_snand_remove,
725 .driver = {
726 .name = "mtk-snand",
727 .of_match_table = mtk_snand_ids,
728 },
729};
730
731module_platform_driver(mtk_snand_driver);
732
733MODULE_LICENSE("GPL");
734MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
735MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver");