// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 MediaTek Inc. All Rights Reserved.
 *
 * Author: Weijie Gao <weijie.gao@mediatek.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/wait.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of_platform.h>

#include "mtk-snand.h"
#include "mtk-snand-os.h"

struct mtk_snand_of_id {
	enum mtk_snand_soc soc;
	bool en_ecc_clk;
	bool en_nfi_hclk;
};

struct mtk_snand_mtd {
	struct mtk_snand_plat_dev pdev;
	struct mtk_snand_of_id *soc_id;

	struct clk *nfi_clk;
	struct clk *pad_clk;
	struct clk *ecc_clk;
	struct clk *nfi_hclk;

	void __iomem *nfi_regs;
	void __iomem *ecc_regs;

	int irq;

	bool quad_spi;
	enum mtk_snand_soc soc;

	struct mtd_info mtd;
	struct mtk_snand *snf;
	struct mtk_snand_chip_info cinfo;
	uint8_t *page_cache;
	struct mutex lock;
};

#define mtd_to_msm(mtd) container_of(mtd, struct mtk_snand_mtd, mtd)

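/*
 * mtd->_erase handler. The requested range is aligned out to erase-block
 * boundaries; erasing stops at the first bad or failing block, which is
 * reported through instr->fail_addr.
 */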
static int mtk_snand_mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	u64 start_addr, end_addr;
	int ret = 0;

	/* Do not allow erase past end of device */
	if ((instr->addr + instr->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to erase beyond end of device\n");
		return -EINVAL;
	}

	start_addr = instr->addr & (~mtd->erasesize_mask);
	end_addr = instr->addr + instr->len;
	if (end_addr & mtd->erasesize_mask) {
		end_addr = (end_addr + mtd->erasesize_mask) &
			   (~mtd->erasesize_mask);
	}

	mutex_lock(&msm->lock);

	while (start_addr < end_addr) {
		if (mtk_snand_block_isbad(msm->snf, start_addr)) {
			instr->fail_addr = start_addr;
			ret = -EIO;
			break;
		}

		ret = mtk_snand_erase_block(msm->snf, start_addr);
		if (ret) {
			instr->fail_addr = start_addr;
			break;
		}

		start_addr += mtd->erasesize;
	}

	mutex_unlock(&msm->lock);

	return ret;
}

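/*
 * Core read loop: transfer one page at a time through the bounce buffer
 * (msm->page_cache holds main data, followed by the OOB area). Returns
 * the maximum number of bitflips corrected in any page, or -EBADMSG if
 * at least one page was uncorrectable.
 */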
static int mtk_snand_mtd_read_data(struct mtk_snand_mtd *msm, uint64_t addr,
				   struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool ecc_failed = false, raw = (ops->mode == MTD_OPS_RAW);
	int ret, max_bitflips = 0;

	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_read_page_auto_oob(msm->snf, addr,
				datcache, oobcache, maxooblen, NULL, raw);
		else
			ret = mtk_snand_read_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret < 0 && ret != -EBADMSG)
			return ret;

		if (ret == -EBADMSG) {
			mtd->ecc_stats.failed++;
			ecc_failed = true;
		} else {
			mtd->ecc_stats.corrected += ret;
			max_bitflips = max_t(int, ret, max_bitflips);
		}

		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			memcpy(ops->datbuf + ops->retlen, datcache + col,
			       chklen);
			len -= chklen;
			col = 0; /* subsequent pages start at column 0 */
			ops->retlen += chklen;
		}

		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memcpy(ops->oobbuf + ops->oobretlen, oobcache + ooboffs,
			       chklen);
			ooblen -= chklen;
			ooboffs = 0; /* subsequent pages start at OOB offset 0 */
			ops->oobretlen += chklen;
		}

		addr += mtd->writesize;
	}

	return ecc_failed ? -EBADMSG : max_bitflips;
}

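/*
 * mtd->_read_oob handler: validate the request against the device
 * geometry, then perform the transfer under the driver lock.
 */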
static int mtk_snand_mtd_read_oob(struct mtd_info *mtd, loff_t from,
				  struct mtd_oob_ops *ops)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	uint32_t maxooblen;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;
	default:
		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
		return -EINVAL;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow read past end of device */
	if (ops->datbuf && (from + ops->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to read beyond end of device\n");
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		dev_err(msm->pdev.dev, "attempt to start read outside oob\n");
		return -EINVAL;
	}

	if (unlikely(from >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (from >> mtd->writesize_shift)) * maxooblen)) {
		dev_err(msm->pdev.dev,
			"attempt to read beyond end of device\n");
		return -EINVAL;
	}

	mutex_lock(&msm->lock);
	ret = mtk_snand_mtd_read_data(msm, from, ops);
	mutex_unlock(&msm->lock);

	return ret;
}

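/*
 * Core write loop, the mirror of mtk_snand_mtd_read_data(). Partial
 * pages are padded with 0xff in the bounce buffer so that bytes outside
 * the requested range remain in the erased state.
 */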
static int mtk_snand_mtd_write_data(struct mtk_snand_mtd *msm, uint64_t addr,
				    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = &msm->mtd;
	size_t len, ooblen, maxooblen, chklen, oobwrlen;
	uint32_t col, ooboffs;
	uint8_t *datcache, *oobcache;
	bool raw = (ops->mode == MTD_OPS_RAW);
	int ret;

	col = addr & mtd->writesize_mask;
	addr &= ~mtd->writesize_mask;
	maxooblen = mtd_oobavail(mtd, ops);
	ooboffs = ops->ooboffs;
	ooblen = ops->ooblen;
	len = ops->len;

	datcache = len ? msm->page_cache : NULL;
	oobcache = ooblen ? msm->page_cache + mtd->writesize : NULL;

	ops->oobretlen = 0;
	ops->retlen = 0;

	while (len || ooblen) {
		if (len) {
			/* Move data */
			chklen = mtd->writesize - col;
			if (chklen > len)
				chklen = len;

			memset(datcache, 0xff, col);
			memcpy(datcache + col, ops->datbuf + ops->retlen,
			       chklen);
			memset(datcache + col + chklen, 0xff,
			       mtd->writesize - col - chklen);
			len -= chklen;
			col = 0; /* subsequent pages start at column 0 */
			ops->retlen += chklen;
		}

		oobwrlen = 0;
		if (ooblen) {
			/* Move oob */
			chklen = maxooblen - ooboffs;
			if (chklen > ooblen)
				chklen = ooblen;

			memset(oobcache, 0xff, ooboffs);
			memcpy(oobcache + ooboffs,
			       ops->oobbuf + ops->oobretlen, chklen);
			memset(oobcache + ooboffs + chklen, 0xff,
			       mtd->oobsize - ooboffs - chklen);
			oobwrlen = chklen + ooboffs;
			ooblen -= chklen;
			ooboffs = 0; /* subsequent pages start at OOB offset 0 */
			ops->oobretlen += chklen;
		}

		if (ops->mode == MTD_OPS_AUTO_OOB)
			ret = mtk_snand_write_page_auto_oob(msm->snf, addr,
				datcache, oobcache, oobwrlen, NULL, raw);
		else
			ret = mtk_snand_write_page(msm->snf, addr, datcache,
				oobcache, raw);

		if (ret)
			return ret;

		addr += mtd->writesize;
	}

	return 0;
}

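/*
 * mtd->_write_oob handler: same validation as the read path, then write
 * under the driver lock.
 */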
static int mtk_snand_mtd_write_oob(struct mtd_info *mtd, loff_t to,
				   struct mtd_oob_ops *ops)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	uint32_t maxooblen;
	int ret;

	if (!ops->oobbuf && !ops->datbuf) {
		if (ops->ooblen || ops->len)
			return -EINVAL;

		return 0;
	}

	switch (ops->mode) {
	case MTD_OPS_PLACE_OOB:
	case MTD_OPS_AUTO_OOB:
	case MTD_OPS_RAW:
		break;
	default:
		dev_err(msm->pdev.dev, "unsupported oob mode: %u\n", ops->mode);
		return -EINVAL;
	}

	maxooblen = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of device */
	if (ops->datbuf && (to + ops->len) > mtd->size) {
		dev_err(msm->pdev.dev,
			"attempt to write beyond end of device\n");
		return -EINVAL;
	}

	if (unlikely(ops->ooboffs >= maxooblen)) {
		dev_err(msm->pdev.dev,
			"attempt to start write outside oob\n");
		return -EINVAL;
	}

	if (unlikely(to >= mtd->size ||
	    ops->ooboffs + ops->ooblen > ((mtd->size >> mtd->writesize_shift) -
	    (to >> mtd->writesize_shift)) * maxooblen)) {
		dev_err(msm->pdev.dev,
			"attempt to write beyond end of device\n");
		return -EINVAL;
	}

	mutex_lock(&msm->lock);
	ret = mtk_snand_mtd_write_data(msm, to, ops);
	mutex_unlock(&msm->lock);

	return ret;
}

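/*
 * mtd->_panic_write handler, called when interrupts may be unusable
 * (e.g. from the mtdoops panic path): switch the controller to polling
 * and write without taking the mutex.
 */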
static int mtk_snand_mtd_panic_write(struct mtd_info *mtd, loff_t to,
				     size_t len, size_t *retlen,
				     const u_char *buf)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	size_t chklen, wrlen = 0;
	uint32_t col;
	int ret = 0;

	/* Disable IRQ (if one was requested) and enter poll mode */
	if (msm->irq >= 0)
		disable_irq(msm->irq);
	mtk_snand_control_poll_mode(&msm->pdev, true);

	col = to & mtd->writesize_mask;
	to &= ~mtd->writesize_mask;

	while (len) {
		/* Move data */
		chklen = mtd->writesize - col;
		if (chklen > len)
			chklen = len;

		if (chklen < mtd->writesize)
			memset(msm->page_cache, 0xff, mtd->writesize);
		memcpy(msm->page_cache + col, buf + wrlen, chklen);

		len -= chklen;
		col = 0; /* subsequent pages start at column 0 */
		wrlen += chklen;

		ret = mtk_snand_write_page(msm->snf, to, msm->page_cache,
					   NULL, false);
		if (ret)
			break;

		to += mtd->writesize;
	}

	if (retlen)
		*retlen = wrlen;

	return ret;
}

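/* Bad-block query and marking, serialized by the same lock as I/O */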
static int mtk_snand_mtd_block_isbad(struct mtd_info *mtd, loff_t offs)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	int ret;

	mutex_lock(&msm->lock);
	ret = mtk_snand_block_isbad(msm->snf, offs);
	mutex_unlock(&msm->lock);

	return ret;
}

static int mtk_snand_mtd_block_markbad(struct mtd_info *mtd, loff_t offs)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);
	int ret;

	mutex_lock(&msm->lock);
	ret = mtk_snand_block_markbad(msm->snf, offs);
	mutex_unlock(&msm->lock);

	return ret;
}

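/*
 * OOB layout: each sector owns fdm_size bytes of FDM data, of which the
 * first byte is reserved (bad block marker); the remaining FDM bytes
 * are user-available, and everything after the FDM region is ECC parity.
 */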
static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section,
				   struct mtd_oob_region *oobecc)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

	if (section)
		return -ERANGE;

	oobecc->offset = msm->cinfo.fdm_size * msm->cinfo.num_sectors;
	oobecc->length = mtd->oobsize - oobecc->offset;

	return 0;
}

static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section,
				    struct mtd_oob_region *oobfree)
{
	struct mtk_snand_mtd *msm = mtd_to_msm(mtd);

	if (section >= msm->cinfo.num_sectors)
		return -ERANGE;

	oobfree->length = msm->cinfo.fdm_size - 1;
	oobfree->offset = section * msm->cinfo.fdm_size + 1;

	return 0;
}

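/* Dispatch the SNFI interrupt to the mtk-snand core */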
static irqreturn_t mtk_snand_irq(int irq, void *id)
{
	struct mtk_snand_mtd *msm = id;
	int ret;

	ret = mtk_snand_irq_process(msm->snf);
	if (ret > 0)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

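/*
 * The nfi and pad clocks are common to all supported SoCs; the ECC and
 * NFI AHB clocks exist only where the matched mtk_snand_of_id says so.
 */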
463static int mtk_snand_enable_clk(struct mtk_snand_mtd *msm)
464{
developer4a752952021-07-14 15:59:06 +0800465 struct mtk_snand_of_id *soc_id = msm->soc_id;
developerfd40db22021-04-29 10:08:25 +0800466 int ret;
467
468 ret = clk_prepare_enable(msm->nfi_clk);
469 if (ret) {
470 dev_err(msm->pdev.dev, "unable to enable nfi clk\n");
471 return ret;
472 }
473
474 ret = clk_prepare_enable(msm->pad_clk);
475 if (ret) {
476 dev_err(msm->pdev.dev, "unable to enable pad clk\n");
477 clk_disable_unprepare(msm->nfi_clk);
478 return ret;
479 }
480
developer4a752952021-07-14 15:59:06 +0800481 if (soc_id->en_ecc_clk) {
482 ret = clk_prepare_enable(msm->ecc_clk);
483 if (ret) {
484 dev_err(msm->pdev.dev, "unable to enable ecc clk\n");
485 clk_disable_unprepare(msm->nfi_clk);
486 clk_disable_unprepare(msm->pad_clk);
487 return ret;
488 }
developerfd40db22021-04-29 10:08:25 +0800489 }
490
developer4a752952021-07-14 15:59:06 +0800491 if (soc_id->en_nfi_hclk) {
492 ret = clk_prepare_enable(msm->nfi_hclk);
493 if (ret) {
494 dev_err(msm->pdev.dev, "unable to enable nfi hclk\n");
495 clk_disable_unprepare(msm->nfi_clk);
496 clk_disable_unprepare(msm->pad_clk);
497 if (soc_id->en_ecc_clk)
498 clk_disable_unprepare(msm->ecc_clk);
499 return ret;
500 }
501 }
502
developerfd40db22021-04-29 10:08:25 +0800503 return 0;
504}
505
506static void mtk_snand_disable_clk(struct mtk_snand_mtd *msm)
507{
developer4a752952021-07-14 15:59:06 +0800508 struct mtk_snand_of_id *soc_id = msm->soc_id;
509
developerfd40db22021-04-29 10:08:25 +0800510 clk_disable_unprepare(msm->nfi_clk);
511 clk_disable_unprepare(msm->pad_clk);
developer4a752952021-07-14 15:59:06 +0800512 if (soc_id->en_ecc_clk)
513 clk_disable_unprepare(msm->ecc_clk);
514 if (soc_id->en_nfi_hclk)
515 clk_disable_unprepare(msm->nfi_hclk);
developerfd40db22021-04-29 10:08:25 +0800516}
517
518static const struct mtd_ooblayout_ops mtk_snand_ooblayout = {
519 .ecc = mtk_snand_ooblayout_ecc,
520 .free = mtk_snand_ooblayout_free,
521};
522
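/* Per-SoC data: which optional clocks the SoC requires */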
static struct mtk_snand_of_id mt7622_soc_id = {
	.soc = SNAND_SOC_MT7622,
	.en_ecc_clk = true,
	.en_nfi_hclk = false
};

static struct mtk_snand_of_id mt7629_soc_id = {
	.soc = SNAND_SOC_MT7629,
	.en_ecc_clk = true,
	.en_nfi_hclk = false
};

static struct mtk_snand_of_id mt7986_soc_id = {
	.soc = SNAND_SOC_MT7986,
	.en_ecc_clk = false,
	.en_nfi_hclk = true
};

static struct mtk_snand_of_id mt7988_soc_id = {
	.soc = SNAND_SOC_MT7988,
	.en_ecc_clk = false,
	.en_nfi_hclk = false
};

static const struct of_device_id mtk_snand_ids[] = {
	{ .compatible = "mediatek,mt7622-snand", .data = &mt7622_soc_id },
	{ .compatible = "mediatek,mt7629-snand", .data = &mt7629_soc_id },
	{ .compatible = "mediatek,mt7986-snand", .data = &mt7986_soc_id },
	{ .compatible = "mediatek,mt7988-snand", .data = &mt7988_soc_id },
	{ },
};

MODULE_DEVICE_TABLE(of, mtk_snand_ids);

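/*
 * Probe: map the "nfi" and "ecc" register ranges, acquire the per-SoC
 * clocks, bring up the mtk-snand core, then register the MTD device.
 *
 * For illustration only, a device tree node sketched from the resource,
 * clock and property names this driver requests; the unit address,
 * interrupt and clock phandles below are placeholders, not taken from
 * a real dtsi:
 *
 *	snand@1100d000 {
 *		compatible = "mediatek,mt7622-snand";
 *		reg = <0 0x1100d000 0 0x1000>, <0 0x1100e000 0 0x1000>;
 *		reg-names = "nfi", "ecc";
 *		interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_LOW>;
 *		clocks = <&pericfg CLK_PERI_NFI_PD>,
 *			 <&pericfg CLK_PERI_SNFI_PD>,
 *			 <&pericfg CLK_PERI_NFIECC_PD>;
 *		clock-names = "nfi_clk", "pad_clk", "ecc_clk";
 *		mediatek,quad-spi;
 *	};
 */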
static int mtk_snand_probe(struct platform_device *pdev)
{
	struct mtk_snand_platdata mtk_snand_pdata = {};
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_soc_id;
	struct mtk_snand_mtd *msm;
	struct mtd_info *mtd;
	struct resource *r;
	uint32_t size;
	int ret;

	of_soc_id = of_match_node(mtk_snand_ids, np);
	if (!of_soc_id)
		return -EINVAL;

	msm = devm_kzalloc(&pdev->dev, sizeof(*msm), GFP_KERNEL);
	if (!msm)
		return -ENOMEM;

	msm->soc_id = (struct mtk_snand_of_id *)of_soc_id->data;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nfi");
	msm->nfi_regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(msm->nfi_regs)) {
		ret = PTR_ERR(msm->nfi_regs);
		goto errout1;
	}

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ecc");
	msm->ecc_regs = devm_ioremap_resource(&pdev->dev, r);
	if (IS_ERR(msm->ecc_regs)) {
		ret = PTR_ERR(msm->ecc_regs);
		goto errout1;
	}

	msm->pdev.dev = &pdev->dev;
	msm->quad_spi = of_property_read_bool(np, "mediatek,quad-spi");
	msm->soc = msm->soc_id->soc;

	msm->nfi_clk = devm_clk_get(msm->pdev.dev, "nfi_clk");
	if (IS_ERR(msm->nfi_clk)) {
		ret = PTR_ERR(msm->nfi_clk);
		dev_err(msm->pdev.dev,
			"unable to get nfi_clk, err = %d\n", ret);
		goto errout1;
	}

	if (msm->soc_id->en_ecc_clk) {
		msm->ecc_clk = devm_clk_get(msm->pdev.dev, "ecc_clk");
		if (IS_ERR(msm->ecc_clk)) {
			ret = PTR_ERR(msm->ecc_clk);
			dev_err(msm->pdev.dev,
				"unable to get ecc_clk, err = %d\n", ret);
			goto errout1;
		}
	}

	msm->pad_clk = devm_clk_get(msm->pdev.dev, "pad_clk");
	if (IS_ERR(msm->pad_clk)) {
		ret = PTR_ERR(msm->pad_clk);
		dev_err(msm->pdev.dev,
			"unable to get pad_clk, err = %d\n", ret);
		goto errout1;
	}

	if (msm->soc_id->en_nfi_hclk) {
		msm->nfi_hclk = devm_clk_get(msm->pdev.dev, "nfi_hclk");
		if (IS_ERR(msm->nfi_hclk)) {
			ret = PTR_ERR(msm->nfi_hclk);
			dev_err(msm->pdev.dev,
				"unable to get nfi_hclk, err = %d\n", ret);
			goto errout1;
		}
	}

	ret = mtk_snand_enable_clk(msm);
	if (ret)
		goto errout1;

	/* Probe SPI-NAND Flash */
	mtk_snand_pdata.soc = msm->soc;
	mtk_snand_pdata.quad_spi = msm->quad_spi;
	mtk_snand_pdata.nfi_base = msm->nfi_regs;
	mtk_snand_pdata.ecc_base = msm->ecc_regs;

	ret = mtk_snand_init(&msm->pdev, &mtk_snand_pdata, &msm->snf);
	if (ret)
		goto errout_clk;

	msm->irq = platform_get_irq(pdev, 0);
	if (msm->irq >= 0) {
		ret = devm_request_irq(msm->pdev.dev, msm->irq, mtk_snand_irq,
				       0x0, "mtk-snand", msm);
		if (ret) {
			dev_err(msm->pdev.dev, "failed to request snfi irq\n");
			goto errout2;
		}

		ret = dma_set_mask(msm->pdev.dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(msm->pdev.dev, "failed to set dma mask\n");
			goto errout3;
		}
	}

	mtk_snand_get_chip_info(msm->snf, &msm->cinfo);

	size = msm->cinfo.pagesize + msm->cinfo.sparesize;
	msm->page_cache = devm_kmalloc(msm->pdev.dev, size, GFP_KERNEL);
	if (!msm->page_cache) {
		dev_err(msm->pdev.dev, "failed to allocate page cache\n");
		ret = -ENOMEM;
		goto errout3;
	}

	mutex_init(&msm->lock);

	dev_info(msm->pdev.dev,
		 "chip is %s, size %lluMB, page size %u, oob size %u\n",
		 msm->cinfo.model, msm->cinfo.chipsize >> 20,
		 msm->cinfo.pagesize, msm->cinfo.sparesize);

	/* Initialize mtd for SPI-NAND */
	mtd = &msm->mtd;

	mtd->owner = THIS_MODULE;
	mtd->dev.parent = &pdev->dev;
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;

	mtd_set_of_node(mtd, np);

	mtd->size = msm->cinfo.chipsize;
	mtd->erasesize = msm->cinfo.blocksize;
	mtd->writesize = msm->cinfo.pagesize;
	mtd->writebufsize = mtd->writesize;
	mtd->oobsize = msm->cinfo.sparesize;
	mtd->oobavail = msm->cinfo.num_sectors * (msm->cinfo.fdm_size - 1);

	mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	mtd->writesize_shift = ffs(mtd->writesize) - 1;
	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	mtd->ooblayout = &mtk_snand_ooblayout;

	mtd->ecc_strength = msm->cinfo.ecc_strength;
	mtd->bitflip_threshold = (mtd->ecc_strength * 3) / 4;
	mtd->ecc_step_size = msm->cinfo.sector_size;

	mtd->_erase = mtk_snand_mtd_erase;
	mtd->_read_oob = mtk_snand_mtd_read_oob;
	mtd->_write_oob = mtk_snand_mtd_write_oob;
	mtd->_panic_write = mtk_snand_mtd_panic_write;
	mtd->_block_isbad = mtk_snand_mtd_block_isbad;
	mtd->_block_markbad = mtk_snand_mtd_block_markbad;

	ret = mtd_device_register(mtd, NULL, 0);
	if (ret) {
		dev_err(msm->pdev.dev, "failed to register mtd partition\n");
		goto errout4;
	}

	platform_set_drvdata(pdev, msm);

	return 0;

errout4:
	devm_kfree(msm->pdev.dev, msm->page_cache);

errout3:
	if (msm->irq >= 0)
		devm_free_irq(msm->pdev.dev, msm->irq, msm);

errout2:
	mtk_snand_cleanup(msm->snf);

errout_clk:
	/* Disable the clocks turned on by mtk_snand_enable_clk() */
	mtk_snand_disable_clk(msm);

errout1:
	devm_kfree(msm->pdev.dev, msm);

	platform_set_drvdata(pdev, NULL);

	return ret;
}

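/* Tear down in reverse order of probe */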
static int mtk_snand_remove(struct platform_device *pdev)
{
	struct mtk_snand_mtd *msm = platform_get_drvdata(pdev);
	struct mtd_info *mtd = &msm->mtd;
	int ret;

	ret = mtd_device_unregister(mtd);
	if (ret)
		return ret;

	mtk_snand_cleanup(msm->snf);

	if (msm->irq >= 0)
		devm_free_irq(msm->pdev.dev, msm->irq, msm);

	mtk_snand_disable_clk(msm);

	devm_kfree(msm->pdev.dev, msm->page_cache);
	devm_kfree(msm->pdev.dev, msm);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver mtk_snand_driver = {
	.probe = mtk_snand_probe,
	.remove = mtk_snand_remove,
	.driver = {
		.name = "mtk-snand",
		.of_match_table = mtk_snand_ids,
	},
};

module_platform_driver(mtk_snand_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Weijie Gao <weijie.gao@mediatek.com>");
MODULE_DESCRIPTION("MediaTek SPI-NAND Flash Controller Driver");