/*
 * Copyright (c) 2019-2021, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>
#include <string.h>

#include <platform_def.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/nand.h>
#include <lib/utils.h>

/*
 * Define a single nand_device used by specific NAND frameworks.
 */
static struct nand_device nand_dev;
static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];

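/*
 * Read @length bytes at byte @offset of the NAND device, skipping bad
 * blocks as they are met. On return, *length_read holds the number of
 * bytes copied to @buffer.
 * Return 0 on success, a negative errno value on failure.
 */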
int nand_read(unsigned int offset, uintptr_t buffer, size_t length,
	      size_t *length_read)
{
	unsigned int block = offset / nand_dev.block_size;
	unsigned int end_block = (offset + length - 1U) / nand_dev.block_size;
	unsigned int page_start =
		(offset % nand_dev.block_size) / nand_dev.page_size;
	unsigned int nb_pages = nand_dev.block_size / nand_dev.page_size;
	unsigned int start_offset = offset % nand_dev.page_size;
	unsigned int page;
	unsigned int bytes_read;
	int is_bad;
	int ret;

	VERBOSE("Block %u - %u, page_start %u, nb %u, length %zu, offset %u\n",
		block, end_block, page_start, nb_pages, length, offset);

	*length_read = 0UL;

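	/*
	 * Reads not aligned on page boundaries are bounced through
	 * scratch_buff, which must be able to hold a full page.
	 */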
	if (((start_offset != 0U) || ((length % nand_dev.page_size) != 0U)) &&
	    (sizeof(scratch_buff) < nand_dev.page_size)) {
		return -EINVAL;
	}

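	/*
	 * Iterate over all blocks spanned by the read. Each bad block met
	 * shifts the window up: end_block moves one block further.
	 */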
	while (block <= end_block) {
		is_bad = nand_dev.mtd_block_is_bad(block);
		if (is_bad < 0) {
			return is_bad;
		}

		if (is_bad == 1) {
			/* Skip the block */
			uint32_t max_block =
				nand_dev.size / nand_dev.block_size;

			block++;
			end_block++;
			if ((block < max_block) && (end_block < max_block)) {
				continue;
			}

			return -EIO;
		}

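		/*
		 * Read the block page by page. Partial pages are bounced
		 * through scratch_buff, full pages go straight to @buffer.
		 */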
		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) ||
			    (length < nand_dev.page_size)) {
				ret = nand_dev.mtd_read_page(
						&nand_dev,
						(block * nb_pages) + page,
						(uintptr_t)scratch_buff);
				if (ret != 0) {
					return ret;
				}

				bytes_read = MIN((size_t)(nand_dev.page_size -
							  start_offset),
						 length);

				memcpy((uint8_t *)buffer,
				       scratch_buff + start_offset,
				       bytes_read);

				start_offset = 0U;
			} else {
				ret = nand_dev.mtd_read_page(&nand_dev,
						(block * nb_pages) + page,
						buffer);
				if (ret != 0) {
					return ret;
				}

				bytes_read = nand_dev.page_size;
			}

			length -= bytes_read;
			buffer += bytes_read;
			*length_read += bytes_read;

			if (length == 0U) {
				break;
			}
		}

		page_start = 0U;
		block++;
	}

	return 0;
}

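/*
 * Count the bad blocks in the area [base, base + offset) of the NAND
 * device and return in *extra_offset the number of bytes to add to skip
 * them, the area being extended by one block for each bad block found.
 * Return 0 on success, a negative errno value on failure.
 */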
int nand_seek_bb(uintptr_t base, unsigned int offset, size_t *extra_offset)
{
	unsigned int block;
	unsigned int offset_block;
	unsigned int max_block;
	int is_bad;
	size_t count_bb = 0U;

	block = base / nand_dev.block_size;

	if (offset != 0U) {
		offset_block = (base + offset - 1U) / nand_dev.block_size;
	} else {
		offset_block = block;
	}

	max_block = nand_dev.size / nand_dev.block_size;

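	/* Each bad block met extends the area to scan by one block. */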
	while (block <= offset_block) {
		if (offset_block >= max_block) {
			return -EIO;
		}

		is_bad = nand_dev.mtd_block_is_bad(block);
		if (is_bad < 0) {
			return is_bad;
		}

		if (is_bad == 1) {
			count_bb++;
			offset_block++;
		}

		block++;
	}

	*extra_offset = count_bb * nand_dev.block_size;

	return 0;
}

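/*
 * Return the single nand_device instance defined above, for use by the
 * specific NAND frameworks.
 */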
struct nand_device *get_nand_device(void)
{
	return &nand_dev;
}
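
/*
 * Illustrative usage sketch (not part of this driver): once a specific
 * NAND framework has set up the nand_device returned by
 * get_nand_device(), a caller could read 512 bytes at a hypothetical
 * image_offset as follows:
 *
 *	size_t length_read;
 *	static uint8_t buf[512];
 *
 *	if (nand_read(image_offset, (uintptr_t)buf, sizeof(buf),
 *		      &length_read) != 0) {
 *		panic();
 *	}
 */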