/*
 * Copyright (c) 2019-2022, STMicroelectronics - All Rights Reserved
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <stddef.h>

#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/nand.h>
#include <lib/utils.h>

#include <platform_def.h>

/*
 * Define a single nand_device used by specific NAND frameworks.
 */
static struct nand_device nand_dev;

#pragma weak plat_get_scratch_buffer
void plat_get_scratch_buffer(void **buffer_addr, size_t *buf_size)
{
	static uint8_t scratch_buff[PLATFORM_MTD_MAX_PAGE_SIZE];

	assert(buffer_addr != NULL);
	assert(buf_size != NULL);

	*buffer_addr = (void *)scratch_buff;
	*buf_size = sizeof(scratch_buff);
}

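/*
 * Read 'length' bytes starting at 'offset' into 'buffer', skipping any
 * bad block encountered. A read that starts mid-page or whose length is
 * not a multiple of the page size goes through the platform scratch
 * buffer. On success, '*length_read' holds the number of bytes copied.
 */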
int nand_read(unsigned int offset, uintptr_t buffer, size_t length,
	      size_t *length_read)
{
	unsigned int block = offset / nand_dev.block_size;
	unsigned int end_block = (offset + length - 1U) / nand_dev.block_size;
	unsigned int page_start =
		(offset % nand_dev.block_size) / nand_dev.page_size;
	unsigned int nb_pages = nand_dev.block_size / nand_dev.page_size;
	unsigned int start_offset = offset % nand_dev.page_size;
	unsigned int page;
	unsigned int bytes_read;
	int is_bad;
	int ret;
	uint8_t *scratch_buff;
	size_t scratch_buff_size;

	plat_get_scratch_buffer((void **)&scratch_buff, &scratch_buff_size);

	assert(scratch_buff != NULL);

	VERBOSE("Block %u - %u, page_start %u, nb %u, length %zu, offset %u\n",
		block, end_block, page_start, nb_pages, length, offset);

	*length_read = 0UL;

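	/*
	 * An unaligned read (start or length not page-aligned) bounces
	 * through the scratch buffer, so that buffer must hold at least
	 * one full page.
	 */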
	if (((start_offset != 0U) || (length % nand_dev.page_size) != 0U) &&
	    (scratch_buff_size < nand_dev.page_size)) {
		return -EINVAL;
	}

	while (block <= end_block) {
		is_bad = nand_dev.mtd_block_is_bad(block);
		if (is_bad < 0) {
			return is_bad;
		}

		if (is_bad == 1) {
			/*
			 * Skip the bad block: the last block to read moves
			 * one block further out to compensate.
			 */
			uint32_t max_block =
				nand_dev.size / nand_dev.block_size;

			block++;
			end_block++;
			if ((block < max_block) && (end_block < max_block)) {
				continue;
			}

			return -EIO;
		}

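		/*
		 * Copy page by page. A page that is only partially needed
		 * (unaligned start or short tail) is read into the scratch
		 * buffer first; full pages are read straight into the
		 * destination buffer.
		 */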
		for (page = page_start; page < nb_pages; page++) {
			if ((start_offset != 0U) ||
			    (length < nand_dev.page_size)) {
				ret = nand_dev.mtd_read_page(
						&nand_dev,
						(block * nb_pages) + page,
						(uintptr_t)scratch_buff);
				if (ret != 0) {
					return ret;
				}

				bytes_read = MIN((size_t)(nand_dev.page_size -
							  start_offset),
						 length);

				memcpy((uint8_t *)buffer,
				       scratch_buff + start_offset,
				       bytes_read);

				start_offset = 0U;
			} else {
				ret = nand_dev.mtd_read_page(&nand_dev,
						(block * nb_pages) + page,
						buffer);
				if (ret != 0) {
					return ret;
				}

				bytes_read = nand_dev.page_size;
			}

			length -= bytes_read;
			buffer += bytes_read;
			*length_read += bytes_read;

			if (length == 0U) {
				break;
			}
		}

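		/* Subsequent blocks are read from their first page. */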
		page_start = 0U;
		block++;
	}

	return 0;
}

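/*
 * Compute, in '*extra_offset', the extra number of bytes needed to skip
 * the bad blocks found between 'base' and 'base + offset', as a
 * multiple of the block size.
 */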
int nand_seek_bb(uintptr_t base, unsigned int offset, size_t *extra_offset)
{
	unsigned int block;
	unsigned int offset_block;
	unsigned int max_block;
	int is_bad;
	size_t count_bb = 0U;

	block = base / nand_dev.block_size;

	if (offset != 0U) {
		offset_block = (base + offset - 1U) / nand_dev.block_size;
	} else {
		offset_block = block;
	}

	max_block = nand_dev.size / nand_dev.block_size;

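	/*
	 * Scan the range for bad blocks. Each bad block found pushes the
	 * end of the scan one block further; running past the end of the
	 * device fails with -EIO.
	 */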
	while (block <= offset_block) {
		if (offset_block >= max_block) {
			return -EIO;
		}

		is_bad = nand_dev.mtd_block_is_bad(block);
		if (is_bad < 0) {
			return is_bad;
		}

		if (is_bad == 1) {
			count_bb++;
			offset_block++;
		}

		block++;
	}

	*extra_offset = count_bb * nand_dev.block_size;

	return 0;
}

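/* Return the single nand_device instance used by the NAND frameworks. */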
struct nand_device *get_nand_device(void)
{
	return &nand_dev;
}