int nand_spl_load_image(uint32_t offs, unsigned int size, void *dst)
{
	unsigned int block, lastblock;
	unsigned int page, page_offset;

	/* offs has to be aligned to a page address! */
	block = offs / CONFIG_SYS_NAND_BLOCK_SIZE;
	lastblock = (offs + size - 1) / CONFIG_SYS_NAND_BLOCK_SIZE;
	page = (offs % CONFIG_SYS_NAND_BLOCK_SIZE) / CONFIG_SYS_NAND_PAGE_SIZE;
	page_offset = offs % CONFIG_SYS_NAND_PAGE_SIZE;

	while (block <= lastblock) {
		if (!nand_is_bad_block(block)) {
			/* Skip bad blocks */
			while (size && page < SYS_NAND_BLOCK_PAGES) {
				nand_read_page(block, page, dst);

				size -= min(size, CONFIG_SYS_NAND_PAGE_SIZE -
					    page_offset);
				/*
				 * When offs is not aligned to a page address,
				 * the bytes before it are read into dst as
				 * well. Move the image down so that its first
				 * byte ends up at dst.
				 */
				if (unlikely(page_offset)) {
					memmove(dst, dst + page_offset,
						CONFIG_SYS_NAND_PAGE_SIZE);
					dst = (void *)(dst - page_offset);
					page_offset = 0;
				}
				dst += CONFIG_SYS_NAND_PAGE_SIZE;
				page++;
			}

			page = 0;
		} else {
			lastblock++;
		}

		block++;
	}

	return 0;
}
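
/*
 * Usage sketch (illustrative; the exact call is board/config specific):
 * a raw-NAND SPL boot path typically loads its payload with something
 * like
 *
 *	nand_spl_load_image(CONFIG_SYS_NAND_U_BOOT_OFFS,
 *			    CONFIG_SYS_NAND_U_BOOT_SIZE,
 *			    (void *)CONFIG_SYS_NAND_U_BOOT_DST);
 *
 * where the CONFIG_SYS_NAND_U_BOOT_* symbols describe the payload's
 * flash offset, size and load address.
 */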
46
Dario Binacchia6d91ca2020-05-27 13:56:20 +020047/**
48 * nand_spl_adjust_offset - Adjust offset from a starting sector
49 * @sector: Address of the sector
50 * @offs: Offset starting from @sector
51 *
52 * If one or more bad blocks are in the address space between @sector
53 * and @sector + @offs, @offs is increased by the NAND block size for
54 * each bad block found.
55 */
56u32 nand_spl_adjust_offset(u32 sector, u32 offs)
57{
58 unsigned int block, lastblock;
59
60 block = sector / CONFIG_SYS_NAND_BLOCK_SIZE;
61 lastblock = (sector + offs) / CONFIG_SYS_NAND_BLOCK_SIZE;
62
63 while (block <= lastblock) {
64 if (nand_is_bad_block(block)) {
65 offs += CONFIG_SYS_NAND_BLOCK_SIZE;
66 lastblock++;
67 }
68
69 block++;
70 }
71
72 return offs;
73}
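
/*
 * Usage sketch (illustrative): a caller that reads an image relative to
 * a known start sector can correct the raw offset first, e.g.
 *
 *	offs = nand_spl_adjust_offset(image_sector, offs);
 *
 * (image_sector is a placeholder name), so that data sitting behind one
 * or more bad blocks is still found at the expected flash address.
 */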

#ifdef CONFIG_SPL_UBI
/*
 * Temporary storage for reads that are not NAND-page aligned or not
 * NAND-page sized. Note: this does not support runtime-detected flash
 * yet, but that should be reasonably easy to fix by making the buffer
 * large enough :)
 */
static u8 scratch_buf[CONFIG_SYS_NAND_PAGE_SIZE];

/**
 * nand_spl_read_block - Read data from physical eraseblock into a buffer
 * @block: Number of the physical eraseblock
 * @offset: Data offset from the start of @block
 * @len: Data size to read
 * @dst: Address of the destination buffer
 *
 * This could be further optimized if we had a subpage read function in
 * the simple code. On NAND that allows subpage reads, this would save
 * quite some time when reading out, e.g., the UBI VID header.
 *
 * Notes:
 * @offset + @len must not be larger than a physical eraseblock. No
 * sanity check is done, for simplicity reasons.
 *
 * To support runtime-detected flash this needs to be extended with
 * information about the actual flash geometry, but that is beyond the
 * scope of this effort and is not an issue anyway for most applications
 * where fast boot is required.
 */
int nand_spl_read_block(int block, int offset, int len, void *dst)
{
	int page, read;

	/* Calculate the page number */
	page = offset / CONFIG_SYS_NAND_PAGE_SIZE;

	/* Offset to the start of a flash page */
	offset = offset % CONFIG_SYS_NAND_PAGE_SIZE;

	while (len) {
		/*
		 * Non page aligned reads go to the scratch buffer.
		 * Page aligned reads go directly to the destination.
		 */
		if (offset || len < CONFIG_SYS_NAND_PAGE_SIZE) {
			nand_read_page(block, page, scratch_buf);
			read = min(len, CONFIG_SYS_NAND_PAGE_SIZE - offset);
			memcpy(dst, scratch_buf + offset, read);
			offset = 0;
		} else {
			nand_read_page(block, page, dst);
			read = CONFIG_SYS_NAND_PAGE_SIZE;
		}
		page++;
		len -= read;
		dst += read;
	}
	return 0;
}
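
/*
 * Usage sketch (illustrative; names assumed from the SPL UBI attach
 * interface): the UBI scanner reads physical eraseblocks through a
 * callback of this shape, so it is typically wired up roughly as
 *
 *	info.read = nand_spl_read_block;
 *
 * before the UBI volumes are scanned and loaded.
 */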
#endif /* CONFIG_SPL_UBI */