// SPDX-License-Identifier: GPL-2.0+
#include "internal.h"

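/*
 * Map a logical extent of a flat (uncompressed) inode to its physical
 * location: all blocks except a possible inline tail are laid out
 * contiguously starting at i_blkaddr; with EROFS_INODE_FLAT_INLINE the
 * tail is stored in the metadata block right after the on-disk inode
 * and xattrs.
 */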
static int erofs_map_blocks_flatmode(struct erofs_inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = inode;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	nblocks = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
	lastblk = nblocks - tailendpacking;

	/* there is no hole in flatmode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < blknr_to_addr(lastblk)) {
		map->m_pa = blknr_to_addr(vi->u.i_blkaddr) + map->m_la;
		map->m_plen = blknr_to_addr(lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		map->m_pa = iloc(vi->nid) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in one meta block */
		if (erofs_blkoff(map->m_pa) + map->m_plen > EROFS_BLKSIZ) {
			erofs_err("inline data crosses block boundary @ nid %" PRIu64,
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err("internal error @ nid: %" PRIu64 " (size %llu), m_la 0x%" PRIx64,
			  vi->nid, (unsigned long long)inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

	map->m_llen = map->m_plen;
err_out:
	return err;
}

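/*
 * Resolve the logical extent at map->m_la to its physical location.
 * Chunk-based inodes keep a per-chunk array (raw 32-bit block addresses
 * or struct erofs_inode_chunk_index entries) after the inode base and
 * xattrs; all other layouts are handled by the flatmode helper above.
 */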
int erofs_map_blocks(struct erofs_inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = inode;
	struct erofs_inode_chunk_index *idx;
	u8 buf[EROFS_BLKSIZ];
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bounds access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
		return erofs_map_blocks_flatmode(inode, map, flags);

	if (vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);	/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

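	/*
	 * The chunk array starts after the inode and xattrs, aligned
	 * to the entry size; index it by the chunk number of m_la.
	 */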
	chunknr = map->m_la >> vi->u.chunkbits;
	pos = roundup(iloc(vi->nid) + vi->inode_isize +
		      vi->xattr_isize, unit) + unit * chunknr;

	err = erofs_blk_read(buf, erofs_blknr(pos), 1);
	if (err < 0)
		return -EIO;

	map->m_la = chunknr << vi->u.chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->u.chunkbits,
			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));

	/* handle block map */
	if (!(vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = (void *)buf + erofs_blkoff(pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out;
	}
	/* parse chunk indexes */
	idx = (void *)buf + erofs_blkoff(pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
				  sbi.device_id_mask;
		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out:
	map->m_llen = map->m_plen;
	return err;
}

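/*
 * Translate a physical address in the primary device's address space
 * into a (device id, device-relative offset) pair.  An explicit
 * m_deviceid is only validated; otherwise, with extra devices present,
 * scan each device's mapped block range and rebase m_pa into the
 * matching one.
 */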
int erofs_map_dev(struct erofs_sb_info *sbi, struct erofs_map_dev *map)
{
	struct erofs_device_info *dif;
	int id;

	if (map->m_deviceid) {
		if (sbi->extra_devices < map->m_deviceid)
			return -ENODEV;
	} else if (sbi->extra_devices) {
		for (id = 0; id < sbi->extra_devices; ++id) {
			erofs_off_t startoff, length;

			dif = sbi->devs + id;
			if (!dif->mapped_blkaddr)
				continue;
			startoff = blknr_to_addr(dif->mapped_blkaddr);
			length = blknr_to_addr(dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				break;
			}
		}
	}
	return 0;
}

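/*
 * Read size bytes of raw (uncompressed) data starting at offset: map
 * one extent at a time, zero-fill unmapped ranges (holes and post-EOF),
 * and copy mapped extents from the owning device into buffer.
 */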
static int erofs_read_raw_data(struct erofs_inode *inode, char *buffer,
			       erofs_off_t size, erofs_off_t offset)
{
	struct erofs_map_blocks map = {
		.index = UINT_MAX,
	};
	struct erofs_map_dev mdev;
	int ret;
	erofs_off_t ptr = offset;

	while (ptr < offset + size) {
		char *const estart = buffer + ptr - offset;
		erofs_off_t eend;

		map.m_la = ptr;
		ret = erofs_map_blocks(inode, &map, 0);
		if (ret)
			return ret;

		DBG_BUGON(map.m_plen != map.m_llen);

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(&sbi, &mdev);
		if (ret)
			return ret;

		/* trim extent */
		eend = min(offset + size, map.m_la + map.m_llen);
		DBG_BUGON(ptr < map.m_la);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			if (!map.m_llen) {
				/* reached EOF */
				memset(estart, 0, offset + size - ptr);
				ptr = offset + size;
				continue;
			}
			memset(estart, 0, eend - ptr);
			ptr = eend;
			continue;
		}

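		/*
		 * The returned extent may start before ptr (erofs_map_blocks
		 * rounds m_la down to a chunk boundary), so advance the
		 * physical address to the requested position first.
		 */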
		if (ptr > map.m_la) {
			mdev.m_pa += ptr - map.m_la;
			map.m_la = ptr;
		}

		ret = erofs_dev_read(mdev.m_deviceid, estart, mdev.m_pa,
				     eend - map.m_la);
		if (ret < 0)
			return -EIO;
		ptr = eend;
	}
	return 0;
}

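/*
 * Generic positional read: only uncompressed layouts (plain, inline
 * tail-packing, chunk-based) are readable here; compressed layouts
 * return -EOPNOTSUPP.
 */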
int erofs_pread(struct erofs_inode *inode, char *buf,
		erofs_off_t count, erofs_off_t offset)
{
	switch (inode->datalayout) {
	case EROFS_INODE_FLAT_PLAIN:
	case EROFS_INODE_FLAT_INLINE:
	case EROFS_INODE_CHUNK_BASED:
		return erofs_read_raw_data(inode, buf, count, offset);
	case EROFS_INODE_FLAT_COMPRESSION_LEGACY:
	case EROFS_INODE_FLAT_COMPRESSION:
		return -EOPNOTSUPP;
	default:
		break;
	}
	return -EINVAL;
}