// SPDX-License-Identifier: GPL-2.0+ OR Apache-2.0
/*
 * Copyright (C) 2020 Gao Xiang <[email protected]>
 * Compression support by Huang Jianan <[email protected]>
 */
#include <stdlib.h>
#include "erofs/print.h"
#include "erofs/internal.h"
#include "erofs/trace.h"
#include "erofs/decompress.h"

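/*
 * Map the logical range starting at map->m_la of a flat-layout inode
 * (EROFS_INODE_FLAT_PLAIN, or EROFS_INODE_FLAT_INLINE with a tail-packed
 * last block) to its physical location on the primary device.
 */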
static int erofs_map_blocks_flatmode(struct erofs_inode *inode,
				     struct erofs_map_blocks *map,
				     int flags)
{
	int err = 0;
	erofs_blk_t nblocks, lastblk;
	u64 offset = map->m_la;
	struct erofs_inode *vi = inode;
	struct erofs_sb_info *sbi = inode->sbi;
	bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);

	trace_erofs_map_blocks_flatmode_enter(inode, map, flags);

	nblocks = BLK_ROUND_UP(sbi, inode->i_size);
	lastblk = nblocks - tailendpacking;

	/* there are no holes in flat mode */
	map->m_flags = EROFS_MAP_MAPPED;

	if (offset < erofs_pos(sbi, lastblk)) {
		map->m_pa = erofs_pos(sbi, vi->u.i_blkaddr) + map->m_la;
		map->m_plen = erofs_pos(sbi, lastblk) - offset;
	} else if (tailendpacking) {
		/* 2 - inode inline B: inode, [xattrs], inline last blk... */
		map->m_pa = erofs_iloc(vi) + vi->inode_isize +
			vi->xattr_isize + erofs_blkoff(sbi, map->m_la);
		map->m_plen = inode->i_size - offset;

		/* inline data should be located in the same meta block */
		if (erofs_blkoff(sbi, map->m_pa) + map->m_plen >
		    erofs_blksiz(sbi)) {
			erofs_err("inline data crosses block boundary @ nid %" PRIu64,
				  vi->nid);
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto err_out;
		}

		map->m_flags |= EROFS_MAP_META;
	} else {
		erofs_err("internal error @ nid: %" PRIu64 " (size %llu), m_la 0x%" PRIx64,
			  vi->nid, (unsigned long long)inode->i_size, map->m_la);
		DBG_BUGON(1);
		err = -EIO;
		goto err_out;
	}

	map->m_llen = map->m_plen;
err_out:
	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
	return err;
}

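/*
 * Map one logical extent of an uncompressed inode: out-of-bound offsets are
 * left unmapped, chunk-based inodes are resolved through a block map or
 * chunk indexes, and all other layouts fall back to flat-mode mapping.
 */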
int erofs_map_blocks(struct erofs_inode *inode,
		     struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *vi = inode;
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_inode_chunk_index *idx;
	u8 buf[EROFS_MAX_BLOCK_SIZE];
	u64 chunknr;
	unsigned int unit;
	erofs_off_t pos;
	int err = 0;

	map->m_deviceid = 0;
	if (map->m_la >= inode->i_size) {
		/* leave out-of-bound access unmapped */
		map->m_flags = 0;
		map->m_plen = 0;
		goto out;
	}

	if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
		return erofs_map_blocks_flatmode(inode, map, flags);

	if (vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
		unit = sizeof(*idx);			/* chunk index */
	else
		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */

	chunknr = map->m_la >> vi->u.chunkbits;
	pos = roundup(erofs_iloc(vi) + vi->inode_isize +
		      vi->xattr_isize, unit) + unit * chunknr;

	err = erofs_blk_read(sbi, 0, buf, erofs_blknr(sbi, pos), 1);
	if (err < 0)
		return -EIO;

	map->m_la = chunknr << vi->u.chunkbits;
	map->m_plen = min_t(erofs_off_t, 1UL << vi->u.chunkbits,
			    roundup(inode->i_size - map->m_la, erofs_blksiz(sbi)));

	/* handle block map */
	if (!(vi->u.chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
		__le32 *blkaddr = (void *)buf + erofs_blkoff(sbi, pos);

		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
			map->m_flags = 0;
		} else {
			map->m_pa = erofs_pos(sbi, le32_to_cpu(*blkaddr));
			map->m_flags = EROFS_MAP_MAPPED;
		}
		goto out;
	}
	/* parse chunk indexes */
	idx = (void *)buf + erofs_blkoff(sbi, pos);
	switch (le32_to_cpu(idx->blkaddr)) {
	case EROFS_NULL_ADDR:
		map->m_flags = 0;
		break;
	default:
		map->m_deviceid = le16_to_cpu(idx->device_id) &
			sbi->device_id_mask;
		map->m_pa = erofs_pos(sbi, le32_to_cpu(idx->blkaddr));
		map->m_flags = EROFS_MAP_MAPPED;
		break;
	}
out:
	map->m_llen = map->m_plen;
	return err;
}

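/*
 * Turn the physical address map->m_pa into a (device id, relative offset)
 * pair for multi-device images; if no explicit device id was recorded,
 * scan the extra devices for the blkaddr range that contains m_pa.
 */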
int erofs_map_dev(struct erofs_sb_info *sbi, struct erofs_map_dev *map)
{
	struct erofs_device_info *dif;
	int id;

	if (map->m_deviceid) {
		if (sbi->extra_devices < map->m_deviceid)
			return -ENODEV;
	} else if (sbi->extra_devices) {
		for (id = 0; id < sbi->extra_devices; ++id) {
			erofs_off_t startoff, length;

			dif = sbi->devs + id;
			if (!dif->mapped_blkaddr)
				continue;
			startoff = erofs_pos(sbi, dif->mapped_blkaddr);
			length = erofs_pos(sbi, dif->blocks);

			if (map->m_pa >= startoff &&
			    map->m_pa < startoff + length) {
				map->m_pa -= startoff;
				break;
			}
		}
	}
	return 0;
}

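/*
 * Read @len bytes of a single uncompressed extent, starting @offset bytes
 * into it, after resolving which device backs the extent.
 */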
int erofs_read_one_data(struct erofs_inode *inode, struct erofs_map_blocks *map,
			char *buffer, u64 offset, size_t len)
{
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_map_dev mdev;
	int ret;

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map->m_deviceid,
		.m_pa = map->m_pa,
	};
	ret = erofs_map_dev(sbi, &mdev);
	if (ret)
		return ret;

	ret = erofs_dev_read(sbi, mdev.m_deviceid, buffer, mdev.m_pa + offset, len);
	if (ret < 0)
		return -EIO;
	return 0;
}

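/*
 * Read an uncompressed range extent by extent, zero-filling unmapped holes
 * and anything beyond EOF.
 */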
static int erofs_read_raw_data(struct erofs_inode *inode, char *buffer,
			       erofs_off_t size, erofs_off_t offset)
{
	struct erofs_map_blocks map = {
		.index = UINT_MAX,
	};
	int ret;
	erofs_off_t ptr = offset;

	while (ptr < offset + size) {
		char *const estart = buffer + ptr - offset;
		erofs_off_t eend, moff = 0;

		map.m_la = ptr;
		ret = erofs_map_blocks(inode, &map, 0);
		if (ret)
			return ret;

		DBG_BUGON(map.m_plen != map.m_llen);

		/* trim extent */
		eend = min(offset + size, map.m_la + map.m_llen);
		DBG_BUGON(ptr < map.m_la);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			if (!map.m_llen) {
				/* reached EOF */
				memset(estart, 0, offset + size - ptr);
				ptr = offset + size;
				continue;
			}
			memset(estart, 0, eend - ptr);
			ptr = eend;
			continue;
		}

		if (ptr > map.m_la) {
			moff = ptr - map.m_la;
			map.m_la = ptr;
		}

		ret = erofs_read_one_data(inode, &map, estart, moff,
					  eend - map.m_la);
		if (ret)
			return ret;
		ptr = eend;
	}
	return 0;
}

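/*
 * Read and decompress a single compressed extent into @buffer: fragment
 * extents are redirected to the packed inode, otherwise the raw pcluster is
 * read into @raw and decompressed, dropping @skip bytes of decoded output.
 */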
int z_erofs_read_one_data(struct erofs_inode *inode,
			  struct erofs_map_blocks *map, char *raw, char *buffer,
			  erofs_off_t skip, erofs_off_t length, bool trimmed)
{
	struct erofs_sb_info *sbi = inode->sbi;
	struct erofs_map_dev mdev;
	int ret = 0;

	if (map->m_flags & EROFS_MAP_FRAGMENT) {
		struct erofs_inode packed_inode = {
			.sbi = sbi,
			.nid = sbi->packed_nid,
		};

		ret = erofs_read_inode_from_disk(&packed_inode);
		if (ret) {
			erofs_err("failed to read packed inode from disk");
			return ret;
		}

		return erofs_pread(&packed_inode, buffer, length - skip,
				   inode->fragmentoff + skip);
	}

	/* no device id here, thus it will always succeed */
	mdev = (struct erofs_map_dev) {
		.m_pa = map->m_pa,
	};
	ret = erofs_map_dev(sbi, &mdev);
	if (ret) {
		DBG_BUGON(1);
		return ret;
	}

	ret = erofs_dev_read(sbi, mdev.m_deviceid, raw, mdev.m_pa, map->m_plen);
	if (ret < 0)
		return ret;

	ret = z_erofs_decompress(&(struct z_erofs_decompress_req) {
			.sbi = sbi,
			.in = raw,
			.out = buffer,
			.decodedskip = skip,
			.interlaced_offset =
				map->m_algorithmformat == Z_EROFS_COMPRESSION_INTERLACED ?
					erofs_blkoff(sbi, map->m_la) : 0,
			.inputsize = map->m_plen,
			.decodedlength = length,
			.alg = map->m_algorithmformat,
			.partial_decoding = trimmed ? true :
				!(map->m_flags & EROFS_MAP_FULL_MAPPED) ||
					(map->m_flags & EROFS_MAP_PARTIAL_REF),
		 });
	if (ret < 0)
		return ret;
	return 0;
}

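/*
 * Read a range of a compressed inode by walking its extents backwards from
 * the end of the range, growing the raw input buffer on demand and
 * zero-filling unmapped extents.
 */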
static int z_erofs_read_data(struct erofs_inode *inode, char *buffer,
			     erofs_off_t size, erofs_off_t offset)
{
	erofs_off_t end, length, skip;
	struct erofs_map_blocks map = {
		.index = UINT_MAX,
	};
	bool trimmed;
	unsigned int bufsize = 0;
	char *raw = NULL;
	int ret = 0;

	end = offset + size;
	while (end > offset) {
		map.m_la = end - 1;

		ret = z_erofs_map_blocks_iter(inode, &map, 0);
		if (ret)
			break;

		/*
		 * Trim to the needed size if the returned extent is larger
		 * than requested, and set the partial flag as well.
		 */
		if (end < map.m_la + map.m_llen) {
			length = end - map.m_la;
			trimmed = true;
		} else {
			DBG_BUGON(end != map.m_la + map.m_llen);
			length = map.m_llen;
			trimmed = false;
		}

		if (map.m_la < offset) {
			skip = offset - map.m_la;
			end = offset;
		} else {
			skip = 0;
			end = map.m_la;
		}

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			memset(buffer + end - offset, 0, length - skip);
			end = map.m_la;
			continue;
		}

		if (map.m_plen > bufsize) {
			char *newraw;

			bufsize = map.m_plen;
			newraw = realloc(raw, bufsize);
			if (!newraw) {
				ret = -ENOMEM;
				break;
			}
			raw = newraw;
		}

		ret = z_erofs_read_one_data(inode, &map, raw,
				buffer + end - offset, skip, length, trimmed);
		if (ret < 0)
			break;
	}
	if (raw)
		free(raw);
	return ret < 0 ? ret : 0;
}

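/* Read @count bytes at @offset of an inode, dispatching on its data layout. */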
int erofs_pread(struct erofs_inode *inode, char *buf,
		erofs_off_t count, erofs_off_t offset)
{
	switch (inode->datalayout) {
	case EROFS_INODE_FLAT_PLAIN:
	case EROFS_INODE_FLAT_INLINE:
	case EROFS_INODE_CHUNK_BASED:
		return erofs_read_raw_data(inode, buf, count, offset);
	case EROFS_INODE_COMPRESSED_FULL:
	case EROFS_INODE_COMPRESSED_COMPACT:
		return z_erofs_read_data(inode, buf, count, offset);
	default:
		break;
	}
	return -EINVAL;
}

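/*
 * Hypothetical usage sketch (illustrative only, not part of the original
 * file): erofs_pread() is layout-agnostic, so reading a whole regular file
 * takes a single call once the inode has been loaded with
 * erofs_read_inode_from_disk(). The helper name below is made up, and @buf
 * is assumed to hold at least inode->i_size bytes.
 */
static inline int erofs_read_whole_file_example(struct erofs_inode *inode,
						char *buf)
{
	return erofs_pread(inode, buf, inode->i_size, 0);
}

/*
 * Read one variable-sized metadata record (a __le16 length prefix followed
 * by the payload) through inode @nid, advancing *offset past the record.
 */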
static void *erofs_read_metadata_nid(struct erofs_sb_info *sbi, erofs_nid_t nid,
				     erofs_off_t *offset, int *lengthp)
{
	struct erofs_inode vi = { .sbi = sbi, .nid = nid };
	__le16 __len;
	int ret, len;
	char *buffer;

	ret = erofs_read_inode_from_disk(&vi);
	if (ret)
		return ERR_PTR(ret);

	*offset = round_up(*offset, 4);
	ret = erofs_pread(&vi, (void *)&__len, sizeof(__le16), *offset);
	if (ret)
		return ERR_PTR(ret);

	len = le16_to_cpu(__len);
	if (!len)
		return ERR_PTR(-EFSCORRUPTED);

	buffer = malloc(len);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	ret = erofs_pread(&vi, buffer, len, *offset);
	if (ret) {
		free(buffer);
		return ERR_PTR(ret);
	}
	*offset += len;
	return buffer;
}

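/*
 * Same as erofs_read_metadata_nid(), but read the record directly from the
 * primary block device, one block at a time.
 */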
static void *erofs_read_metadata_bdi(struct erofs_sb_info *sbi,
				     erofs_off_t *offset, int *lengthp)
{
	int ret, len, i, cnt;
	void *buffer;
	u8 data[EROFS_MAX_BLOCK_SIZE];

	*offset = round_up(*offset, 4);
	ret = erofs_blk_read(sbi, 0, data, erofs_blknr(sbi, *offset), 1);
	if (ret)
		return ERR_PTR(ret);
	len = le16_to_cpu(*(__le16 *)(data + erofs_blkoff(sbi, *offset)));
	if (!len)
		return ERR_PTR(-EFSCORRUPTED);

	buffer = malloc(len);
	if (!buffer)
		return ERR_PTR(-ENOMEM);
	*offset += sizeof(__le16);
	*lengthp = len;

	for (i = 0; i < len; i += cnt) {
		cnt = min_t(int, erofs_blksiz(sbi) - erofs_blkoff(sbi, *offset),
			    len - i);
		ret = erofs_blk_read(sbi, 0, data, erofs_blknr(sbi, *offset), 1);
		if (ret) {
			free(buffer);
			return ERR_PTR(ret);
		}
		memcpy(buffer + i, data + erofs_blkoff(sbi, *offset), cnt);
		*offset += cnt;
	}
	return buffer;
}

/*
 * Read variable-sized metadata; the offset will be aligned to a 4-byte
 * boundary.
 *
 * @nid is 0 if the metadata is in the meta inode.
 */
void *erofs_read_metadata(struct erofs_sb_info *sbi, erofs_nid_t nid,
			  erofs_off_t *offset, int *lengthp)
{
	if (nid)
		return erofs_read_metadata_nid(sbi, nid, offset, lengthp);
	return erofs_read_metadata_bdi(sbi, offset, lengthp);
}

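/*
 * Hypothetical usage sketch (illustrative only, not part of the original
 * file): callers typically invoke erofs_read_metadata() repeatedly to walk
 * consecutive length-prefixed records, since *offset is re-aligned and
 * advanced on each call. The helper name and the erofs_dbg() message are
 * made up.
 */
static inline void erofs_walk_metadata_example(struct erofs_sb_info *sbi,
					       erofs_off_t pos, int count)
{
	while (count--) {
		int len;
		void *data = erofs_read_metadata(sbi, 0, &pos, &len);

		if (IS_ERR(data))
			break;
		erofs_dbg("metadata record of %d bytes ends at %llu", len,
			  (unsigned long long)pos);
		free(data);
	}
}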