/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021, Alibaba Cloud
 */
7 #ifndef __EROFS_INTERNAL_H
8 #define __EROFS_INTERNAL_H
9
10 #include <linux/fs.h>
11 #include <linux/dax.h>
12 #include <linux/dcache.h>
13 #include <linux/mm.h>
14 #include <linux/module.h>
15 #include <linux/pagemap.h>
16 #include <linux/bio.h>
17 #include <linux/magic.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/iomap.h>
21 #include "erofs_fs.h"
22
/* superblock-aware message helpers; fmt is prefixed with a KERN_* level */
__printf(2, 3) void _erofs_printk(struct super_block *sb, const char *fmt, ...);
#define erofs_err(sb, fmt, ...)		\
	_erofs_printk(sb, KERN_ERR fmt "\n", ##__VA_ARGS__)
#define erofs_info(sb, fmt, ...)	\
	_erofs_printk(sb, KERN_INFO fmt "\n", ##__VA_ARGS__)

/* debug-only BUG_ON; evaluates (but ignores) the condition otherwise */
#ifdef CONFIG_EROFS_FS_DEBUG
#define DBG_BUGON		BUG_ON
#else
#define DBG_BUGON(x)		((void)(x))
#endif	/* !CONFIG_EROFS_FS_DEBUG */

/* EROFS_SUPER_MAGIC_V1 to represent the whole file system */
#define EROFS_SUPER_MAGIC	EROFS_SUPER_MAGIC_V1

typedef u64 erofs_nid_t;	/* on-disk inode number ("node id") */
typedef u64 erofs_off_t;	/* byte offset within the filesystem */
/* data type for filesystem-wide blocks number */
typedef u32 erofs_blk_t;
42
/* per-device information (one per entry for multi-device images) */
struct erofs_device_info {
	char *path;			/* device path */
	struct erofs_fscache *fscache;	/* cookie for fscache mode */
	struct file *file;		/* backing file for file-backed mode */
	struct dax_device *dax_dev;	/* DAX device, if any */
	u64 dax_part_off;		/* partition offset on the DAX device */

	u32 blocks;			/* total block count of this device */
	u32 mapped_blkaddr;		/* start blkaddr this device maps to
					   (presumably in the fs-wide space —
					    confirm against the device table) */
};
53
/* values for erofs_mount_opts.sync_decompress */
enum {
	EROFS_SYNC_DECOMPRESS_AUTO,
	EROFS_SYNC_DECOMPRESS_FORCE_ON,
	EROFS_SYNC_DECOMPRESS_FORCE_OFF
};
59
/* parsed mount options */
struct erofs_mount_opts {
	/* current strategy of how to use managed cache */
	unsigned char cache_strategy;
	/* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
	unsigned int sync_decompress;
	/* threshold for decompression synchronously */
	unsigned int max_sync_decompress_pages;
	unsigned int mount_opt;		/* EROFS_MOUNT_* flag bits */
};
69
/* container for the extra (non-primary) devices of an image */
struct erofs_dev_context {
	struct idr tree;		/* device id -> erofs_device_info */
	struct rw_semaphore rwsem;	/* protects the idr tree */

	unsigned int extra_devices;	/* number of extra devices */
	bool flatdev;			/* single flat address space, no
					   real extra devices */
};
77
/* all filesystem-wide lz4 configurations */
struct erofs_sb_lz4_info {
	/* # of pages needed for EROFS lz4 rolling decompression */
	u16 max_distance_pages;
	/* maximum possible blocks for pclusters in the filesystem */
	u16 max_pclusterblks;
};
85
/* a shared fscache domain; refcounted and kept on a global list */
struct erofs_domain {
	refcount_t ref;
	struct list_head list;
	struct fscache_volume *volume;
	char *domain_id;
};
92
/* per-cookie fscache context for one backing blob */
struct erofs_fscache {
	struct fscache_cookie *cookie;
	struct inode *inode;	/* anonymous inode for the blob */

	/* used for share domain mode */
	struct erofs_domain *domain;
	struct list_head node;
	refcount_t ref;
	char *name;
};
103
/* an in-memory long xattr name prefix and its infix length */
struct erofs_xattr_prefix_item {
	struct erofs_xattr_long_prefix *prefix;
	u8 infix_len;
};
108
/* in-memory superblock private data, hung off sb->s_fs_info */
struct erofs_sb_info {
	struct erofs_device_info dif0;	/* info of the primary device */
	struct erofs_mount_opts opt;	/* options */
#ifdef CONFIG_EROFS_FS_ZIP
	/* list for all registered superblocks, mainly for shrinker */
	struct list_head list;
	struct mutex umount_mutex;

	/* managed XArray arranged in physical block number */
	struct xarray managed_pslots;

	unsigned int shrinker_run_no;
	u16 available_compr_algs;	/* bitmap of usable algorithms */

	/* pseudo inode to manage cached pages */
	struct inode *managed_cache;

	struct erofs_sb_lz4_info lz4;
#endif	/* CONFIG_EROFS_FS_ZIP */
	struct inode *packed_inode;	/* special inode holding fragments */
	struct erofs_dev_context *devs;	/* extra devices, if any */
	u64 total_blocks;

	u32 meta_blkaddr;	/* start blkaddr of the inode metadata area
				   (see erofs_iloc()) */
#ifdef CONFIG_EROFS_FS_XATTR
	u32 xattr_blkaddr;
	u32 xattr_prefix_start;
	u8 xattr_prefix_count;
	struct erofs_xattr_prefix_item *xattr_prefixes;
	unsigned int xattr_filter_reserved;
#endif
	u16 device_id_mask;	/* valid bits of device id to be used */

	unsigned char islotbits;	/* inode slot unit size in bit shift */
	unsigned char blkszbits;	/* filesystem block size in bit shift */

	u32 sb_size;			/* total superblock size */
	u32 build_time_nsec;
	u64 build_time;

	/* what we really care is nid, rather than ino.. */
	erofs_nid_t root_nid;
	erofs_nid_t packed_nid;
	/* used for statfs, f_files - f_favail */
	u64 inos;

	u32 feature_compat;	/* on-disk compatible feature bits */
	u32 feature_incompat;	/* on-disk incompatible feature bits */

	/* sysfs support */
	struct kobject s_kobj;		/* /sys/fs/erofs/<devname> */
	struct completion s_kobj_unregister;

	/* fscache support */
	struct fscache_volume *volume;
	struct erofs_domain *domain;
	char *fsid;
	char *domain_id;
};
168
/* fetch the erofs private data from a (super_block | inode) */
#define EROFS_SB(sb)		((struct erofs_sb_info *)(sb)->s_fs_info)
#define EROFS_I_SB(inode)	((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)

/* Mount flags set via mount options or defaults */
#define EROFS_MOUNT_XATTR_USER		0x00000010
#define EROFS_MOUNT_POSIX_ACL		0x00000020
#define EROFS_MOUNT_DAX_ALWAYS		0x00000040
#define EROFS_MOUNT_DAX_NEVER		0x00000080
#define EROFS_MOUNT_DIRECT_IO		0x00000100

/* manipulate EROFS_MOUNT_* bits of an erofs_mount_opts */
#define clear_opt(opt, option)	((opt)->mount_opt &= ~EROFS_MOUNT_##option)
#define set_opt(opt, option)	((opt)->mount_opt |= EROFS_MOUNT_##option)
#define test_opt(opt, option)	((opt)->mount_opt & EROFS_MOUNT_##option)
182
erofs_is_fileio_mode(struct erofs_sb_info * sbi)183 static inline bool erofs_is_fileio_mode(struct erofs_sb_info *sbi)
184 {
185 return IS_ENABLED(CONFIG_EROFS_FS_BACKED_BY_FILE) && sbi->dif0.file;
186 }
187
erofs_is_fscache_mode(struct super_block * sb)188 static inline bool erofs_is_fscache_mode(struct super_block *sb)
189 {
190 return IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) &&
191 !erofs_is_fileio_mode(EROFS_SB(sb)) && !sb->s_bdev;
192 }
193
/* values for erofs_mount_opts.cache_strategy */
enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND
};

/* how (or whether) an erofs_buf should be kmapped */
enum erofs_kmap_type {
	EROFS_NO_KMAP,	/* don't map the buffer */
	EROFS_KMAP,	/* use kmap_local_page() to map the buffer */
};
204
/* a cursor over one cached metadata page (see erofs_bread() and friends) */
struct erofs_buf {
	struct address_space *mapping;
	struct file *file;
	struct page *page;	/* currently pinned page, if any */
	void *base;		/* kmapped address of @page, if mapped */
};
#define __EROFS_BUF_INITIALIZER	((struct erofs_buf){ .page = NULL })

/* byte address <-> block number conversions based on sb block size */
#define erofs_blknr(sb, addr)	((erofs_blk_t)((addr) >> (sb)->s_blocksize_bits))
#define erofs_blkoff(sb, addr)	((addr) & ((sb)->s_blocksize - 1))
#define erofs_pos(sb, blk)	((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
/* number of blocks needed to hold inode i's data (i_size rounded up) */
#define erofs_iblks(i)	(round_up((i)->i_size, i_blocksize(i)) >> (i)->i_blkbits)
217
/*
 * Generates erofs_sb_has_<name>() predicates testing the feature bits
 * cached in sbi->feature_compat / sbi->feature_incompat.
 */
#define EROFS_FEATURE_FUNCS(name, compat, feature) \
static inline bool erofs_sb_has_##name(struct erofs_sb_info *sbi) \
{ \
	return sbi->feature_##compat & EROFS_FEATURE_##feature; \
}

EROFS_FEATURE_FUNCS(zero_padding, incompat, INCOMPAT_ZERO_PADDING)
EROFS_FEATURE_FUNCS(compr_cfgs, incompat, INCOMPAT_COMPR_CFGS)
EROFS_FEATURE_FUNCS(big_pcluster, incompat, INCOMPAT_BIG_PCLUSTER)
EROFS_FEATURE_FUNCS(chunked_file, incompat, INCOMPAT_CHUNKED_FILE)
EROFS_FEATURE_FUNCS(device_table, incompat, INCOMPAT_DEVICE_TABLE)
EROFS_FEATURE_FUNCS(compr_head2, incompat, INCOMPAT_COMPR_HEAD2)
EROFS_FEATURE_FUNCS(ztailpacking, incompat, INCOMPAT_ZTAILPACKING)
EROFS_FEATURE_FUNCS(fragments, incompat, INCOMPAT_FRAGMENTS)
EROFS_FEATURE_FUNCS(dedupe, incompat, INCOMPAT_DEDUPE)
EROFS_FEATURE_FUNCS(xattr_prefixes, incompat, INCOMPAT_XATTR_PREFIXES)
EROFS_FEATURE_FUNCS(sb_chksum, compat, COMPAT_SB_CHKSUM)
EROFS_FEATURE_FUNCS(xattr_filter, compat, COMPAT_XATTR_FILTER)
236
/* atomic flag definitions (bit numbers in erofs_inode.flags) */
#define EROFS_I_EA_INITED_BIT	0
#define EROFS_I_Z_INITED_BIT	1

/* bitlock definitions (arranged in reverse order) */
#define EROFS_I_BL_XATTR_BIT	(BITS_PER_LONG - 1)
#define EROFS_I_BL_Z_BIT	(BITS_PER_LONG - 2)
244
/* in-memory inode; the vfs inode is embedded (see EROFS_I() below) */
struct erofs_inode {
	erofs_nid_t nid;	/* on-disk inode number */

	/* atomic flags (including bitlocks) */
	unsigned long flags;

	unsigned char datalayout;	/* on-disk data layout type */
	unsigned char inode_isize;	/* on-disk inode size */
	unsigned int xattr_isize;	/* bytes of the inline xattr area */

	unsigned int xattr_name_filter;
	unsigned int xattr_shared_count;
	unsigned int *xattr_shared_xattrs;

	/* layout-specific fields; which member is live depends on datalayout */
	union {
		erofs_blk_t raw_blkaddr;
		struct {
			unsigned short chunkformat;
			unsigned char  chunkbits;
		};
#ifdef CONFIG_EROFS_FS_ZIP
		struct {
			unsigned short z_advise;
			unsigned char  z_algorithmtype[2];
			unsigned char  z_logical_clusterbits;
			unsigned long  z_tailextent_headlcn;
			union {
				struct {
					erofs_off_t  z_idataoff;
					unsigned short z_idata_size;
				};
				erofs_off_t z_fragmentoff;
			};
		};
#endif	/* CONFIG_EROFS_FS_ZIP */
	};
	/* the corresponding vfs inode */
	struct inode vfs_inode;
};

#define EROFS_I(ptr)	container_of(ptr, struct erofs_inode, vfs_inode)
286
erofs_iloc(struct inode * inode)287 static inline erofs_off_t erofs_iloc(struct inode *inode)
288 {
289 struct erofs_sb_info *sbi = EROFS_I_SB(inode);
290
291 return erofs_pos(inode->i_sb, sbi->meta_blkaddr) +
292 (EROFS_I(inode)->nid << sbi->islotbits);
293 }
294
erofs_inode_version(unsigned int ifmt)295 static inline unsigned int erofs_inode_version(unsigned int ifmt)
296 {
297 return (ifmt >> EROFS_I_VERSION_BIT) & EROFS_I_VERSION_MASK;
298 }
299
erofs_inode_datalayout(unsigned int ifmt)300 static inline unsigned int erofs_inode_datalayout(unsigned int ifmt)
301 {
302 return (ifmt >> EROFS_I_DATALAYOUT_BIT) & EROFS_I_DATALAYOUT_MASK;
303 }
304
305 /* reclaiming is never triggered when allocating new folios. */
erofs_grab_folio_nowait(struct address_space * as,pgoff_t index)306 static inline struct folio *erofs_grab_folio_nowait(struct address_space *as,
307 pgoff_t index)
308 {
309 return __filemap_get_folio(as, index,
310 FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
311 readahead_gfp_mask(as) & ~__GFP_RECLAIM);
312 }
313
/* flags reported in erofs_map_blocks.m_flags */
/* Has a disk mapping */
#define EROFS_MAP_MAPPED	0x0001
/* Located in metadata (could be copied from bd_inode) */
#define EROFS_MAP_META		0x0002
/* The extent is encoded */
#define EROFS_MAP_ENCODED	0x0004
/* The length of extent is full */
#define EROFS_MAP_FULL_MAPPED	0x0008
/* Located in the special packed inode */
#define EROFS_MAP_FRAGMENT	0x0010
/* The extent refers to partial decompressed data */
#define EROFS_MAP_PARTIAL_REF	0x0020
326
/* result of a logical -> physical extent lookup (erofs_map_blocks()) */
struct erofs_map_blocks {
	struct erofs_buf buf;	/* metadata buffer used during the lookup */

	erofs_off_t m_pa, m_la;	/* physical / logical start addresses */
	u64 m_plen, m_llen;	/* physical / logical extent lengths */

	unsigned short m_deviceid;
	char m_algorithmformat;
	unsigned int m_flags;	/* EROFS_MAP_* flags */
};
337
/*
 * Used to get the exact decompressed length, e.g. fiemap (consider lookback
 * approach instead if possible since it's more metadata lightweight.)
 */
#define EROFS_GET_BLOCKS_FIEMAP		0x0001
/* Used to map the whole extent if non-negligible data is requested for LZMA */
#define EROFS_GET_BLOCKS_READMORE	0x0002
/* Used to map tail extent for tailpacking inline or fragment pcluster */
#define EROFS_GET_BLOCKS_FINDTAIL	0x0004

/* runtime-only algorithm ids, appended after the on-disk ones */
enum {
	Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
	Z_EROFS_COMPRESSION_INTERLACED,
	Z_EROFS_COMPRESSION_RUNTIME_MAX
};
353
/* result of resolving a physical address to a concrete device
 * (see erofs_map_dev()) */
struct erofs_map_dev {
	struct super_block *m_sb;
	struct erofs_device_info *m_dif;
	struct block_device *m_bdev;

	erofs_off_t m_pa;	/* physical address on the resolved device */
	unsigned int m_deviceid;
};
362
/* operation tables implemented across the erofs source files */
extern const struct super_operations erofs_sops;

extern const struct address_space_operations erofs_aops;
extern const struct address_space_operations erofs_fileio_aops;
extern const struct address_space_operations z_erofs_aops;
extern const struct address_space_operations erofs_fscache_access_aops;

extern const struct inode_operations erofs_generic_iops;
extern const struct inode_operations erofs_symlink_iops;
extern const struct inode_operations erofs_fast_symlink_iops;
extern const struct inode_operations erofs_dir_iops;

extern const struct file_operations erofs_file_fops;
extern const struct file_operations erofs_dir_fops;

extern const struct iomap_ops z_erofs_iomap_report_ops;

/* flags for erofs_fscache_register_cookie() */
#define EROFS_REG_COOKIE_SHARE		0x0001
#define EROFS_REG_COOKIE_NEED_NOEXIST	0x0002

/* metadata buffer helpers and core inode/data paths */
void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
			  erofs_off_t *offset, int *lengthp);
void erofs_unmap_metabuf(struct erofs_buf *buf);
void erofs_put_metabuf(struct erofs_buf *buf);
void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
		  enum erofs_kmap_type type);
void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
			 erofs_off_t offset, enum erofs_kmap_type type);
int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 u64 start, u64 len);
int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map);
void erofs_onlinefolio_init(struct folio *folio);
void erofs_onlinefolio_split(struct folio *folio);
void erofs_onlinefolio_end(struct folio *folio, int err);
struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid);
int erofs_getattr(struct mnt_idmap *idmap, const struct path *path,
		  struct kstat *stat, u32 request_mask,
		  unsigned int query_flags);
int erofs_namei(struct inode *dir, const struct qstr *name,
		erofs_nid_t *nid, unsigned int *d_type);
406
/*
 * Map @count pages into virtually contiguous space.  vm_map_ram() can fail
 * transiently (e.g. vmap space exhausted by lazily-freed aliases), so flush
 * with vm_unmap_aliases() and retry; give up after 3 attempts in total and
 * return whatever the last attempt yielded (NULL on failure).
 *
 * The unreachable trailing `return NULL;` after the infinite loop has been
 * dropped: the loop's only exit is `return p;`.
 */
static inline void *erofs_vm_map_ram(struct page **pages, unsigned int count)
{
	int retried = 0;

	while (1) {
		void *p = vm_map_ram(pages, count, -1);

		/* retry two more times (totally 3 times) */
		if (p || ++retried >= 3)
			return p;
		vm_unmap_aliases();
	}
}
421
/* sysfs support (/sys/fs/erofs) */
int erofs_register_sysfs(struct super_block *sb);
void erofs_unregister_sysfs(struct super_block *sb);
int __init erofs_init_sysfs(void);
void erofs_exit_sysfs(void);

/* page allocation backend; @tryrsv selects an alternate allocation path */
struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
/* shorthand for __erofs_allocpage() with tryrsv == false */
static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
	return __erofs_allocpage(pagepool, gfp, false);
}
/* push @page onto the pool: pages are singly-linked via page->private */
static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
{
	set_page_private(page, (unsigned long)*pagepool);
	*pagepool = page;
}
void erofs_release_pages(struct page **pagepool);
438
#ifdef CONFIG_EROFS_FS_ZIP
/* address space of the pseudo inode caching compressed pages */
#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)

extern atomic_long_t erofs_global_shrink_cnt;
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
int __init erofs_init_shrinker(void);
void erofs_exit_shrinker(void);
int __init z_erofs_init_subsystem(void);
void z_erofs_exit_subsystem(void);
unsigned long z_erofs_shrink_scan(struct erofs_sb_info *sbi,
				  unsigned long nr_shrink);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags);
/* global ("gbuf") temporary buffer management for decompression */
void *z_erofs_get_gbuf(unsigned int requiredpages);
void z_erofs_put_gbuf(void *ptr);
int z_erofs_gbuf_growsize(unsigned int nrpages);
int __init z_erofs_gbuf_init(void);
void z_erofs_gbuf_exit(void);
int erofs_init_managed_cache(struct super_block *sb);
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
#else
/* no-op stubs when CONFIG_EROFS_FS_ZIP is disabled */
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_subsystem(void) { return 0; }
static inline void z_erofs_exit_subsystem(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif	/* !CONFIG_EROFS_FS_ZIP */
469
/* file-backed mount support; stubs return NULL / do nothing when disabled */
#ifdef CONFIG_EROFS_FS_BACKED_BY_FILE
struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev);
void erofs_fileio_submit_bio(struct bio *bio);
#else
static inline struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
static inline void erofs_fileio_submit_bio(struct bio *bio) {}
#endif
477
/* fscache (on-demand) support; stubs report -EOPNOTSUPP when disabled */
#ifdef CONFIG_EROFS_FS_ONDEMAND
int erofs_fscache_register_fs(struct super_block *sb);
void erofs_fscache_unregister_fs(struct super_block *sb);

struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name,
						    unsigned int flags);
void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache);
struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev);
void erofs_fscache_submit_bio(struct bio *bio);
#else
static inline int erofs_fscache_register_fs(struct super_block *sb)
{
	return -EOPNOTSUPP;
}
static inline void erofs_fscache_unregister_fs(struct super_block *sb) {}

static inline
struct erofs_fscache *erofs_fscache_register_cookie(struct super_block *sb,
						    char *name,
						    unsigned int flags)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void erofs_fscache_unregister_cookie(struct erofs_fscache *fscache)
{
}
static inline struct bio *erofs_fscache_bio_alloc(struct erofs_map_dev *mdev) { return NULL; }
static inline void erofs_fscache_submit_bio(struct bio *bio) {}
#endif
507
#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */

#endif	/* __EROFS_INTERNAL_H */
511