// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS module and super block management.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */
/*
 *  linux/fs/ext2/super.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card ([email protected])
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *  David S. Miller ([email protected]), 1995
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/crc32.h>
#include <linux/vfs.h>
#include <linux/writeback.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include "nilfs.h"
#include "export.h"
#include "mdt.h"
#include "alloc.h"
#include "btree.h"
#include "btnode.h"
#include "page.h"
#include "cpfile.h"
#include "sufile.h" /* nilfs_sufile_resize(), nilfs_sufile_set_alloc_range() */
#include "ifile.h"
#include "dat.h"
#include "segment.h"
#include "segbuf.h"

MODULE_AUTHOR("NTT Corp.");
MODULE_DESCRIPTION("A New Implementation of the Log-structured Filesystem "
		   "(NILFS)");
MODULE_LICENSE("GPL");

static struct kmem_cache *nilfs_inode_cachep;
struct kmem_cache *nilfs_transaction_cachep;
struct kmem_cache *nilfs_segbuf_cachep;
struct kmem_cache *nilfs_btree_path_cache;

static int nilfs_setup_super(struct super_block *sb, int is_mount);

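/**
 * __nilfs_msg - print a NILFS message with device information
 * @sb: super block instance, or NULL if no device context is available
 * @fmt: printk format string, optionally prefixed with a KERN_<LEVEL> marker
 * @...: arguments to @fmt
 *
 * This implements the body of the nilfs_err(), nilfs_warn(), and
 * nilfs_info() helpers used throughout this file: the severity level
 * embedded in @fmt is re-emitted in front of a "NILFS" prefix that
 * includes the device name when @sb is given.
 */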
void __nilfs_msg(struct super_block *sb, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int level;

	va_start(args, fmt);

	level = printk_get_level(fmt);
	vaf.fmt = printk_skip_level(fmt);
	vaf.va = &args;

	if (sb)
		printk("%c%cNILFS (%s): %pV\n",
		       KERN_SOH_ASCII, level, sb->s_id, &vaf);
	else
		printk("%c%cNILFS: %pV\n",
		       KERN_SOH_ASCII, level, &vaf);

	va_end(args);
}

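/**
 * nilfs_set_error - flag the filesystem as having errors
 * @sb: super block instance
 *
 * Set NILFS_ERROR_FS in the in-memory mount state and, if possible, in
 * both on-disk super blocks, committing them so that the error
 * condition survives a remount or crash.
 */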
static void nilfs_set_error(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;

	down_write(&nilfs->ns_sem);
	if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
		nilfs->ns_mount_state |= NILFS_ERROR_FS;
		sbp = nilfs_prepare_super(sb, 0);
		if (likely(sbp)) {
			sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
			if (sbp[1])
				sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
			nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
		}
	}
	up_write(&nilfs->ns_sem);
}

/**
 * __nilfs_error() - report failure condition on a filesystem
 * @sb: super block instance
 * @function: name of calling function
 * @fmt: format string for message to be output
 * @...: optional arguments to @fmt
 *
 * __nilfs_error() sets an ERROR_FS flag on the superblock as well as
 * reporting an error message.  This function should be called when
 * NILFS detects inconsistencies or defects in metadata on disk.
 *
 * This implements the body of the nilfs_error() macro, which should
 * normally be used.  For errors that the filesystem can sustain, such
 * as a single-shot I/O error, nilfs_err() should be used instead.
 *
 * Callers should not add a trailing newline since this will do it.
 */
void __nilfs_error(struct super_block *sb, const char *function,
		   const char *fmt, ...)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	printk(KERN_CRIT "NILFS error (device %s): %s: %pV\n",
	       sb->s_id, function, &vaf);

	va_end(args);

	if (!sb_rdonly(sb)) {
		nilfs_set_error(sb);

		if (nilfs_test_opt(nilfs, ERRORS_RO)) {
			printk(KERN_CRIT "Remounting filesystem read-only\n");
			sb->s_flags |= SB_RDONLY;
		}
	}

	if (nilfs_test_opt(nilfs, ERRORS_PANIC))
		panic("NILFS (device %s): panic forced after error\n",
		      sb->s_id);
}

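/**
 * nilfs_alloc_inode - allocate a nilfs inode object
 * @sb: super block instance
 *
 * Allocate a struct nilfs_inode_info from the inode slab cache and
 * initialize its NILFS-specific fields.
 *
 * Return: pointer to the embedded VFS inode, or NULL on allocation
 * failure.
 */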
struct inode *nilfs_alloc_inode(struct super_block *sb)
{
	struct nilfs_inode_info *ii;

	ii = alloc_inode_sb(sb, nilfs_inode_cachep, GFP_NOFS);
	if (!ii)
		return NULL;
	ii->i_bh = NULL;
	ii->i_state = 0;
	ii->i_type = 0;
	ii->i_cno = 0;
	ii->i_assoc_inode = NULL;
	ii->i_bmap = &ii->i_bmap_data;
	return &ii->vfs_inode;
}

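/**
 * nilfs_free_inode - free a nilfs inode object
 * @inode: inode to be freed
 *
 * Tear down metadata file state if @inode belongs to a metadata file,
 * then return the containing nilfs_inode_info to the slab cache.
 */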
static void nilfs_free_inode(struct inode *inode)
{
	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_destroy(inode);

	kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode));
}

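/**
 * nilfs_sync_super - write back the on-disk super block(s)
 * @sb: super block instance
 * @flag: commit mode (NILFS_SB_COMMIT or NILFS_SB_COMMIT_ALL)
 *
 * Write the primary super block, issuing a flush/FUA request when the
 * barrier mount option is enabled.  If the write fails with an I/O
 * error and a spare copy exists, fall back to the secondary super
 * block and retry.  On success, update the sequence number that
 * protects recent segments from the garbage collector.
 *
 * Return: 0 on success, or a negative error code on failure.
 */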
static int nilfs_sync_super(struct super_block *sb, int flag)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

 retry:
	set_buffer_dirty(nilfs->ns_sbh[0]);
	if (nilfs_test_opt(nilfs, BARRIER)) {
		err = __sync_dirty_buffer(nilfs->ns_sbh[0],
					  REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
	} else {
		err = sync_dirty_buffer(nilfs->ns_sbh[0]);
	}

	if (unlikely(err)) {
		nilfs_err(sb, "unable to write superblock: err=%d", err);
		if (err == -EIO && nilfs->ns_sbh[1]) {
			/*
			 * sbp[0] points to a newer log than sbp[1], so
			 * copy sbp[0] to sbp[1] before falling back, so
			 * that the newer state is taken over.
			 */
			memcpy(nilfs->ns_sbp[1], nilfs->ns_sbp[0],
			       nilfs->ns_sbsize);
			nilfs_fall_back_super_block(nilfs);
			goto retry;
		}
	} else {
		struct nilfs_super_block *sbp = nilfs->ns_sbp[0];

		nilfs->ns_sbwcount++;

		/*
		 * The latest segment becomes trailable from the position
		 * written in the superblock.
		 */
		clear_nilfs_discontinued(nilfs);

		/* update GC protection for recent segments */
		if (nilfs->ns_sbh[1]) {
			if (flag == NILFS_SB_COMMIT_ALL) {
				set_buffer_dirty(nilfs->ns_sbh[1]);
				if (sync_dirty_buffer(nilfs->ns_sbh[1]) < 0)
					goto out;
			}
			if (le64_to_cpu(nilfs->ns_sbp[1]->s_last_cno) <
			    le64_to_cpu(nilfs->ns_sbp[0]->s_last_cno))
				sbp = nilfs->ns_sbp[1];
		}

		spin_lock(&nilfs->ns_last_segment_lock);
		nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
		spin_unlock(&nilfs->ns_last_segment_lock);
	}
 out:
	return err;
}

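/**
 * nilfs_set_log_cursor - record the log position in a super block
 * @sbp: super block data to be updated
 * @nilfs: nilfs object
 *
 * Fill in the free block count and the sequence number, partial
 * segment position, and checkpoint number of the last log in @sbp.
 * The caller must hold nilfs->ns_sem.
 */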
void nilfs_set_log_cursor(struct nilfs_super_block *sbp,
			  struct the_nilfs *nilfs)
{
	sector_t nfreeblocks;

	/* nilfs->ns_sem must be locked by the caller. */
	nilfs_count_free_blocks(nilfs, &nfreeblocks);
	sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks);

	spin_lock(&nilfs->ns_last_segment_lock);
	sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
	sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
	sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
	spin_unlock(&nilfs->ns_last_segment_lock);
}

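/**
 * nilfs_prepare_super - make the super block pair ready for update
 * @sb: super block instance
 * @flip: whether to swap the primary and secondary super blocks
 *
 * Check the magic number of both super block copies and repair a
 * broken copy from its intact mirror.  If @flip is set and a secondary
 * copy exists, swap the two copies.  The caller must hold
 * nilfs->ns_sem.
 *
 * Return: pointer to the super block array, or NULL if both copies
 * are broken.
 */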
struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
					       int flip)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;

	/* nilfs->ns_sem must be locked by the caller. */
	if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
		if (sbp[1] &&
		    sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
			memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
		} else {
			nilfs_crit(sb, "superblock broke");
			return NULL;
		}
	} else if (sbp[1] &&
		   sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
	}

	if (flip && sbp[1])
		nilfs_swap_super_block(nilfs);

	return sbp;
}

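/**
 * nilfs_commit_super - commit the super block(s) to disk
 * @sb: super block instance
 * @flag: commit mode (NILFS_SB_COMMIT or NILFS_SB_COMMIT_ALL)
 *
 * Stamp the write time and recompute the CRC of the primary super
 * block (and of the secondary one as well when @flag is
 * NILFS_SB_COMMIT_ALL), clear the in-memory dirty flag, and write the
 * result to disk.  The caller must hold nilfs->ns_sem.
 *
 * Return: 0 on success, or a negative error code on failure.
 */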
int nilfs_commit_super(struct super_block *sb, int flag)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	time64_t t;

	/* nilfs->ns_sem must be locked by the caller. */
	t = ktime_get_real_seconds();
	nilfs->ns_sbwtime = t;
	sbp[0]->s_wtime = cpu_to_le64(t);
	sbp[0]->s_sum = 0;
	sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
					     (unsigned char *)sbp[0],
					     nilfs->ns_sbsize));
	if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) {
		sbp[1]->s_wtime = sbp[0]->s_wtime;
		sbp[1]->s_sum = 0;
		sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
						     (unsigned char *)sbp[1],
						     nilfs->ns_sbsize));
	}
	clear_nilfs_sb_dirty(nilfs);
	nilfs->ns_flushed_device = 1;
	/* make sure store to ns_flushed_device cannot be reordered */
	smp_wmb();
	return nilfs_sync_super(sb, flag);
}

/**
 * nilfs_cleanup_super() - write filesystem state for cleanup
 * @sb: super block instance to be unmounted or degraded to read-only
 *
 * This function restores state flags in the on-disk super block.
 * This will set the "clean" flag (i.e. NILFS_VALID_FS) unless the
 * filesystem was previously unclean.
 *
 * Return: 0 on success, or %-EIO if an I/O error occurs or the
 * superblock is corrupted.
 */
int nilfs_cleanup_super(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int flag = NILFS_SB_COMMIT;
	int ret = -EIO;

	sbp = nilfs_prepare_super(sb, 0);
	if (sbp) {
		sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
		nilfs_set_log_cursor(sbp[0], nilfs);
		if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
			/*
			 * Apply the "clean" flag to the secondary super
			 * block as well if both super blocks point to
			 * the same checkpoint.
			 */
			sbp[1]->s_state = sbp[0]->s_state;
			flag = NILFS_SB_COMMIT_ALL;
		}
		ret = nilfs_commit_super(sb, flag);
	}
	return ret;
}

/**
 * nilfs_move_2nd_super - relocate secondary super block
 * @sb: super block instance
 * @sb2off: new offset of the secondary super block (in bytes)
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *nsbh;
	struct nilfs_super_block *nsbp;
	sector_t blocknr, newblocknr;
	unsigned long offset;
	int sb2i;  /* array index of the secondary superblock */
	int ret = 0;

	/* nilfs->ns_sem must be locked by the caller. */
	if (nilfs->ns_sbh[1] &&
	    nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) {
		sb2i = 1;
		blocknr = nilfs->ns_sbh[1]->b_blocknr;
	} else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) {
		sb2i = 0;
		blocknr = nilfs->ns_sbh[0]->b_blocknr;
	} else {
		sb2i = -1;
		blocknr = 0;
	}
	if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off)
		goto out;  /* super block location is unchanged */

	/* Get new super block buffer */
	newblocknr = sb2off >> nilfs->ns_blocksize_bits;
	offset = sb2off & (nilfs->ns_blocksize - 1);
	nsbh = sb_getblk(sb, newblocknr);
	if (!nsbh) {
		nilfs_warn(sb,
			   "unable to move secondary superblock to block %llu",
			   (unsigned long long)newblocknr);
		ret = -EIO;
		goto out;
	}
	nsbp = (void *)nsbh->b_data + offset;

	lock_buffer(nsbh);
	if (sb2i >= 0) {
		/*
		 * The position of the second superblock only changes by 4KiB,
		 * which is larger than the maximum superblock data size
		 * (= 1KiB), so there is no need to use memmove() to allow
		 * overlap between source and destination.
		 */
		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);

		/*
		 * Zero fill after copy to avoid overwriting in case of move
		 * within the same block.
		 */
		memset(nsbh->b_data, 0, offset);
		memset((void *)nsbp + nilfs->ns_sbsize, 0,
		       nsbh->b_size - offset - nilfs->ns_sbsize);
	} else {
		memset(nsbh->b_data, 0, nsbh->b_size);
	}
	set_buffer_uptodate(nsbh);
	unlock_buffer(nsbh);

	if (sb2i >= 0) {
		brelse(nilfs->ns_sbh[sb2i]);
		nilfs->ns_sbh[sb2i] = nsbh;
		nilfs->ns_sbp[sb2i] = nsbp;
	} else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) {
		/* secondary super block will be restored to index 1 */
		nilfs->ns_sbh[1] = nsbh;
		nilfs->ns_sbp[1] = nsbp;
	} else {
		brelse(nsbh);
	}
 out:
	return ret;
}

/**
 * nilfs_resize_fs - resize the filesystem
 * @sb: super block instance
 * @newsize: new size of the filesystem (in bytes)
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	__u64 devsize, newnsegs;
	loff_t sb2off;
	int ret;

	ret = -ERANGE;
	devsize = bdev_nr_bytes(sb->s_bdev);
	if (newsize > devsize)
		goto out;

	/*
	 * Prevent underflow in second superblock position calculation.
	 * The exact minimum size check is done in nilfs_sufile_resize().
	 */
	if (newsize < 4096) {
		ret = -ENOSPC;
		goto out;
	}

	/*
	 * Write lock is required to protect some functions depending
	 * on the number of segments, the number of reserved segments,
	 * and so forth.
	 */
	down_write(&nilfs->ns_segctor_sem);

	sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
	newnsegs = sb2off >> nilfs->ns_blocksize_bits;
	newnsegs = div64_ul(newnsegs, nilfs->ns_blocks_per_segment);

	ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
	up_write(&nilfs->ns_segctor_sem);
	if (ret < 0)
		goto out;

	ret = nilfs_construct_segment(sb);
	if (ret < 0)
		goto out;

	down_write(&nilfs->ns_sem);
	nilfs_move_2nd_super(sb, sb2off);
	ret = -EIO;
	sbp = nilfs_prepare_super(sb, 0);
	if (likely(sbp)) {
		nilfs_set_log_cursor(sbp[0], nilfs);
		/*
		 * Drop NILFS_RESIZE_FS flag for compatibility with
		 * mount-time resize which may be implemented in a
		 * future release.
		 */
		sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
					      ~NILFS_RESIZE_FS);
		sbp[0]->s_dev_size = cpu_to_le64(newsize);
		sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
		if (sbp[1])
			memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
		ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
	}
	up_write(&nilfs->ns_sem);

	/*
	 * Reset the range of allocatable segments last.  This order
	 * is important in the case of expansion because the secondary
	 * superblock must be protected from log write until migration
	 * completes.
	 */
	if (!ret)
		nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1);
 out:
	return ret;
}

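/**
 * nilfs_put_super - release the super block
 * @sb: super block instance to be released
 *
 * Detach the log writer, write out clean-up state if the filesystem is
 * still writable, and free the the_nilfs object along with the
 * metadata file inodes.
 */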
static void nilfs_put_super(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	nilfs_detach_log_writer(sb);

	if (!sb_rdonly(sb)) {
		down_write(&nilfs->ns_sem);
		nilfs_cleanup_super(sb);
		up_write(&nilfs->ns_sem);
	}

	nilfs_sysfs_delete_device_group(nilfs);
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

	destroy_nilfs(nilfs);
	sb->s_fs_info = NULL;
}

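/**
 * nilfs_sync_fs - synchronize the filesystem
 * @sb: super block instance
 * @wait: whether to wait for the writeback to complete
 *
 * Construct a log containing all dirty data when @wait is set, commit
 * the super block if it is dirty, and then flush the underlying
 * device.
 *
 * Return: 0 on success, or a negative error code on failure.
 */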
static int nilfs_sync_fs(struct super_block *sb, int wait)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int err = 0;

	/* This function is called when the super block should be written back */
	if (wait)
		err = nilfs_construct_segment(sb);

	down_write(&nilfs->ns_sem);
	if (nilfs_sb_dirty(nilfs)) {
		sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs));
		if (likely(sbp)) {
			nilfs_set_log_cursor(sbp[0], nilfs);
			nilfs_commit_super(sb, NILFS_SB_COMMIT);
		}
	}
	up_write(&nilfs->ns_sem);

	if (!err)
		err = nilfs_flush_device(nilfs);

	return err;
}

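/**
 * nilfs_attach_checkpoint - attach a checkpoint as a filesystem root
 * @sb: super block instance
 * @cno: checkpoint number to be attached
 * @curr_mnt: whether this is the current (writable) mount
 * @rootp: buffer in which the attached root object is stored
 *
 * Look up or create the nilfs_root for checkpoint @cno (or for the
 * current tree when @curr_mnt is set) and read in its inode file.
 *
 * Return: 0 on success, or a negative error code on failure.
 */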
int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
			    struct nilfs_root **rootp)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_root *root;
	int err = -ENOMEM;

	root = nilfs_find_or_create_root(
		nilfs, curr_mnt ? NILFS_CPTREE_CURRENT_CNO : cno);
	if (!root)
		return err;

	if (root->ifile)
		goto reuse; /* already attached checkpoint */

	down_read(&nilfs->ns_segctor_sem);
	err = nilfs_ifile_read(sb, root, cno, nilfs->ns_inode_size);
	up_read(&nilfs->ns_segctor_sem);
	if (unlikely(err))
		goto failed;

 reuse:
	*rootp = root;
	return 0;

 failed:
	if (err == -EINVAL)
		nilfs_err(sb, "Invalid checkpoint (checkpoint number=%llu)",
			  (unsigned long long)cno);
	nilfs_put_root(root);

	return err;
}

static int nilfs_freeze(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	if (sb_rdonly(sb))
		return 0;

	/* Mark super block clean */
	down_write(&nilfs->ns_sem);
	err = nilfs_cleanup_super(sb);
	up_write(&nilfs->ns_sem);
	return err;
}

static int nilfs_unfreeze(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	if (sb_rdonly(sb))
		return 0;

	down_write(&nilfs->ns_sem);
	nilfs_setup_super(sb, false);
	up_write(&nilfs->ns_sem);
	return 0;
}

static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;
	struct the_nilfs *nilfs = root->nilfs;
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	unsigned long long blocks;
	unsigned long overhead;
	unsigned long nrsvblocks;
	sector_t nfreeblocks;
	u64 nmaxinodes, nfreeinodes;
	int err;

	/*
	 * Compute the total number of segment blocks.
	 *
	 * The blocks before the first segment and after the last segment
	 * are excluded.
	 */
	blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments
		- nilfs->ns_first_data_block;
	nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment;

	/*
	 * Compute the overhead.
	 *
	 * If metadata blocks are ever placed outside the segment
	 * structure, they must be counted as overhead.
	 */
	overhead = 0;

	err = nilfs_count_free_blocks(nilfs, &nfreeblocks);
	if (unlikely(err))
		return err;

	err = nilfs_ifile_count_free_inodes(root->ifile,
					    &nmaxinodes, &nfreeinodes);
	if (unlikely(err)) {
		nilfs_warn(sb, "failed to count free inodes: err=%d", err);
		if (err == -ERANGE) {
			/*
			 * If nilfs_palloc_count_max_entries() returns
			 * -ERANGE, simply treat the current inode count
			 * as the maximum possible and report zero free
			 * inodes.
			 */
			nmaxinodes = atomic64_read(&root->inodes_count);
			nfreeinodes = 0;
			err = 0;
		} else
			return err;
	}

	buf->f_type = NILFS_SUPER_MAGIC;
	buf->f_bsize = sb->s_blocksize;
	buf->f_blocks = blocks - overhead;
	buf->f_bfree = nfreeblocks;
	buf->f_bavail = (buf->f_bfree >= nrsvblocks) ?
		(buf->f_bfree - nrsvblocks) : 0;
	buf->f_files = nmaxinodes;
	buf->f_ffree = nfreeinodes;
	buf->f_namelen = NILFS_NAME_LEN;
	buf->f_fsid = u64_to_fsid(id);

	return 0;
}

static int nilfs_show_options(struct seq_file *seq, struct dentry *dentry)
{
	struct super_block *sb = dentry->d_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_root *root = NILFS_I(d_inode(dentry))->i_root;

	if (!nilfs_test_opt(nilfs, BARRIER))
		seq_puts(seq, ",nobarrier");
	if (root->cno != NILFS_CPTREE_CURRENT_CNO)
		seq_printf(seq, ",cp=%llu", (unsigned long long)root->cno);
	if (nilfs_test_opt(nilfs, ERRORS_PANIC))
		seq_puts(seq, ",errors=panic");
	if (nilfs_test_opt(nilfs, ERRORS_CONT))
		seq_puts(seq, ",errors=continue");
	if (nilfs_test_opt(nilfs, STRICT_ORDER))
		seq_puts(seq, ",order=strict");
	if (nilfs_test_opt(nilfs, NORECOVERY))
		seq_puts(seq, ",norecovery");
	if (nilfs_test_opt(nilfs, DISCARD))
		seq_puts(seq, ",discard");

	return 0;
}

static const struct super_operations nilfs_sops = {
	.alloc_inode	= nilfs_alloc_inode,
	.free_inode	= nilfs_free_inode,
	.dirty_inode	= nilfs_dirty_inode,
	.evict_inode	= nilfs_evict_inode,
	.put_super	= nilfs_put_super,
	.sync_fs	= nilfs_sync_fs,
	.freeze_fs	= nilfs_freeze,
	.unfreeze_fs	= nilfs_unfreeze,
	.statfs		= nilfs_statfs,
	.show_options	= nilfs_show_options
};

enum {
	Opt_err, Opt_barrier, Opt_snapshot, Opt_order, Opt_norecovery,
	Opt_discard,
};

static const struct constant_table nilfs_param_err[] = {
	{"continue",	NILFS_MOUNT_ERRORS_CONT},
	{"panic",	NILFS_MOUNT_ERRORS_PANIC},
	{"remount-ro",	NILFS_MOUNT_ERRORS_RO},
	{}
};

static const struct fs_parameter_spec nilfs_param_spec[] = {
	fsparam_enum	("errors", Opt_err, nilfs_param_err),
	fsparam_flag_no	("barrier", Opt_barrier),
	fsparam_u64	("cp", Opt_snapshot),
	fsparam_string	("order", Opt_order),
	fsparam_flag	("norecovery", Opt_norecovery),
	fsparam_flag_no	("discard", Opt_discard),
	{}
};

struct nilfs_fs_context {
	unsigned long ns_mount_opt;
	__u64 cno;
};

static int nilfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct nilfs_fs_context *nilfs = fc->fs_private;
	int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, nilfs_param_spec, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_barrier:
		if (result.negated)
			nilfs_clear_opt(nilfs, BARRIER);
		else
			nilfs_set_opt(nilfs, BARRIER);
		break;
	case Opt_order:
		if (strcmp(param->string, "relaxed") == 0)
			/* Ordered data semantics */
			nilfs_clear_opt(nilfs, STRICT_ORDER);
		else if (strcmp(param->string, "strict") == 0)
			/* Strict in-order semantics */
			nilfs_set_opt(nilfs, STRICT_ORDER);
		else
			return -EINVAL;
		break;
	case Opt_err:
		nilfs->ns_mount_opt &= ~NILFS_MOUNT_ERROR_MODE;
		nilfs->ns_mount_opt |= result.uint_32;
		break;
	case Opt_snapshot:
		if (is_remount) {
			struct super_block *sb = fc->root->d_sb;

			nilfs_err(sb,
				  "\"%s\" option is invalid for remount",
				  param->key);
			return -EINVAL;
		}
		if (result.uint_64 == 0) {
			nilfs_err(NULL,
				  "invalid option \"cp=0\": invalid checkpoint number 0");
			return -EINVAL;
		}
		nilfs->cno = result.uint_64;
		break;
	case Opt_norecovery:
		nilfs_set_opt(nilfs, NORECOVERY);
		break;
	case Opt_discard:
		if (result.negated)
			nilfs_clear_opt(nilfs, DISCARD);
		else
			nilfs_set_opt(nilfs, DISCARD);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

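/**
 * nilfs_setup_super - initialize the super block state on mount
 * @sb: super block instance
 * @is_mount: whether this is called at mount time (as opposed to
 *	unfreeze)
 *
 * Bump the mount count, stamp the mount time, and clear the valid flag
 * in the on-disk super blocks, then commit both copies.  The caller
 * must hold nilfs->ns_sem.
 *
 * Return: 0 on success, or a negative error code on failure.
 */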
static int nilfs_setup_super(struct super_block *sb, int is_mount)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int max_mnt_count;
	int mnt_count;

	/* nilfs->ns_sem must be locked by the caller. */
	sbp = nilfs_prepare_super(sb, 0);
	if (!sbp)
		return -EIO;

	if (!is_mount)
		goto skip_mount_setup;

	max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count);
	mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);

	if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
		nilfs_warn(sb, "mounting fs with errors");
#if 0
	} else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) {
		nilfs_warn(sb, "maximal mount count reached");
#endif
	}
	if (!max_mnt_count)
		sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);

	sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
	sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds());

 skip_mount_setup:
	sbp[0]->s_state =
		cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
	/* synchronize sbp[1] with sbp[0] */
	if (sbp[1])
		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
	return nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
}

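/**
 * nilfs_read_super_block - read a super block from a byte position
 * @sb: super block instance
 * @pos: byte position of the on-disk super block on the device
 * @blocksize: block size used to read the device
 * @pbh: buffer in which the buffer head of the read block is stored
 *
 * Return: pointer to the super block data within the read buffer, or
 * NULL if the block could not be read.
 */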
struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
						 u64 pos, int blocksize,
						 struct buffer_head **pbh)
{
	unsigned long long sb_index = pos;
	unsigned long offset;

	offset = do_div(sb_index, blocksize);
	*pbh = sb_bread(sb, sb_index);
	if (!*pbh)
		return NULL;
	return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
}

int nilfs_store_magic(struct super_block *sb,
		      struct nilfs_super_block *sbp)
{
	struct the_nilfs *nilfs = sb->s_fs_info;

	sb->s_magic = le16_to_cpu(sbp->s_magic);

	/* FS independent flags */
#ifdef NILFS_ATIME_DISABLE
	sb->s_flags |= SB_NOATIME;
#endif

	nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
	nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
	nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
	nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);

	return 0;
}

int nilfs_check_feature_compatibility(struct super_block *sb,
				      struct nilfs_super_block *sbp)
{
	__u64 features;

	features = le64_to_cpu(sbp->s_feature_incompat) &
		~NILFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		nilfs_err(sb,
			  "couldn't mount because of unsupported optional features (%llx)",
			  (unsigned long long)features);
		return -EINVAL;
	}
	features = le64_to_cpu(sbp->s_feature_compat_ro) &
		~NILFS_FEATURE_COMPAT_RO_SUPP;
	if (!sb_rdonly(sb) && features) {
		nilfs_err(sb,
			  "couldn't mount RDWR because of unsupported optional features (%llx)",
			  (unsigned long long)features);
		return -EINVAL;
	}
	return 0;
}

static int nilfs_get_root_dentry(struct super_block *sb,
				 struct nilfs_root *root,
				 struct dentry **root_dentry)
{
	struct inode *inode;
	struct dentry *dentry;
	int ret = 0;

	inode = nilfs_iget(sb, root, NILFS_ROOT_INO);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		nilfs_err(sb, "error %d getting root inode", ret);
		goto out;
	}
	if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) {
		iput(inode);
		nilfs_err(sb, "corrupt root inode");
		ret = -EINVAL;
		goto out;
	}

	if (root->cno == NILFS_CPTREE_CURRENT_CNO) {
		dentry = d_find_alias(inode);
		if (!dentry) {
			dentry = d_make_root(inode);
			if (!dentry) {
				ret = -ENOMEM;
				goto failed_dentry;
			}
		} else {
			iput(inode);
		}
	} else {
		dentry = d_obtain_root(inode);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			goto failed_dentry;
		}
	}
	*root_dentry = dentry;
 out:
	return ret;

 failed_dentry:
	nilfs_err(sb, "error %d getting root dentry", ret);
	goto out;
}

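/**
 * nilfs_attach_snapshot - attach a snapshot checkpoint as a mount root
 * @s: super block instance
 * @cno: checkpoint number of the snapshot to be mounted
 * @root_dentry: buffer in which the root dentry is stored
 *
 * Verify that checkpoint @cno is marked as a snapshot, attach it, and
 * obtain its root dentry.
 *
 * Return: 0 on success, or a negative error code on failure.
 */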
static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
				 struct dentry **root_dentry)
{
	struct the_nilfs *nilfs = s->s_fs_info;
	struct nilfs_root *root;
	int ret;

	mutex_lock(&nilfs->ns_snapshot_mount_mutex);

	down_read(&nilfs->ns_segctor_sem);
	ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
	up_read(&nilfs->ns_segctor_sem);
	if (ret < 0) {
		ret = (ret == -ENOENT) ? -EINVAL : ret;
		goto out;
	} else if (!ret) {
		nilfs_err(s,
			  "The specified checkpoint is not a snapshot (checkpoint number=%llu)",
			  (unsigned long long)cno);
		ret = -EINVAL;
		goto out;
	}

	ret = nilfs_attach_checkpoint(s, cno, false, &root);
	if (ret) {
		nilfs_err(s,
			  "error %d while loading snapshot (checkpoint number=%llu)",
			  ret, (unsigned long long)cno);
		goto out;
	}
	ret = nilfs_get_root_dentry(s, root, root_dentry);
	nilfs_put_root(root);
 out:
	mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
	return ret;
}

/**
 * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint
 * @root_dentry: root dentry of the tree to be shrunk
 *
 * Return: true if the tree was in-use, false otherwise.
 */
static bool nilfs_tree_is_busy(struct dentry *root_dentry)
{
	shrink_dcache_parent(root_dentry);
	return d_count(root_dentry) > 1;
}

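/**
 * nilfs_checkpoint_is_mounted - check if a checkpoint is in use
 * @sb: super block instance
 * @cno: checkpoint number to be checked
 *
 * Return: true if checkpoint @cno is one of the recent checkpoints or
 * is attached to an in-use dentry tree, false otherwise.
 */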
int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_root *root;
	struct inode *inode;
	struct dentry *dentry;
	int ret;

	if (cno > nilfs->ns_cno)
		return false;

	if (cno >= nilfs_last_cno(nilfs))
		return true; /* protect recent checkpoints */

	ret = false;
	root = nilfs_lookup_root(nilfs, cno);
	if (root) {
		inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO);
		if (inode) {
			dentry = d_find_alias(inode);
			if (dentry) {
				ret = nilfs_tree_is_busy(dentry);
				dput(dentry);
			}
			iput(inode);
		}
		nilfs_put_root(root);
	}
	return ret;
}

/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
 * @fc: filesystem context
 *
 * This function is called with nilfs->ns_mount_mutex held exclusively,
 * so the recovery process is protected from other simultaneous mounts.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int
nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct the_nilfs *nilfs;
	struct nilfs_root *fsroot;
	struct nilfs_fs_context *ctx = fc->fs_private;
	__u64 cno;
	int err;

	nilfs = alloc_nilfs(sb);
	if (!nilfs)
		return -ENOMEM;

	sb->s_fs_info = nilfs;

	err = init_nilfs(nilfs, sb);
	if (err)
		goto failed_nilfs;

	/* Copy in parsed mount options */
	nilfs->ns_mount_opt = ctx->ns_mount_opt;

	sb->s_op = &nilfs_sops;
	sb->s_export_op = &nilfs_export_ops;
	sb->s_root = NULL;
	sb->s_time_gran = 1;
	sb->s_max_links = NILFS_LINK_MAX;

	sb->s_bdi = bdi_get(sb->s_bdev->bd_disk->bdi);

	err = load_nilfs(nilfs, sb);
	if (err)
		goto failed_nilfs;

	super_set_uuid(sb, nilfs->ns_sbp[0]->s_uuid,
		       sizeof(nilfs->ns_sbp[0]->s_uuid));
	super_set_sysfs_name_bdev(sb);

	cno = nilfs_last_cno(nilfs);
	err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
	if (err) {
		nilfs_err(sb,
			  "error %d while loading last checkpoint (checkpoint number=%llu)",
			  err, (unsigned long long)cno);
		goto failed_unload;
	}

	if (!sb_rdonly(sb)) {
		err = nilfs_attach_log_writer(sb, fsroot);
		if (err)
			goto failed_checkpoint;
	}

	err = nilfs_get_root_dentry(sb, fsroot, &sb->s_root);
	if (err)
		goto failed_segctor;

	nilfs_put_root(fsroot);

	if (!sb_rdonly(sb)) {
		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sb, true);
		up_write(&nilfs->ns_sem);
	}

	return 0;

 failed_segctor:
	nilfs_detach_log_writer(sb);

 failed_checkpoint:
	nilfs_put_root(fsroot);

 failed_unload:
	nilfs_sysfs_delete_device_group(nilfs);
	iput(nilfs->ns_sufile);
	iput(nilfs->ns_cpfile);
	iput(nilfs->ns_dat);

 failed_nilfs:
	destroy_nilfs(nilfs);
	return err;
}

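/**
 * nilfs_reconfigure - modify the mount state of a mounted filesystem
 * @fc: filesystem context holding the new mount options
 *
 * Handle remount requests: switch between read-only and read/write
 * modes, re-attaching the log writer when the filesystem is remounted
 * writable, and copy over the newly parsed mount options.
 *
 * Return: 0 on success, or a negative error code on failure.
 */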
static int nilfs_reconfigure(struct fs_context *fc)
{
	struct nilfs_fs_context *ctx = fc->fs_private;
	struct super_block *sb = fc->root->d_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	int err;

	sync_filesystem(sb);

	err = -EINVAL;

	if (!nilfs_valid_fs(nilfs)) {
		nilfs_warn(sb,
			   "couldn't remount because the filesystem is in an incomplete recovery state");
		goto ignore_opts;
	}
	if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
		goto out;
	if (fc->sb_flags & SB_RDONLY) {
		sb->s_flags |= SB_RDONLY;

		/*
		 * Remounting a valid RW partition RDONLY, so set
		 * the RDONLY flag and then mark the partition as valid again.
		 */
		down_write(&nilfs->ns_sem);
		nilfs_cleanup_super(sb);
		up_write(&nilfs->ns_sem);
	} else {
		__u64 features;
		struct nilfs_root *root;

		/*
		 * Mounting a RDONLY partition read-write, so reread and
		 * store the current valid flag.  (It may have been changed
		 * by fsck since we originally mounted the partition.)
		 */
		down_read(&nilfs->ns_sem);
		features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
			~NILFS_FEATURE_COMPAT_RO_SUPP;
		up_read(&nilfs->ns_sem);
		if (features) {
			nilfs_warn(sb,
				   "couldn't remount RDWR because of unsupported optional features (%llx)",
				   (unsigned long long)features);
			err = -EROFS;
			goto ignore_opts;
		}

		sb->s_flags &= ~SB_RDONLY;

		root = NILFS_I(d_inode(sb->s_root))->i_root;
		err = nilfs_attach_log_writer(sb, root);
		if (err) {
			sb->s_flags |= SB_RDONLY;
			goto ignore_opts;
		}

		down_write(&nilfs->ns_sem);
		nilfs_setup_super(sb, true);
		up_write(&nilfs->ns_sem);
	}
 out:
	sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
	/* Copy over parsed remount options */
	nilfs->ns_mount_opt = ctx->ns_mount_opt;

	return 0;

 ignore_opts:
	return err;
}

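/**
 * nilfs_get_tree - get or create the filesystem root for a mount request
 * @fc: filesystem context
 *
 * Look up the backing device, find or allocate the super block for it,
 * fill it in on first mount, and set fc->root to either the current
 * tree or to the snapshot selected with the "cp" option.
 *
 * Return: 0 on success, or a negative error code on failure.
 */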
static int
nilfs_get_tree(struct fs_context *fc)
{
	struct nilfs_fs_context *ctx = fc->fs_private;
	struct super_block *s;
	dev_t dev;
	int err;

	if (ctx->cno && !(fc->sb_flags & SB_RDONLY)) {
		nilfs_err(NULL,
			  "invalid option \"cp=%llu\": read-only option is not specified",
			  ctx->cno);
		return -EINVAL;
	}

	err = lookup_bdev(fc->source, &dev);
	if (err)
		return err;

	s = sget_dev(fc, dev);
	if (IS_ERR(s))
		return PTR_ERR(s);

	if (!s->s_root) {
		err = setup_bdev_super(s, fc->sb_flags, fc);
		if (!err)
			err = nilfs_fill_super(s, fc);
		if (err)
			goto failed_super;

		s->s_flags |= SB_ACTIVE;
	} else if (!ctx->cno) {
		if (nilfs_tree_is_busy(s->s_root)) {
			if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
				nilfs_err(s,
					  "the device already has a %s mount.",
					  sb_rdonly(s) ? "read-only" : "read/write");
				err = -EBUSY;
				goto failed_super;
			}
		} else {
			/*
			 * Try reconfigure to setup mount states if the current
			 * tree is not mounted and only snapshots use this sb.
			 *
			 * Since nilfs_reconfigure() requires fc->root to be
			 * set, set it first and release it on failure.
			 */
			fc->root = dget(s->s_root);
			err = nilfs_reconfigure(fc);
			if (err) {
				dput(fc->root);
				fc->root = NULL;  /* prevent double release */
				goto failed_super;
			}
			return 0;
		}
	}

	if (ctx->cno) {
		struct dentry *root_dentry;

		err = nilfs_attach_snapshot(s, ctx->cno, &root_dentry);
		if (err)
			goto failed_super;
		fc->root = root_dentry;
		return 0;
	}

	fc->root = dget(s->s_root);
	return 0;

 failed_super:
	deactivate_locked_super(s);
	return err;
}

static void nilfs_free_fc(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations nilfs_context_ops = {
	.parse_param	= nilfs_parse_param,
	.get_tree	= nilfs_get_tree,
	.reconfigure	= nilfs_reconfigure,
	.free		= nilfs_free_fc,
};

static int nilfs_init_fs_context(struct fs_context *fc)
{
	struct nilfs_fs_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ns_mount_opt = NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
	fc->fs_private = ctx;
	fc->ops = &nilfs_context_ops;

	return 0;
}

struct file_system_type nilfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "nilfs2",
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
	.init_fs_context = nilfs_init_fs_context,
	.parameters	= nilfs_param_spec,
};
MODULE_ALIAS_FS("nilfs2");

static void nilfs_inode_init_once(void *obj)
{
	struct nilfs_inode_info *ii = obj;

	INIT_LIST_HEAD(&ii->i_dirty);
#ifdef CONFIG_NILFS_XATTR
	init_rwsem(&ii->xattr_sem);
#endif
	inode_init_once(&ii->vfs_inode);
}

static void nilfs_segbuf_init_once(void *obj)
{
	memset(obj, 0, sizeof(struct nilfs_segment_buffer));
}

static void nilfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(nilfs_inode_cachep);
	kmem_cache_destroy(nilfs_transaction_cachep);
	kmem_cache_destroy(nilfs_segbuf_cachep);
	kmem_cache_destroy(nilfs_btree_path_cache);
}

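/**
 * nilfs_init_cachep - create the slab caches used by the module
 *
 * Create the caches for inodes, transactions, segment buffers, and
 * btree paths.  On failure, any caches that were already created are
 * destroyed again.
 *
 * Return: 0 on success, or %-ENOMEM on failure.
 */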
static int __init nilfs_init_cachep(void)
{
	nilfs_inode_cachep = kmem_cache_create("nilfs2_inode_cache",
			sizeof(struct nilfs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT,
			nilfs_inode_init_once);
	if (!nilfs_inode_cachep)
		goto fail;

	nilfs_transaction_cachep = kmem_cache_create("nilfs2_transaction_cache",
			sizeof(struct nilfs_transaction_info), 0,
			SLAB_RECLAIM_ACCOUNT, NULL);
	if (!nilfs_transaction_cachep)
		goto fail;

	nilfs_segbuf_cachep = kmem_cache_create("nilfs2_segbuf_cache",
			sizeof(struct nilfs_segment_buffer), 0,
			SLAB_RECLAIM_ACCOUNT, nilfs_segbuf_init_once);
	if (!nilfs_segbuf_cachep)
		goto fail;

	nilfs_btree_path_cache = kmem_cache_create("nilfs2_btree_path_cache",
			sizeof(struct nilfs_btree_path) * NILFS_BTREE_LEVEL_MAX,
			0, 0, NULL);
	if (!nilfs_btree_path_cache)
		goto fail;

	return 0;

fail:
	nilfs_destroy_cachep();
	return -ENOMEM;
}

static int __init init_nilfs_fs(void)
{
	int err;

	err = nilfs_init_cachep();
	if (err)
		goto fail;

	err = nilfs_sysfs_init();
	if (err)
		goto free_cachep;

	err = register_filesystem(&nilfs_fs_type);
	if (err)
		goto deinit_sysfs_entry;

	printk(KERN_INFO "NILFS version 2 loaded\n");
	return 0;

deinit_sysfs_entry:
	nilfs_sysfs_exit();
free_cachep:
	nilfs_destroy_cachep();
fail:
	return err;
}

static void __exit exit_nilfs_fs(void)
{
	nilfs_destroy_cachep();
	nilfs_sysfs_exit();
	unregister_filesystem(&nilfs_fs_type);
}

module_init(init_nilfs_fs)
module_exit(exit_nilfs_fs)