Lines Matching +full:additional +full:- +full:devs

1 /* SPDX-License-Identifier: GPL-2.0 */
21 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
26 * There are also flash-only volumes - this is the reason for the distinction
30 * provisioning with very little additional code.
43 * unit of allocation; they're typically around 1 MB - anywhere from 128 kB to 2 MB+
48 * packed on disk (in a linked list of buckets - aside from the superblock, all
62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
69 * At a high level, the btree is just an index of key -> ptr tuples.
77 * extent within the inode - not the starting offset; this makes lookups
83 * Index lookups are not fully abstracted - cache lookups in particular are
90 * BTREE_INSERT will just take a list of keys and insert them into the btree -
101 * gen) or by inserting a key with 0 pointers - which will overwrite anything
111 * free smaller than a bucket - so, that's how big our btree nodes are.
114 * - no less than 1/4th - but a bucket still contains no more than a single
118 * Anyway, btree nodes are big - big enough to be inefficient with a textbook
125 * We maintain this log structure in memory - keeping 1 MB of keys sorted would
139 * We can't just invalidate any bucket - it might contain dirty data or
165 * a few keys each) - highly inefficient in terms of amount of metadata writes,
176 * - updates to non-leaf nodes just happen synchronously (see btree_split()).
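
The end-offset indexing described above is what makes extent lookups a
single search: the first key in the same inode whose end offset is past
the query offset is the candidate extent. A minimal standalone sketch of
that idea, using a flat sorted table instead of bcachefs's actual btree
(all names here are illustrative, not bcachefs API):

	#include <stddef.h>

	/* Toy extent keyed by (inode, end offset), as in the scheme above */
	struct toy_extent {
		unsigned long long	inode;
		unsigned long long	end;	/* offset of the END of the extent */
		unsigned		sectors;
	};

	/*
	 * Because keys hold the end offset, the first key with end > offset
	 * (within the same inode) covers the query offset - or is the next
	 * extent after a hole - with no backtracking needed.
	 */
	static const struct toy_extent *
	toy_lookup(const struct toy_extent *tbl, size_t n,
		   unsigned long long inode, unsigned long long offset)
	{
		for (size_t i = 0; i < n; i++)
			if (tbl[i].inode == inode && tbl[i].end > offset)
				return &tbl[i];
		return NULL;
	}
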
186 #include <linux/backing-dev-defs.h>
194 #include <linux/percpu-refcount.h>
195 #include <linux/percpu-rwsem.h>
215 #include "sb-errors_types.h"
230 #define count_event(_c, _name) this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])
251 #define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
252 #define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
253 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
254 #define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
256 "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
261 #define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
262 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
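
These format macros expand to a printk-style format string followed by
its leading arguments, so an invocation supplies the entire argument
list of the print call. A usage sketch (ca and sector are assumed to be
in scope):

	/*
	 * bch2_fmt_dev_offset(ca, sector, "checksum error") expands to
	 *   "bcachefs (%s sector %llu): checksum error\n", ((ca)->name), (sector)
	 * so the macro invocation can be passed straight to printk():
	 */
	printk(KERN_ERR bch2_fmt_dev_offset(ca, sector, "checksum error"));
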
280 struct bch_dev *: ((struct bch_dev *) (_c))->fs, \
354 if ((c)->opts.verbose) \
360 if ((c)->opts.verbose) \
487 #include "sb-members_types.h"
496 #define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
532 * Committed by bch2_write_super() -> bch_fs_mi_update()
550 * Per-bucket arrays are protected by either rcu_read_lock or
721 /* Counts outstanding writes, for clean transition to read-only */
734 * Analogous to c->writes, for asynchronous ops that don't necessarily
735 * need fs to be read-write
742 struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX]; member
815 * Cache of allocated btree nodes - if we allocate a btree node and
818 * when allocating btree reserves fail halfway through) - instead, we
869 * draining, such as read-only transition.
882 * increment capacity_gen - this invalidates outstanding reservations
938 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
1030 /* VFS IO PATH - fs-io.c */
1044 * "Has this fsck pass?" - i.e. should this type of error be an
1045 * emergency read-only
1068 * on the stack - have to dynamically allocate them
1105 atomic_long_inc(&c->writes[ref]); in bch2_write_ref_get()
1107 percpu_ref_get(&c->writes); in bch2_write_ref_get()
1114 return !test_bit(BCH_FS_going_ro, &c->flags) && in __bch2_write_ref_tryget()
1115 atomic_long_inc_not_zero(&c->writes[ref]); in __bch2_write_ref_tryget()
1117 return percpu_ref_tryget(&c->writes); in __bch2_write_ref_tryget()
1124 return !test_bit(BCH_FS_going_ro, &c->flags) && in bch2_write_ref_tryget()
1125 atomic_long_inc_not_zero(&c->writes[ref]); in bch2_write_ref_tryget()
1127 return percpu_ref_tryget_live(&c->writes); in bch2_write_ref_tryget()
1134 long v = atomic_long_dec_return(&c->writes[ref]); in bch2_write_ref_put()
1140 if (atomic_long_read(&c->writes[i])) in bch2_write_ref_put()
1143 set_bit(BCH_FS_write_disable_complete, &c->flags); in bch2_write_ref_put()
1146 percpu_ref_put(&c->writes); in bch2_write_ref_put()
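
The intended pairing is tryget before starting a write, put when done;
once BCH_FS_going_ro is set no new refs can be taken, and the final put
sets BCH_FS_write_disable_complete so the read-only transition can
finish. A sketch of a caller, assuming BCH_WRITE_REF_trans is one of the
enum bch_write_ref values and using plain -EROFS in place of the real
error code:

	static int example_write_op(struct bch_fs *c)
	{
		if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))
			return -EROFS;	/* going read-only: refuse new writes */

		/* ... do work that writes to the filesystem ... */

		bch2_write_ref_put(c, BCH_WRITE_REF_trans);
		return 0;
	}
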
1152 if (test_bit(BCH_FS_stopping, &c->flags)) in bch2_ro_ref_tryget()
1155 return refcount_inc_not_zero(&c->ro_ref); in bch2_ro_ref_tryget()
1160 if (refcount_dec_and_test(&c->ro_ref)) in bch2_ro_ref_put()
1161 wake_up(&c->ro_ref_wait); in bch2_ro_ref_put()
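
ro_ref follows the same drain pattern. Based on the wakeup above (an
assumption, not the verbatim shutdown path), the stopping side would
drop its initial reference and sleep on ro_ref_wait until readers drain:

	bch2_ro_ref_put(c);		/* drop the initial reference */
	wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));
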
1167 if (c->vfs_sb) in bch2_set_ra_pages()
1168 c->vfs_sb->s_bdi->ra_pages = ra_pages; in bch2_set_ra_pages()
1174 return ca->mi.bucket_size << 9; in bucket_bytes()
1179 return c->opts.block_size; in block_bytes()
1184 return c->opts.block_size >> 9; in block_sectors()
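
Sizes here are kept in 512-byte sectors, so << 9 and >> 9 convert
between sectors and bytes. A worked example with an assumed bucket size:

	/* assume ca->mi.bucket_size == 2048 (sectors)			*/
	/* bucket_bytes(ca) == 2048 << 9 == 1048576 bytes == 1 MiB	*/
	/* block_sectors(c) is the inverse of block_bytes(c): bytes >> 9 */
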
1189 return c->btree_key_cache_btrees & (1U << btree); in btree_id_cached()
1198 time += c->sb.time_base_lo; in bch2_time_to_timespec()
1200 sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem); in bch2_time_to_timespec()
1202 set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit); in bch2_time_to_timespec()
1209 return (ts.tv_sec * c->sb.time_units_per_sec + in timespec_to_bch2_time()
1210 (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo; in timespec_to_bch2_time()
1223 return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX); in bch2_current_io_time()
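
The conversion rebases the on-disk value by time_base_lo, then splits it
into whole seconds and sub-second time units. A worked example assuming
nanosecond granularity (time_units_per_sec == 1000000000,
nsec_per_time_unit == 1) and time_base_lo == 0:

	/*
	 * bch2_time_to_timespec(c, 1700000000123456789):
	 *   time += 0			(time_base_lo)
	 *   sec   = 1700000000		(quotient by time_units_per_sec)
	 *   rem   =  123456789
	 *   -> { .tv_sec = 1700000000, .tv_nsec = 123456789 }
	 *
	 * timespec_to_bch2_time() reverses this, so the pair round-trips
	 * exactly at this granularity.
	 */
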
1228 struct stdio_redirect *stdio = c->stdio; in bch2_fs_stdio_redirect()
1230 if (c->stdio_filter && c->stdio_filter != current) in bch2_fs_stdio_redirect()
1237 return min(c->opts.metadata_replicas, in metadata_replicas_required()
1238 c->opts.metadata_replicas_required); in metadata_replicas_required()
1243 return min(c->opts.data_replicas, in data_replicas_required()
1244 c->opts.data_replicas_required); in data_replicas_required()
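
The min() caps the "required" count at the number of replicas actually
being written. For example, assuming opts.metadata_replicas == 3 and
opts.metadata_replicas_required == 2:

	/*
	 * metadata_replicas_required(c) == min(3, 2) == 2: metadata writes
	 * target 3 devices, but only 2 copies must land for the write to
	 * succeed (e.g. with one device unavailable).
	 */
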