Lines Matching +full:additional +full:- +full:devs
1 /* SPDX-License-Identifier: GPL-2.0 */
21 * a cache set at runtime - while it's mounted and in use. Detaching implicitly
26 * There are also flash-only volumes - this is the reason for the distinction
30 * provisioning with very little additional code.
43 * unit of allocation; they're typically around 1MB - anywhere from 128k to 2M+
48 * packed on disk (in a linked list of buckets - aside from the superblock, all
62 * Bcache is entirely COW - we never write twice to a bucket, even buckets that
69 * At a high level, the btree is just an index of key -> ptr tuples.
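To make that concrete, here is a minimal sketch of what a key -> ptr tuple carries. This is not the real layout (the actual structures are struct bkey and struct bch_extent_ptr); the names below are invented for illustration:

	/* Hedged sketch only - not the real bkey/bch_extent_ptr layout. */
	struct example_key {
		u64	inode;		/* which file */
		u64	offset;		/* position within the file, in sectors */
	};

	struct example_ptr {
		u32	dev;		/* device index into c->devs[] */
		u64	offset;		/* sector offset on that device */
		u8	gen;		/* bucket generation when the pointer was created */
	};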
77 * extent within the inode - not the starting offset; this makes lookups
83 * Index lookups are not fully abstracted - cache lookups in particular are
90 * BTREE_INSERT will just take a list of keys and insert them into the btree -
101 * gen) or by inserting a key with 0 pointers - which will overwrite anything
111 * free smaller than a bucket - so, that's how big our btree nodes are.
114 * - no less than 1/4th - but a bucket still contains no more than a single
118 * Anyways, btree nodes are big - big enough to be inefficient with a textbook
125 * We maintain this log structure in memory - keeping 1MB of keys sorted would
139 * We can't just invalidate any bucket - it might contain dirty data or
165 * a few keys each) - highly inefficient in terms of amount of metadata writes,
176 * - updates to non-leaf nodes just happen synchronously (see btree_split()).
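A minimal sketch of the log-structure idea these comments describe, reusing example_key from the sketch above (names invented; the real code keeps multiple sorted "bsets" per node):

	/* Hedged illustration: inserts append to a small tail in O(1) and are
	 * journaled separately; lookups binary-search the sorted part and scan
	 * the tail; compaction amortizes the re-sort. */
	#define EX_TAIL_MAX	64

	struct example_node {
		struct example_key	*sorted;	/* older keys, kept sorted */
		unsigned		nr_sorted;
		struct example_key	tail[EX_TAIL_MAX]; /* recent appends */
		unsigned		nr_tail;
	};

	static void example_compact(struct example_node *n);	/* sort + merge, elided */

	static void example_insert(struct example_node *n, struct example_key k)
	{
		n->tail[n->nr_tail++] = k;	/* O(1) append */
		if (n->nr_tail == EX_TAIL_MAX)
			example_compact(n);	/* amortized re-sort */
	}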
186 #include <linux/backing-dev-defs.h>
194 #include <linux/percpu-refcount.h>
195 #include <linux/percpu-rwsem.h>
214 #include "sb-errors_types.h"
229 #define count_event(_c, _name) this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])
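Usage is a single statement; the event name is token-pasted onto BCH_COUNTER_. Assuming a counter named bucket_alloc exists in the counter enum:

	count_event(c, bucket_alloc);
	/* expands to: this_cpu_inc((c)->counters[BCH_COUNTER_bucket_alloc]) */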
250 #define bch2_log_msg(_c, fmt) "bcachefs (%s): " fmt, ((_c)->name)
251 #define bch2_fmt_dev(_ca, fmt) "bcachefs (%s): " fmt "\n", ((_ca)->name)
252 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "bcachefs (%s sector %llu): " fmt "\n", ((_ca)->name), (_offset)
253 #define bch2_fmt_inum(_c, _inum, fmt) "bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
255 "bcachefs (%s inum %llu offset %llu): " fmt "\n", ((_c)->name), (_inum), (_offset)
260 #define bch2_fmt_dev(_ca, fmt) "%s: " fmt "\n", ((_ca)->name)
261 #define bch2_fmt_dev_offset(_ca, _offset, fmt) "%s sector %llu: " fmt "\n", ((_ca)->name), (_offset)
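These macros expand to a printf-style format string plus its arguments, so they drop straight into printk-style calls. A hypothetical call site:

	printk(KERN_ERR bch2_fmt_dev_offset(ca, sector, "checksum error"));
	/* verbose build: "bcachefs (%s sector %llu): checksum error\n", ca->name, sector
	 * quiet build:   "%s sector %llu: checksum error\n",            ca->name, sector */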
279 struct bch_dev *: ((struct bch_dev *) (_c))->fs, \
351 if ((c)->opts.verbose) \
478 #include "sb-members_types.h"
487 #define BTREE_RESERVE_MAX (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
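As a worked value: BTREE_MAX_DEPTH is 4 in current sources (treat that as an assumption here), so the reserve comes to 4 + (4 - 1) = 7 nodes.

	/* assuming BTREE_MAX_DEPTH == 4: BTREE_RESERVE_MAX == 4 + 3 == 7 nodes */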
523 * Committed by bch2_write_super() -> bch_fs_mi_update()
541 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
542 * gc_gens_lock, for device resize - holding any is sufficient for
667 * end of the buffer - from @nr to @size - the empty space is at @gap.
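That layout is a gap buffer; a minimal sketch of the index math it implies (helper name invented):

	/* nr valid entries in an array of @size slots; the (size - nr) free slots
	 * sit at @gap, so entries occupy [0, gap) and [gap + size - nr, size). */
	static inline unsigned example_idx_to_pos(unsigned idx,
						  unsigned gap, unsigned size, unsigned nr)
	{
		return idx < gap ? idx : idx + (size - nr);
	}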
731 /* Counts outstanding writes, for clean transition to read-only */
738 * Analogous to c->writes, for asynchronous ops that don't necessarily
739 * need fs to be read-write
746 struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX]; member
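Readers dereference this array under RCU; a minimal sketch of the lookup pattern (helper invented, and a real caller must also take a device reference before dropping the RCU read lock):

	static struct bch_dev *example_dev_rcu(struct bch_fs *c, unsigned idx)
	{
		rcu_read_lock();
		struct bch_dev *ca = rcu_dereference(c->devs[idx]);
		/* sketch only: take a ref on ca here before unlocking */
		rcu_read_unlock();
		return ca;
	}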
817 * Cache of allocated btree nodes - if we allocate a btree node and
820 * when allocating btree reserves fail halfway through) - instead, we
870 * draining, such as read-only transition.
883 * increment capacity_gen - this invalidates outstanding reservations
939 * Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
1031 /* VFS IO PATH - fs-io.c */
1045 * "Has this fsck pass?" - i.e. should this type of error be an
1046 * emergency read-only
1070 * on the stack - have to dynamically allocate them
1109 atomic_long_inc(&c->writes[ref]); in bch2_write_ref_get()
1111 percpu_ref_get(&c->writes); in bch2_write_ref_get()
1118 return !test_bit(BCH_FS_going_ro, &c->flags) && in __bch2_write_ref_tryget()
1119 atomic_long_inc_not_zero(&c->writes[ref]); in __bch2_write_ref_tryget()
1121 return percpu_ref_tryget(&c->writes); in __bch2_write_ref_tryget()
1128 return !test_bit(BCH_FS_going_ro, &c->flags) && in bch2_write_ref_tryget()
1129 atomic_long_inc_not_zero(&c->writes[ref]); in bch2_write_ref_tryget()
1131 return percpu_ref_tryget_live(&c->writes); in bch2_write_ref_tryget()
1138 long v = atomic_long_dec_return(&c->writes[ref]); in bch2_write_ref_put()
1144 if (atomic_long_read(&c->writes[i])) in bch2_write_ref_put()
1147 set_bit(BCH_FS_write_disable_complete, &c->flags); in bch2_write_ref_put()
1150 percpu_ref_put(&c->writes); in bch2_write_ref_put()
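Putting the three helpers together, a typical write-path guard looks like this (a sketch; BCH_WRITE_REF_trans is assumed to be one of the enum bch_write_ref values):

	static int example_write_op(struct bch_fs *c)
	{
		if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))
			return -EROFS;		/* fs is (going) read-only */

		int ret = 0;
		/* ... do the write-side work ... */

		bch2_write_ref_put(c, BCH_WRITE_REF_trans);
		return ret;
	}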
1156 if (test_bit(BCH_FS_stopping, &c->flags)) in bch2_ro_ref_tryget()
1159 return refcount_inc_not_zero(&c->ro_ref); in bch2_ro_ref_tryget()
1164 if (refcount_dec_and_test(&c->ro_ref)) in bch2_ro_ref_put()
1165 wake_up(&c->ro_ref_wait); in bch2_ro_ref_put()
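The matching consumer and teardown patterns, as a sketch (the wait_event pairing is an assumption about how the wakeup above is consumed):

	if (bch2_ro_ref_tryget(c)) {
		/* c stays allocated while we hold the ref, even past read-only */
		bch2_ro_ref_put(c);
	}

	/* teardown side, sketch: wait for outstanding ro refs to drain */
	wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));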
1171 if (c->vfs_sb) in bch2_set_ra_pages()
1172 c->vfs_sb->s_bdi->ra_pages = ra_pages; in bch2_set_ra_pages()
1178 return ca->mi.bucket_size << 9; in bucket_bytes()
1183 return c->opts.block_size; in block_bytes()
1188 return c->opts.block_size >> 9; in block_sectors()
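The << 9 / >> 9 shifts convert between 512-byte sectors and bytes; e.g. with an assumed bucket_size of 2048 sectors:

	/* bucket_bytes(): 2048 << 9 == 2048 * 512 == 1048576 bytes (1 MiB) */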
1193 return c->btree_key_cache_btrees & (1U << btree); in btree_id_cached()
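btree_key_cache_btrees is a bitmask indexed by btree ID; e.g. (BTREE_ID_alloc is a real btree ID, the usage here is illustrative):

	c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
	BUG_ON(!btree_id_cached(c, BTREE_ID_alloc));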
1202 time += c->sb.time_base_lo; in bch2_time_to_timespec()
1204 sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem); in bch2_time_to_timespec()
1206 set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit); in bch2_time_to_timespec()
1213 return (ts.tv_sec * c->sb.time_units_per_sec + in timespec_to_bch2_time()
1214 (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo; in timespec_to_bch2_time()
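A worked example of the conversion, assuming nanosecond time units (time_units_per_sec == 1000000000, nsec_per_time_unit == 1) and time_base_lo == 0:

	/* bch2_time_to_timespec(c, 1500000000):
	 *   sec = 1, rem = 500000000  ->  { .tv_sec = 1, .tv_nsec = 500000000 }
	 * timespec_to_bch2_time() inverts this exactly (modulo unit truncation). */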
1227 return max(1ULL, (u64) atomic64_read(&c->io_clock[rw].now) & LRU_TIME_MAX); in bch2_current_io_time()
1232 struct stdio_redirect *stdio = c->stdio; in bch2_fs_stdio_redirect()
1234 if (c->stdio_filter && c->stdio_filter != current) in bch2_fs_stdio_redirect()
1241 return min(c->opts.metadata_replicas, in metadata_replicas_required()
1242 c->opts.metadata_replicas_required); in metadata_replicas_required()
1247 return min(c->opts.data_replicas, in data_replicas_required()
1248 c->opts.data_replicas_required); in data_replicas_required()
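A worked example of the clamp: with opts.data_replicas == 3 and opts.data_replicas_required == 2, data_replicas_required() returns 2; the min() also keeps a misconfigured required value from ever exceeding the replica target.

	/* min(3, 2) == 2: at least 2 of the 3 target replicas must be written */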