/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1))
 *		cpu_relax();
 * but, it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->buckets_gc,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->state_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	struct bucket_array *buckets = gc_bucket_array(ca);

	if (b - buckets->first_bucket >= buckets->nbuckets_minus_first)
		return NULL;
	return buckets->b + b;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     !ca->fs ||
				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
				     lockdep_is_held(&ca->fs->state_lock) ||
				     lockdep_is_held(&ca->bucket_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

static inline u8 bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	u8 gen = *bucket_gen(ca, b);
	rcu_read_unlock();
	return gen;
}
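/*
 * Editorial note (illustrative, not from the original source): gc_bucket()
 * and bucket_gen() bounds check with a single unsigned compare - when
 * b < first_bucket, b - first_bucket wraps to a huge value, so one compare
 * against nbuckets_minus_first rejects buckets below first_bucket as well
 * as those past nbuckets.
 *
 * bucket_gen() can return NULL, and bucket_gen_get() dereferences without
 * checking, so bucket_gen_get() must only be called with a bucket index
 * known to be in range. A hypothetical caller using the NULL-checking form:
 *
 *	rcu_read_lock();
 *	u8 *gen = bucket_gen(ca, b);
 *	if (gen)
 *		stale = gen_after(*gen, ptr->gen);
 *	rcu_read_unlock();
 *
 * (this is the pattern dev_ptr_stale_rcu() below follows)
 */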
static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	u8 *gen = bucket_gen(ca, PTR_BUCKET_NR(ca, ptr));
	if (!gen)
		return -1;
	return gen_after(*gen, ptr->gen);
}

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();

	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}
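/*
 * Editorial example (hypothetical numbers, not from the original source):
 * because the watermark cases above fall through, reserves accumulate as the
 * watermark gets less aggressive. With nbuckets = 65536 and
 * nr_btree_reserve = 512:
 *
 *	BCH_WATERMARK_btree:	512
 *	BCH_WATERMARK_copygc:	512 + 512		= 1024
 *	BCH_WATERMARK_normal:	1024 + (65536 >> 6)	= 2048
 *	BCH_WATERMARK_stripe:	2048 + (65536 >> 6)	= 3072
 *
 * so allocations at lower-priority watermarks run out of buckets sooner,
 * keeping a cushion for btree and copygc allocations.
 */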
static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s,
			     enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)	\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})
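/*
 * Editorial sketch (the wrapper and __trigger_foo() below are hypothetical):
 * a trigger that handles the overwritten and inserted keys the same way can
 * be wired up as
 *
 *	static int trigger_foo(struct btree_trans *trans, enum btree_id btree,
 *			       unsigned level, struct bkey_s_c old,
 *			       struct bkey_s new,
 *			       enum btree_iter_update_trigger_flags flags)
 *	{
 *		return trigger_run_overwrite_then_insert(__trigger_foo, trans,
 *					btree, level, old, new, flags);
 *	}
 *
 * __trigger_foo() is then called first on the old key with
 * BTREE_TRIGGER_insert cleared, then on the new key with
 * BTREE_TRIGGER_overwrite cleared, skipping either call when the
 * corresponding key is absent (type 0).
 */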
void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
			   enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				  enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	u64 b_offset	= bucket_to_sector(ca, b);
	u64 b_end	= bucket_to_sector(ca, b + 1);
	unsigned i;

	if (!b)
		return true;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);
		u64 end = offset + (1 << layout->sb_max_size_bits);

		if (!(offset >= b_end || end <= b_offset))
			return true;
	}

	return false;
}

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

#define BCH_DISK_RESERVATION_NOFAIL	(1 << 0)

int __bch2_disk_reservation_add(struct bch_fs *,
				struct disk_reservation *,
				u64, int);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, int flags)
{
#ifdef __KERNEL__
	u64 old, new;

	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR	6

static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */