/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Code for manipulating bucket marks for garbage collection.
 *
 * Copyright 2014 Datera, Inc.
 */

#ifndef _BUCKETS_H
#define _BUCKETS_H

#include "buckets_types.h"
#include "extents.h"
#include "sb-members.h"

static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
	return div_u64(s, ca->mi.bucket_size);
}

static inline sector_t bucket_to_sector(const struct bch_dev *ca, size_t b)
{
	return ((sector_t) b) * ca->mi.bucket_size;
}

static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
{
	u32 remainder;

	div_u64_rem(s, ca->mi.bucket_size, &remainder);
	return remainder;
}

static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
	return div_u64_rem(s, ca->mi.bucket_size, offset);
}

#define for_each_bucket(_b, _buckets)				\
	for (_b = (_buckets)->b + (_buckets)->first_bucket;	\
	     _b < (_buckets)->b + (_buckets)->nbuckets; _b++)

/*
 * Ugly hack alert:
 *
 * We need to cram a spinlock in a single byte, because that's what we have left
 * in struct bucket, and we care about the size of these - during fsck, we need
 * in memory state for every single bucket on every device.
 *
 * We used to do
 *	while (xchg(&b->lock, 1))
 *		cpu_relax();
 * but, it turns out not all architectures support xchg on a single byte.
 *
 * So now we use bit_spin_lock(), with fun games since we can't burn a whole
 * ulong for this - we just need to make sure the lock bit always ends up in the
 * first byte.
 */

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define BUCKET_LOCK_BITNR	0
#else
#define BUCKET_LOCK_BITNR	(BITS_PER_LONG - 1)
#endif

union ulong_byte_assert {
	ulong	ulong;
	u8	byte;
};

static inline void bucket_unlock(struct bucket *b)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &b->lock);
	wake_up_bit((void *) &b->lock, BUCKET_LOCK_BITNR);
}

static inline void bucket_lock(struct bucket *b)
{
	wait_on_bit_lock((void *) &b->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
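
/*
 * Example usage (an illustrative sketch, not part of this header's API):
 * a typical lock/modify/unlock sequence around per-bucket gc state, where
 * update_bucket_state() is a hypothetical helper:
 *
 *	struct bucket *g = gc_bucket(ca, b);
 *	if (g) {
 *		bucket_lock(g);
 *		update_bucket_state(g);		// hypothetical
 *		bucket_unlock(g);
 *	}
 *
 * Note that bucket_lock() waits via wait_on_bit_lock() and may sleep, so
 * it must not be taken from atomic context.
 */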

static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
	return bucket_valid(ca, b)
		? genradix_ptr(&ca->buckets_gc, b)
		: NULL;
}

static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
{
	return rcu_dereference_check(ca->bucket_gens,
				     lockdep_is_held(&ca->fs->state_lock));
}

static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
	struct bucket_gens *gens = bucket_gens(ca);

	if (b - gens->first_bucket >= gens->nbuckets_minus_first)
		return NULL;
	return gens->b + b;
}

static inline int bucket_gen_get_rcu(struct bch_dev *ca, size_t b)
{
	u8 *gen = bucket_gen(ca, b);
	return gen ? *gen : -1;
}

static inline int bucket_gen_get(struct bch_dev *ca, size_t b)
{
	rcu_read_lock();
	int ret = bucket_gen_get_rcu(ca, b);
	rcu_read_unlock();
	return ret;
}

static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
				   const struct bch_extent_ptr *ptr)
{
	return sector_to_bucket(ca, ptr->offset);
}

static inline struct bpos PTR_BUCKET_POS(const struct bch_dev *ca,
					 const struct bch_extent_ptr *ptr)
{
	return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}

static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_dev *ca,
						const struct bch_extent_ptr *ptr,
						u32 *bucket_offset)
{
	return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}

static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
					   const struct bch_extent_ptr *ptr)
{
	return gc_bucket(ca, PTR_BUCKET_NR(ca, ptr));
}

static inline enum bch_data_type ptr_data_type(const struct bkey *k,
					       const struct bch_extent_ptr *ptr)
{
	if (bkey_is_btree_ptr(k))
		return BCH_DATA_btree;

	return ptr->cached ? BCH_DATA_cached : BCH_DATA_user;
}

static inline s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
	EBUG_ON(sectors < 0);

	return crc_is_compressed(p.crc)
		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
				   p.crc.uncompressed_size)
		: sectors;
}

static inline int gen_cmp(u8 a, u8 b)
{
	return (s8) (a - b);
}

static inline int gen_after(u8 a, u8 b)
{
	int r = gen_cmp(a, b);

	return r > 0 ? r : 0;
}

static inline int dev_ptr_stale_rcu(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	int gen = bucket_gen_get_rcu(ca, PTR_BUCKET_NR(ca, ptr));
	return gen < 0 ? gen : gen_after(gen, ptr->gen);
}
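
/*
 * Worked example (illustration only): bucket generation numbers live in a
 * u8 and wrap at 256, so ordering is decided by the signed 8-bit
 * difference. With a bucket gen of 2 and a pointer gen of 254,
 * gen_cmp(2, 254) == (s8) (2 - 254) == 4, so gen_after(2, 254) == 4 and
 * dev_ptr_stale_rcu() reports the pointer as 4 generations stale, even
 * though 2 < 254 numerically.
 */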

/**
 * dev_ptr_stale() - check if a pointer points into a bucket that has been
 * invalidated.
 */
static inline int dev_ptr_stale(struct bch_dev *ca, const struct bch_extent_ptr *ptr)
{
	rcu_read_lock();
	int ret = dev_ptr_stale_rcu(ca, ptr);
	rcu_read_unlock();
	return ret;
}

/* Device usage: */

void bch2_dev_usage_read_fast(struct bch_dev *, struct bch_dev_usage *);
static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
{
	struct bch_dev_usage ret;

	bch2_dev_usage_read_fast(ca, &ret);
	return ret;
}

void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (watermark) {
	case BCH_WATERMARK_NR:
		BUG();
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case BCH_WATERMARK_btree_copygc:
	case BCH_WATERMARK_reclaim:
	case BCH_WATERMARK_interior_updates:
		break;
	}

	return reserved;
}
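
/*
 * Worked example (illustration only; the numbers are assumed): the
 * fallthroughs above mean each watermark reserves its own increment plus
 * everything reserved by the cases below it. For a device with
 * ca->mi.nbuckets == 1 << 20 (so nbuckets >> 6 == 16384) and
 * ca->nr_btree_reserve == 512:
 *
 *	BCH_WATERMARK_stripe:	16384 + 16384 + 512 + 512 = 33792 buckets
 *	BCH_WATERMARK_normal:	16384 + 512 + 512         = 17408 buckets
 *	BCH_WATERMARK_copygc:	512 + 512                 =  1024 buckets
 *	BCH_WATERMARK_btree:	512 buckets
 *	BCH_WATERMARK_reclaim:	0 buckets
 */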

static inline u64 dev_buckets_free(struct bch_dev *ca,
				   struct bch_dev_usage usage,
				   enum bch_watermark watermark)
{
	return max_t(s64, 0,
		     usage.d[BCH_DATA_free].buckets -
		     ca->nr_open_buckets -
		     bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
					  struct bch_dev_usage usage,
					  enum bch_watermark watermark)
{
	return max_t(s64, 0,
		       usage.d[BCH_DATA_free].buckets
		     + usage.d[BCH_DATA_cached].buckets
		     + usage.d[BCH_DATA_need_gc_gens].buckets
		     + usage.d[BCH_DATA_need_discard].buckets
		     - ca->nr_open_buckets
		     - bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
					enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */

static inline unsigned dev_usage_u64s(void)
{
	return sizeof(struct bch_dev_usage) / sizeof(u64);
}

struct bch_fs_usage_short
bch2_fs_usage_read_short(struct bch_fs *);

int bch2_bucket_ref_update(struct btree_trans *, struct bch_dev *,
			   struct bkey_s_c, const struct bch_extent_ptr *,
			   s64, enum bch_data_type, u8, u8, u32 *);

int bch2_check_fix_ptrs(struct btree_trans *,
			enum btree_id, unsigned, struct bkey_s_c,
			enum btree_iter_update_trigger_flags);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
			     struct bkey_s_c, struct bkey_s,
			     enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)	\
({												\
	int ret = 0;										\
												\
	if (_old.k->type)									\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert);	\
	if (!ret && _new.k->type)								\
		ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
	ret;											\
})

void bch2_trans_account_disk_usage_change(struct btree_trans *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
				    enum bch_data_type, unsigned,
				    enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
			   enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
				  enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

bool bch2_is_superblock_bucket(struct bch_dev *, u64);

static inline const char *bch2_data_type_str(enum bch_data_type type)
{
	return type < BCH_DATA_NR
		? __bch2_data_types[type]
		: "(invalid data type)";
}

/* disk reservations: */

static inline void bch2_disk_reservation_put(struct bch_fs *c,
					     struct disk_reservation *res)
{
	if (res->sectors) {
		this_cpu_sub(*c->online_reserved, res->sectors);
		res->sectors = 0;
	}
}

enum bch_reservation_flags {
	BCH_DISK_RESERVATION_NOFAIL	= 1 << 0,
	BCH_DISK_RESERVATION_PARTIAL	= 1 << 1,
};

int __bch2_disk_reservation_add(struct bch_fs *, struct disk_reservation *,
				u64, enum bch_reservation_flags);

static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
					    u64 sectors, enum bch_reservation_flags flags)
{
#ifdef __KERNEL__
	u64 old, new;

	/*
	 * Fast path: carve the sectors out of this CPU's percpu pool of
	 * available sectors; fall back to the slow path if it runs short.
	 */
	old = this_cpu_read(c->pcpu->sectors_available);
	do {
		if (sectors > old)
			return __bch2_disk_reservation_add(c, res, sectors, flags);

		new = old - sectors;
	} while (!this_cpu_try_cmpxchg(c->pcpu->sectors_available, &old, new));

	this_cpu_add(*c->online_reserved, sectors);
	res->sectors += sectors;
	return 0;
#else
	return __bch2_disk_reservation_add(c, res, sectors, flags);
#endif
}

static inline struct disk_reservation
bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
{
	return (struct disk_reservation) {
		.sectors	= 0,
#if 0
		/* not used yet: */
		.gen		= c->capacity_gen,
#endif
		.nr_replicas	= nr_replicas,
	};
}

static inline int bch2_disk_reservation_get(struct bch_fs *c,
					    struct disk_reservation *res,
					    u64 sectors, unsigned nr_replicas,
					    int flags)
{
	*res = bch2_disk_reservation_init(c, nr_replicas);

	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
}

#define RESERVE_FACTOR	6

/* avail_factor(r) == r * 64/65: hold back 1/(2^RESERVE_FACTOR + 1) (~1.5%) */
static inline u64 avail_factor(u64 r)
{
	return div_u64(r << RESERVE_FACTOR, (1 << RESERVE_FACTOR) + 1);
}

void bch2_buckets_nouse_free(struct bch_fs *);
int bch2_buckets_nouse_alloc(struct bch_fs *);

int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
void bch2_dev_buckets_free(struct bch_dev *);
int bch2_dev_buckets_alloc(struct bch_fs *, struct bch_dev *);

#endif /* _BUCKETS_H */