// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "trace.h"

#include <linux/sched/mm.h>

static inline bool btree_uses_pcpu_readers(enum btree_id id)
{
	return id == BTREE_ID_subvolumes;
}

static struct kmem_cache *bch2_key_cache;

static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				       const void *obj)
{
	const struct bkey_cached *ck = obj;
	const struct bkey_cached_key *key = arg->key;

	return ck->key.btree_id != key->btree_id ||
		!bpos_eq(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
	.head_offset		= offsetof(struct bkey_cached, hash),
	.key_offset		= offsetof(struct bkey_cached, key),
	.key_len		= sizeof(struct bkey_cached_key),
	.obj_cmpfn		= bch2_btree_key_cache_cmp_fn,
	.automatic_shrinking	= true,
};

static inline void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
					 struct bkey_cached *ck,
					 enum btree_node_locked_type lock_held)
{
	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
	path->l[0].b		= (void *) ck;
	mark_btree_node_locked(trans, path, 0, lock_held);
}

__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
	struct bkey_cached_key key = {
		.btree_id	= btree_id,
		.pos		= pos,
	};

	return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
				      bch2_btree_key_cache_params);
}

static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}

static bool bkey_cached_evict(struct btree_key_cache *c,
			      struct bkey_cached *ck)
{
	bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
					   bch2_btree_key_cache_params);
	if (ret) {
		memset(&ck->key, ~0, sizeof(ck->key));
		atomic_long_dec(&c->nr_keys);
	}

	return ret;
}

static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu)
{
	struct bch_fs *c = container_of(pending->srcu, struct bch_fs, btree_trans_barrier);
	struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);

	this_cpu_dec(*c->btree_key_cache.nr_pending);
	kmem_cache_free(bch2_key_cache, ck);
}

static void bkey_cached_free(struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

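	/*
	 * Drop the object's locks, then defer the actual free until after an
	 * SRCU (btree_trans_barrier) grace period via the rcu_pending lists:
	 */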
	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);

	bool pcpu_readers = ck->c.lock.readers != NULL;
	rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
	this_cpu_inc(*bc->nr_pending);
}

static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
{
	gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;

	struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
	if (unlikely(!ck))
		return NULL;
	ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
	if (unlikely(!ck->k)) {
		kmem_cache_free(bch2_key_cache, ck);
		return NULL;
	}
	ck->u64s = key_u64s;
	return ck;
}

static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned key_u64s)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
	int ret;

	struct bkey_cached *ck = container_of_or_null(
				rcu_pending_dequeue(&bc->pending[pcpu_readers]),
				struct bkey_cached, rcu);
	if (ck)
		goto lock;

	ck = allocate_dropping_locks(trans, ret,
				     __bkey_cached_alloc(key_u64s, _gfp));
	if (ret) {
		if (ck)
			kfree(ck->k);
		kmem_cache_free(bch2_key_cache, ck);
		return ERR_PTR(ret);
	}

	if (ck) {
		bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
		ck->c.cached = true;
		goto lock;
	}

	ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
				  struct bkey_cached, rcu);
	if (!ck)
		return NULL;
lock:
	six_lock_intent(&ck->c.lock, NULL, NULL);
	six_lock_write(&ck->c.lock, NULL, NULL);
	return ck;
}

static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
	struct bucket_table *tbl;
	struct rhash_head *pos;
	struct bkey_cached *ck;
	unsigned i;

	rcu_read_lock();
	tbl = rht_dereference_rcu(c->table.tbl, &c->table);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
			if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
			    bkey_cached_lock_for_evict(ck)) {
				if (bkey_cached_evict(c, ck))
					goto out;
				six_unlock_write(&ck->c.lock);
				six_unlock_intent(&ck->c.lock);
			}
		}
	ck = NULL;
out:
	rcu_read_unlock();
	return ck;
}

static int btree_key_cache_create(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_path *ck_path,
				  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;

	/*
	 * bch2_varint_decode can read past the end of the buffer by at
	 * most 7 bytes (it won't be used):
	 */
	unsigned key_u64s = k.k->u64s + 1;

	/*
	 * Allocate some extra space so that the transaction commit path is less
	 * likely to have to reallocate, since that requires a transaction
	 * restart:
	 */
	key_u64s = min(256U, (key_u64s * 3) / 2);
	key_u64s = roundup_pow_of_two(key_u64s);

	struct bkey_cached *ck = bkey_cached_alloc(trans, ck_path, key_u64s);
	int ret = PTR_ERR_OR_ZERO(ck);
	if (ret)
		return ret;

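	/*
	 * Allocation failed (without a transaction restart): fall back to
	 * stealing a clean, unlocked entry from the hash table before giving
	 * up:
	 */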
	if (unlikely(!ck)) {
		ck = bkey_cached_reuse(bc);
		if (unlikely(!ck)) {
			bch_err(c, "error allocating memory for key cache item, btree %s",
				bch2_btree_id_str(ck_path->btree_id));
			return -BCH_ERR_ENOMEM_btree_key_cache_create;
		}
	}

	ck->c.level		= 0;
	ck->c.btree_id		= ck_path->btree_id;
	ck->key.btree_id	= ck_path->btree_id;
	ck->key.pos		= ck_path->pos;
	ck->flags		= 1U << BKEY_CACHED_ACCESSED;

	if (unlikely(key_u64s > ck->u64s)) {
		mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED);

		struct bkey_i *new_k = allocate_dropping_locks(trans, ret,
				kmalloc(key_u64s * sizeof(u64), _gfp));
		if (unlikely(!new_k)) {
			bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
				bch2_btree_id_str(ck->key.btree_id), key_u64s);
			ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
			goto err;
		} else if (ret) {
			kfree(new_k);
			goto err;
		}

		kfree(ck->k);
		ck->k		= new_k;
		ck->u64s	= key_u64s;
	}

	bkey_reassemble(ck->k, k);

	ret = bch2_btree_node_lock_write(trans, path, &path_l(path)->b->c);
	if (unlikely(ret))
		goto err;

	ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params);

	bch2_btree_node_unlock_write(trans, path, path_l(path)->b);

	if (unlikely(ret)) /* raced with another fill? */
		goto err;

	atomic_long_inc(&bc->nr_keys);
	six_unlock_write(&ck->c.lock);

	enum six_lock_type lock_want = __btree_lock_want(ck_path, 0);
	if (lock_want == SIX_LOCK_read)
		six_lock_downgrade(&ck->c.lock);
	btree_path_cached_set(trans, ck_path, ck, (enum btree_node_locked_type) lock_want);
	ck_path->uptodate = BTREE_ITER_UPTODATE;
	return 0;
err:
	bkey_cached_free(bc, ck);
	mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED);

	return ret;
}

static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans,
						       struct btree_path *ck_path,
						       struct bkey_s_c k)
{
	struct printbuf buf = PRINTBUF;

	bch2_bpos_to_text(&buf, ck_path->pos);
	prt_char(&buf, ' ');
	bch2_bkey_val_to_text(&buf, trans->c, k);
	trace_key_cache_fill(trans, buf.buf);
	printbuf_exit(&buf);
}

static noinline int btree_key_cache_fill(struct btree_trans *trans,
					 struct btree_path *ck_path,
					 unsigned flags)
{
	if (flags & BTREE_ITER_cached_nofill) {
		ck_path->l[0].b = NULL;
		return 0;
	}

	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos,
			     BTREE_ITER_intent|
			     BTREE_ITER_key_cache_fill|
			     BTREE_ITER_cached_nofill);
	iter.flags &= ~BTREE_ITER_with_journal;
	k = bch2_btree_iter_peek_slot(trans, &iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	/* Recheck after btree lookup, before allocating: */
	ret = bch2_btree_key_cache_find(c, ck_path->btree_id, ck_path->pos) ? -EEXIST : 0;
	if (unlikely(ret))
		goto out;

	ret = btree_key_cache_create(trans, btree_iter_path(trans, &iter), ck_path, k);
	if (ret)
		goto err;

	if (trace_key_cache_fill_enabled())
		do_trace_key_cache_fill(trans, ck_path, k);
out:
	/* We're not likely to need this iterator again: */
	bch2_set_btree_iter_dontneed(trans, &iter);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline int btree_path_traverse_cached_fast(struct btree_trans *trans,
						  struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck)
		return -ENOENT;

	enum six_lock_type lock_want = __btree_lock_want(path, 0);

	int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_);
	if (ret)
		return ret;

	if (ck->key.btree_id != path->btree_id ||
	    !bpos_eq(ck->key.pos, path->pos)) {
		six_unlock_type(&ck->c.lock, lock_want);
		goto retry;
	}

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
	path->uptodate = BTREE_ITER_UPTODATE;
	return 0;
}

int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path *path,
				    unsigned flags)
{
	EBUG_ON(path->level);

	path->l[1].b = NULL;

	int ret;
	do {
		ret = btree_path_traverse_cached_fast(trans, path);
		if (unlikely(ret == -ENOENT))
			ret = btree_key_cache_fill(trans, path, flags);
	} while (ret == -EEXIST);

	if (unlikely(ret)) {
		path->uptodate = BTREE_ITER_NEED_TRAVERSE;
		if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			btree_node_unlock(trans, path, 0);
			path->l[0].b = ERR_PTR(ret);
		}
	}
	return ret;
}

static int btree_key_cache_flush_pos(struct btree_trans *trans,
				     struct bkey_cached_key key,
				     u64 journal_seq,
				     unsigned commit_flags,
				     bool evict)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_iter c_iter, b_iter;
	struct bkey_cached *ck = NULL;
	int ret;

	bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
			     BTREE_ITER_slots|
			     BTREE_ITER_intent|
			     BTREE_ITER_all_snapshots);
	bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);
	b_iter.flags &= ~BTREE_ITER_with_key_cache;

	ret = bch2_btree_iter_traverse(trans, &c_iter);
	if (ret)
		goto out;

	ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
	if (!ck)
		goto out;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		if (evict)
			goto evict;
		goto out;
	}

	if (journal_seq && ck->journal.seq != journal_seq)
		goto out;

	trans->journal_res.seq = ck->journal.seq;

	/*
	 * If we're at the end of the journal, we really want to free up space
	 * in the journal right away - we don't want to pin that old journal
	 * sequence number with a new btree node write, we want to re-journal
	 * the update
	 */
	if (ck->journal.seq == journal_last_seq(j))
		commit_flags |= BCH_WATERMARK_reclaim;

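	/*
	 * Unless we're re-journalling the update to free up space at the tail
	 * of the journal, the key is already journalled at ck->journal.seq and
	 * the commit doesn't need a new journal reservation:
	 */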
	if (ck->journal.seq != journal_last_seq(j) ||
	    !test_bit(JOURNAL_space_low, &c->journal.flags))
		commit_flags |= BCH_TRANS_COMMIT_no_journal_res;

	struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(trans, &b_iter);
	ret = bkey_err(btree_k);
	if (ret)
		goto err;

	/* Check that we're not violating cache coherency rules: */
	BUG_ON(bkey_deleted(btree_k.k));

	ret   = bch2_trans_update(trans, &b_iter, ck->k,
				  BTREE_UPDATE_key_cache_reclaim|
				  BTREE_UPDATE_internal_snapshot_node|
				  BTREE_TRIGGER_norun) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc|
				  commit_flags);
err:
	bch2_fs_fatal_err_on(ret &&
			     !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
			     !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
			     !bch2_journal_error(j), c,
			     "flushing key cache: %s", bch2_err_str(ret));
	if (ret)
		goto out;

	bch2_journal_pin_drop(j, &ck->journal);

	struct btree_path *path = btree_iter_path(trans, &c_iter);
	BUG_ON(!btree_node_locked(path, 0));

	if (!evict) {
		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}
	} else {
		struct btree_path *path2;
		unsigned i;
evict:
		trans_for_each_path(trans, path2, i)
			if (path2 != path)
				__bch2_btree_path_unlock(trans, path2);

		bch2_btree_node_lock_write_nofail(trans, path, &ck->c);

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}

		mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
		if (bkey_cached_evict(&c->btree_key_cache, ck)) {
			bkey_cached_free(&c->btree_key_cache, ck);
		} else {
			six_unlock_write(&ck->c.lock);
			six_unlock_intent(&ck->c.lock);
		}
	}
out:
	bch2_trans_iter_exit(trans, &b_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	return ret;
}

int bch2_btree_key_cache_journal_flush(struct journal *j,
				       struct journal_entry_pin *pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_cached *ck =
		container_of(pin, struct bkey_cached, journal);
	struct bkey_cached_key key;
	struct btree_trans *trans = bch2_trans_get(c);
	int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	int ret = 0;

	btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
	key = ck->key;

	if (ck->journal.seq != seq ||
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}

	if (ck->seq != seq) {
		bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
					bch2_btree_key_cache_journal_flush);
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}
	six_unlock_read(&ck->c.lock);

	ret = lockrestart_do(trans,
		btree_key_cache_flush_pos(trans, key, seq,
				BCH_TRANS_COMMIT_journal_reclaim, false));
unlock:
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	bch2_trans_put(trans);
	return ret;
}

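/*
 * Update a key cache entry from the transaction commit path: the new key has
 * already been journalled, so copy it into the cached entry, mark it dirty,
 * and take a journal pin so that journal reclaim will eventually flush it back
 * to the btree:
 */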
bool bch2_btree_insert_key_cached(struct btree_trans *trans,
				  unsigned flags,
				  struct btree_insert_entry *insert_entry)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;
	struct bkey_i *insert = insert_entry->k;
	bool kick_reclaim = false;

	BUG_ON(insert->k.u64s > ck->u64s);

	bkey_copy(ck->k, insert);

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_inc(&c->btree_key_cache.nr_dirty);

		if (bch2_nr_btree_keys_need_flush(c))
			kick_reclaim = true;
	}

	/*
	 * To minimize lock contention, we only add the journal pin here and
	 * defer pin updates to the flush callback via ->seq. Be careful not to
	 * update ->seq on nojournal commits because we don't want to update the
	 * pin to a seq that doesn't include journal updates on disk. Otherwise
	 * we risk losing the update after a crash.
	 *
	 * The only exception is if the pin is not active in the first place. We
	 * have to add the pin because journal reclaim drives key cache
	 * flushing. The flush callback will not proceed unless ->seq matches
	 * the latest pin, so make sure it starts with a consistent value.
	 */
	if (!(insert_entry->flags & BTREE_UPDATE_nojournal) ||
	    !journal_pin_active(&ck->journal)) {
		ck->seq = trans->journal_res.seq;
	}
	bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
			     &ck->journal, bch2_btree_key_cache_journal_flush);

	if (kick_reclaim)
		journal_reclaim_kick(&c->journal);
	return true;
}

void bch2_btree_key_cache_drop(struct btree_trans *trans,
			       struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck = (void *) path->l[0].b;

	/*
	 * We just did an update to the btree, bypassing the key cache: the key
	 * cache key is now stale and must be dropped, even if dirty:
	 */
	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_dec(&c->btree_key_cache.nr_dirty);
		bch2_journal_pin_drop(&c->journal, &ck->journal);
	}

	bkey_cached_evict(bc, ck);
	bkey_cached_free(bc, ck);

	mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);

	struct btree_path *path2;
	unsigned i;
	trans_for_each_path(trans, path2, i)
		if (path2->l[0].b == (void *) ck) {
			__bch2_btree_path_unlock(trans, path2);
			path2->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_drop);
			path2->should_be_locked = false;
			btree_path_set_dirty(path2, BTREE_ITER_NEED_TRAVERSE);
		}

	bch2_trans_verify_locks(trans);
}

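/*
 * Shrinker scan: walk the hash table evicting clean, unlocked entries.
 * Recently accessed entries get a second chance - their accessed bit is
 * cleared and they're skipped on this pass:
 */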
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bucket_table *tbl;
	struct bkey_cached *ck;
	size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
	unsigned iter, start;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	rcu_read_lock();

	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);

	/*
	 * Scanning is expensive while a rehash is in progress - most elements
	 * will be on the new hashtable, if it's in progress
	 *
	 * A rehash could still start while we're scanning - that's ok, we'll
	 * still see most elements.
	 */
	if (unlikely(tbl->nest)) {
		rcu_read_unlock();
		srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
		return SHRINK_STOP;
	}

	iter = bc->shrink_iter;
	if (iter >= tbl->size)
		iter = 0;
	start = iter;

	do {
		struct rhash_head *pos, *next;

		pos = rht_ptr_rcu(&tbl->buckets[iter]);

		while (!rht_is_a_nulls(pos)) {
			next = rht_dereference_bucket_rcu(pos->next, tbl, iter);
			ck = container_of(pos, struct bkey_cached, hash);

			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
				bc->skipped_dirty++;
			} else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
				clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
				bc->skipped_accessed++;
			} else if (!bkey_cached_lock_for_evict(ck)) {
				bc->skipped_lock_fail++;
			} else if (bkey_cached_evict(bc, ck)) {
				bkey_cached_free(bc, ck);
				bc->freed++;
				freed++;
			} else {
				six_unlock_write(&ck->c.lock);
				six_unlock_intent(&ck->c.lock);
			}

			scanned++;
			if (scanned >= nr)
				goto out;

			pos = next;
		}

		iter++;
		if (iter >= tbl->size)
			iter = 0;
	} while (scanned < nr && iter != start);
out:
	bc->shrink_iter = iter;

	rcu_read_unlock();
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
						struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;
	struct btree_key_cache *bc = &c->btree_key_cache;
	long nr = atomic_long_read(&bc->nr_keys) -
		  atomic_long_read(&bc->nr_dirty);

	/*
	 * Avoid hammering our shrinker too much if it's nearly empty - the
	 * shrinker code doesn't take into account how big our cache is, if it's
	 * mostly empty but the system is under memory pressure it causes nasty
	 * lock contention:
	 */
	nr -= 128;

	return max(0L, nr);
}

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct bucket_table *tbl;
	struct bkey_cached *ck;
	struct rhash_head *pos;
	LIST_HEAD(items);
	unsigned i;

	shrinker_free(bc->shrink);

	/*
	 * The loop is needed to guard against racing with rehash:
	 */
	while (atomic_long_read(&bc->nr_keys)) {
		rcu_read_lock();
		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
		if (tbl) {
			if (tbl->nest) {
				/* wait for in progress rehash */
				rcu_read_unlock();
				mutex_lock(&bc->table.mutex);
				mutex_unlock(&bc->table.mutex);
				continue;
			}
			for (i = 0; i < tbl->size; i++)
				while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
					ck = container_of(pos, struct bkey_cached, hash);
					BUG_ON(!bkey_cached_evict(bc, ck));
					kfree(ck->k);
					kmem_cache_free(bch2_key_cache, ck);
				}
		}
		rcu_read_unlock();
	}

	if (atomic_long_read(&bc->nr_dirty) &&
	    !bch2_journal_error(&c->journal) &&
	    test_bit(BCH_FS_was_rw, &c->flags))
		panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
		      atomic_long_read(&bc->nr_dirty));

	if (atomic_long_read(&bc->nr_keys))
		panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
		      atomic_long_read(&bc->nr_keys));

	if (bc->table_init_done)
		rhashtable_destroy(&bc->table);

	rcu_pending_exit(&bc->pending[0]);
	rcu_pending_exit(&bc->pending[1]);

	free_percpu(bc->nr_pending);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
}

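/*
 * Per-filesystem initialization: the pending-free lists, the hash table and
 * the shrinker:
 */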
int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct shrinker *shrink;

	bc->nr_pending = alloc_percpu(size_t);
	if (!bc->nr_pending)
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;

	if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) ||
	    rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free))
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;

	if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;

	bc->table_init_done = true;

	shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
	if (!shrink)
		return -BCH_ERR_ENOMEM_fs_btree_cache_init;
	bc->shrink = shrink;
	shrink->count_objects	= bch2_btree_key_cache_count;
	shrink->scan_objects	= bch2_btree_key_cache_scan;
	shrink->batch		= 1 << 14;
	shrink->seeks		= 0;
	shrink->private_data	= c;
	shrinker_register(shrink);
	return 0;
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc)
{
	printbuf_tabstop_push(out, 24);
	printbuf_tabstop_push(out, 12);

	prt_printf(out, "keys:\t%lu\r\n",		atomic_long_read(&bc->nr_keys));
	prt_printf(out, "dirty:\t%lu\r\n",		atomic_long_read(&bc->nr_dirty));
	prt_printf(out, "table size:\t%u\r\n",		bc->table.tbl->size);
	prt_newline(out);
	prt_printf(out, "shrinker:\n");
	prt_printf(out, "requested_to_free:\t%lu\r\n",	bc->requested_to_free);
	prt_printf(out, "freed:\t%lu\r\n",		bc->freed);
	prt_printf(out, "skipped_dirty:\t%lu\r\n",	bc->skipped_dirty);
	prt_printf(out, "skipped_accessed:\t%lu\r\n",	bc->skipped_accessed);
	prt_printf(out, "skipped_lock_fail:\t%lu\r\n",	bc->skipped_lock_fail);
	prt_newline(out);
	prt_printf(out, "pending:\t%zu\r\n",		per_cpu_sum(bc->nr_pending));
}

void bch2_btree_key_cache_exit(void)
{
	kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
	bch2_key_cache = KMEM_CACHE(bkey_cached, SLAB_RECLAIM_ACCOUNT);
	if (!bch2_key_cache)
		return -ENOMEM;

	return 0;
}