xref: /linux/fs/bcachefs/btree_key_cache.c (revision ff0905bbf991f4337b5ebc19c0d43525ebb0d96b)
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "journal.h"
#include "journal_reclaim.h"
#include "trace.h"

#include <linux/sched/mm.h>

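/*
 * Six locks initialized with SIX_LOCK_INIT_PCPU use percpu reader counts,
 * which make read locks cheaper and write locks more expensive. Presumably
 * the subvolumes btree qualifies because snapshot lookups read it constantly
 * while writes to it are rare.
 */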
static inline bool btree_uses_pcpu_readers(enum btree_id id)
{
	return id == BTREE_ID_subvolumes;
}

static struct kmem_cache *bch2_key_cache;

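/*
 * Note the rhashtable obj_cmpfn convention: return 0 when the object matches
 * the lookup key, nonzero otherwise - so this returns "not equal".
 */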
static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
				       const void *obj)
{
	const struct bkey_cached *ck = obj;
	const struct bkey_cached_key *key = arg->key;

	return ck->key.btree_id != key->btree_id ||
		!bpos_eq(ck->key.pos, key->pos);
}

static const struct rhashtable_params bch2_btree_key_cache_params = {
	.head_offset		= offsetof(struct bkey_cached, hash),
	.key_offset		= offsetof(struct bkey_cached, key),
	.key_len		= sizeof(struct bkey_cached_key),
	.obj_cmpfn		= bch2_btree_key_cache_cmp_fn,
	.automatic_shrinking	= true,
};

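/*
 * Point a btree_path at a cached key. Recording the six lock sequence number
 * lets a later relock detect whether the lock was dropped and the key
 * potentially modified in the meantime.
 */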
static inline void btree_path_cached_set(struct btree_trans *trans, struct btree_path *path,
					 struct bkey_cached *ck,
					 enum btree_node_locked_type lock_held)
{
	path->l[0].lock_seq	= six_lock_seq(&ck->c.lock);
	path->l[0].b		= (void *) ck;
	mark_btree_node_locked(trans, path, 0, lock_held);
}

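/*
 * Lockless lookup; rhashtable_lookup_fast() takes rcu_read_lock() internally.
 * The returned pointer is only trustworthy once locked: freed keys linger on
 * the rcu_pending lists for a grace period, so callers such as
 * btree_path_traverse_cached_fast() must recheck ck->key after taking
 * ck->c.lock.
 */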
__flatten
inline struct bkey_cached *
bch2_btree_key_cache_find(struct bch_fs *c, enum btree_id btree_id, struct bpos pos)
{
	struct bkey_cached_key key = {
		.btree_id	= btree_id,
		.pos		= pos,
	};

	return rhashtable_lookup_fast(&c->btree_key_cache.table, &key,
				      bch2_btree_key_cache_params);
}

static bool bkey_cached_lock_for_evict(struct bkey_cached *ck)
{
	if (!six_trylock_intent(&ck->c.lock))
		return false;

	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	if (!six_trylock_write(&ck->c.lock)) {
		six_unlock_intent(&ck->c.lock);
		return false;
	}

	return true;
}

static bool bkey_cached_evict(struct btree_key_cache *c,
			      struct bkey_cached *ck)
{
	bool ret = !rhashtable_remove_fast(&c->table, &ck->hash,
				      bch2_btree_key_cache_params);
	if (ret) {
		memset(&ck->key, ~0, sizeof(ck->key));
		atomic_long_dec(&c->nr_keys);
	}

	return ret;
}

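/*
 * Freeing is deferred: bkey_cached_free_noassert() enqueues freed keys on an
 * rcu_pending list tied to the btree_trans_barrier SRCU, since lockless
 * lookups may still hold pointers to them. A pending key is either reused by
 * bkey_cached_alloc() or finally returned to the slab by this callback:
 */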
static void __bkey_cached_free(struct rcu_pending *pending, struct rcu_head *rcu)
{
	struct bch_fs *c = container_of(pending->srcu, struct bch_fs, btree_trans_barrier);
	struct bkey_cached *ck = container_of(rcu, struct bkey_cached, rcu);

	this_cpu_dec(*c->btree_key_cache.nr_pending);
	kmem_cache_free(bch2_key_cache, ck);
}

static inline void bkey_cached_free_noassert(struct btree_key_cache *bc,
				      struct bkey_cached *ck)
{
	kfree(ck->k);
	ck->k		= NULL;
	ck->u64s	= 0;

	six_unlock_write(&ck->c.lock);
	six_unlock_intent(&ck->c.lock);

	bool pcpu_readers = ck->c.lock.readers != NULL;
	rcu_pending_enqueue(&bc->pending[pcpu_readers], &ck->rcu);
	this_cpu_inc(*bc->nr_pending);
}

static void bkey_cached_free(struct btree_trans *trans,
			     struct btree_key_cache *bc,
			     struct bkey_cached *ck)
{
	/*
	 * we'll hit strange issues in the SRCU code if we aren't holding an
	 * SRCU read lock...
	 */
	EBUG_ON(!trans->srcu_held);

	bkey_cached_free_noassert(bc, ck);
}

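/*
 * __GFP_ACCOUNT charges the allocation to the current memory cgroup;
 * __GFP_RECLAIMABLE places it in the reclaimable slab so the VM knows this
 * memory can be freed via our shrinker.
 */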
static struct bkey_cached *__bkey_cached_alloc(unsigned key_u64s, gfp_t gfp)
{
	gfp |= __GFP_ACCOUNT|__GFP_RECLAIMABLE;

	struct bkey_cached *ck = kmem_cache_zalloc(bch2_key_cache, gfp);
	if (unlikely(!ck))
		return NULL;
	ck->k = kmalloc(key_u64s * sizeof(u64), gfp);
	if (unlikely(!ck->k)) {
		kmem_cache_free(bch2_key_cache, ck);
		return NULL;
	}
	ck->u64s = key_u64s;
	return ck;
}

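/*
 * Allocation strategy, in order of preference:
 *
 * 1. Reuse a key from our rcu_pending list (lock already initialized).
 * 2. Allocate fresh from the slab, dropping btree locks if the allocation
 *    blocks (allocate_dropping_locks()).
 * 3. As a last resort, search all CPUs' pending lists via
 *    rcu_pending_dequeue_from_all().
 *
 * Returning NULL is not an error: the caller (btree_key_cache_create()) then
 * tries to evict and reuse a clean key via bkey_cached_reuse().
 */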
static struct bkey_cached *
bkey_cached_alloc(struct btree_trans *trans, struct btree_path *path, unsigned key_u64s)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	bool pcpu_readers = btree_uses_pcpu_readers(path->btree_id);
	int ret;

	struct bkey_cached *ck = container_of_or_null(
				rcu_pending_dequeue(&bc->pending[pcpu_readers]),
				struct bkey_cached, rcu);
	if (ck)
		goto lock;

	ck = allocate_dropping_locks(trans, ret,
				     __bkey_cached_alloc(key_u64s, _gfp));
	if (ret) {
		if (ck)
			kfree(ck->k);
		kmem_cache_free(bch2_key_cache, ck);
		return ERR_PTR(ret);
	}

	if (ck) {
		bch2_btree_lock_init(&ck->c, pcpu_readers ? SIX_LOCK_INIT_PCPU : 0, GFP_KERNEL);
		ck->c.cached = true;
		goto lock;
	}

	ck = container_of_or_null(rcu_pending_dequeue_from_all(&bc->pending[pcpu_readers]),
				  struct bkey_cached, rcu);
	if (ck)
		goto lock;

	return NULL;
lock:
	six_lock_intent(&ck->c.lock, NULL, NULL);
	six_lock_write(&ck->c.lock, NULL, NULL);
	return ck;
}

static struct bkey_cached *
bkey_cached_reuse(struct btree_key_cache *c)
{
	guard(rcu)();
	struct bucket_table *tbl = rht_dereference_rcu(c->table.tbl, &c->table);
	struct rhash_head *pos;
	struct bkey_cached *ck;

	for (unsigned i = 0; i < tbl->size; i++)
		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
			if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
			    bkey_cached_lock_for_evict(ck)) {
				if (bkey_cached_evict(c, ck))
					return ck;
				six_unlock_write(&ck->c.lock);
				six_unlock_intent(&ck->c.lock);
			}
		}
	return NULL;
}

static int btree_key_cache_create(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree_path *ck_path,
				  struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;

	/*
	 * bch2_varint_decode can read past the end of the buffer by at
	 * most 7 bytes (it won't be used):
	 */
	unsigned key_u64s = k.k->u64s + 1;

	/*
	 * Allocate some extra space so that the transaction commit path is less
	 * likely to have to reallocate, since that requires a transaction
	 * restart:
	 */
	key_u64s = min(256U, (key_u64s * 3) / 2);
	key_u64s = roundup_pow_of_two(key_u64s);
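	/*
	 * Worked example: k.k->u64s == 6 gives key_u64s = 7 for the varint
	 * padding; 7 * 3 / 2 == 10, under the 256 cap, and
	 * roundup_pow_of_two(10) == 16, i.e. a 128 byte buffer.
	 */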

	struct bkey_cached *ck = bkey_cached_alloc(trans, ck_path, key_u64s);
	int ret = PTR_ERR_OR_ZERO(ck);
	if (ret)
		return ret;

	if (unlikely(!ck)) {
		ck = bkey_cached_reuse(bc);
		if (unlikely(!ck)) {
			bch_err(c, "error allocating memory for key cache item, btree %s",
				bch2_btree_id_str(ck_path->btree_id));
			return bch_err_throw(c, ENOMEM_btree_key_cache_create);
		}
	}

	ck->c.level		= 0;
	ck->c.btree_id		= ck_path->btree_id;
	ck->key.btree_id	= ck_path->btree_id;
	ck->key.pos		= ck_path->pos;
	ck->flags		= 1U << BKEY_CACHED_ACCESSED;

	if (unlikely(key_u64s > ck->u64s)) {
		mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED);

		struct bkey_i *new_k = allocate_dropping_locks(trans, ret,
				kmalloc(key_u64s * sizeof(u64), _gfp));
		if (unlikely(!new_k)) {
			bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
				bch2_btree_id_str(ck->key.btree_id), key_u64s);
			ret = bch_err_throw(c, ENOMEM_btree_key_cache_fill);
			goto err;
		} else if (ret) {
			kfree(new_k);
			goto err;
		}

		kfree(ck->k);
		ck->k = new_k;
		ck->u64s = key_u64s;
	}

	bkey_reassemble(ck->k, k);

	ret = bch2_btree_node_lock_write(trans, path, &path_l(path)->b->c);
	if (unlikely(ret))
		goto err;

	ret = rhashtable_lookup_insert_fast(&bc->table, &ck->hash, bch2_btree_key_cache_params);

	bch2_btree_node_unlock_write(trans, path, path_l(path)->b);

	if (unlikely(ret)) /* raced with another fill? */
		goto err;

	atomic_long_inc(&bc->nr_keys);
	six_unlock_write(&ck->c.lock);

	enum six_lock_type lock_want = __btree_lock_want(ck_path, 0);
	if (lock_want == SIX_LOCK_read)
		six_lock_downgrade(&ck->c.lock);
	btree_path_cached_set(trans, ck_path, ck, (enum btree_node_locked_type) lock_want);
	ck_path->uptodate = BTREE_ITER_UPTODATE;
	return 0;
err:
	bkey_cached_free(trans, bc, ck);
	mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED);

	return ret;
}

static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans,
						       struct btree_path *ck_path,
						       struct bkey_s_c k)
{
	struct printbuf buf = PRINTBUF;

	bch2_bpos_to_text(&buf, ck_path->pos);
	prt_char(&buf, ' ');
	bch2_bkey_val_to_text(&buf, trans->c, k);
	trace_key_cache_fill(trans, buf.buf);
	printbuf_exit(&buf);
}

static noinline int btree_key_cache_fill(struct btree_trans *trans,
					 btree_path_idx_t ck_path_idx,
					 unsigned flags)
{
	struct btree_path *ck_path = trans->paths + ck_path_idx;

	if (flags & BTREE_ITER_cached_nofill) {
		ck_path->l[0].b = NULL;
		return 0;
	}

	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	bch2_trans_iter_init(trans, &iter, ck_path->btree_id, ck_path->pos,
			     BTREE_ITER_intent|
			     BTREE_ITER_key_cache_fill|
			     BTREE_ITER_cached_nofill);
	iter.flags &= ~BTREE_ITER_with_journal;
	k = bch2_btree_iter_peek_slot(trans, &iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	/* Recheck after btree lookup, before allocating: */
	ck_path = trans->paths + ck_path_idx;
	ret = bch2_btree_key_cache_find(c, ck_path->btree_id, ck_path->pos) ? -EEXIST : 0;
	if (unlikely(ret))
		goto out;

	ret = btree_key_cache_create(trans, btree_iter_path(trans, &iter), ck_path, k);
	if (ret)
		goto err;

	if (trace_key_cache_fill_enabled())
		do_trace_key_cache_fill(trans, ck_path, k);
out:
	/* We're not likely to need this iterator again: */
	bch2_set_btree_iter_dontneed(trans, &iter);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline int btree_path_traverse_cached_fast(struct btree_trans *trans,
						  btree_path_idx_t path_idx)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck;
	struct btree_path *path = trans->paths + path_idx;
retry:
	ck = bch2_btree_key_cache_find(c, path->btree_id, path->pos);
	if (!ck)
		return -ENOENT;

	enum six_lock_type lock_want = __btree_lock_want(path, 0);

	int ret = btree_node_lock(trans, path, (void *) ck, 0, lock_want, _THIS_IP_);
	if (ret)
		return ret;

	if (ck->key.btree_id != path->btree_id ||
	    !bpos_eq(ck->key.pos, path->pos)) {
		six_unlock_type(&ck->c.lock, lock_want);
		goto retry;
	}

	if (!test_bit(BKEY_CACHED_ACCESSED, &ck->flags))
		set_bit(BKEY_CACHED_ACCESSED, &ck->flags);

	btree_path_cached_set(trans, path, ck, (enum btree_node_locked_type) lock_want);
	path->uptodate = BTREE_ITER_UPTODATE;
	return 0;
}

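/*
 * Lookup protocol: try the lockless fast path first; -ENOENT means there's no
 * cached key yet, so create one from the underlying btree. If the fill races
 * with another thread's fill it returns -EEXIST and we retry the fast path.
 */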
int bch2_btree_path_traverse_cached(struct btree_trans *trans,
				    btree_path_idx_t path_idx,
				    unsigned flags)
{
	EBUG_ON(trans->paths[path_idx].level);

	int ret;
	do {
		ret = btree_path_traverse_cached_fast(trans, path_idx);
		if (unlikely(ret == -ENOENT))
			ret = btree_key_cache_fill(trans, path_idx, flags);
	} while (ret == -EEXIST);

	struct btree_path *path = trans->paths + path_idx;

	if (unlikely(ret)) {
		path->uptodate = BTREE_ITER_NEED_TRAVERSE;
		if (!bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			btree_node_unlock(trans, path, 0);
			path->l[0].b = ERR_PTR(ret);
		}
	} else {
		BUG_ON(path->uptodate);
		BUG_ON(!path->nodes_locked);
	}

	return ret;
}

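/*
 * Flush a dirty key back to the underlying btree: commit the cached value
 * with BTREE_UPDATE_key_cache_reclaim, drop its journal pin, and optionally
 * evict it from the cache. Journal reclaim drives this via
 * bch2_btree_key_cache_journal_flush().
 */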
static int btree_key_cache_flush_pos(struct btree_trans *trans,
				     struct bkey_cached_key key,
				     u64 journal_seq,
				     unsigned commit_flags,
				     bool evict)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct btree_iter c_iter, b_iter;
	struct bkey_cached *ck = NULL;
	int ret;

	bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
			     BTREE_ITER_slots|
			     BTREE_ITER_intent|
			     BTREE_ITER_all_snapshots);
	bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);
	b_iter.flags &= ~BTREE_ITER_with_key_cache;

	ret = bch2_btree_iter_traverse(trans, &c_iter);
	if (ret)
		goto out;

	ck = (void *) btree_iter_path(trans, &c_iter)->l[0].b;
	if (!ck)
		goto out;

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		if (evict)
			goto evict;
		goto out;
	}

	if (journal_seq && ck->journal.seq != journal_seq)
		goto out;

	trans->journal_res.seq = ck->journal.seq;

	/*
	 * If we're at the end of the journal, we really want to free up space
	 * in the journal right away - we don't want to pin that old journal
	 * sequence number with a new btree node write, we want to re-journal
	 * the update
	 */
	if (ck->journal.seq == journal_last_seq(j))
		commit_flags |= BCH_WATERMARK_reclaim;

	if (ck->journal.seq != journal_last_seq(j) ||
	    !test_bit(JOURNAL_space_low, &c->journal.flags))
		commit_flags |= BCH_TRANS_COMMIT_no_journal_res;

	struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(trans, &b_iter);
	ret = bkey_err(btree_k);
	if (ret)
		goto err;

	/* Check that we're not violating cache coherency rules: */
	BUG_ON(bkey_deleted(btree_k.k));

	ret   = bch2_trans_update(trans, &b_iter, ck->k,
				  BTREE_UPDATE_key_cache_reclaim|
				  BTREE_UPDATE_internal_snapshot_node|
				  BTREE_TRIGGER_norun) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_check_rw|
				  BCH_TRANS_COMMIT_no_enospc|
				  commit_flags);
err:
	bch2_fs_fatal_err_on(ret &&
			     !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
			     !bch2_err_matches(ret, BCH_ERR_journal_reclaim_would_deadlock) &&
			     !bch2_journal_error(j), c,
			     "flushing key cache: %s", bch2_err_str(ret));
	if (ret)
		goto out;

	bch2_journal_pin_drop(j, &ck->journal);

	struct btree_path *path = btree_iter_path(trans, &c_iter);
	BUG_ON(!btree_node_locked(path, 0));

	if (!evict) {
		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}
	} else {
		struct btree_path *path2;
		unsigned i;
evict:
		trans_for_each_path(trans, path2, i)
			if (path2 != path)
				__bch2_btree_path_unlock(trans, path2);

		bch2_btree_node_lock_write_nofail(trans, path, &ck->c);

		if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
			clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
			atomic_long_dec(&c->btree_key_cache.nr_dirty);
		}

		mark_btree_node_locked_noreset(path, 0, BTREE_NODE_UNLOCKED);
		if (bkey_cached_evict(&c->btree_key_cache, ck)) {
			bkey_cached_free(trans, &c->btree_key_cache, ck);
		} else {
			six_unlock_write(&ck->c.lock);
			six_unlock_intent(&ck->c.lock);
		}
	}
out:
	bch2_trans_iter_exit(trans, &b_iter);
	bch2_trans_iter_exit(trans, &c_iter);
	return ret;
}

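/*
 * journal_entry_pin flush callback, called by journal reclaim when it needs
 * the pin at @seq released: if the key has since been redirtied at a newer
 * sequence number (ck->seq != seq), the pin is just advanced instead of
 * flushing.
 */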
int bch2_btree_key_cache_journal_flush(struct journal *j,
				struct journal_entry_pin *pin, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct bkey_cached *ck =
		container_of(pin, struct bkey_cached, journal);
	struct bkey_cached_key key;
	struct btree_trans *trans = bch2_trans_get(c);
	int srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	int ret = 0;

	btree_node_lock_nopath_nofail(trans, &ck->c, SIX_LOCK_read);
	key = ck->key;

	if (ck->journal.seq != seq ||
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}

	if (ck->seq != seq) {
		bch2_journal_pin_update(&c->journal, ck->seq, &ck->journal,
					bch2_btree_key_cache_journal_flush);
		six_unlock_read(&ck->c.lock);
		goto unlock;
	}
	six_unlock_read(&ck->c.lock);

	ret = lockrestart_do(trans,
		btree_key_cache_flush_pos(trans, key, seq,
				BCH_TRANS_COMMIT_journal_reclaim, false));
unlock:
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	bch2_trans_put(trans);
	return ret;
}

bool bch2_btree_insert_key_cached(struct btree_trans *trans,
				  unsigned flags,
				  struct btree_insert_entry *insert_entry)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) (trans->paths + insert_entry->path)->l[0].b;
	struct bkey_i *insert = insert_entry->k;
	bool kick_reclaim = false;

	BUG_ON(insert->k.u64s > ck->u64s);

	bkey_copy(ck->k, insert);

	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
		set_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_inc(&c->btree_key_cache.nr_dirty);

		if (bch2_nr_btree_keys_need_flush(c))
			kick_reclaim = true;
	}

	/*
	 * To minimize lock contention, we only add the journal pin here and
	 * defer pin updates to the flush callback via ->seq. Be careful not to
	 * update ->seq on nojournal commits because we don't want to update the
	 * pin to a seq that doesn't include journal updates on disk. Otherwise
	 * we risk losing the update after a crash.
	 *
	 * The only exception is if the pin is not active in the first place. We
	 * have to add the pin because journal reclaim drives key cache
	 * flushing. The flush callback will not proceed unless ->seq matches
	 * the latest pin, so make sure it starts with a consistent value.
	 */
	if (!(insert_entry->flags & BTREE_UPDATE_nojournal) ||
	    !journal_pin_active(&ck->journal)) {
		ck->seq = trans->journal_res.seq;
	}
	bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
			     &ck->journal, bch2_btree_key_cache_journal_flush);

	if (kick_reclaim)
		journal_reclaim_kick(&c->journal);
	return true;
}

void bch2_btree_key_cache_drop(struct btree_trans *trans,
			       struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bkey_cached *ck = (void *) path->l[0].b;

	/*
	 * We just did an update to the btree, bypassing the key cache: the key
	 * cache key is now stale and must be dropped, even if dirty:
	 */
	if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
		clear_bit(BKEY_CACHED_DIRTY, &ck->flags);
		atomic_long_dec(&c->btree_key_cache.nr_dirty);
		bch2_journal_pin_drop(&c->journal, &ck->journal);
	}

	bkey_cached_evict(bc, ck);
	bkey_cached_free(trans, bc, ck);

	mark_btree_node_locked(trans, path, 0, BTREE_NODE_UNLOCKED);

	struct btree_path *path2;
	unsigned i;
	trans_for_each_path(trans, path2, i)
		if (path2->l[0].b == (void *) ck) {
			/*
			 * It's safe to clear should_be_locked here because
			 * we're evicting from the key cache, and we still have
			 * the underlying btree locked: filling into the key
			 * cache would require taking a write lock on the btree
			 * node
			 */
			path2->should_be_locked = false;
			__bch2_btree_path_unlock(trans, path2);
			path2->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_drop);
			btree_path_set_dirty(trans, path2, BTREE_ITER_NEED_TRAVERSE);
		}

	bch2_trans_verify_locks(trans);
}

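/*
 * Shrinker scan: a clock-style sweep over the hash table, resuming at
 * shrink_iter. Dirty keys are skipped (journal reclaim must flush them
 * first), and recently used keys get a second chance: BKEY_CACHED_ACCESSED is
 * cleared on the first pass and the key is only freed if it's still clear
 * when the sweep comes around again.
 */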
static unsigned long bch2_btree_key_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;
	struct btree_key_cache *bc = &c->btree_key_cache;
	struct bucket_table *tbl;
	struct bkey_cached *ck;
	size_t scanned = 0, freed = 0, nr = sc->nr_to_scan;
	unsigned iter, start;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
	rcu_read_lock();

	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);

	/*
	 * Scanning is expensive while a rehash is in progress - most elements
	 * will be on the new hashtable, if it's in progress
	 *
	 * A rehash could still start while we're scanning - that's ok, we'll
	 * still see most elements.
	 */
	if (unlikely(tbl->nest)) {
		rcu_read_unlock();
		srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);
		return SHRINK_STOP;
	}

	iter = bc->shrink_iter;
	if (iter >= tbl->size)
		iter = 0;
	start = iter;

	do {
		struct rhash_head *pos, *next;

		pos = rht_ptr_rcu(&tbl->buckets[iter]);

		while (!rht_is_a_nulls(pos)) {
			next = rht_dereference_bucket_rcu(pos->next, tbl, iter);
			ck = container_of(pos, struct bkey_cached, hash);

			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags)) {
				bc->skipped_dirty++;
			} else if (test_bit(BKEY_CACHED_ACCESSED, &ck->flags)) {
				clear_bit(BKEY_CACHED_ACCESSED, &ck->flags);
				bc->skipped_accessed++;
			} else if (!bkey_cached_lock_for_evict(ck)) {
				bc->skipped_lock_fail++;
			} else if (bkey_cached_evict(bc, ck)) {
				bkey_cached_free_noassert(bc, ck);
				bc->freed++;
				freed++;
			} else {
				six_unlock_write(&ck->c.lock);
				six_unlock_intent(&ck->c.lock);
			}

			scanned++;
			if (scanned >= nr)
				goto out;

			pos = next;
		}

		iter++;
		if (iter >= tbl->size)
			iter = 0;
	} while (scanned < nr && iter != start);
out:
	bc->shrink_iter = iter;

	rcu_read_unlock();
	srcu_read_unlock(&c->btree_trans_barrier, srcu_idx);

	return freed;
}

static unsigned long bch2_btree_key_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	struct bch_fs *c = shrink->private_data;
	struct btree_key_cache *bc = &c->btree_key_cache;
	long nr = atomic_long_read(&bc->nr_keys) -
		atomic_long_read(&bc->nr_dirty);

	/*
	 * Avoid hammering our shrinker too much if it's nearly empty - the
	 * shrinker code doesn't take into account how big our cache is, if it's
	 * mostly empty but the system is under memory pressure it causes nasty
	 * lock contention:
	 */
	nr -= 128;

	return max(0L, nr);
}

void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct bucket_table *tbl;
	struct bkey_cached *ck;
	struct rhash_head *pos;
	LIST_HEAD(items);
	unsigned i;

	shrinker_free(bc->shrink);

	/*
	 * The loop is needed to guard against racing with rehash:
	 */
	while (atomic_long_read(&bc->nr_keys)) {
		rcu_read_lock();
		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
		if (tbl) {
			if (tbl->nest) {
				/* wait for in progress rehash */
				rcu_read_unlock();
				mutex_lock(&bc->table.mutex);
				mutex_unlock(&bc->table.mutex);
				continue;
			}
			for (i = 0; i < tbl->size; i++)
				while (pos = rht_ptr_rcu(&tbl->buckets[i]), !rht_is_a_nulls(pos)) {
					ck = container_of(pos, struct bkey_cached, hash);
					BUG_ON(!bkey_cached_evict(bc, ck));
					kfree(ck->k);
					kmem_cache_free(bch2_key_cache, ck);
				}
		}
		rcu_read_unlock();
	}

	if (atomic_long_read(&bc->nr_dirty) &&
	    !bch2_journal_error(&c->journal) &&
	    test_bit(BCH_FS_was_rw, &c->flags))
		panic("btree key cache shutdown error: nr_dirty nonzero (%li)\n",
		      atomic_long_read(&bc->nr_dirty));

	if (atomic_long_read(&bc->nr_keys))
		panic("btree key cache shutdown error: nr_keys nonzero (%li)\n",
		      atomic_long_read(&bc->nr_keys));

	if (bc->table_init_done)
		rhashtable_destroy(&bc->table);

	rcu_pending_exit(&bc->pending[0]);
	rcu_pending_exit(&bc->pending[1]);

	free_percpu(bc->nr_pending);
}

void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *c)
{
}

int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
{
	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
	struct shrinker *shrink;

	bc->nr_pending = alloc_percpu(size_t);
	if (!bc->nr_pending)
		return bch_err_throw(c, ENOMEM_fs_btree_cache_init);

	if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) ||
	    rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free))
		return bch_err_throw(c, ENOMEM_fs_btree_cache_init);

	if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
		return bch_err_throw(c, ENOMEM_fs_btree_cache_init);

	bc->table_init_done = true;

	shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
	if (!shrink)
		return bch_err_throw(c, ENOMEM_fs_btree_cache_init);
	bc->shrink = shrink;
	shrink->count_objects	= bch2_btree_key_cache_count;
	shrink->scan_objects	= bch2_btree_key_cache_scan;
	shrink->batch		= 1 << 14;
	shrink->seeks		= 0;
	shrink->private_data	= c;
	shrinker_register(shrink);
	return 0;
}

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *bc)
{
	printbuf_tabstop_push(out, 24);
	printbuf_tabstop_push(out, 12);

	prt_printf(out, "keys:\t%lu\r\n",		atomic_long_read(&bc->nr_keys));
	prt_printf(out, "dirty:\t%lu\r\n",		atomic_long_read(&bc->nr_dirty));
	prt_printf(out, "table size:\t%u\r\n",		bc->table.tbl->size);
	prt_newline(out);
	prt_printf(out, "shrinker:\n");
	prt_printf(out, "requested_to_free:\t%lu\r\n",	bc->requested_to_free);
	prt_printf(out, "freed:\t%lu\r\n",		bc->freed);
	prt_printf(out, "skipped_dirty:\t%lu\r\n",	bc->skipped_dirty);
	prt_printf(out, "skipped_accessed:\t%lu\r\n",	bc->skipped_accessed);
	prt_printf(out, "skipped_lock_fail:\t%lu\r\n",	bc->skipped_lock_fail);
	prt_newline(out);
	prt_printf(out, "pending:\t%zu\r\n",		per_cpu_sum(bc->nr_pending));
}

void bch2_btree_key_cache_exit(void)
{
	kmem_cache_destroy(bch2_key_cache);
}

int __init bch2_btree_key_cache_init(void)
{
	bch2_key_cache = KMEM_CACHE(bkey_cached, SLAB_RECLAIM_ACCOUNT);
	if (!bch2_key_cache)
		return -ENOMEM;

	return 0;
}
881