Lines Matching +full:wait +full:- +full:pin
1 // SPDX-License-Identifier: GPL-2.0
10 * btree pointers - they must match for the pointer to be considered valid.
19 * When we invalidate a bucket, we have to write its new gen to disk and wait
20 * for that write to complete before we use it - otherwise after a crash we
29 * free_inc isn't the only freelist - if it was, we'd often have to sleep while
30 * priorities and gens were being written before we could allocate. c->free is a
37 * have nothing pointing into them - these we can reuse without waiting for
40 * them (because they were overwritten). That's the unused list - buckets on the
43 * It's also important to ensure that gens don't wrap around - with respect to
45 * difficult to do in practice, but we explicitly guard against it anyway - if
60 * invalidated, and then invalidate them and stick them on the free_inc list -
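
The matched comment lines above describe the core of the scheme: a btree pointer carries a copy of the bucket's gen, so bumping the bucket's gen invalidates every pointer still holding the old value, and the gen is never allowed to get too far ahead of what garbage collection has seen. A minimal userspace sketch of that check and of the wraparound guard, using hypothetical stand-in types rather than the kernel's struct bucket / struct bkey:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not the bcache structures. */
struct demo_bucket { uint8_t gen; uint8_t last_gc_gen; };
struct demo_ptr    { uint8_t gen; };

#define DEMO_GC_GEN_MAX 96	/* plays the role of BUCKET_GC_GEN_MAX */

/* A pointer is stale once its gen no longer matches the bucket's gen. */
static int demo_ptr_stale(const struct demo_bucket *b, const struct demo_ptr *p)
{
	return (uint8_t)(b->gen - p->gen) != 0;
}

/* Invalidate: bump the gen, which implicitly invalidates old pointers,
 * but refuse to outrun garbage collection (gen wraparound guard). */
static int demo_invalidate(struct demo_bucket *b)
{
	if ((uint8_t)(b->gen - b->last_gc_gen) >= DEMO_GC_GEN_MAX)
		return -1;	/* must wait for GC to catch up first */
	b->gen++;
	return 0;
}

int main(void)
{
	struct demo_bucket b = { .gen = 3, .last_gc_gen = 3 };
	struct demo_ptr p = { .gen = b.gen };

	printf("stale before invalidate: %d\n", demo_ptr_stale(&b, &p));
	demo_invalidate(&b);
	printf("stale after invalidate:  %d\n", demo_ptr_stale(&b, &p));
	return 0;
}
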
78 uint8_t ret = ++b->gen; in bch_inc_gen()
80 ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b)); in bch_inc_gen()
81 WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX); in bch_inc_gen()
90 unsigned long next = c->nbuckets * c->cache->sb.bucket_size / 1024; in bch_rescale_priorities()
93 atomic_sub(sectors, &c->rescale); in bch_rescale_priorities()
96 r = atomic_read(&c->rescale); in bch_rescale_priorities()
100 } while (atomic_cmpxchg(&c->rescale, r, r + next) != r); in bch_rescale_priorities()
102 mutex_lock(&c->bucket_lock); in bch_rescale_priorities()
104 c->min_prio = USHRT_MAX; in bch_rescale_priorities()
106 ca = c->cache; in bch_rescale_priorities()
108 if (b->prio && in bch_rescale_priorities()
109 b->prio != BTREE_PRIO && in bch_rescale_priorities()
110 !atomic_read(&b->pin)) { in bch_rescale_priorities()
111 b->prio--; in bch_rescale_priorities()
112 c->min_prio = min(c->min_prio, b->prio); in bch_rescale_priorities()
115 mutex_unlock(&c->bucket_lock); in bch_rescale_priorities()
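
The bch_rescale_priorities() fragments show the trigger and the walk: an atomic countdown of sectors written is refilled with a cmpxchg loop once roughly 1/1024th of the cache size has been written, and every in-use, unpinned, non-btree bucket then has its priority decremented so recently used buckets stay relatively hot. A simplified single-threaded sketch of the same idea, with hypothetical names and no locking:

#include <stdint.h>
#include <stdio.h>

#define DEMO_NBUCKETS   8
#define DEMO_BTREE_PRIO 1	/* stand-in for the reserved btree priority */

struct demo_bucket { uint16_t prio; int pinned; };

static struct demo_bucket buckets[DEMO_NBUCKETS];
static long rescale_budget;	/* sectors left before the next rescale */

static void demo_rescale(void)
{
	uint16_t min_prio = UINT16_MAX;

	for (int i = 0; i < DEMO_NBUCKETS; i++) {
		struct demo_bucket *b = &buckets[i];

		/* Skip free, btree and pinned buckets, as the real walk does. */
		if (b->prio && b->prio != DEMO_BTREE_PRIO && !b->pinned) {
			b->prio--;
			if (b->prio < min_prio)
				min_prio = b->prio;
		}
	}
	printf("rescaled, new min_prio = %u\n", min_prio);
}

/* Called on every write; rescales once enough sectors have gone by. */
static void demo_account_write(long sectors, long refill)
{
	rescale_budget -= sectors;
	if (rescale_budget <= 0) {
		rescale_budget += refill;
		demo_rescale();
	}
}

int main(void)
{
	for (int i = 0; i < DEMO_NBUCKETS; i++)
		buckets[i].prio = 100 + i;

	rescale_budget = 64;		/* e.g. nbuckets * bucket_size / 1024 */
	demo_account_write(70, 64);	/* crosses the threshold -> rescale */
	return 0;
}
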
132 return (ca->set->gc_mark_valid || b->reclaimable_in_gc) && in bch_can_invalidate_bucket()
134 !atomic_read(&b->pin) && can_inc_bucket_gen(b)); in bch_can_invalidate_bucket()
139 lockdep_assert_held(&ca->set->bucket_lock); in __bch_invalidate_one_bucket()
143 trace_bcache_invalidate(ca, b - ca->buckets); in __bch_invalidate_one_bucket()
146 b->prio = INITIAL_PRIO; in __bch_invalidate_one_bucket()
147 atomic_inc(&b->pin); in __bch_invalidate_one_bucket()
148 b->reclaimable_in_gc = 0; in __bch_invalidate_one_bucket()
155 fifo_push(&ca->free_inc, b - ca->buckets); in bch_invalidate_one_bucket()
169 unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
171 (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
182 ca->heap.used = 0; in invalidate_buckets_lru()
188 if (!heap_full(&ca->heap)) in invalidate_buckets_lru()
189 heap_add(&ca->heap, b, bucket_max_cmp); in invalidate_buckets_lru()
190 else if (bucket_max_cmp(b, heap_peek(&ca->heap))) { in invalidate_buckets_lru()
191 ca->heap.data[0] = b; in invalidate_buckets_lru()
192 heap_sift(&ca->heap, 0, bucket_max_cmp); in invalidate_buckets_lru()
196 for (i = ca->heap.used / 2 - 1; i >= 0; --i) in invalidate_buckets_lru()
197 heap_sift(&ca->heap, i, bucket_min_cmp); in invalidate_buckets_lru()
199 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_lru()
200 if (!heap_pop(&ca->heap, b, bucket_min_cmp)) { in invalidate_buckets_lru()
205 ca->invalidate_needs_gc = 1; in invalidate_buckets_lru()
206 wake_up_gc(ca->set); in invalidate_buckets_lru()
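
The LRU policy above first fills a bounded heap with the most attractive victims (weighted by priority distance from min_prio times sectors used, per the bucket_prio macro), replaces the worst kept candidate whenever a better one shows up, then re-heapifies with the opposite comparator and pops candidates cheapest-first until free_inc is full. A standalone sketch of that select-then-drain pattern, with hypothetical names and a plain integer weight standing in for bucket_prio():

#include <stdio.h>

/* Illustrative candidate: lower weight == better bucket to invalidate. */
struct cand { int id; long weight; };

#define HEAP_CAP 4

static struct cand heap[HEAP_CAP];
static int heap_used;

typedef int (*cmp_fn)(const struct cand *l, const struct cand *r);

static int max_cmp(const struct cand *l, const struct cand *r) { return l->weight > r->weight; }
static int min_cmp(const struct cand *l, const struct cand *r) { return l->weight < r->weight; }

static void sift_down(int i, cmp_fn cmp)
{
	for (;;) {
		int best = i, l = 2 * i + 1, r = 2 * i + 2;

		if (l < heap_used && cmp(&heap[l], &heap[best])) best = l;
		if (r < heap_used && cmp(&heap[r], &heap[best])) best = r;
		if (best == i)
			break;
		struct cand tmp = heap[i]; heap[i] = heap[best]; heap[best] = tmp;
		i = best;
	}
}

/* Phase 1 helper: keep the HEAP_CAP cheapest candidates in a max-heap. */
static void consider(struct cand c)
{
	if (heap_used < HEAP_CAP) {
		heap[heap_used++] = c;
		for (int i = heap_used / 2 - 1; i >= 0; i--)
			sift_down(i, max_cmp);
	} else if (c.weight < heap[0].weight) {
		heap[0] = c;		/* better than the worst kept one */
		sift_down(0, max_cmp);
	}
}

int main(void)
{
	long weights[] = { 50, 10, 70, 30, 20, 90, 5 };

	for (int i = 0; i < 7; i++)
		consider((struct cand){ .id = i, .weight = weights[i] });

	/* Phase 2: re-heapify as a min-heap and drain cheapest-first. */
	for (int i = heap_used / 2 - 1; i >= 0; i--)
		sift_down(i, min_cmp);

	while (heap_used) {
		struct cand c = heap[0];

		heap[0] = heap[heap_used - 1];
		heap_used--;
		sift_down(0, min_cmp);
		printf("invalidate id %d (weight %ld)\n", c.id, c.weight);
	}
	return 0;
}
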
219 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_fifo()
220 if (ca->fifo_last_bucket < ca->sb.first_bucket || in invalidate_buckets_fifo()
221 ca->fifo_last_bucket >= ca->sb.nbuckets) in invalidate_buckets_fifo()
222 ca->fifo_last_bucket = ca->sb.first_bucket; in invalidate_buckets_fifo()
224 b = ca->buckets + ca->fifo_last_bucket++; in invalidate_buckets_fifo()
229 if (++checked >= ca->sb.nbuckets) { in invalidate_buckets_fifo()
230 ca->invalidate_needs_gc = 1; in invalidate_buckets_fifo()
231 wake_up_gc(ca->set); in invalidate_buckets_fifo()
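
FIFO replacement, by contrast, just walks the bucket array from a remembered cursor, wraps back to the first usable bucket, and gives up (waking garbage collection in the real code) once it has looked at every bucket without finding enough victims. A small sketch of that cursor logic, with hypothetical names and an arbitrary reclaimability test:

#include <stddef.h>
#include <stdio.h>

#define FIRST_USABLE 2		/* e.g. the leading buckets hold metadata */
#define NBUCKETS     10

static size_t cursor = FIRST_USABLE;	/* analogous to fifo_last_bucket */

static int can_invalidate(size_t b)
{
	return b % 3 == 0;	/* arbitrary stand-in for the real check */
}

/* Returns the next invalidatable bucket, or -1 after a full sweep. */
static long next_fifo_bucket(void)
{
	for (size_t checked = 0; checked < NBUCKETS; checked++) {
		if (cursor < FIRST_USABLE || cursor >= NBUCKETS)
			cursor = FIRST_USABLE;	/* wrap around */

		size_t b = cursor++;
		if (can_invalidate(b))
			return (long)b;
	}
	return -1;	/* nothing reclaimable: the real code wakes up GC */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("picked bucket %ld\n", next_fifo_bucket());
	return 0;
}
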
242 while (!fifo_full(&ca->free_inc)) { in invalidate_buckets_random()
247 n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket); in invalidate_buckets_random()
248 n += ca->sb.first_bucket; in invalidate_buckets_random()
250 b = ca->buckets + n; in invalidate_buckets_random()
255 if (++checked >= ca->sb.nbuckets / 2) { in invalidate_buckets_random()
256 ca->invalidate_needs_gc = 1; in invalidate_buckets_random()
257 wake_up_gc(ca->set); in invalidate_buckets_random()
265 BUG_ON(ca->invalidate_needs_gc); in invalidate_buckets()
267 switch (CACHE_REPLACEMENT(&ca->sb)) { in invalidate_buckets()
287 mutex_unlock(&(ca)->set->bucket_lock); \
289 test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \
295 mutex_lock(&(ca)->set->bucket_lock); \
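
The allocator_wait() fragments drop the bucket lock, sleep until the condition becomes true (or the thread is asked to stop, or the cache set's IO has been disabled), then retake the lock and recheck. The usual userspace equivalent of that drop-the-lock-while-sleeping pattern is a condition variable; a sketch under that assumption, with hypothetical names (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative shared state, not the kernel's structures. */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  bucket_cond = PTHREAD_COND_INITIALIZER;
static int free_buckets;
static bool should_stop;

/* Wait for a condition with the lock dropped, as allocator_wait() does. */
static bool wait_for_free_bucket(void)
{
	pthread_mutex_lock(&bucket_lock);
	while (free_buckets == 0 && !should_stop)
		pthread_cond_wait(&bucket_cond, &bucket_lock); /* unlocks while asleep */

	bool ok = free_buckets > 0;
	if (ok)
		free_buckets--;
	pthread_mutex_unlock(&bucket_lock);
	return ok;
}

static void *producer(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&bucket_lock);
	free_buckets += 1;			/* e.g. a bucket was invalidated */
	pthread_cond_broadcast(&bucket_cond);	/* like waking the wait queue */
	pthread_mutex_unlock(&bucket_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	printf("got bucket: %s\n", wait_for_free_bucket() ? "yes" : "no");
	pthread_join(t, NULL);
	return 0;
}
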
305 if (fifo_push(&ca->free[RESERVE_PRIO], bucket)) in bch_allocator_push()
309 if (fifo_push(&ca->free[i], bucket)) in bch_allocator_push()
319 mutex_lock(&ca->set->bucket_lock); in bch_allocator_thread()
330 if (!fifo_pop(&ca->free_inc, bucket)) in bch_allocator_thread()
333 if (ca->discard) { in bch_allocator_thread()
334 mutex_unlock(&ca->set->bucket_lock); in bch_allocator_thread()
335 blkdev_issue_discard(ca->bdev, in bch_allocator_thread()
336 bucket_to_sector(ca->set, bucket), in bch_allocator_thread()
337 ca->sb.bucket_size, GFP_KERNEL); in bch_allocator_thread()
338 mutex_lock(&ca->set->bucket_lock); in bch_allocator_thread()
342 wake_up(&ca->set->btree_cache_wait); in bch_allocator_thread()
343 wake_up(&ca->set->bucket_wait); in bch_allocator_thread()
353 allocator_wait(ca, !ca->invalidate_needs_gc); in bch_allocator_thread()
360 allocator_wait(ca, !atomic_read(&ca->set->prio_blocked)); in bch_allocator_thread()
361 if (CACHE_SYNC(&ca->sb)) { in bch_allocator_thread()
364 * node locked ever blocked - having the btree node in bch_allocator_thread()
373 if (!fifo_full(&ca->free_inc)) in bch_allocator_thread()
377 ca->invalidate_needs_gc = 1; in bch_allocator_thread()
378 wake_up_gc(ca->set); in bch_allocator_thread()
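
The bch_allocator_thread() fragments show the refill cycle: pop freshly invalidated buckets off free_inc (issuing a discard if enabled), push them onto the reserve free lists with the priority reserve tried first, wake up waiters, then go invalidate more buckets and write priorities out when running synchronously. A tiny single-threaded model of the pop-and-push half, with hypothetical ring-buffer helpers standing in for the kernel's fifo API:

#include <stdbool.h>
#include <stdio.h>

#define FIFO_SIZE 4

struct fifo { long data[FIFO_SIZE]; unsigned head, tail; };

static bool fifo_full(struct fifo *f)  { return f->head - f->tail == FIFO_SIZE; }
static bool fifo_empty(struct fifo *f) { return f->head == f->tail; }
static bool fifo_push(struct fifo *f, long v)
{
	if (fifo_full(f))
		return false;
	f->data[f->head++ % FIFO_SIZE] = v;
	return true;
}
static bool fifo_pop(struct fifo *f, long *v)
{
	if (fifo_empty(f))
		return false;
	*v = f->data[f->tail++ % FIFO_SIZE];
	return true;
}

static struct fifo free_inc;		/* freshly invalidated buckets */
static struct fifo reserve_prio;	/* buckets kept back for priorities */
static struct fifo reserve_none;	/* general-purpose free buckets */

/* One pass of the loop: drain free_inc onto the free lists, preferring
 * the priority reserve as the push helper above does, then refill
 * free_inc with newly chosen victims (arbitrary ids here). */
static void allocator_pass(void)
{
	long bucket;

	while (fifo_pop(&free_inc, &bucket)) {
		/* A real allocator may issue a discard for the bucket here. */
		if (!fifo_push(&reserve_prio, bucket) &&
		    !fifo_push(&reserve_none, bucket)) {
			fifo_push(&free_inc, bucket);	/* all reserves full */
			break;
		}
		printf("pushed bucket %ld onto a free list\n", bucket);
	}

	static long next = 100;
	while (fifo_push(&free_inc, next))
		next++;
}

int main(void)
{
	allocator_pass();	/* first pass only fills free_inc */
	allocator_pass();	/* second pass drains it into the reserves */
	return 0;
}
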
389 long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait) in bch_bucket_alloc() argument
397 if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags))) in bch_bucket_alloc()
398 return -1; in bch_bucket_alloc()
401 if (fifo_pop(&ca->free[RESERVE_NONE], r) || in bch_bucket_alloc()
402 fifo_pop(&ca->free[reserve], r)) in bch_bucket_alloc()
405 if (!wait) { in bch_bucket_alloc()
407 return -1; in bch_bucket_alloc()
411 prepare_to_wait(&ca->set->bucket_wait, &w, in bch_bucket_alloc()
414 mutex_unlock(&ca->set->bucket_lock); in bch_bucket_alloc()
416 mutex_lock(&ca->set->bucket_lock); in bch_bucket_alloc()
417 } while (!fifo_pop(&ca->free[RESERVE_NONE], r) && in bch_bucket_alloc()
418 !fifo_pop(&ca->free[reserve], r)); in bch_bucket_alloc()
420 finish_wait(&ca->set->bucket_wait, &w); in bch_bucket_alloc()
422 if (ca->alloc_thread) in bch_bucket_alloc()
423 wake_up_process(ca->alloc_thread); in bch_bucket_alloc()
427 if (expensive_debug_checks(ca->set)) { in bch_bucket_alloc()
433 BUG_ON(ca->prio_buckets[iter] == (uint64_t) r); in bch_bucket_alloc()
436 fifo_for_each(i, &ca->free[j], iter) in bch_bucket_alloc()
438 fifo_for_each(i, &ca->free_inc, iter) in bch_bucket_alloc()
442 b = ca->buckets + r; in bch_bucket_alloc()
444 BUG_ON(atomic_read(&b->pin) != 1); in bch_bucket_alloc()
446 SET_GC_SECTORS_USED(b, ca->sb.bucket_size); in bch_bucket_alloc()
451 b->prio = BTREE_PRIO; in bch_bucket_alloc()
455 b->prio = INITIAL_PRIO; in bch_bucket_alloc()
458 if (ca->set->avail_nbuckets > 0) { in bch_bucket_alloc()
459 ca->set->avail_nbuckets--; in bch_bucket_alloc()
460 bch_update_bucket_in_use(ca->set, &ca->set->gc_stats); in bch_bucket_alloc()
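
bch_bucket_alloc() first tries the general free list, then the reserve it was asked for; if both are empty it either fails immediately (wait == false) or sleeps on the bucket wait queue until the allocator thread refills a list, and it kicks the allocator thread afterwards. A sketch of just the try-then-maybe-fail control flow, with only two reserves modelled and simple counters standing in for the fifos (the blocking path is elided):

#include <stdbool.h>
#include <stdio.h>

enum reserve { RESERVE_PRIO, RESERVE_NONE, RESERVE_NR };

#define PER_RESERVE 2

static int free_count[RESERVE_NR] = {
	[RESERVE_PRIO] = PER_RESERVE,
	[RESERVE_NONE] = PER_RESERVE,
};

/* Try the general-purpose list first, then the caller's reserve,
 * mirroring the fifo_pop() ordering in the fragment above. */
static long demo_bucket_alloc(enum reserve reserve, bool wait)
{
	if (free_count[RESERVE_NONE] > 0)
		return --free_count[RESERVE_NONE];
	if (free_count[reserve] > 0)
		return --free_count[reserve];
	if (!wait)
		return -1;	/* caller asked for a non-blocking attempt */
	/* ... the real code sleeps on the wait queue here and retries ... */
	return -1;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("alloc(RESERVE_PRIO, false) -> %ld\n",
		       demo_bucket_alloc(RESERVE_PRIO, false));
	return 0;
}
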
471 if (ca->set->avail_nbuckets < ca->set->nbuckets) { in __bch_bucket_free()
472 ca->set->avail_nbuckets++; in __bch_bucket_free()
473 bch_update_bucket_in_use(ca->set, &ca->set->gc_stats); in __bch_bucket_free()
482 __bch_bucket_free(c->cache, PTR_BUCKET(c, k, i)); in bch_bucket_free()
486 struct bkey *k, bool wait) in __bch_bucket_alloc_set() argument
492 if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) in __bch_bucket_alloc_set()
493 return -1; in __bch_bucket_alloc_set()
495 lockdep_assert_held(&c->bucket_lock); in __bch_bucket_alloc_set()
499 ca = c->cache; in __bch_bucket_alloc_set()
500 b = bch_bucket_alloc(ca, reserve, wait); in __bch_bucket_alloc_set()
502 return -1; in __bch_bucket_alloc_set()
504 k->ptr[0] = MAKE_PTR(ca->buckets[b].gen, in __bch_bucket_alloc_set()
506 ca->sb.nr_this_dev); in __bch_bucket_alloc_set()
514 struct bkey *k, bool wait) in bch_bucket_alloc_set() argument
518 mutex_lock(&c->bucket_lock); in bch_bucket_alloc_set()
519 ret = __bch_bucket_alloc_set(c, reserve, k, wait); in bch_bucket_alloc_set()
520 mutex_unlock(&c->bucket_lock); in bch_bucket_alloc_set()
565 list_for_each_entry_reverse(ret, &c->data_buckets, list) in pick_data_bucket()
566 if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) != in pick_data_bucket()
567 UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)])) in pick_data_bucket()
569 else if (!bkey_cmp(&ret->key, search)) in pick_data_bucket()
571 else if (ret->last_write_point == write_point) in pick_data_bucket()
574 ret = ret_task ?: list_first_entry(&c->data_buckets, in pick_data_bucket()
577 if (!ret->sectors_free && KEY_PTRS(alloc)) { in pick_data_bucket()
578 ret->sectors_free = c->cache->sb.bucket_size; in pick_data_bucket()
579 bkey_copy(&ret->key, alloc); in pick_data_bucket()
583 if (!ret->sectors_free) in pick_data_bucket()
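
pick_data_bucket() scans the open data buckets from most to least recently used, preferring one already tied to the same key or write point so that writes from one source stay physically contiguous, and otherwise evicting the least recently used open bucket (flash-only volumes are kept separate, a detail skipped here). A sketch of that selection order over a plain array, with hypothetical fields and a string comparison standing in for the bkey match:

#include <stdio.h>
#include <string.h>

struct open_bucket {
	unsigned long write_point;	/* who wrote here last */
	char owner[16];			/* stand-in for the bkey comparison */
};

#define NR_OPEN 3

/* Index 0 is least recently used, NR_OPEN - 1 most recently used. */
static struct open_bucket open_buckets[NR_OPEN] = {
	{ .write_point = 1, .owner = "inode-1" },
	{ .write_point = 2, .owner = "inode-2" },
	{ .write_point = 3, .owner = "inode-3" },
};

static struct open_bucket *pick(const char *owner, unsigned long write_point)
{
	struct open_bucket *fallback = NULL;

	/* Scan from most recently used to least recently used. */
	for (int i = NR_OPEN - 1; i >= 0; i--) {
		struct open_bucket *b = &open_buckets[i];

		if (!strcmp(b->owner, owner))
			return b;		/* same stream: best match */
		if (b->write_point == write_point)
			fallback = b;		/* same write point: good match */
	}

	/* Otherwise reuse the least recently used open bucket. */
	return fallback ? fallback : &open_buckets[0];
}

int main(void)
{
	printf("same owner   -> %s\n", pick("inode-2", 9)->owner);
	printf("same wp      -> %s\n", pick("inode-9", 3)->owner);
	printf("LRU fallback -> %s\n", pick("inode-9", 9)->owner);
	return 0;
}
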
597 * If s->writeback is true, will not fail.
604 bool wait) in bch_alloc_sectors() argument
618 spin_lock(&c->data_bucket_lock); in bch_alloc_sectors()
625 spin_unlock(&c->data_bucket_lock); in bch_alloc_sectors()
627 if (bch_bucket_alloc_set(c, watermark, &alloc.key, wait)) in bch_alloc_sectors()
630 spin_lock(&c->data_bucket_lock); in bch_alloc_sectors()
641 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_alloc_sectors()
642 EBUG_ON(ptr_stale(c, &b->key, i)); in bch_alloc_sectors()
646 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_alloc_sectors()
647 k->ptr[i] = b->key.ptr[i]; in bch_alloc_sectors()
649 sectors = min(sectors, b->sectors_free); in bch_alloc_sectors()
653 SET_KEY_PTRS(k, KEY_PTRS(&b->key)); in bch_alloc_sectors()
659 list_move_tail(&b->list, &c->data_buckets); in bch_alloc_sectors()
660 bkey_copy_key(&b->key, k); in bch_alloc_sectors()
661 b->last_write_point = write_point; in bch_alloc_sectors()
663 b->sectors_free -= sectors; in bch_alloc_sectors()
665 for (i = 0; i < KEY_PTRS(&b->key); i++) { in bch_alloc_sectors()
666 SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors); in bch_alloc_sectors()
669 &c->cache->sectors_written); in bch_alloc_sectors()
672 if (b->sectors_free < c->cache->sb.block_size) in bch_alloc_sectors()
673 b->sectors_free = 0; in bch_alloc_sectors()
680 if (b->sectors_free) in bch_alloc_sectors()
681 for (i = 0; i < KEY_PTRS(&b->key); i++) in bch_alloc_sectors()
682 atomic_inc(&PTR_BUCKET(c, &b->key, i)->pin); in bch_alloc_sectors()
684 spin_unlock(&c->data_bucket_lock); in bch_alloc_sectors()
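
Once a data bucket has been chosen, the bch_alloc_sectors() fragments do the per-write bookkeeping: the caller's key gets a copy of the bucket's pointers, the request is clamped to what is left in the bucket, every pointer offset is advanced past the just-allocated sectors, the free count is reduced, and the bucket is closed once less than a block remains. A sketch of that arithmetic with a hypothetical single-pointer open bucket:

#include <stdio.h>

struct demo_open_bucket {
	unsigned long ptr_offset;	/* next free sector in the bucket */
	unsigned sectors_free;
};

#define BLOCK_SECTORS 8

/* Grant up to "want" sectors from b; returns how many were granted and
 * reports where they start (what the caller's key would point at). */
static unsigned alloc_sectors(struct demo_open_bucket *b, unsigned want,
			      unsigned long *start)
{
	unsigned got = want < b->sectors_free ? want : b->sectors_free;

	*start = b->ptr_offset;
	b->ptr_offset += got;		/* advance past the allocation */
	b->sectors_free -= got;

	/* Too little left for even one block: close the bucket. */
	if (b->sectors_free < BLOCK_SECTORS)
		b->sectors_free = 0;

	return got;
}

int main(void)
{
	struct demo_open_bucket b = { .ptr_offset = 1024, .sectors_free = 32 };
	unsigned long start;

	unsigned got = alloc_sectors(&b, 20, &start);
	printf("got %u sectors at %lu, %u left\n", got, start, b.sectors_free);

	got = alloc_sectors(&b, 20, &start);
	printf("got %u sectors at %lu, %u left\n", got, start, b.sectors_free);
	return 0;
}
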
694 while (!list_empty(&c->data_buckets)) { in bch_open_buckets_free()
695 b = list_first_entry(&c->data_buckets, in bch_open_buckets_free()
697 list_del(&b->list); in bch_open_buckets_free()
706 spin_lock_init(&c->data_bucket_lock); in bch_open_buckets_alloc()
712 return -ENOMEM; in bch_open_buckets_alloc()
714 list_add(&b->list, &c->data_buckets); in bch_open_buckets_alloc()
727 ca->alloc_thread = k; in bch_cache_allocator_start()