Lines matching refs:bc (references to struct dm_buffer_cache *bc in drivers/md/dm-bufio.c; the search reports only lines that mention bc, so elided context is marked in comments below)
/*
 * Each tree carries a union lock: a DM_BUFIO_CLIENT_NO_SLEEP client uses a
 * bh-safe rwlock, everyone else a sleeping rw_semaphore. The lock is picked
 * by hashing the block to one of bc->num_locks trees.
 */
static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
        if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
                read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
        else
                down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
        if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
                read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
        else
                up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
        if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
                write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
        else
                down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}

static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
        if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
                write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
        else
                up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
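The point of the per-tree union lock is lock striping: each block hashes to one of bc->num_locks rb-trees, so lookups on unrelated blocks never touch the same lock. A minimal, runnable userspace sketch of the same striping idea; the power-of-two mask is only a stand-in, since cache_index() itself is not part of this listing:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LOCKS 8     /* stands in for bc->num_locks */

static pthread_rwlock_t stripe[NUM_LOCKS];

/* Stand-in for cache_index(): pick the lock stripe that covers a block. */
static unsigned int stripe_index(uint64_t block)
{
        return block & (NUM_LOCKS - 1);
}

int main(void)
{
        uint64_t block = 12345;

        for (unsigned int i = 0; i < NUM_LOCKS; i++)
                pthread_rwlock_init(&stripe[i], NULL);

        /* Same shape as cache_read_lock()/cache_read_unlock() in the sleeping case. */
        pthread_rwlock_rdlock(&stripe[stripe_index(block)]);
        printf("block %llu is guarded by stripe %u\n",
               (unsigned long long)block, stripe_index(block));
        pthread_rwlock_unlock(&stripe[stripe_index(block)]);
        return 0;
}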
static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
        unsigned int i;

        bc->num_locks = num_locks;
        bc->no_sleep = no_sleep;

        for (i = 0; i < bc->num_locks; i++) {
                if (no_sleep)
                        rwlock_init(&bc->trees[i].u.spinlock);
                else
                        init_rwsem(&bc->trees[i].u.lock);
                bc->trees[i].root = RB_ROOT;
        }

        lru_init(&bc->lru[LIST_CLEAN]);
        lru_init(&bc->lru[LIST_DIRTY]);
}
static void cache_destroy(struct dm_buffer_cache *bc)
{
        unsigned int i;

        for (i = 0; i < bc->num_locks; i++)
                WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));

        lru_destroy(&bc->lru[LIST_CLEAN]);
        lru_destroy(&bc->lru[LIST_DIRTY]);
}
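How the cache itself is allocated is not shown in this listing. A hedged lifecycle sketch, assuming trees[] is a flexible array member sized by num_locks (which would explain why cache_init() takes num_locks explicitly):

/* Sketch only: the struct_size() allocation assumes a flexible trees[] array. */
static struct dm_buffer_cache *cache_create(unsigned int num_locks, bool no_sleep)
{
        struct dm_buffer_cache *bc;

        bc = kzalloc(struct_size(bc, trees, num_locks), GFP_KERNEL);
        if (!bc)
                return NULL;

        cache_init(bc, num_locks, no_sleep);
        return bc;
}

static void cache_free(struct dm_buffer_cache *bc)
{
        cache_destroy(bc);      /* WARNs if any rb-tree still holds buffers */
        kfree(bc);
}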
static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
{
        return bc->lru[list_mode].count;
}

static inline unsigned long cache_total(struct dm_buffer_cache *bc)
{
        return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
}
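cache_count() reads lru->count without taking any tree lock, so the result is a racy snapshot; that is fine for sizing heuristics. A hypothetical watermark check built on it (the threshold parameter is illustrative):

/* Hypothetical helper: decide whether eviction should run. */
static bool cache_over_watermark(struct dm_buffer_cache *bc,
                                 unsigned long max_buffers)
{
        return cache_total(bc) > max_buffers;
}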
static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
{
        struct dm_buffer *b;

        cache_read_lock(bc, block);
        b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
        if (b) {
                lru_reference(&b->lru);
                __cache_inc_buffer(b);
        }
        cache_read_unlock(bc, block);

        return b;
}
static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
        bool r;

        cache_read_lock(bc, b->block);
        BUG_ON(!atomic_read(&b->hold_count));
        r = atomic_dec_and_test(&b->hold_count);
        cache_read_unlock(bc, b->block);

        return r;
}
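cache_get() and cache_put() pair like kref_get()/kref_put(): the lookup pins the buffer with a hold count, and the caller must drop it, with cache_put() reporting when the last hold goes away. A usage sketch:

/* Sketch of the lookup/release pattern. */
static void inspect_block(struct dm_buffer_cache *bc, sector_t block)
{
        struct dm_buffer *b = cache_get(bc, block);

        if (!b)
                return;                 /* block is not cached */

        /* ... use b; the hold count keeps it from being evicted ... */

        if (cache_put(bc, b)) {
                /* hold count hit zero; the buffer is now evictable */
        }
}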
static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
                                       b_predicate pred, void *context,
                                       struct lock_history *lh)
{
        struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
        struct lru_entry *le;
        struct dm_buffer *b;

        le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
        if (!le)
                return NULL;

        b = le_to_buffer(le);
        /* __evict_pred will have locked the tree that covers b->block. */
        rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);

        return b;
}

static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
                                     b_predicate pred, void *context)
{
        struct dm_buffer *b;
        struct lock_history lh;

        lh_init(&lh, bc, true);
        b = __cache_evict(bc, list_mode, pred, context, &lh);
        lh_exit(&lh);

        return b;
}
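Eviction is predicate driven: lru_evict() offers LRU candidates to __evict_pred(), which takes the matching tree lock before consulting the caller's predicate, so __cache_evict() can safely rb_erase() whatever comes back. A hedged sketch; the enum evict_result names (ER_EVICT, ER_DONT_EVICT) are assumed from context:

/* Hypothetical predicate: accept whatever buffer the LRU offers. */
static enum evict_result evict_any(struct dm_buffer *b, void *context)
{
        return ER_EVICT;
}

/* Pop the least-recently-used clean buffer out of the cache, if there is one. */
static struct dm_buffer *pop_oldest_clean(struct dm_buffer_cache *bc)
{
        return cache_evict(bc, LIST_CLEAN, evict_any, NULL);
}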
static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
{
        cache_write_lock(bc, b->block);
        if (list_mode != b->list_mode) {
                lru_remove(&bc->lru[b->list_mode], &b->lru);
                b->list_mode = list_mode;
                lru_insert(&bc->lru[b->list_mode], &b->lru);
        }
        cache_write_unlock(bc, b->block);
}
static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
                              b_predicate pred, void *context, struct lock_history *lh)
{
        struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
        struct lru_entry *le;
        struct dm_buffer *b;

        while (true) {
                le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
                if (!le)
                        break;

                b = le_to_buffer(le);
                b->list_mode = new_mode;
                lru_insert(&bc->lru[b->list_mode], &b->lru);
        }
}

static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
                            b_predicate pred, void *context)
{
        struct lock_history lh;

        lh_init(&lh, bc, true);
        __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
        lh_exit(&lh);
}
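cache_mark() moves a single held buffer between the LIST_CLEAN and LIST_DIRTY LRUs under the tree write lock; cache_mark_many() batch-moves every buffer the predicate accepts from one list to the other. A hedged writeback-completion sketch, reusing the assumed evict_result names:

/* Hypothetical predicate: move every buffer that is offered. */
static enum evict_result mark_all(struct dm_buffer *b, void *context)
{
        return ER_EVICT;        /* "evict" from the old list just means "move" here */
}

static void one_buffer_written(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
        cache_mark(bc, b, LIST_CLEAN);
}

static void all_buffers_written(struct dm_buffer_cache *bc)
{
        cache_mark_many(bc, LIST_DIRTY, LIST_CLEAN, mark_all, NULL);
}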
static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
                            iter_fn fn, void *context, struct lock_history *lh)
{
        struct lru *lru = &bc->lru[list_mode];

        /*
         * ... body elided by the refs:bc search: walks lru's circular list
         * from its cursor, taking the per-tree lock for each buffer via the
         * lock history and stopping early if fn() says it is done ...
         */
}

static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
                          iter_fn fn, void *context)
{
        struct lock_history lh;

        lh_init(&lh, bc, false);
        __cache_iterate(bc, list_mode, fn, context, &lh);
        lh_exit(&lh);
}
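cache_iterate() walks a whole LRU list under the read-side locks (note the false passed to lh_init(), in contrast to the write-locked evict and mark paths). A sketch that counts dirty buffers; the enum it_action names (IT_NEXT, IT_COMPLETE) are assumed from context:

/* Hypothetical callback for cache_iterate(). */
static enum it_action count_one(struct dm_buffer *b, void *context)
{
        unsigned long *n = context;

        (*n)++;
        return IT_NEXT;         /* IT_COMPLETE would stop the walk early */
}

static unsigned long count_dirty(struct dm_buffer_cache *bc)
{
        unsigned long n = 0;

        cache_iterate(bc, LIST_DIRTY, count_one, &n);
        return n;
}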
static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
        bool r;

        if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
                return false;

        cache_write_lock(bc, b->block);
        BUG_ON(atomic_read(&b->hold_count) != 1);
        r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
        if (r)
                lru_insert(&bc->lru[b->list_mode], &b->lru);
        cache_write_unlock(bc, b->block);

        return r;
}
static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
{
        bool r;

        cache_write_lock(bc, b->block);

        if (atomic_read(&b->hold_count) != 1) {
                r = false;
        } else {
                r = true;
                rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
                lru_remove(&bc->lru[b->list_mode], &b->lru);
        }

        cache_write_unlock(bc, b->block);

        return r;
}
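cache_insert() and cache_remove() both insist the caller holds exactly one reference, which makes races explicit: a failed insert means another thread beat us to the block. A sketch of the resulting insert-or-lookup pattern; alloc_buffer() and free_buffer() are hypothetical helpers:

/* Sketch: assumes alloc_buffer() returns a buffer with hold_count == 1. */
static struct dm_buffer *get_or_create(struct dm_buffer_cache *bc, sector_t block)
{
        struct dm_buffer *b = alloc_buffer(block);      /* hypothetical */

        if (!b)
                return NULL;

        b->list_mode = LIST_CLEAN;
        if (!cache_insert(bc, b)) {
                /* lost the race: the block is already in its tree */
                free_buffer(b);                         /* hypothetical */
                return cache_get(bc, block);
        }
        return b;
}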
static void __remove_range(struct dm_buffer_cache *bc,
                           struct rb_root *root,
                           sector_t begin, sector_t end,
                           b_predicate pred, b_release release)
{
        /*
         * Body largely elided by the refs:bc search. For each buffer in
         * [begin, end) that pred() accepts, the loop unlinks it from its
         * tree and from its LRU:
         *
         *      lru_remove(&bc->lru[b->list_mode], &b->lru);
         *
         * and then hands the buffer to release().
         */
}

static void cache_remove_range(struct dm_buffer_cache *bc,
                               sector_t begin, sector_t end,
                               b_predicate pred, b_release release)
{
        unsigned int i;

        BUG_ON(bc->no_sleep);
        for (i = 0; i < bc->num_locks; i++) {
                down_write(&bc->trees[i].u.lock);
                __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
                up_write(&bc->trees[i].u.lock);
        }
}
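cache_remove_range() takes every tree's rw_semaphore in turn rather than hashing, since a block range spans all of the striped trees; that is also why it BUG()s on no_sleep clients, whose trees are guarded by non-sleeping rwlocks instead. A hedged invalidation sketch; forget_pred() and free_buffer() are hypothetical:

/* Hypothetical predicate: drop every buffer in the range. */
static enum evict_result forget_pred(struct dm_buffer *b, void *context)
{
        return ER_EVICT;
}

static void invalidate_range(struct dm_buffer_cache *bc,
                             sector_t begin, sector_t end)
{
        cache_remove_range(bc, begin, end, forget_pred, free_buffer);
}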