Lines matching refs: bs (uses of the struct bio_set pointer bs in block/bio.c; each entry gives the source line number, the matching code, and the enclosing function, noting whether bs is an argument or a local variable there)

112 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)  in bs_bio_slab_size()  argument
114 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
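The three terms above define the slab object layout the rest of this listing relies on. A sketch of that layout, reconstructed from the fragments below (bio = p + front_pad at line 572, mempool_free(p - front_pad, ...) at line 237, and the back_pad sizing at line 1821):

    /*
     * One object in the bio slab, bs_bio_slab_size() bytes long:
     *
     *   |<-- front_pad -->|<-- sizeof(struct bio) -->|<-- back_pad -->|
     *   ^ p (mempool elem) ^ bio                      ^ inline bio_vecs
     *
     * front_pad holds the owner's per-bio private data, so the bio
     * itself lives at p + front_pad and goes back to the pool via
     * mempool_free(p - front_pad, ...); back_pad, when non-zero, holds
     * BIO_INLINE_VECS inline bio_vecs.
     */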
117 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) in bio_find_or_create_slab() argument
119 unsigned int size = bs_bio_slab_size(bs); in bio_find_or_create_slab()
135 static void bio_put_slab(struct bio_set *bs) in bio_put_slab() argument
138 unsigned int slab_size = bs_bio_slab_size(bs); in bio_put_slab()
146 WARN_ON_ONCE(bslab->slab != bs->bio_slab); in bio_put_slab()
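These two helpers suggest that slab caches are shared: bio_sets whose padded object size matches reuse one reference-counted kmem_cache, and bio_put_slab() drops that reference (the WARN at line 146 checks a set releases the cache it was handed). A hedged sketch of the find-or-create side; the lock name and the lookup/creation helpers are assumptions for illustration, not the file's actual code:

    static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
    {
            unsigned int size = bs_bio_slab_size(bs);
            struct bio_slab *bslab;

            mutex_lock(&bio_slab_lock);             /* assumed lock */
            bslab = bio_slab_lookup(size);          /* hypothetical helper */
            if (bslab)
                    bslab->slab_ref++;              /* share existing cache */
            else
                    bslab = bio_slab_create(size);  /* hypothetical helper */
            mutex_unlock(&bio_slab_lock);

            return bslab ? bslab->slab : NULL;
    }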
230 struct bio_set *bs = bio->bi_pool; in bio_free() local
233 WARN_ON_ONCE(!bs); in bio_free()
236 bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); in bio_free()
237 mempool_free(p - bs->front_pad, &bs->bio_pool); in bio_free()
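Put together, the free path mirrors the layout sketched above; a reconstruction from these fragments (the bio_uninit() call is an assumption about the elided lines):

    static void bio_free(struct bio *bio)
    {
            struct bio_set *bs = bio->bi_pool;
            void *p = bio;

            WARN_ON_ONCE(!bs);

            bio_uninit(bio);        /* assumed teardown step */
            /* return a separately allocated bio_vec array, if any */
            bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
            /* the mempool element starts front_pad bytes before the bio */
            mempool_free(p - bs->front_pad, &bs->bio_pool);
    }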
381 struct bio_set *bs = container_of(work, struct bio_set, rescue_work); in bio_alloc_rescue() local
385 spin_lock(&bs->rescue_lock); in bio_alloc_rescue()
386 bio = bio_list_pop(&bs->rescue_list); in bio_alloc_rescue()
387 spin_unlock(&bs->rescue_lock); in bio_alloc_rescue()
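A sketch of the rescue worker these fragments come from: it drains rescue_list one bio at a time and resubmits from the workqueue's clean context. Resubmission via submit_bio_noacct() is an assumption about the elided loop body:

    static void bio_alloc_rescue(struct work_struct *work)
    {
            struct bio_set *bs = container_of(work, struct bio_set,
                                              rescue_work);
            struct bio *bio;

            while (1) {
                    spin_lock(&bs->rescue_lock);
                    bio = bio_list_pop(&bs->rescue_list);
                    spin_unlock(&bs->rescue_lock);

                    if (!bio)
                            break;
                    submit_bio_noacct(bio); /* assumed resubmission call */
            }
    }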
396 static void punt_bios_to_rescuer(struct bio_set *bs) in punt_bios_to_rescuer() argument
401 if (WARN_ON_ONCE(!bs->rescue_workqueue)) in punt_bios_to_rescuer()
418 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
423 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); in punt_bios_to_rescuer()
426 spin_lock(&bs->rescue_lock); in punt_bios_to_rescuer()
427 bio_list_merge(&bs->rescue_list, &punt); in punt_bios_to_rescuer()
428 spin_unlock(&bs->rescue_lock); in punt_bios_to_rescuer()
430 queue_work(bs->rescue_workqueue, &bs->rescue_work); in punt_bios_to_rescuer()
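The punt path exists to break an allocation deadlock: a stacking driver can block in mempool_alloc() while bios that would eventually refill the same pool sit parked on current->bio_list. These fragments sort the parked bios, keep the ones from other pools, and hand this bio_set's bios to the rescuer. A sketch, assuming current->bio_list is the usual two-element on-stack list (which would explain the duplicated bio_list_add at lines 418 and 423):

    static void punt_bios_to_rescuer(struct bio_set *bs)
    {
            struct bio_list punt, nopunt;
            struct bio *bio;

            if (WARN_ON_ONCE(!bs->rescue_workqueue))
                    return;

            bio_list_init(&punt);
            bio_list_init(&nopunt);

            /* bios allocated from this bio_set get punted; others stay */
            while ((bio = bio_list_pop(&current->bio_list[0])))
                    bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
            current->bio_list[0] = nopunt;

            /* ... the second on-stack list is filtered the same way ... */

            spin_lock(&bs->rescue_lock);
            bio_list_merge(&bs->rescue_list, &punt);
            spin_unlock(&bs->rescue_lock);

            queue_work(bs->rescue_workqueue, &bs->rescue_work);
    }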
451 struct bio_set *bs) in bio_alloc_percpu_cache() argument
456 cache = per_cpu_ptr(bs->cache, get_cpu()); in bio_alloc_percpu_cache()
474 bio->bi_pool = bs; in bio_alloc_percpu_cache()
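The per-CPU cache gives a lock-free fast path for small allocations; get_cpu() disables preemption while the cache is touched, and setting bi_pool lets bio_put() recycle the bio back into the same cache. A sketch of the fast path between these fragments; the free_list and nr field names of struct bio_alloc_cache are assumptions:

    /* inside bio_alloc_percpu_cache(), roughly: */
    struct bio_alloc_cache *cache;
    struct bio *bio;

    cache = per_cpu_ptr(bs->cache, get_cpu());
    bio = cache->free_list;                 /* assumed field */
    if (bio) {
            cache->free_list = bio->bi_next;
            cache->nr--;                    /* assumed counter */
    }
    put_cpu();                              /* re-enable preemption */

    if (!bio)
            return NULL;    /* caller falls back to the mempool path */

    /* (re)initialize the cached bio, elided, then tie it to the cache */
    bio->bi_pool = bs;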
514 struct bio_set *bs) in bio_alloc_bioset() argument
521 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) in bio_alloc_bioset()
524 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { in bio_alloc_bioset()
527 gfp_mask, bs); in bio_alloc_bioset()
558 bs->rescue_workqueue) in bio_alloc_bioset()
561 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
563 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
565 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
569 if (!mempool_is_saturated(&bs->bio_pool)) in bio_alloc_bioset()
572 bio = p + bs->front_pad; in bio_alloc_bioset()
576 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
578 punt_bios_to_rescuer(bs); in bio_alloc_bioset()
580 bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); in bio_alloc_bioset()
592 bio->bi_pool = bs; in bio_alloc_bioset()
596 mempool_free(p, &bs->bio_pool); in bio_alloc_bioset()
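From the caller's side, these fragments add up to the usual allocation pattern: requests with nr_vecs <= BIO_INLINE_VECS may be served from the per-cpu cache (line 524), everything else comes from the mempool, with a punt-and-retry (lines 561-565) when the submitter's parked bios could be holding the pool hostage. A minimal usage sketch; my_bioset and my_write() are illustrative assumptions:

    static int my_write(struct block_device *bdev)
    {
            struct bio *bio;

            /* 4 vecs fits BIO_INLINE_VECS, so this can hit the cache */
            bio = bio_alloc_bioset(bdev, 4, REQ_OP_WRITE, GFP_NOIO,
                                   &my_bioset);
            if (!bio)
                    return -ENOMEM;

            /* set bi_iter.bi_sector, add pages, set bi_end_io: elided */
            submit_bio(bio);
            return 0;
    }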
750 struct bio_set *bs; in bio_cpu_dead() local
752 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); in bio_cpu_dead()
753 if (bs->cache) { in bio_cpu_dead()
754 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); in bio_cpu_dead()
761 static void bio_alloc_cache_destroy(struct bio_set *bs) in bio_alloc_cache_destroy() argument
765 if (!bs->cache) in bio_alloc_cache_destroy()
768 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bio_alloc_cache_destroy()
772 cache = per_cpu_ptr(bs->cache, cpu); in bio_alloc_cache_destroy()
775 free_percpu(bs->cache); in bio_alloc_cache_destroy()
776 bs->cache = NULL; in bio_alloc_cache_destroy()
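The cache interacts with CPU hotplug: a dying CPU's private cache would otherwise leak its bios, so bioset_init() registers the set on the CPUHP_BIO_DEAD state (line 1850) and bio_alloc_cache_destroy() unregisters it before freeing the per-CPU data. A sketch of the hotplug callback; the cache-draining helper name is an assumption:

    static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
    {
            struct bio_set *bs = hlist_entry_safe(node, struct bio_set,
                                                  cpuhp_dead);

            if (bs->cache) {
                    struct bio_alloc_cache *cache =
                            per_cpu_ptr(bs->cache, cpu);

                    /* free every bio the dead CPU had cached */
                    bio_alloc_cache_prune(cache, -1U);  /* assumed helper */
            }
            return 0;
    }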
869 gfp_t gfp, struct bio_set *bs) in bio_alloc_clone() argument
873 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); in bio_alloc_clone()
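Note the clone is allocated with zero nr_vecs: it shares the source bio's bio_vec array rather than copying data pages. A typical stacking-driver use, with clone_bioset, new_bdev, and the completion handler as illustrative assumptions:

    /* inside a stacking driver's submit path, roughly: */
    struct bio *clone;

    clone = bio_alloc_clone(new_bdev, bio, GFP_NOIO, &clone_bioset);
    if (!clone)
            return;

    clone->bi_end_io = my_clone_endio;      /* hypothetical completion */
    clone->bi_private = bio;                /* remember the original */
    submit_bio_noacct(clone);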
1694 gfp_t gfp, struct bio_set *bs) in bio_split() argument
1711 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); in bio_split()
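bio_split() clones the bio and trims the pair so the clone covers the first sectors of the original; the usual caller pattern chains the halves so the parent completes only once both finish. A sketch, with sectors and split_bioset as illustrative assumptions:

    struct bio *split;

    if (sectors < bio_sectors(bio)) {
            split = bio_split(bio, sectors, GFP_NOIO, &split_bioset);
            bio_chain(split, bio);          /* bio completes after split */
            submit_bio_noacct(bio);         /* resubmit the remainder */
            bio = split;                    /* carry on with the front */
    }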
1777 void bioset_exit(struct bio_set *bs) in bioset_exit() argument
1779 bio_alloc_cache_destroy(bs); in bioset_exit()
1780 if (bs->rescue_workqueue) in bioset_exit()
1781 destroy_workqueue(bs->rescue_workqueue); in bioset_exit()
1782 bs->rescue_workqueue = NULL; in bioset_exit()
1784 mempool_exit(&bs->bio_pool); in bioset_exit()
1785 mempool_exit(&bs->bvec_pool); in bioset_exit()
1787 if (bs->bio_slab) in bioset_exit()
1788 bio_put_slab(bs); in bioset_exit()
1789 bs->bio_slab = NULL; in bioset_exit()
1814 int bioset_init(struct bio_set *bs, in bioset_init() argument
1819 bs->front_pad = front_pad; in bioset_init()
1821 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); in bioset_init()
1823 bs->back_pad = 0; in bioset_init()
1825 spin_lock_init(&bs->rescue_lock); in bioset_init()
1826 bio_list_init(&bs->rescue_list); in bioset_init()
1827 INIT_WORK(&bs->rescue_work, bio_alloc_rescue); in bioset_init()
1829 bs->bio_slab = bio_find_or_create_slab(bs); in bioset_init()
1830 if (!bs->bio_slab) in bioset_init()
1833 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) in bioset_init()
1837 biovec_init_pool(&bs->bvec_pool, pool_size)) in bioset_init()
1841 bs->rescue_workqueue = alloc_workqueue("bioset", in bioset_init()
1843 if (!bs->rescue_workqueue) in bioset_init()
1847 bs->cache = alloc_percpu(struct bio_alloc_cache); in bioset_init()
1848 if (!bs->cache) in bioset_init()
1850 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); in bioset_init()
1855 bioset_exit(bs); in bioset_init()
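Taken together, bioset_init() and bioset_exit() bracket the lifetime of a driver-owned bio_set: front_pad reserves per-bio private space, BIOSET_NEED_BVECS sets up the bvec pool (line 1837), BIOSET_NEED_RESCUER the workqueue (line 1841), and BIOSET_PERCPU_CACHE the per-cpu cache (line 1847). A minimal lifecycle sketch; my_bioset, my_driver_ctx, and the helper below are illustrative assumptions:

    static struct bio_set my_bioset;

    static int my_driver_init(void)
    {
            /* 64-element mempool reserve, per-bio context in front_pad */
            return bioset_init(&my_bioset, 64,
                               sizeof(struct my_driver_ctx),
                               BIOSET_NEED_BVECS | BIOSET_NEED_RESCUER |
                               BIOSET_PERCPU_CACHE);
    }

    static void my_driver_exit(void)
    {
            /* unwinds in the order shown above: cache, workqueue,
             * mempools, then the slab reference */
            bioset_exit(&my_bioset);
    }

    /* front_pad bytes sit directly before the bio (see line 572) */
    static inline struct my_driver_ctx *bio_to_ctx(struct bio *bio)
    {
            return (void *)bio - sizeof(struct my_driver_ctx);
    }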