xref: /linux/block/bio.c (revision 35ebee7e720944a66befb5899c72ce1e01dfa44e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4  */
5 #include <linux/mm.h>
6 #include <linux/swap.h>
7 #include <linux/bio-integrity.h>
8 #include <linux/blkdev.h>
9 #include <linux/uio.h>
10 #include <linux/iocontext.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/mempool.h>
16 #include <linux/workqueue.h>
17 #include <linux/cgroup.h>
18 #include <linux/highmem.h>
19 #include <linux/blk-crypto.h>
20 #include <linux/xarray.h>
21 
22 #include <trace/events/block.h>
23 #include "blk.h"
24 #include "blk-rq-qos.h"
25 #include "blk-cgroup.h"
26 
27 #define ALLOC_CACHE_THRESHOLD	16
28 #define ALLOC_CACHE_MAX		256
29 
30 struct bio_alloc_cache {
31 	struct bio		*free_list;
32 	struct bio		*free_list_irq;
33 	unsigned int		nr;
34 	unsigned int		nr_irq;
35 };
36 
37 static struct biovec_slab {
38 	int nr_vecs;
39 	char *name;
40 	struct kmem_cache *slab;
41 } bvec_slabs[] __read_mostly = {
42 	{ .nr_vecs = 16, .name = "biovec-16" },
43 	{ .nr_vecs = 64, .name = "biovec-64" },
44 	{ .nr_vecs = 128, .name = "biovec-128" },
45 	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
46 };
47 
48 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
49 {
50 	switch (nr_vecs) {
51 	/* smaller bios use inline vecs */
52 	case 5 ... 16:
53 		return &bvec_slabs[0];
54 	case 17 ... 64:
55 		return &bvec_slabs[1];
56 	case 65 ... 128:
57 		return &bvec_slabs[2];
58 	case 129 ... BIO_MAX_VECS:
59 		return &bvec_slabs[3];
60 	default:
61 		BUG();
62 		return NULL;
63 	}
64 }
65 
66 /*
67  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
68  * IO code that does not need private memory pools.
69  */
70 struct bio_set fs_bio_set;
71 EXPORT_SYMBOL(fs_bio_set);
72 
73 /*
74  * Our slab pool management
75  */
76 struct bio_slab {
77 	struct kmem_cache *slab;
78 	unsigned int slab_ref;
79 	unsigned int slab_size;
80 	char name[12];
81 };
82 static DEFINE_MUTEX(bio_slab_lock);
83 static DEFINE_XARRAY(bio_slabs);
84 
85 static struct bio_slab *create_bio_slab(unsigned int size)
86 {
87 	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
88 
89 	if (!bslab)
90 		return NULL;
91 
92 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
93 	bslab->slab = kmem_cache_create(bslab->name, size,
94 			ARCH_KMALLOC_MINALIGN,
95 			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
96 	if (!bslab->slab)
97 		goto fail_alloc_slab;
98 
99 	bslab->slab_ref = 1;
100 	bslab->slab_size = size;
101 
102 	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
103 		return bslab;
104 
105 	kmem_cache_destroy(bslab->slab);
106 
107 fail_alloc_slab:
108 	kfree(bslab);
109 	return NULL;
110 }
111 
112 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
113 {
114 	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
115 }
116 
117 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
118 {
119 	unsigned int size = bs_bio_slab_size(bs);
120 	struct bio_slab *bslab;
121 
122 	mutex_lock(&bio_slab_lock);
123 	bslab = xa_load(&bio_slabs, size);
124 	if (bslab)
125 		bslab->slab_ref++;
126 	else
127 		bslab = create_bio_slab(size);
128 	mutex_unlock(&bio_slab_lock);
129 
130 	if (bslab)
131 		return bslab->slab;
132 	return NULL;
133 }
134 
135 static void bio_put_slab(struct bio_set *bs)
136 {
137 	struct bio_slab *bslab = NULL;
138 	unsigned int slab_size = bs_bio_slab_size(bs);
139 
140 	mutex_lock(&bio_slab_lock);
141 
142 	bslab = xa_load(&bio_slabs, slab_size);
143 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
144 		goto out;
145 
146 	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
147 
148 	WARN_ON(!bslab->slab_ref);
149 
150 	if (--bslab->slab_ref)
151 		goto out;
152 
153 	xa_erase(&bio_slabs, slab_size);
154 
155 	kmem_cache_destroy(bslab->slab);
156 	kfree(bslab);
157 
158 out:
159 	mutex_unlock(&bio_slab_lock);
160 }
161 
162 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
163 {
164 	BUG_ON(nr_vecs > BIO_MAX_VECS);
165 
166 	if (nr_vecs == BIO_MAX_VECS)
167 		mempool_free(bv, pool);
168 	else if (nr_vecs > BIO_INLINE_VECS)
169 		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
170 }
171 
172 /*
173  * Make the first allocation restricted and don't dump info on allocation
174  * failures, since we'll fall back to the mempool in case of failure.
175  */
176 static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
177 {
178 	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
179 		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
180 }
181 
182 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
183 		gfp_t gfp_mask)
184 {
185 	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
186 
187 	if (WARN_ON_ONCE(!bvs))
188 		return NULL;
189 
190 	/*
191 	 * Upgrade the nr_vecs request to take full advantage of the allocation.
192 	 * We also rely on this in the bvec_free path.
193 	 */
194 	*nr_vecs = bvs->nr_vecs;
195 
196 	/*
197 	 * Try a slab allocation first for all smaller allocations.  If that
198 	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
199 	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
200 	 */
201 	if (*nr_vecs < BIO_MAX_VECS) {
202 		struct bio_vec *bvl;
203 
204 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
205 		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
206 			return bvl;
207 		*nr_vecs = BIO_MAX_VECS;
208 	}
209 
210 	return mempool_alloc(pool, gfp_mask);
211 }
212 
213 void bio_uninit(struct bio *bio)
214 {
215 #ifdef CONFIG_BLK_CGROUP
216 	if (bio->bi_blkg) {
217 		blkg_put(bio->bi_blkg);
218 		bio->bi_blkg = NULL;
219 	}
220 #endif
221 	if (bio_integrity(bio))
222 		bio_integrity_free(bio);
223 
224 	bio_crypt_free_ctx(bio);
225 }
226 EXPORT_SYMBOL(bio_uninit);
227 
228 static void bio_free(struct bio *bio)
229 {
230 	struct bio_set *bs = bio->bi_pool;
231 	void *p = bio;
232 
233 	WARN_ON_ONCE(!bs);
234 
235 	bio_uninit(bio);
236 	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
237 	mempool_free(p - bs->front_pad, &bs->bio_pool);
238 }
239 
240 /*
241  * Users of this function have their own bio allocation. Subsequently,
242  * they must remember to pair any call to bio_init() with bio_uninit()
243  * when IO has completed, or when the bio is released.
244  */
245 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
246 	      unsigned short max_vecs, blk_opf_t opf)
247 {
248 	bio->bi_next = NULL;
249 	bio->bi_bdev = bdev;
250 	bio->bi_opf = opf;
251 	bio->bi_flags = 0;
252 	bio->bi_ioprio = 0;
253 	bio->bi_write_hint = 0;
254 	bio->bi_write_stream = 0;
255 	bio->bi_status = 0;
256 	bio->bi_bvec_gap_bit = 0;
257 	bio->bi_iter.bi_sector = 0;
258 	bio->bi_iter.bi_size = 0;
259 	bio->bi_iter.bi_idx = 0;
260 	bio->bi_iter.bi_bvec_done = 0;
261 	bio->bi_end_io = NULL;
262 	bio->bi_private = NULL;
263 #ifdef CONFIG_BLK_CGROUP
264 	bio->bi_blkg = NULL;
265 	bio->issue_time_ns = 0;
266 	if (bdev)
267 		bio_associate_blkg(bio);
268 #ifdef CONFIG_BLK_CGROUP_IOCOST
269 	bio->bi_iocost_cost = 0;
270 #endif
271 #endif
272 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
273 	bio->bi_crypt_context = NULL;
274 #endif
275 #ifdef CONFIG_BLK_DEV_INTEGRITY
276 	bio->bi_integrity = NULL;
277 #endif
278 	bio->bi_vcnt = 0;
279 
280 	atomic_set(&bio->__bi_remaining, 1);
281 	atomic_set(&bio->__bi_cnt, 1);
282 	bio->bi_cookie = BLK_QC_T_NONE;
283 
284 	bio->bi_max_vecs = max_vecs;
285 	bio->bi_io_vec = table;
286 	bio->bi_pool = NULL;
287 }
288 EXPORT_SYMBOL(bio_init);
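/*
 * Editorial usage sketch (not part of the original source): a minimal
 * illustration of the bio_init()/bio_uninit() pairing required by the
 * comment above, using an on-stack bio with a single inline bvec.  The
 * helper name and its parameters are hypothetical; bdev_rw_virt() further
 * down in this file is the real in-tree variant of this pattern.
 */
#if 0
static int example_read_page_sync(struct block_device *bdev, sector_t sector,
				  struct page *page, unsigned int len)
{
	struct bio_vec bv;
	struct bio bio;
	int ret;

	bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = sector;
	__bio_add_page(&bio, page, len, 0);

	ret = submit_bio_wait(&bio);

	/* every bio_init() must be paired with bio_uninit() */
	bio_uninit(&bio);
	return ret;
}
#endif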
289 
290 /**
291  * bio_reset - reinitialize a bio
292  * @bio:	bio to reset
293  * @bdev:	block device to use the bio for
294  * @opf:	operation and flags for bio
295  *
296  * Description:
297  *   After calling bio_reset(), @bio will be in the same state as a freshly
298  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
299  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
300  *   comment in struct bio.
301  */
302 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
303 {
304 	bio_uninit(bio);
305 	memset(bio, 0, BIO_RESET_BYTES);
306 	atomic_set(&bio->__bi_remaining, 1);
307 	bio->bi_bdev = bdev;
308 	if (bio->bi_bdev)
309 		bio_associate_blkg(bio);
310 	bio->bi_opf = opf;
311 }
312 EXPORT_SYMBOL(bio_reset);
313 
314 static struct bio *__bio_chain_endio(struct bio *bio)
315 {
316 	struct bio *parent = bio->bi_private;
317 
318 	if (bio->bi_status && !parent->bi_status)
319 		parent->bi_status = bio->bi_status;
320 	bio_put(bio);
321 	return parent;
322 }
323 
324 /*
325  * This function should only be used as a flag and must never be called.
326  * If execution reaches here, it indicates a serious programming error.
327  */
328 static void bio_chain_endio(struct bio *bio)
329 {
330 	BUG();
331 }
332 
333 /**
334  * bio_chain - chain bio completions
335  * @bio: the target bio
336  * @parent: the parent bio of @bio
337  *
338  * The caller won't have a bi_end_io called when @bio completes - instead,
339  * @parent's bi_end_io won't be called until both @parent and @bio have
340  * completed; the chained bio will also be freed when it completes.
341  *
342  * The caller must not set bi_private or bi_end_io in @bio.
343  */
344 void bio_chain(struct bio *bio, struct bio *parent)
345 {
346 	BUG_ON(bio->bi_private || bio->bi_end_io);
347 
348 	bio->bi_private = parent;
349 	bio->bi_end_io	= bio_chain_endio;
350 	bio_inc_remaining(parent);
351 }
352 EXPORT_SYMBOL(bio_chain);
353 
354 /**
355  * bio_chain_and_submit - submit a bio after chaining it to another one
356  * @prev: bio to chain and submit
357  * @new: bio to chain to
358  *
359  * If @prev is non-NULL, chain it to @new and submit it.
360  *
361  * Return: @new.
362  */
363 struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
364 {
365 	if (prev) {
366 		bio_chain(prev, new);
367 		submit_bio(prev);
368 	}
369 	return new;
370 }
371 
372 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
373 		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
374 {
375 	return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
376 }
377 EXPORT_SYMBOL_GPL(blk_next_bio);
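/*
 * Editorial usage sketch (not part of the original source): how
 * blk_next_bio() is typically used to build a chain of bios covering a
 * larger range, submitting each bio as soon as its successor exists and
 * then waiting on the final one.  The helper and its one-page-per-bio
 * payload are hypothetical (assumes nr_pages > 0); see blk-lib.c for the
 * real users of this pattern.
 */
#if 0
static int example_write_pages_chained(struct block_device *bdev,
				       sector_t sector, struct page **pages,
				       unsigned int nr_pages)
{
	struct bio *bio = NULL;
	unsigned int i;
	int ret;

	for (i = 0; i < nr_pages; i++) {
		bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector + (i << (PAGE_SHIFT - 9));
		__bio_add_page(bio, pages[i], PAGE_SIZE, 0);
	}

	/* waiting on the last bio waits for every chained predecessor */
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
#endif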
378 
379 static void bio_alloc_rescue(struct work_struct *work)
380 {
381 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
382 	struct bio *bio;
383 
384 	while (1) {
385 		spin_lock(&bs->rescue_lock);
386 		bio = bio_list_pop(&bs->rescue_list);
387 		spin_unlock(&bs->rescue_lock);
388 
389 		if (!bio)
390 			break;
391 
392 		submit_bio_noacct(bio);
393 	}
394 }
395 
396 static void punt_bios_to_rescuer(struct bio_set *bs)
397 {
398 	struct bio_list punt, nopunt;
399 	struct bio *bio;
400 
401 	if (WARN_ON_ONCE(!bs->rescue_workqueue))
402 		return;
403 	/*
404 	 * In order to guarantee forward progress we must punt only bios that
405 	 * were allocated from this bio_set; otherwise, if there was a bio on
406 	 * there for a stacking driver higher up in the stack, processing it
407 	 * could require allocating bios from this bio_set, and doing that from
408 	 * our own rescuer would be bad.
409 	 *
410 	 * Since bio lists are singly linked, pop them all instead of trying to
411 	 * remove from the middle of the list:
412 	 */
413 
414 	bio_list_init(&punt);
415 	bio_list_init(&nopunt);
416 
417 	while ((bio = bio_list_pop(&current->bio_list[0])))
418 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
419 	current->bio_list[0] = nopunt;
420 
421 	bio_list_init(&nopunt);
422 	while ((bio = bio_list_pop(&current->bio_list[1])))
423 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
424 	current->bio_list[1] = nopunt;
425 
426 	spin_lock(&bs->rescue_lock);
427 	bio_list_merge(&bs->rescue_list, &punt);
428 	spin_unlock(&bs->rescue_lock);
429 
430 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
431 }
432 
433 static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
434 {
435 	unsigned long flags;
436 
437 	/* cache->free_list must be empty */
438 	if (WARN_ON_ONCE(cache->free_list))
439 		return;
440 
441 	local_irq_save(flags);
442 	cache->free_list = cache->free_list_irq;
443 	cache->free_list_irq = NULL;
444 	cache->nr += cache->nr_irq;
445 	cache->nr_irq = 0;
446 	local_irq_restore(flags);
447 }
448 
449 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
450 		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
451 		struct bio_set *bs)
452 {
453 	struct bio_alloc_cache *cache;
454 	struct bio *bio;
455 
456 	cache = per_cpu_ptr(bs->cache, get_cpu());
457 	if (!cache->free_list) {
458 		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
459 			bio_alloc_irq_cache_splice(cache);
460 		if (!cache->free_list) {
461 			put_cpu();
462 			return NULL;
463 		}
464 	}
465 	bio = cache->free_list;
466 	cache->free_list = bio->bi_next;
467 	cache->nr--;
468 	put_cpu();
469 
470 	if (nr_vecs)
471 		bio_init_inline(bio, bdev, nr_vecs, opf);
472 	else
473 		bio_init(bio, bdev, NULL, nr_vecs, opf);
474 	bio->bi_pool = bs;
475 	return bio;
476 }
477 
478 /**
479  * bio_alloc_bioset - allocate a bio for I/O
480  * @bdev:	block device to allocate the bio for (can be %NULL)
481  * @nr_vecs:	number of bvecs to pre-allocate
482  * @opf:	operation and flags for bio
483  * @gfp_mask:   the GFP_* mask given to the slab allocator
484  * @bs:		the bio_set to allocate from.
485  *
486  * Allocate a bio from the mempools in @bs.
487  *
488  * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
489  * allocate a bio.  This is due to the mempool guarantees.  To make this work,
490  * callers must never allocate more than 1 bio at a time from the general pool.
491  * Callers that need to allocate more than 1 bio must always submit the
492  * previously allocated bio for IO before attempting to allocate a new one.
493  * Failure to do so can cause deadlocks under memory pressure.
494  *
495  * Note that when running under submit_bio_noacct() (i.e. any block driver),
496  * bios are not submitted until after you return - see the code in
497  * submit_bio_noacct() that converts recursion into iteration, to prevent
498  * stack overflows.
499  *
500  * This would normally mean allocating multiple bios under submit_bio_noacct()
501  * would be susceptible to deadlocks, but we have
502  * deadlock avoidance code that resubmits any blocked bios from a rescuer
503  * thread.
504  *
505  * However, we do not guarantee forward progress for allocations from other
506  * mempools. Doing multiple allocations from the same mempool under
507  * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
508  * for per bio allocations.
509  *
510  * Returns: Pointer to new bio on success, NULL on failure.
511  */
512 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
513 			     blk_opf_t opf, gfp_t gfp_mask,
514 			     struct bio_set *bs)
515 {
516 	gfp_t saved_gfp = gfp_mask;
517 	struct bio *bio;
518 	void *p;
519 
520 	/* should not use nobvec bioset for nr_vecs > 0 */
521 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
522 		return NULL;
523 
524 	if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
525 		opf |= REQ_ALLOC_CACHE;
526 		bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
527 					     gfp_mask, bs);
528 		if (bio)
529 			return bio;
530 		/*
531 		 * No cached bio available, bio returned below marked with
532 		 * REQ_ALLOC_CACHE to participate in per-cpu alloc cache.
533 		 */
534 	} else
535 		opf &= ~REQ_ALLOC_CACHE;
536 
537 	/*
538 	 * submit_bio_noacct() converts recursion to iteration; this means if
539 	 * we're running beneath it, any bios we allocate and submit will not be
540 	 * submitted (and thus freed) until after we return.
541 	 *
542 	 * This exposes us to a potential deadlock if we allocate multiple bios
543 	 * from the same bio_set() while running underneath submit_bio_noacct().
544 	 * If we were to allocate multiple bios (say a stacking block driver
545 	 * that was splitting bios), we would deadlock if we exhausted the
546 	 * mempool's reserve.
547 	 *
548 	 * We solve this, and guarantee forward progress, with a rescuer
549 	 * workqueue per bio_set. If we go to allocate and there are bios on
550 	 * current->bio_list, we first try the allocation without
551 	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
552 	 * blocking to the rescuer workqueue before we retry with the original
553 	 * gfp_flags.
554 	 */
555 	if (current->bio_list &&
556 	    (!bio_list_empty(&current->bio_list[0]) ||
557 	     !bio_list_empty(&current->bio_list[1])) &&
558 	    bs->rescue_workqueue)
559 		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
560 
561 	p = mempool_alloc(&bs->bio_pool, gfp_mask);
562 	if (!p && gfp_mask != saved_gfp) {
563 		punt_bios_to_rescuer(bs);
564 		gfp_mask = saved_gfp;
565 		p = mempool_alloc(&bs->bio_pool, gfp_mask);
566 	}
567 	if (unlikely(!p))
568 		return NULL;
569 	if (!mempool_is_saturated(&bs->bio_pool))
570 		opf &= ~REQ_ALLOC_CACHE;
571 
572 	bio = p + bs->front_pad;
573 	if (nr_vecs > BIO_INLINE_VECS) {
574 		struct bio_vec *bvl = NULL;
575 
576 		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
577 		if (!bvl && gfp_mask != saved_gfp) {
578 			punt_bios_to_rescuer(bs);
579 			gfp_mask = saved_gfp;
580 			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
581 		}
582 		if (unlikely(!bvl))
583 			goto err_free;
584 
585 		bio_init(bio, bdev, bvl, nr_vecs, opf);
586 	} else if (nr_vecs) {
587 		bio_init_inline(bio, bdev, BIO_INLINE_VECS, opf);
588 	} else {
589 		bio_init(bio, bdev, NULL, 0, opf);
590 	}
591 
592 	bio->bi_pool = bs;
593 	return bio;
594 
595 err_free:
596 	mempool_free(p, &bs->bio_pool);
597 	return NULL;
598 }
599 EXPORT_SYMBOL(bio_alloc_bioset);
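/*
 * Editorial usage sketch (not part of the original source): the allocation
 * rule spelled out in the comment above.  A caller that needs many bios
 * from one bio_set must submit each bio before allocating the next, so the
 * mempool guarantee can make forward progress.  The helper, its PAGE_SIZE
 * chunking and the caller-supplied end_io handler are hypothetical.
 */
#if 0
static void example_issue_in_chunks(struct block_device *bdev, sector_t sector,
				    struct page **pages, unsigned int nr_chunks,
				    struct bio_set *bs, bio_end_io_t *end_io)
{
	unsigned int i;

	for (i = 0; i < nr_chunks; i++) {
		struct bio *bio;

		/* with __GFP_DIRECT_RECLAIM set this cannot fail ... */
		bio = bio_alloc_bioset(bdev, 1, REQ_OP_WRITE, GFP_NOIO, bs);
		bio->bi_iter.bi_sector = sector + (i << (PAGE_SHIFT - 9));
		bio->bi_end_io = end_io;
		__bio_add_page(bio, pages[i], PAGE_SIZE, 0);

		/*
		 * ... but only because each bio is submitted before the next
		 * one is allocated from the same mempool-backed bio_set.
		 */
		submit_bio(bio);
	}
}
#endif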
600 
601 /**
602  * bio_kmalloc - kmalloc a bio
603  * @nr_vecs:	number of bio_vecs to allocate
604  * @gfp_mask:   the GFP_* mask given to the slab allocator
605  *
606  * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
607  * using bio_init() before use.  To free a bio returned from this function use
608  * kfree() after calling bio_uninit().  A bio returned from this function can
609  * be reused by calling bio_uninit() before calling bio_init() again.
610  *
611  * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this
612  * function are not backed by a mempool and can fail.  Do not use this function
613  * for allocations in the file system I/O path.
614  *
615  * Returns: Pointer to new bio on success, NULL on failure.
616  */
617 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
618 {
619 	struct bio *bio;
620 
621 	if (nr_vecs > BIO_MAX_INLINE_VECS)
622 		return NULL;
623 	return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec),
624 			gfp_mask);
625 }
626 EXPORT_SYMBOL(bio_kmalloc);
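/*
 * Editorial usage sketch (not part of the original source): the
 * bio_kmalloc() lifecycle described above - kmalloc the bio, bio_init() it
 * against the inline vecs that follow the struct, and pair bio_uninit()
 * with kfree() when done.  The helper names are hypothetical, and as noted
 * above this must not be used in the filesystem I/O path.
 */
#if 0
static struct bio *example_bio_kmalloc(struct block_device *bdev,
				       unsigned short nr_vecs)
{
	struct bio *bio = bio_kmalloc(nr_vecs, GFP_KERNEL);

	if (!bio)
		return NULL;
	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
	return bio;
}

static void example_bio_kfree(struct bio *bio)
{
	bio_uninit(bio);
	kfree(bio);
}
#endif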
627 
628 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
629 {
630 	struct bio_vec bv;
631 	struct bvec_iter iter;
632 
633 	__bio_for_each_segment(bv, bio, iter, start)
634 		memzero_bvec(&bv);
635 }
636 EXPORT_SYMBOL(zero_fill_bio_iter);
637 
638 /**
639  * bio_truncate - truncate the bio to the smaller size of @new_size
640  * @bio:	the bio to be truncated
641  * @new_size:	new size for truncating the bio
642  *
643  * Description:
644  *   Truncate the bio to the new size of @new_size. If bio_op(bio) is
645  *   REQ_OP_READ, zero the truncated part. This function should only
646  *   be used for handling corner cases, such as bio eod.
647  */
648 static void bio_truncate(struct bio *bio, unsigned new_size)
649 {
650 	struct bio_vec bv;
651 	struct bvec_iter iter;
652 	unsigned int done = 0;
653 	bool truncated = false;
654 
655 	if (new_size >= bio->bi_iter.bi_size)
656 		return;
657 
658 	if (bio_op(bio) != REQ_OP_READ)
659 		goto exit;
660 
661 	bio_for_each_segment(bv, bio, iter) {
662 		if (done + bv.bv_len > new_size) {
663 			size_t offset;
664 
665 			if (!truncated)
666 				offset = new_size - done;
667 			else
668 				offset = 0;
669 			memzero_page(bv.bv_page, bv.bv_offset + offset,
670 				  bv.bv_len - offset);
671 			truncated = true;
672 		}
673 		done += bv.bv_len;
674 	}
675 
676  exit:
677 	/*
678 	 * Don't touch bvec table here and make it really immutable, since
679 	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
680 	 * in its .bi_end_io() callback.
681 	 *
682 	 * It is enough to truncate bio by updating .bi_size since we can make
683 	 * correct bvec with the updated .bi_size for drivers.
684 	 */
685 	bio->bi_iter.bi_size = new_size;
686 }
687 
688 /**
689  * guard_bio_eod - truncate a BIO to fit the block device
690  * @bio:	bio to truncate
691  *
692  * This allows us to do IO even on the odd last sectors of a device, even if the
693  * block size is some multiple of the physical sector size.
694  *
695  * We'll just truncate the bio to the size of the device, and clear the end of
696  * the buffer head manually.  Truly out-of-range accesses will turn into actual
697  * I/O errors, this only handles the "we need to be able to do I/O at the final
698  * sector" case.
699  */
700 void guard_bio_eod(struct bio *bio)
701 {
702 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
703 
704 	if (!maxsector)
705 		return;
706 
707 	/*
708 	 * If the *whole* IO is past the end of the device,
709 	 * let it through, and the IO layer will turn it into
710 	 * an EIO.
711 	 */
712 	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
713 		return;
714 
715 	maxsector -= bio->bi_iter.bi_sector;
716 	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
717 		return;
718 
719 	bio_truncate(bio, maxsector << 9);
720 }
721 
722 static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
723 				   unsigned int nr)
724 {
725 	unsigned int i = 0;
726 	struct bio *bio;
727 
728 	while ((bio = cache->free_list) != NULL) {
729 		cache->free_list = bio->bi_next;
730 		cache->nr--;
731 		bio_free(bio);
732 		if (++i == nr)
733 			break;
734 	}
735 	return i;
736 }
737 
738 static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
739 				  unsigned int nr)
740 {
741 	nr -= __bio_alloc_cache_prune(cache, nr);
742 	if (!READ_ONCE(cache->free_list)) {
743 		bio_alloc_irq_cache_splice(cache);
744 		__bio_alloc_cache_prune(cache, nr);
745 	}
746 }
747 
748 static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
749 {
750 	struct bio_set *bs;
751 
752 	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
753 	if (bs->cache) {
754 		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
755 
756 		bio_alloc_cache_prune(cache, -1U);
757 	}
758 	return 0;
759 }
760 
761 static void bio_alloc_cache_destroy(struct bio_set *bs)
762 {
763 	int cpu;
764 
765 	if (!bs->cache)
766 		return;
767 
768 	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
769 	for_each_possible_cpu(cpu) {
770 		struct bio_alloc_cache *cache;
771 
772 		cache = per_cpu_ptr(bs->cache, cpu);
773 		bio_alloc_cache_prune(cache, -1U);
774 	}
775 	free_percpu(bs->cache);
776 	bs->cache = NULL;
777 }
778 
779 static inline void bio_put_percpu_cache(struct bio *bio)
780 {
781 	struct bio_alloc_cache *cache;
782 
783 	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
784 	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX)
785 		goto out_free;
786 
787 	if (in_task()) {
788 		bio_uninit(bio);
789 		bio->bi_next = cache->free_list;
790 		/* Not necessary but helps not to iopoll already freed bios */
791 		bio->bi_bdev = NULL;
792 		cache->free_list = bio;
793 		cache->nr++;
794 	} else if (in_hardirq()) {
795 		lockdep_assert_irqs_disabled();
796 
797 		bio_uninit(bio);
798 		bio->bi_next = cache->free_list_irq;
799 		cache->free_list_irq = bio;
800 		cache->nr_irq++;
801 	} else {
802 		goto out_free;
803 	}
804 	put_cpu();
805 	return;
806 out_free:
807 	put_cpu();
808 	bio_free(bio);
809 }
810 
811 /**
812  * bio_put - release a reference to a bio
813  * @bio:   bio to release reference to
814  *
815  * Description:
816  *   Put a reference to a &struct bio, either one you have gotten with
817  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
818  **/
819 void bio_put(struct bio *bio)
820 {
821 	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
822 		BUG_ON(!atomic_read(&bio->__bi_cnt));
823 		if (!atomic_dec_and_test(&bio->__bi_cnt))
824 			return;
825 	}
826 	if (bio->bi_opf & REQ_ALLOC_CACHE)
827 		bio_put_percpu_cache(bio);
828 	else
829 		bio_free(bio);
830 }
831 EXPORT_SYMBOL(bio_put);
832 
833 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
834 {
835 	bio_set_flag(bio, BIO_CLONED);
836 	bio->bi_ioprio = bio_src->bi_ioprio;
837 	bio->bi_write_hint = bio_src->bi_write_hint;
838 	bio->bi_write_stream = bio_src->bi_write_stream;
839 	bio->bi_iter = bio_src->bi_iter;
840 
841 	if (bio->bi_bdev) {
842 		if (bio->bi_bdev == bio_src->bi_bdev &&
843 		    bio_flagged(bio_src, BIO_REMAPPED))
844 			bio_set_flag(bio, BIO_REMAPPED);
845 		bio_clone_blkg_association(bio, bio_src);
846 	}
847 
848 	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
849 		return -ENOMEM;
850 	if (bio_integrity(bio_src) &&
851 	    bio_integrity_clone(bio, bio_src, gfp) < 0)
852 		return -ENOMEM;
853 	return 0;
854 }
855 
856 /**
857  * bio_alloc_clone - clone a bio that shares the original bio's biovec
858  * @bdev: block_device to clone onto
859  * @bio_src: bio to clone from
860  * @gfp: allocation priority
861  * @bs: bio_set to allocate from
862  *
863  * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
864  * bio, but not the actual data it points to.
865  *
866  * The caller must ensure that the returned bio is not freed before @bio_src.
867  */
868 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
869 		gfp_t gfp, struct bio_set *bs)
870 {
871 	struct bio *bio;
872 
873 	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
874 	if (!bio)
875 		return NULL;
876 
877 	if (__bio_clone(bio, bio_src, gfp) < 0) {
878 		bio_put(bio);
879 		return NULL;
880 	}
881 	bio->bi_io_vec = bio_src->bi_io_vec;
882 
883 	return bio;
884 }
885 EXPORT_SYMBOL(bio_alloc_clone);
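/*
 * Editorial usage sketch (not part of the original source): the typical
 * stacking-driver use of bio_alloc_clone() - clone the incoming bio,
 * redirect it to a lower device, and complete the original from the
 * clone's end_io handler.  The helpers, the lower device and the bio_set
 * are hypothetical.  Note that the clone is put before the original is
 * ended, honouring the lifetime rule in the comment above.
 */
#if 0
static void example_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;
	bio_put(clone);			/* free the clone first */
	bio_endio(orig);
}

static void example_remap_bio(struct bio *orig,
			      struct block_device *lower_bdev,
			      struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_clone(lower_bdev, orig, GFP_NOIO, bs);
	if (!clone) {
		bio_io_error(orig);
		return;
	}
	clone->bi_private = orig;
	clone->bi_end_io = example_clone_endio;
	submit_bio(clone);
}
#endif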
886 
887 /**
888  * bio_init_clone - clone a bio that shares the original bio's biovec
889  * @bdev: block_device to clone onto
890  * @bio: bio to clone into
891  * @bio_src: bio to clone from
892  * @gfp: allocation priority
893  *
894  * Initialize a new bio in caller provided memory that is a clone of @bio_src.
895  * The caller owns the returned bio, but not the actual data it points to.
896  *
897  * The caller must ensure that @bio_src is not freed before @bio.
898  */
899 int bio_init_clone(struct block_device *bdev, struct bio *bio,
900 		struct bio *bio_src, gfp_t gfp)
901 {
902 	int ret;
903 
904 	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
905 	ret = __bio_clone(bio, bio_src, gfp);
906 	if (ret)
907 		bio_uninit(bio);
908 	return ret;
909 }
910 EXPORT_SYMBOL(bio_init_clone);
911 
912 /**
913  * bio_full - check if the bio is full
914  * @bio:	bio to check
915  * @len:	length of one segment to be added
916  *
917  * Return true if @bio is full and one segment with @len bytes can't be
918  * added to the bio, otherwise return false
919  */
920 static inline bool bio_full(struct bio *bio, unsigned len)
921 {
922 	if (bio->bi_vcnt >= bio->bi_max_vecs)
923 		return true;
924 	if (bio->bi_iter.bi_size > UINT_MAX - len)
925 		return true;
926 	return false;
927 }
928 
929 static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
930 		unsigned int len, unsigned int off)
931 {
932 	size_t bv_end = bv->bv_offset + bv->bv_len;
933 	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
934 	phys_addr_t page_addr = page_to_phys(page);
935 
936 	if (vec_end_addr + 1 != page_addr + off)
937 		return false;
938 	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
939 		return false;
940 
941 	if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) {
942 		if (IS_ENABLED(CONFIG_KMSAN))
943 			return false;
944 		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
945 			return false;
946 	}
947 
948 	bv->bv_len += len;
949 	return true;
950 }
951 
952 /*
953  * Try to merge a page into a segment, while obeying the hardware segment
954  * size limit.
955  *
956  * This is kept around for the integrity metadata, which still tries
957  * to build the initial bio to the hardware limit and doesn't have proper
958  * helpers to split.  Hopefully this will go away soon.
959  */
960 bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
961 		struct page *page, unsigned len, unsigned offset)
962 {
963 	unsigned long mask = queue_segment_boundary(q);
964 	phys_addr_t addr1 = bvec_phys(bv);
965 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
966 
967 	if ((addr1 | mask) != (addr2 | mask))
968 		return false;
969 	if (len > queue_max_segment_size(q) - bv->bv_len)
970 		return false;
971 	return bvec_try_merge_page(bv, page, len, offset);
972 }
973 
974 /**
975  * __bio_add_page - add page(s) to a bio in a new segment
976  * @bio: destination bio
977  * @page: start page to add
978  * @len: length of the data to add, may cross pages
979  * @off: offset of the data relative to @page, may cross pages
980  *
981  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
982  * that @bio has space for another bvec.
983  */
984 void __bio_add_page(struct bio *bio, struct page *page,
985 		unsigned int len, unsigned int off)
986 {
987 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
988 	WARN_ON_ONCE(bio_full(bio, len));
989 
990 	if (is_pci_p2pdma_page(page))
991 		bio->bi_opf |= REQ_NOMERGE;
992 
993 	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
994 	bio->bi_iter.bi_size += len;
995 	bio->bi_vcnt++;
996 }
997 EXPORT_SYMBOL_GPL(__bio_add_page);
998 
999 /**
1000  * bio_add_virt_nofail - add data in the direct kernel mapping to a bio
1001  * @bio: destination bio
1002  * @vaddr: data to add
1003  * @len: length of the data to add, may cross pages
1004  *
1005  * Add the data at @vaddr to @bio.  The caller must have ensured that a segment
1006  * is available for the added data.  No merging into an existing segment
1007  * will be performed.
1008  */
1009 void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
1010 {
1011 	__bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
1012 }
1013 EXPORT_SYMBOL_GPL(bio_add_virt_nofail);
1014 
1015 /**
1016  *	bio_add_page	-	attempt to add page(s) to bio
1017  *	@bio: destination bio
1018  *	@page: start page to add
1019  *	@len: vec entry length, may cross pages
1020  *	@offset: vec entry offset relative to @page, may cross pages
1021  *
1022  *	Attempt to add page(s) to the bio_vec maplist. This will only fail
1023  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1024  */
1025 int bio_add_page(struct bio *bio, struct page *page,
1026 		 unsigned int len, unsigned int offset)
1027 {
1028 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1029 		return 0;
1030 	if (bio->bi_iter.bi_size > UINT_MAX - len)
1031 		return 0;
1032 
1033 	if (bio->bi_vcnt > 0) {
1034 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
1035 
1036 		if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
1037 			return 0;
1038 
1039 		if (bvec_try_merge_page(bv, page, len, offset)) {
1040 			bio->bi_iter.bi_size += len;
1041 			return len;
1042 		}
1043 	}
1044 
1045 	if (bio->bi_vcnt >= bio->bi_max_vecs)
1046 		return 0;
1047 	__bio_add_page(bio, page, len, offset);
1048 	return len;
1049 }
1050 EXPORT_SYMBOL(bio_add_page);
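/*
 * Editorial usage sketch (not part of the original source): checking the
 * return value of bio_add_page().  Unlike __bio_add_page(), the bio may be
 * full, so a caller filling a bio from a page array has to stop (and
 * typically submit the bio and start a new one) when less than the
 * requested length was added.  The helper is hypothetical.
 */
#if 0
static unsigned int example_fill_bio(struct bio *bio, struct page **pages,
				     unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;		/* bio is full */
	}
	return i;			/* number of pages actually added */
}
#endif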
1051 
1052 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1053 			  size_t off)
1054 {
1055 	unsigned long nr = off / PAGE_SIZE;
1056 
1057 	WARN_ON_ONCE(len > UINT_MAX);
1058 	__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
1059 }
1060 EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
1061 
1062 /**
1063  * bio_add_folio - Attempt to add part of a folio to a bio.
1064  * @bio: BIO to add to.
1065  * @folio: Folio to add.
1066  * @len: How many bytes from the folio to add.
1067  * @off: First byte in this folio to add.
1068  *
1069  * Filesystems that use folios can call this function instead of calling
1070  * bio_add_page() for each page in the folio.  If @off is bigger than
1071  * PAGE_SIZE, this function can create a bio_vec that starts in a page
1072  * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
1073  *
1074  * Return: Whether the addition was successful.
1075  */
1076 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1077 		   size_t off)
1078 {
1079 	unsigned long nr = off / PAGE_SIZE;
1080 
1081 	if (len > UINT_MAX)
1082 		return false;
1083 	return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
1084 }
1085 EXPORT_SYMBOL(bio_add_folio);
1086 
1087 /**
1088  * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio
1089  * @bio: destination bio
1090  * @vaddr: vmalloc address to add
1091  * @len: total length in bytes of the data to add
1092  *
1093  * Add data starting at @vaddr to @bio and return how many bytes were added.
1094  * This may be less than the amount originally requested.  Returns 0 if no data
1095  * could be added to @bio.
1096  *
1097  * This helper calls flush_kernel_vmap_range() for the range added.  For reads
1098  * the caller still needs to manually call invalidate_kernel_vmap_range() in
1099  * the completion handler.
1100  */
1101 unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
1102 {
1103 	unsigned int offset = offset_in_page(vaddr);
1104 
1105 	len = min(len, PAGE_SIZE - offset);
1106 	if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
1107 		return 0;
1108 	if (op_is_write(bio_op(bio)))
1109 		flush_kernel_vmap_range(vaddr, len);
1110 	return len;
1111 }
1112 EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk);
1113 
1114 /**
1115  * bio_add_vmalloc - add a vmalloc region to a bio
1116  * @bio: destination bio
1117  * @vaddr: vmalloc address to add
1118  * @len: total length in bytes of the data to add
1119  *
1120  * Add data starting at @vaddr to @bio.  Return %true on success or %false if
1121  * @bio does not have enough space for the payload.
1122  *
1123  * This helper calls flush_kernel_vmap_range() for the range added.  For reads
1124  * the caller still needs to manually call invalidate_kernel_vmap_range() in
1125  * the completion handler.
1126  */
1127 bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
1128 {
1129 	do {
1130 		unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);
1131 
1132 		if (!added)
1133 			return false;
1134 		vaddr += added;
1135 		len -= added;
1136 	} while (len);
1137 
1138 	return true;
1139 }
1140 EXPORT_SYMBOL_GPL(bio_add_vmalloc);
1141 
1142 void __bio_release_pages(struct bio *bio, bool mark_dirty)
1143 {
1144 	struct folio_iter fi;
1145 
1146 	bio_for_each_folio_all(fi, bio) {
1147 		size_t nr_pages;
1148 
1149 		if (mark_dirty) {
1150 			folio_lock(fi.folio);
1151 			folio_mark_dirty(fi.folio);
1152 			folio_unlock(fi.folio);
1153 		}
1154 		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
1155 			   fi.offset / PAGE_SIZE + 1;
1156 		unpin_user_folio(fi.folio, nr_pages);
1157 	}
1158 }
1159 EXPORT_SYMBOL_GPL(__bio_release_pages);
1160 
1161 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
1162 {
1163 	WARN_ON_ONCE(bio->bi_max_vecs);
1164 
1165 	bio->bi_vcnt = iter->nr_segs;
1166 	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1167 	bio->bi_iter.bi_bvec_done = iter->iov_offset;
1168 	bio->bi_iter.bi_size = iov_iter_count(iter);
1169 	bio_set_flag(bio, BIO_CLONED);
1170 }
1171 
1172 static unsigned int get_contig_folio_len(unsigned int *num_pages,
1173 					 struct page **pages, unsigned int i,
1174 					 struct folio *folio, size_t left,
1175 					 size_t offset)
1176 {
1177 	size_t bytes = left;
1178 	size_t contig_sz = min_t(size_t, PAGE_SIZE - offset, bytes);
1179 	unsigned int j;
1180 
1181 	/*
1182 	 * We might COW a single page in the middle of
1183 	 * a large folio, so we have to check that all
1184 	 * pages belong to the same folio.
1185 	 */
1186 	bytes -= contig_sz;
1187 	for (j = i + 1; j < i + *num_pages; j++) {
1188 		size_t next = min_t(size_t, PAGE_SIZE, bytes);
1189 
1190 		if (page_folio(pages[j]) != folio ||
1191 		    pages[j] != pages[j - 1] + 1) {
1192 			break;
1193 		}
1194 		contig_sz += next;
1195 		bytes -= next;
1196 	}
1197 	*num_pages = j - i;
1198 
1199 	return contig_sz;
1200 }
1201 
1202 #define PAGE_PTRS_PER_BVEC     (sizeof(struct bio_vec) / sizeof(struct page *))
1203 
1204 /**
1205  * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
1206  * @bio: bio to add pages to
1207  * @iter: iov iterator describing the region to be mapped
1208  *
1209  * Extracts pages from *iter and appends them to @bio's bvec array.  The pages
1210  * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag.
1211  * For a multi-segment *iter, this function only adds pages from the next
1212  * non-empty segment of the iov iterator.
1213  */
1214 static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1215 {
1216 	iov_iter_extraction_t extraction_flags = 0;
1217 	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
1218 	unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt;
1219 	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
1220 	struct page **pages = (struct page **)bv;
1221 	ssize_t size;
1222 	unsigned int num_pages, i = 0;
1223 	size_t offset, folio_offset, left, len;
1224 	int ret = 0;
1225 
1226 	/*
1227 	 * Move page array up in the allocated memory for the bio vecs as far as
1228 	 * possible so that we can start filling biovecs from the beginning
1229 	 * without overwriting the temporary page array.
1230 	 */
1231 	BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2);
1232 	pages += entries_left * (PAGE_PTRS_PER_BVEC - 1);
1233 
1234 	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1235 		extraction_flags |= ITER_ALLOW_P2PDMA;
1236 
1237 	size = iov_iter_extract_pages(iter, &pages,
1238 				      UINT_MAX - bio->bi_iter.bi_size,
1239 				      nr_pages, extraction_flags, &offset);
1240 	if (unlikely(size <= 0))
1241 		return size ? size : -EFAULT;
1242 
1243 	nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE);
1244 	for (left = size, i = 0; left > 0; left -= len, i += num_pages) {
1245 		struct page *page = pages[i];
1246 		struct folio *folio = page_folio(page);
1247 		unsigned int old_vcnt = bio->bi_vcnt;
1248 
1249 		folio_offset = ((size_t)folio_page_idx(folio, page) <<
1250 			       PAGE_SHIFT) + offset;
1251 
1252 		len = min(folio_size(folio) - folio_offset, left);
1253 
1254 		num_pages = DIV_ROUND_UP(offset + len, PAGE_SIZE);
1255 
1256 		if (num_pages > 1)
1257 			len = get_contig_folio_len(&num_pages, pages, i,
1258 						   folio, left, offset);
1259 
1260 		if (!bio_add_folio(bio, folio, len, folio_offset)) {
1261 			WARN_ON_ONCE(1);
1262 			ret = -EINVAL;
1263 			goto out;
1264 		}
1265 
1266 		if (bio_flagged(bio, BIO_PAGE_PINNED)) {
1267 			/*
1268 			 * We're adding another fragment of a page that already
1269 			 * was part of the last segment.  Undo our pin as the
1270 			 * page was pinned when an earlier fragment of it was
1271 			 * added to the bio and __bio_release_pages expects a
1272 			 * single pin per page.
1273 			 */
1274 			if (offset && bio->bi_vcnt == old_vcnt)
1275 				unpin_user_folio(folio, 1);
1276 		}
1277 		offset = 0;
1278 	}
1279 
1280 	iov_iter_revert(iter, left);
1281 out:
1282 	while (i < nr_pages)
1283 		bio_release_page(bio, pages[i++]);
1284 
1285 	return ret;
1286 }
1287 
1288 /*
1289  * Aligns the bio size to the len_align_mask, releasing any excess bio vecs that
1290  * __bio_iov_iter_get_pages may have inserted, and reverts the trimmed length
1291  * for the next iteration.
1292  */
1293 static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
1294 			    unsigned len_align_mask)
1295 {
1296 	size_t nbytes = bio->bi_iter.bi_size & len_align_mask;
1297 
1298 	if (!nbytes)
1299 		return 0;
1300 
1301 	iov_iter_revert(iter, nbytes);
1302 	bio->bi_iter.bi_size -= nbytes;
1303 	do {
1304 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
1305 
1306 		if (nbytes < bv->bv_len) {
1307 			bv->bv_len -= nbytes;
1308 			break;
1309 		}
1310 
1311 		bio_release_page(bio, bv->bv_page);
1312 		bio->bi_vcnt--;
1313 		nbytes -= bv->bv_len;
1314 	} while (nbytes);
1315 
1316 	if (!bio->bi_vcnt)
1317 		return -EFAULT;
1318 	return 0;
1319 }
1320 
1321 /**
1322  * bio_iov_iter_get_pages - add user or kernel pages to a bio
1323  * @bio: bio to add pages to
1324  * @iter: iov iterator describing the region to be added
1325  * @len_align_mask: the mask to align the total size to, 0 for any length
1326  *
1327  * This takes either an iterator pointing to user memory, or one pointing to
1328  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1329  * map them into the kernel. On IO completion, the caller should put those
1330  * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1331  * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1332  * to ensure the bvecs and pages stay referenced until the submitted I/O is
1333  * completed by a call to ->ki_complete() or returns with an error other than
1334  * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1335  * on IO completion. If it isn't, then pages should be released.
1336  *
1337  * The function tries, but does not guarantee, to pin as many pages as
1338  * fit into the bio, or are requested in @iter, whatever is smaller. If
1339  * MM encounters an error pinning the requested pages, it stops. Error
1340  * is returned only if 0 pages could be pinned.
1341  */
1342 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
1343 			   unsigned len_align_mask)
1344 {
1345 	int ret = 0;
1346 
1347 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1348 		return -EIO;
1349 
1350 	if (iov_iter_is_bvec(iter)) {
1351 		bio_iov_bvec_set(bio, iter);
1352 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1353 		return 0;
1354 	}
1355 
1356 	if (iov_iter_extract_will_pin(iter))
1357 		bio_set_flag(bio, BIO_PAGE_PINNED);
1358 	do {
1359 		ret = __bio_iov_iter_get_pages(bio, iter);
1360 	} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
1361 
1362 	if (bio->bi_vcnt)
1363 		return bio_iov_iter_align_down(bio, iter, len_align_mask);
1364 	return ret;
1365 }
1366 
1367 static void submit_bio_wait_endio(struct bio *bio)
1368 {
1369 	complete(bio->bi_private);
1370 }
1371 
1372 /**
1373  * submit_bio_wait - submit a bio, and wait until it completes
1374  * @bio: The &struct bio which describes the I/O
1375  *
1376  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1377  * bio_endio() on failure.
1378  *
1379  * WARNING: Unlike how submit_bio() is usually used, this function does not
1380  * consume a reference to the bio. The caller must drop the reference
1381  * on their own.
1382  */
1383 int submit_bio_wait(struct bio *bio)
1384 {
1385 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1386 			bio->bi_bdev->bd_disk->lockdep_map);
1387 
1388 	bio->bi_private = &done;
1389 	bio->bi_end_io = submit_bio_wait_endio;
1390 	bio->bi_opf |= REQ_SYNC;
1391 	submit_bio(bio);
1392 	blk_wait_io(&done);
1393 
1394 	return blk_status_to_errno(bio->bi_status);
1395 }
1396 EXPORT_SYMBOL(submit_bio_wait);
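/*
 * Editorial usage sketch (not part of the original source): the reference
 * rule from the warning above.  Because submit_bio_wait() does not consume
 * the bio reference, a dynamically allocated bio still needs an explicit
 * bio_put() afterwards.  The helper and its single-page payload are
 * hypothetical.
 */
#if 0
static int example_sync_write_page(struct block_device *bdev, sector_t sector,
				   struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_KERNEL);
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);		/* the caller still owns the reference */
	return ret;
}
#endif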
1397 
1398 /**
1399  * bdev_rw_virt - synchronously read into / write from kernel mapping
1400  * @bdev:	block device to access
1401  * @sector:	sector to access
1402  * @data:	data to read/write
1403  * @len:	length in bytes to read/write
1404  * @op:		operation (e.g. REQ_OP_READ/REQ_OP_WRITE)
1405  *
1406  * Performs synchronous I/O to @bdev for @data/@len.  @data must be in
1407  * the kernel direct mapping and not a vmalloc address.
1408  */
1409 int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
1410 		size_t len, enum req_op op)
1411 {
1412 	struct bio_vec bv;
1413 	struct bio bio;
1414 	int error;
1415 
1416 	if (WARN_ON_ONCE(is_vmalloc_addr(data)))
1417 		return -EIO;
1418 
1419 	bio_init(&bio, bdev, &bv, 1, op);
1420 	bio.bi_iter.bi_sector = sector;
1421 	bio_add_virt_nofail(&bio, data, len);
1422 	error = submit_bio_wait(&bio);
1423 	bio_uninit(&bio);
1424 	return error;
1425 }
1426 EXPORT_SYMBOL_GPL(bdev_rw_virt);
1427 
1428 static void bio_wait_end_io(struct bio *bio)
1429 {
1430 	complete(bio->bi_private);
1431 	bio_put(bio);
1432 }
1433 
1434 /*
1435  * bio_await_chain - ends @bio and waits for every chained bio to complete
1436  */
1437 void bio_await_chain(struct bio *bio)
1438 {
1439 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1440 			bio->bi_bdev->bd_disk->lockdep_map);
1441 
1442 	bio->bi_private = &done;
1443 	bio->bi_end_io = bio_wait_end_io;
1444 	bio_endio(bio);
1445 	blk_wait_io(&done);
1446 }
1447 
1448 void __bio_advance(struct bio *bio, unsigned bytes)
1449 {
1450 	if (bio_integrity(bio))
1451 		bio_integrity_advance(bio, bytes);
1452 
1453 	bio_crypt_advance(bio, bytes);
1454 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1455 }
1456 EXPORT_SYMBOL(__bio_advance);
1457 
1458 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1459 			struct bio *src, struct bvec_iter *src_iter)
1460 {
1461 	while (src_iter->bi_size && dst_iter->bi_size) {
1462 		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1463 		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1464 		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1465 		void *src_buf = bvec_kmap_local(&src_bv);
1466 		void *dst_buf = bvec_kmap_local(&dst_bv);
1467 
1468 		memcpy(dst_buf, src_buf, bytes);
1469 
1470 		kunmap_local(dst_buf);
1471 		kunmap_local(src_buf);
1472 
1473 		bio_advance_iter_single(src, src_iter, bytes);
1474 		bio_advance_iter_single(dst, dst_iter, bytes);
1475 	}
1476 }
1477 EXPORT_SYMBOL(bio_copy_data_iter);
1478 
1479 /**
1480  * bio_copy_data - copy contents of data buffers from one bio to another
1481  * @src: source bio
1482  * @dst: destination bio
1483  *
1484  * Stops when it reaches the end of either @src or @dst - that is, copies
1485  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1486  */
1487 void bio_copy_data(struct bio *dst, struct bio *src)
1488 {
1489 	struct bvec_iter src_iter = src->bi_iter;
1490 	struct bvec_iter dst_iter = dst->bi_iter;
1491 
1492 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1493 }
1494 EXPORT_SYMBOL(bio_copy_data);
1495 
1496 void bio_free_pages(struct bio *bio)
1497 {
1498 	struct bio_vec *bvec;
1499 	struct bvec_iter_all iter_all;
1500 
1501 	bio_for_each_segment_all(bvec, bio, iter_all)
1502 		__free_page(bvec->bv_page);
1503 }
1504 EXPORT_SYMBOL(bio_free_pages);
1505 
1506 /*
1507  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1508  * for performing direct-IO in BIOs.
1509  *
1510  * The problem is that we cannot run folio_mark_dirty() from interrupt context
1511  * because the required locks are not interrupt-safe.  So what we can do is to
1512  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1513  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1514  * in process context.
1515  *
1516  * Note that this code is very hard to test under normal circumstances because
1517  * direct-io pins the pages with get_user_pages().  This makes
1518  * is_page_cache_freeable return false, and the VM will not clean the pages.
1519  * But other code (eg, flusher threads) could clean the pages if they are mapped
1520  * pagecache.
1521  *
1522  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1523  * deferred bio dirtying paths.
1524  */
1525 
1526 /*
1527  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1528  */
1529 void bio_set_pages_dirty(struct bio *bio)
1530 {
1531 	struct folio_iter fi;
1532 
1533 	bio_for_each_folio_all(fi, bio) {
1534 		folio_lock(fi.folio);
1535 		folio_mark_dirty(fi.folio);
1536 		folio_unlock(fi.folio);
1537 	}
1538 }
1539 EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1540 
1541 /*
1542  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1543  * If they are, then fine.  If, however, some pages are clean then they must
1544  * have been written out during the direct-IO read.  So we take another ref on
1545  * the BIO and re-dirty the pages in process context.
1546  *
1547  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1548  * here on.  It will unpin each page and will run one bio_put() against the
1549  * BIO.
1550  */
1551 
1552 static void bio_dirty_fn(struct work_struct *work);
1553 
1554 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1555 static DEFINE_SPINLOCK(bio_dirty_lock);
1556 static struct bio *bio_dirty_list;
1557 
1558 /*
1559  * This runs in process context
1560  */
1561 static void bio_dirty_fn(struct work_struct *work)
1562 {
1563 	struct bio *bio, *next;
1564 
1565 	spin_lock_irq(&bio_dirty_lock);
1566 	next = bio_dirty_list;
1567 	bio_dirty_list = NULL;
1568 	spin_unlock_irq(&bio_dirty_lock);
1569 
1570 	while ((bio = next) != NULL) {
1571 		next = bio->bi_private;
1572 
1573 		bio_release_pages(bio, true);
1574 		bio_put(bio);
1575 	}
1576 }
1577 
1578 void bio_check_pages_dirty(struct bio *bio)
1579 {
1580 	struct folio_iter fi;
1581 	unsigned long flags;
1582 
1583 	bio_for_each_folio_all(fi, bio) {
1584 		if (!folio_test_dirty(fi.folio))
1585 			goto defer;
1586 	}
1587 
1588 	bio_release_pages(bio, false);
1589 	bio_put(bio);
1590 	return;
1591 defer:
1592 	spin_lock_irqsave(&bio_dirty_lock, flags);
1593 	bio->bi_private = bio_dirty_list;
1594 	bio_dirty_list = bio;
1595 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1596 	schedule_work(&bio_dirty_work);
1597 }
1598 EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1599 
1600 static inline bool bio_remaining_done(struct bio *bio)
1601 {
1602 	/*
1603 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1604 	 * we always end io on the first invocation.
1605 	 */
1606 	if (!bio_flagged(bio, BIO_CHAIN))
1607 		return true;
1608 
1609 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1610 
1611 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1612 		bio_clear_flag(bio, BIO_CHAIN);
1613 		return true;
1614 	}
1615 
1616 	return false;
1617 }
1618 
1619 /**
1620  * bio_endio - end I/O on a bio
1621  * @bio:	bio
1622  *
1623  * Description:
1624  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1625  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1626  *   bio unless they own it and thus know that it has an end_io function.
1627  *
1628  *   bio_endio() can be called several times on a bio that has been chained
1629  *   using bio_chain().  The ->bi_end_io() function will only be called the
1630  *   last time.
1631  **/
1632 void bio_endio(struct bio *bio)
1633 {
1634 again:
1635 	if (!bio_remaining_done(bio))
1636 		return;
1637 	if (!bio_integrity_endio(bio))
1638 		return;
1639 
1640 	blk_zone_bio_endio(bio);
1641 
1642 	rq_qos_done_bio(bio);
1643 
1644 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1645 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1646 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1647 	}
1648 
1649 	/*
1650 	 * Need to have a real endio function for chained bios, otherwise
1651 	 * various corner cases will break (like stacking block devices that
1652 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1653 	 * recursion and blowing the stack. Tail call optimization would
1654 	 * handle this, but compiling with frame pointers also disables
1655 	 * gcc's sibling call optimization.
1656 	 */
1657 	if (bio->bi_end_io == bio_chain_endio) {
1658 		bio = __bio_chain_endio(bio);
1659 		goto again;
1660 	}
1661 
1662 #ifdef CONFIG_BLK_CGROUP
1663 	/*
1664 	 * Release cgroup info.  We shouldn't have to do this here, but quite
1665 	 * a few callers of bio_init fail to call bio_uninit, so we cover up
1666 	 * for that here at least for now.
1667 	 */
1668 	if (bio->bi_blkg) {
1669 		blkg_put(bio->bi_blkg);
1670 		bio->bi_blkg = NULL;
1671 	}
1672 #endif
1673 
1674 	if (bio->bi_end_io)
1675 		bio->bi_end_io(bio);
1676 }
1677 EXPORT_SYMBOL(bio_endio);
1678 
1679 /**
1680  * bio_split - split a bio
1681  * @bio:	bio to split
1682  * @sectors:	number of sectors to split from the front of @bio
1683  * @gfp:	gfp mask
1684  * @bs:		bio set to allocate from
1685  *
1686  * Allocates and returns a new bio which represents @sectors from the start of
1687  * @bio, and updates @bio to represent the remaining sectors.
1688  *
1689  * Unless this is a discard request the newly allocated bio will point
1690  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1691  * neither @bio nor @bs are freed before the split bio.
1692  */
1693 struct bio *bio_split(struct bio *bio, int sectors,
1694 		      gfp_t gfp, struct bio_set *bs)
1695 {
1696 	struct bio *split;
1697 
1698 	if (WARN_ON_ONCE(sectors <= 0))
1699 		return ERR_PTR(-EINVAL);
1700 	if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
1701 		return ERR_PTR(-EINVAL);
1702 
1703 	/* Zone append commands cannot be split */
1704 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1705 		return ERR_PTR(-EINVAL);
1706 
1707 	/* atomic writes cannot be split */
1708 	if (bio->bi_opf & REQ_ATOMIC)
1709 		return ERR_PTR(-EINVAL);
1710 
1711 	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1712 	if (!split)
1713 		return ERR_PTR(-ENOMEM);
1714 
1715 	split->bi_iter.bi_size = sectors << 9;
1716 
1717 	if (bio_integrity(split))
1718 		bio_integrity_trim(split);
1719 
1720 	bio_advance(bio, split->bi_iter.bi_size);
1721 
1722 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1723 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1724 
1725 	return split;
1726 }
1727 EXPORT_SYMBOL(bio_split);
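/*
 * Editorial usage sketch (not part of the original source): the usual
 * stacking-driver pattern around bio_split() - carve off the front of an
 * oversized bio, chain the remainder to it, and resubmit the remainder so
 * it is processed again later.  The helper and its "max_sectors" limit are
 * hypothetical; compare the split handling in blk-merge.c.
 */
#if 0
static struct bio *example_split_to_limit(struct bio *bio,
					  unsigned int max_sectors,
					  struct bio_set *bs)
{
	struct bio *split;

	if (bio_sectors(bio) <= max_sectors)
		return bio;

	split = bio_split(bio, max_sectors, GFP_NOIO, bs);
	if (IS_ERR(split))
		return split;

	/* @bio now holds the remainder; it completes only after @split */
	bio_chain(split, bio);
	submit_bio_noacct(bio);
	return split;
}
#endif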
1728 
1729 /**
1730  * bio_trim - trim a bio
1731  * @bio:	bio to trim
1732  * @offset:	number of sectors to trim from the front of @bio
1733  * @size:	size we want to trim @bio to, in sectors
1734  *
1735  * This function is typically used for bios that are cloned and submitted
1736  * to the underlying device in parts.
1737  */
1738 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1739 {
1740 	/* We should never trim an atomic write */
1741 	if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size))
1742 		return;
1743 
1744 	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1745 			 offset + size > bio_sectors(bio)))
1746 		return;
1747 
1748 	size <<= 9;
1749 	if (offset == 0 && size == bio->bi_iter.bi_size)
1750 		return;
1751 
1752 	bio_advance(bio, offset << 9);
1753 	bio->bi_iter.bi_size = size;
1754 
1755 	if (bio_integrity(bio))
1756 		bio_integrity_trim(bio);
1757 }
1758 EXPORT_SYMBOL_GPL(bio_trim);
1759 
1760 /*
1761  * create memory pools for biovec's in a bio_set.
1762  * use the global biovec slabs created for general use.
1763  */
1764 int biovec_init_pool(mempool_t *pool, int pool_entries)
1765 {
1766 	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1767 
1768 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1769 }
1770 
1771 /*
1772  * bioset_exit - exit a bioset initialized with bioset_init()
1773  *
1774  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1775  * kzalloc()).
1776  */
1777 void bioset_exit(struct bio_set *bs)
1778 {
1779 	bio_alloc_cache_destroy(bs);
1780 	if (bs->rescue_workqueue)
1781 		destroy_workqueue(bs->rescue_workqueue);
1782 	bs->rescue_workqueue = NULL;
1783 
1784 	mempool_exit(&bs->bio_pool);
1785 	mempool_exit(&bs->bvec_pool);
1786 
1787 	if (bs->bio_slab)
1788 		bio_put_slab(bs);
1789 	bs->bio_slab = NULL;
1790 }
1791 EXPORT_SYMBOL(bioset_exit);
1792 
1793 /**
1794  * bioset_init - Initialize a bio_set
1795  * @bs:		pool to initialize
1796  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1797  * @front_pad:	Number of bytes to allocate in front of the returned bio
1798  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1799  *              and %BIOSET_NEED_RESCUER
1800  *
1801  * Description:
1802  *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1803  *    to ask for a number of bytes to be allocated in front of the bio.
1804  *    Front pad allocation is useful for embedding the bio inside
1805  *    another structure, to avoid allocating extra data to go with the bio.
1806  *    Note that the bio must be embedded at the END of that structure always,
1807  *    or things will break badly.
1808  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1809  *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
1810  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
1811  *    to dispatch queued requests when the mempool runs out of space.
1812  *
1813  */
1814 int bioset_init(struct bio_set *bs,
1815 		unsigned int pool_size,
1816 		unsigned int front_pad,
1817 		int flags)
1818 {
1819 	bs->front_pad = front_pad;
1820 	if (flags & BIOSET_NEED_BVECS)
1821 		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1822 	else
1823 		bs->back_pad = 0;
1824 
1825 	spin_lock_init(&bs->rescue_lock);
1826 	bio_list_init(&bs->rescue_list);
1827 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1828 
1829 	bs->bio_slab = bio_find_or_create_slab(bs);
1830 	if (!bs->bio_slab)
1831 		return -ENOMEM;
1832 
1833 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1834 		goto bad;
1835 
1836 	if ((flags & BIOSET_NEED_BVECS) &&
1837 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1838 		goto bad;
1839 
1840 	if (flags & BIOSET_NEED_RESCUER) {
1841 		bs->rescue_workqueue = alloc_workqueue("bioset",
1842 							WQ_MEM_RECLAIM, 0);
1843 		if (!bs->rescue_workqueue)
1844 			goto bad;
1845 	}
1846 	if (flags & BIOSET_PERCPU_CACHE) {
1847 		bs->cache = alloc_percpu(struct bio_alloc_cache);
1848 		if (!bs->cache)
1849 			goto bad;
1850 		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1851 	}
1852 
1853 	return 0;
1854 bad:
1855 	bioset_exit(bs);
1856 	return -ENOMEM;
1857 }
1858 EXPORT_SYMBOL(bioset_init);
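/*
 * Editorial usage sketch (not part of the original source): using
 * @front_pad to embed the bio at the end of a driver-private per-I/O
 * structure, as required by the comment above.  The structure, bio_set and
 * helpers are hypothetical; stacking drivers such as dm and md use this
 * scheme.
 */
#if 0
struct example_io {
	void			*private_data;
	struct bio		bio;		/* must be the last member */
};

static struct bio_set example_bio_set;

static int example_bioset_setup(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_alloc_io(struct block_device *bdev,
					   unsigned short nr_vecs,
					   blk_opf_t opf)
{
	struct bio *bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOIO,
					   &example_bio_set);

	return container_of(bio, struct example_io, bio);
}
#endif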
1859 
1860 static int __init init_bio(void)
1861 {
1862 	int i;
1863 
1864 	BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1865 
1866 	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1867 		struct biovec_slab *bvs = bvec_slabs + i;
1868 
1869 		bvs->slab = kmem_cache_create(bvs->name,
1870 				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1871 				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1872 	}
1873 
1874 	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1875 					bio_cpu_dead);
1876 
1877 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
1878 			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
1879 		panic("bio: can't allocate bios\n");
1880 
1881 	return 0;
1882 }
1883 subsys_initcall(init_bio);
1884