xref: /linux/block/bio.c (revision 4adc13ed7c281c16152a700e47b65d17de07321a)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4  */
5 #include <linux/mm.h>
6 #include <linux/swap.h>
7 #include <linux/bio-integrity.h>
8 #include <linux/blkdev.h>
9 #include <linux/uio.h>
10 #include <linux/iocontext.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/mempool.h>
16 #include <linux/workqueue.h>
17 #include <linux/cgroup.h>
18 #include <linux/highmem.h>
19 #include <linux/blk-crypto.h>
20 #include <linux/xarray.h>
21 
22 #include <trace/events/block.h>
23 #include "blk.h"
24 #include "blk-rq-qos.h"
25 #include "blk-cgroup.h"
26 
27 #define ALLOC_CACHE_THRESHOLD	16
28 #define ALLOC_CACHE_MAX		256
29 
30 struct bio_alloc_cache {
31 	struct bio		*free_list;
32 	struct bio		*free_list_irq;
33 	unsigned int		nr;
34 	unsigned int		nr_irq;
35 };
36 
37 static struct biovec_slab {
38 	int nr_vecs;
39 	char *name;
40 	struct kmem_cache *slab;
41 } bvec_slabs[] __read_mostly = {
42 	{ .nr_vecs = 16, .name = "biovec-16" },
43 	{ .nr_vecs = 64, .name = "biovec-64" },
44 	{ .nr_vecs = 128, .name = "biovec-128" },
45 	{ .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
46 };
47 
48 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
49 {
50 	switch (nr_vecs) {
51 	/* smaller bios use inline vecs */
52 	case 5 ... 16:
53 		return &bvec_slabs[0];
54 	case 17 ... 64:
55 		return &bvec_slabs[1];
56 	case 65 ... 128:
57 		return &bvec_slabs[2];
58 	case 129 ... BIO_MAX_VECS:
59 		return &bvec_slabs[3];
60 	default:
61 		BUG();
62 		return NULL;
63 	}
64 }
65 
66 /*
67  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
68  * IO code that does not need private memory pools.
69  */
70 struct bio_set fs_bio_set;
71 EXPORT_SYMBOL(fs_bio_set);
72 
73 /*
74  * Our slab pool management
75  */
76 struct bio_slab {
77 	struct kmem_cache *slab;
78 	unsigned int slab_ref;
79 	unsigned int slab_size;
80 	char name[12];
81 };
82 static DEFINE_MUTEX(bio_slab_lock);
83 static DEFINE_XARRAY(bio_slabs);
84 
85 static struct bio_slab *create_bio_slab(unsigned int size)
86 {
87 	struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL);
88 
89 	if (!bslab)
90 		return NULL;
91 
92 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
93 	bslab->slab = kmem_cache_create(bslab->name, size,
94 			ARCH_KMALLOC_MINALIGN,
95 			SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
96 	if (!bslab->slab)
97 		goto fail_alloc_slab;
98 
99 	bslab->slab_ref = 1;
100 	bslab->slab_size = size;
101 
102 	if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
103 		return bslab;
104 
105 	kmem_cache_destroy(bslab->slab);
106 
107 fail_alloc_slab:
108 	kfree(bslab);
109 	return NULL;
110 }
111 
112 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
113 {
114 	return bs->front_pad + sizeof(struct bio) + bs->back_pad;
115 }
116 
117 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
118 {
119 	unsigned int size = bs_bio_slab_size(bs);
120 	struct bio_slab *bslab;
121 
122 	mutex_lock(&bio_slab_lock);
123 	bslab = xa_load(&bio_slabs, size);
124 	if (bslab)
125 		bslab->slab_ref++;
126 	else
127 		bslab = create_bio_slab(size);
128 	mutex_unlock(&bio_slab_lock);
129 
130 	if (bslab)
131 		return bslab->slab;
132 	return NULL;
133 }
134 
135 static void bio_put_slab(struct bio_set *bs)
136 {
137 	struct bio_slab *bslab = NULL;
138 	unsigned int slab_size = bs_bio_slab_size(bs);
139 
140 	mutex_lock(&bio_slab_lock);
141 
142 	bslab = xa_load(&bio_slabs, slab_size);
143 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
144 		goto out;
145 
146 	WARN_ON_ONCE(bslab->slab != bs->bio_slab);
147 
148 	WARN_ON(!bslab->slab_ref);
149 
150 	if (--bslab->slab_ref)
151 		goto out;
152 
153 	xa_erase(&bio_slabs, slab_size);
154 
155 	kmem_cache_destroy(bslab->slab);
156 	kfree(bslab);
157 
158 out:
159 	mutex_unlock(&bio_slab_lock);
160 }
161 
162 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs)
163 {
164 	BUG_ON(nr_vecs > BIO_MAX_VECS);
165 
166 	if (nr_vecs == BIO_MAX_VECS)
167 		mempool_free(bv, pool);
168 	else if (nr_vecs > BIO_INLINE_VECS)
169 		kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
170 }
171 
172 /*
173  * Make the first allocation restricted and don't dump info on allocation
174  * failures, since we'll fall back to the mempool in case of failure.
175  */
176 static inline gfp_t bvec_alloc_gfp(gfp_t gfp)
177 {
178 	return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
179 		__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
180 }
181 
182 struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
183 		gfp_t gfp_mask)
184 {
185 	struct biovec_slab *bvs = biovec_slab(*nr_vecs);
186 
187 	if (WARN_ON_ONCE(!bvs))
188 		return NULL;
189 
190 	/*
191 	 * Upgrade the nr_vecs request to take full advantage of the allocation.
192 	 * We also rely on this in the bvec_free path.
193 	 */
194 	*nr_vecs = bvs->nr_vecs;
195 
196 	/*
197 	 * Try a slab allocation first for all smaller allocations.  If that
198 	 * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool.
199 	 * The mempool is sized to handle up to BIO_MAX_VECS entries.
200 	 */
201 	if (*nr_vecs < BIO_MAX_VECS) {
202 		struct bio_vec *bvl;
203 
204 		bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
205 		if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM))
206 			return bvl;
207 		*nr_vecs = BIO_MAX_VECS;
208 	}
209 
210 	return mempool_alloc(pool, gfp_mask);
211 }
212 
213 void bio_uninit(struct bio *bio)
214 {
215 #ifdef CONFIG_BLK_CGROUP
216 	if (bio->bi_blkg) {
217 		blkg_put(bio->bi_blkg);
218 		bio->bi_blkg = NULL;
219 	}
220 #endif
221 	if (bio_integrity(bio))
222 		bio_integrity_free(bio);
223 
224 	bio_crypt_free_ctx(bio);
225 }
226 EXPORT_SYMBOL(bio_uninit);
227 
228 static void bio_free(struct bio *bio)
229 {
230 	struct bio_set *bs = bio->bi_pool;
231 	void *p = bio;
232 
233 	WARN_ON_ONCE(!bs);
234 
235 	bio_uninit(bio);
236 	bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs);
237 	mempool_free(p - bs->front_pad, &bs->bio_pool);
238 }
239 
240 /*
241  * Users of this function have their own bio allocation. Consequently,
242  * they must remember to pair any call to bio_init() with bio_uninit()
243  * when IO has completed, or when the bio is released.
244  */
245 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
246 	      unsigned short max_vecs, blk_opf_t opf)
247 {
248 	bio->bi_next = NULL;
249 	bio->bi_bdev = bdev;
250 	bio->bi_opf = opf;
251 	bio->bi_flags = 0;
252 	bio->bi_ioprio = 0;
253 	bio->bi_write_hint = 0;
254 	bio->bi_write_stream = 0;
255 	bio->bi_status = 0;
256 	bio->bi_bvec_gap_bit = 0;
257 	bio->bi_iter.bi_sector = 0;
258 	bio->bi_iter.bi_size = 0;
259 	bio->bi_iter.bi_idx = 0;
260 	bio->bi_iter.bi_bvec_done = 0;
261 	bio->bi_end_io = NULL;
262 	bio->bi_private = NULL;
263 #ifdef CONFIG_BLK_CGROUP
264 	bio->bi_blkg = NULL;
265 	bio->issue_time_ns = 0;
266 	if (bdev)
267 		bio_associate_blkg(bio);
268 #ifdef CONFIG_BLK_CGROUP_IOCOST
269 	bio->bi_iocost_cost = 0;
270 #endif
271 #endif
272 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
273 	bio->bi_crypt_context = NULL;
274 #endif
275 #ifdef CONFIG_BLK_DEV_INTEGRITY
276 	bio->bi_integrity = NULL;
277 #endif
278 	bio->bi_vcnt = 0;
279 
280 	atomic_set(&bio->__bi_remaining, 1);
281 	atomic_set(&bio->__bi_cnt, 1);
282 	bio->bi_cookie = BLK_QC_T_NONE;
283 
284 	bio->bi_max_vecs = max_vecs;
285 	bio->bi_io_vec = table;
286 	bio->bi_pool = NULL;
287 }
288 EXPORT_SYMBOL(bio_init);
289 
290 /**
291  * bio_reset - reinitialize a bio
292  * @bio:	bio to reset
293  * @bdev:	block device to use the bio for
294  * @opf:	operation and flags for bio
295  *
296  * Description:
297  *   After calling bio_reset(), @bio will be in the same state as a freshly
298  *   allocated bio returned by bio_alloc_bioset() - the only fields that are
299  *   preserved are the ones that are initialized by bio_alloc_bioset(). See
300  *   comment in struct bio.
301  */
302 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
303 {
304 	struct bio_vec          *bv = bio->bi_io_vec;
305 
306 	bio_uninit(bio);
307 	memset(bio, 0, BIO_RESET_BYTES);
308 	atomic_set(&bio->__bi_remaining, 1);
309 	bio->bi_io_vec = bv;
310 	bio->bi_bdev = bdev;
311 	if (bio->bi_bdev)
312 		bio_associate_blkg(bio);
313 	bio->bi_opf = opf;
314 }
315 EXPORT_SYMBOL(bio_reset);
316 
317 /**
318  * bio_reuse - reuse a bio with the payload left intact
319  * @bio:	bio to reuse
320  * @opf:	operation and flags for the next I/O
321  *
322  * Allow reusing an existing bio for another operation with all set up
323  * fields including the payload, device and end_io handler left intact.
324  *
325  * Typically used when @bio is first used to read data which is then written
326  * to another location without modification.  @bio must not be in-flight and
327  * must be owned by the caller.  Can't be used for cloned bios.
328  *
329  * Note: Can't be used when @bio has integrity or blk-crypto contexts for now.
330  * Feel free to add that support when you need it, though.
331  */
332 void bio_reuse(struct bio *bio, blk_opf_t opf)
333 {
334 	unsigned short vcnt = bio->bi_vcnt, i;
335 	bio_end_io_t *end_io = bio->bi_end_io;
336 	void *private = bio->bi_private;
337 
338 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
339 	WARN_ON_ONCE(bio_integrity(bio));
340 	WARN_ON_ONCE(bio_has_crypt_ctx(bio));
341 
342 	bio_reset(bio, bio->bi_bdev, opf);
343 	for (i = 0; i < vcnt; i++)
344 		bio->bi_iter.bi_size += bio->bi_io_vec[i].bv_len;
345 	bio->bi_vcnt = vcnt;
346 	bio->bi_private = private;
347 	bio->bi_end_io = end_io;
348 }
349 EXPORT_SYMBOL_GPL(bio_reuse);
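
/*
 * Example (illustrative sketch, not part of this file): read a payload and
 * then write the very same pages to another location by reusing the bio.
 * The variables bio, src_sector and dst_sector are assumptions of this
 * example; the bio is assumed to have been allocated for REQ_OP_READ with
 * its pages already added.
 *
 *	bio->bi_iter.bi_sector = src_sector;
 *	error = submit_bio_wait(bio);
 *	if (!error) {
 *		bio_reuse(bio, REQ_OP_WRITE);
 *		bio->bi_iter.bi_sector = dst_sector;
 *		error = submit_bio_wait(bio);
 *	}
 *	bio_put(bio);
 */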
350 
351 static struct bio *__bio_chain_endio(struct bio *bio)
352 {
353 	struct bio *parent = bio->bi_private;
354 
355 	if (bio->bi_status && !parent->bi_status)
356 		parent->bi_status = bio->bi_status;
357 	bio_put(bio);
358 	return parent;
359 }
360 
361 /*
362  * This function should only be used as a flag and must never be called.
363  * If execution reaches here, it indicates a serious programming error.
364  */
365 static void bio_chain_endio(struct bio *bio)
366 {
367 	BUG();
368 }
369 
370 /**
371  * bio_chain - chain bio completions
372  * @bio: the target bio
373  * @parent: the parent bio of @bio
374  *
375  * The caller won't have a bi_end_io called when @bio completes - instead,
376  * @parent's bi_end_io won't be called until both @parent and @bio have
377  * completed; the chained bio will also be freed when it completes.
378  *
379  * The caller must not set bi_private or bi_end_io in @bio.
380  */
381 void bio_chain(struct bio *bio, struct bio *parent)
382 {
383 	BUG_ON(bio->bi_private || bio->bi_end_io);
384 
385 	bio->bi_private = parent;
386 	bio->bi_end_io	= bio_chain_endio;
387 	bio_inc_remaining(parent);
388 }
389 EXPORT_SYMBOL(bio_chain);
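
/*
 * Example (illustrative sketch, not part of this file): issue an additional
 * bio on behalf of @parent so that @parent's ->bi_end_io runs only once both
 * have completed.  bdev, page and dst_sector are assumptions of this example.
 *
 *	struct bio *new = bio_alloc(bdev, 1, REQ_OP_WRITE, GFP_NOIO);
 *
 *	new->bi_iter.bi_sector = dst_sector;
 *	__bio_add_page(new, page, PAGE_SIZE, 0);
 *	bio_chain(new, parent);
 *	submit_bio(new);
 *	// ... continue working with parent, then submit or end it as usual
 */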
390 
391 /**
392  * bio_chain_and_submit - submit a bio after chaining it to another one
393  * @prev: bio to chain and submit
394  * @new: bio to chain to
395  *
396  * If @prev is non-NULL, chain it to @new and submit it.
397  *
398  * Return: @new.
399  */
400 struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
401 {
402 	if (prev) {
403 		bio_chain(prev, new);
404 		submit_bio(prev);
405 	}
406 	return new;
407 }
408 
409 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
410 		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
411 {
412 	return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
413 }
414 EXPORT_SYMBOL_GPL(blk_next_bio);
415 
416 static void bio_alloc_rescue(struct work_struct *work)
417 {
418 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
419 	struct bio *bio;
420 
421 	while (1) {
422 		spin_lock(&bs->rescue_lock);
423 		bio = bio_list_pop(&bs->rescue_list);
424 		spin_unlock(&bs->rescue_lock);
425 
426 		if (!bio)
427 			break;
428 
429 		submit_bio_noacct(bio);
430 	}
431 }
432 
433 static void punt_bios_to_rescuer(struct bio_set *bs)
434 {
435 	struct bio_list punt, nopunt;
436 	struct bio *bio;
437 
438 	if (WARN_ON_ONCE(!bs->rescue_workqueue))
439 		return;
440 	/*
441 	 * In order to guarantee forward progress we must punt only bios that
442 	 * were allocated from this bio_set; otherwise, if there was a bio on
443 	 * there for a stacking driver higher up in the stack, processing it
444 	 * could require allocating bios from this bio_set, and doing that from
445 	 * our own rescuer would be bad.
446 	 *
447 	 * Since bio lists are singly linked, pop them all instead of trying to
448 	 * remove from the middle of the list:
449 	 */
450 
451 	bio_list_init(&punt);
452 	bio_list_init(&nopunt);
453 
454 	while ((bio = bio_list_pop(&current->bio_list[0])))
455 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
456 	current->bio_list[0] = nopunt;
457 
458 	bio_list_init(&nopunt);
459 	while ((bio = bio_list_pop(&current->bio_list[1])))
460 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
461 	current->bio_list[1] = nopunt;
462 
463 	spin_lock(&bs->rescue_lock);
464 	bio_list_merge(&bs->rescue_list, &punt);
465 	spin_unlock(&bs->rescue_lock);
466 
467 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
468 }
469 
470 static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
471 {
472 	unsigned long flags;
473 
474 	/* cache->free_list must be empty */
475 	if (WARN_ON_ONCE(cache->free_list))
476 		return;
477 
478 	local_irq_save(flags);
479 	cache->free_list = cache->free_list_irq;
480 	cache->free_list_irq = NULL;
481 	cache->nr += cache->nr_irq;
482 	cache->nr_irq = 0;
483 	local_irq_restore(flags);
484 }
485 
486 static struct bio *bio_alloc_percpu_cache(struct block_device *bdev,
487 		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,
488 		struct bio_set *bs)
489 {
490 	struct bio_alloc_cache *cache;
491 	struct bio *bio;
492 
493 	cache = per_cpu_ptr(bs->cache, get_cpu());
494 	if (!cache->free_list) {
495 		if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
496 			bio_alloc_irq_cache_splice(cache);
497 		if (!cache->free_list) {
498 			put_cpu();
499 			return NULL;
500 		}
501 	}
502 	bio = cache->free_list;
503 	cache->free_list = bio->bi_next;
504 	cache->nr--;
505 	put_cpu();
506 
507 	if (nr_vecs)
508 		bio_init_inline(bio, bdev, nr_vecs, opf);
509 	else
510 		bio_init(bio, bdev, NULL, nr_vecs, opf);
511 	bio->bi_pool = bs;
512 	return bio;
513 }
514 
515 /**
516  * bio_alloc_bioset - allocate a bio for I/O
517  * @bdev:	block device to allocate the bio for (can be %NULL)
518  * @nr_vecs:	number of bvecs to pre-allocate
519  * @opf:	operation and flags for bio
520  * @gfp_mask:   the GFP_* mask given to the slab allocator
521  * @bs:		the bio_set to allocate from.
522  *
523  * Allocate a bio from the mempools in @bs.
524  *
525  * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
526  * allocate a bio.  This is due to the mempool guarantees.  To make this work,
527  * callers must never allocate more than 1 bio at a time from the general pool.
528  * Callers that need to allocate more than 1 bio must always submit the
529  * previously allocated bio for IO before attempting to allocate a new one.
530  * Failure to do so can cause deadlocks under memory pressure.
531  *
532  * Note that when running under submit_bio_noacct() (i.e. any block driver),
533  * bios are not submitted until after you return - see the code in
534  * submit_bio_noacct() that converts recursion into iteration, to prevent
535  * stack overflows.
536  *
537  * This would normally mean allocating multiple bios under submit_bio_noacct()
538  * would be susceptible to deadlocks, but we have
539  * deadlock avoidance code that resubmits any blocked bios from a rescuer
540  * thread.
541  *
542  * However, we do not guarantee forward progress for allocations from other
543  * mempools. Doing multiple allocations from the same mempool under
544  * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
545  * for per bio allocations.
546  *
547  * Returns: Pointer to new bio on success, NULL on failure.
548  */
549 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
550 			     blk_opf_t opf, gfp_t gfp_mask,
551 			     struct bio_set *bs)
552 {
553 	gfp_t saved_gfp = gfp_mask;
554 	struct bio *bio;
555 	void *p;
556 
557 	/* should not use nobvec bioset for nr_vecs > 0 */
558 	if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
559 		return NULL;
560 
561 	if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
562 		opf |= REQ_ALLOC_CACHE;
563 		bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf,
564 					     gfp_mask, bs);
565 		if (bio)
566 			return bio;
567 		/*
568 		 * No cached bio available; the bio returned below is marked with
569 		 * REQ_ALLOC_CACHE so that it participates in the per-cpu alloc cache.
570 		 */
571 	} else
572 		opf &= ~REQ_ALLOC_CACHE;
573 
574 	/*
575 	 * submit_bio_noacct() converts recursion to iteration; this means if
576 	 * we're running beneath it, any bios we allocate and submit will not be
577 	 * submitted (and thus freed) until after we return.
578 	 *
579 	 * This exposes us to a potential deadlock if we allocate multiple bios
580 	 * from the same bio_set() while running underneath submit_bio_noacct().
581 	 * If we were to allocate multiple bios (say a stacking block driver
582 	 * that was splitting bios), we would deadlock if we exhausted the
583 	 * mempool's reserve.
584 	 *
585 	 * We solve this, and guarantee forward progress, with a rescuer
586 	 * workqueue per bio_set. If we go to allocate and there are bios on
587 	 * current->bio_list, we first try the allocation without
588 	 * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be
589 	 * blocking to the rescuer workqueue before we retry with the original
590 	 * gfp_flags.
591 	 */
592 	if (current->bio_list &&
593 	    (!bio_list_empty(&current->bio_list[0]) ||
594 	     !bio_list_empty(&current->bio_list[1])) &&
595 	    bs->rescue_workqueue)
596 		gfp_mask &= ~__GFP_DIRECT_RECLAIM;
597 
598 	p = mempool_alloc(&bs->bio_pool, gfp_mask);
599 	if (!p && gfp_mask != saved_gfp) {
600 		punt_bios_to_rescuer(bs);
601 		gfp_mask = saved_gfp;
602 		p = mempool_alloc(&bs->bio_pool, gfp_mask);
603 	}
604 	if (unlikely(!p))
605 		return NULL;
606 	if (!mempool_is_saturated(&bs->bio_pool))
607 		opf &= ~REQ_ALLOC_CACHE;
608 
609 	bio = p + bs->front_pad;
610 	if (nr_vecs > BIO_INLINE_VECS) {
611 		struct bio_vec *bvl = NULL;
612 
613 		bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
614 		if (!bvl && gfp_mask != saved_gfp) {
615 			punt_bios_to_rescuer(bs);
616 			gfp_mask = saved_gfp;
617 			bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask);
618 		}
619 		if (unlikely(!bvl))
620 			goto err_free;
621 
622 		bio_init(bio, bdev, bvl, nr_vecs, opf);
623 	} else if (nr_vecs) {
624 		bio_init_inline(bio, bdev, BIO_INLINE_VECS, opf);
625 	} else {
626 		bio_init(bio, bdev, NULL, 0, opf);
627 	}
628 
629 	bio->bi_pool = bs;
630 	return bio;
631 
632 err_free:
633 	mempool_free(p, &bs->bio_pool);
634 	return NULL;
635 }
636 EXPORT_SYMBOL(bio_alloc_bioset);
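
/*
 * Example (illustrative sketch, not part of this file): allocate from a
 * driver-private bio_set whose front_pad embeds a per-I/O context in front
 * of the bio.  struct my_io and my_bio_set are hypothetical; my_bio_set is
 * assumed to have been initialized with front_pad = offsetof(struct my_io,
 * bio), as shown in the bioset_init() example near the end of this file.
 *
 *	struct bio *bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ, GFP_NOIO,
 *					   &my_bio_set);
 *	struct my_io *io = container_of(bio, struct my_io, bio);
 *
 *	io->start_time = jiffies;
 */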
637 
638 /**
639  * bio_kmalloc - kmalloc a bio
640  * @nr_vecs:	number of bio_vecs to allocate
641  * @gfp_mask:   the GFP_* mask given to the slab allocator
642  *
643  * Use kmalloc to allocate a bio (including bvecs).  The bio must be initialized
644  * using bio_init() before use.  To free a bio returned from this function use
645  * kfree() after calling bio_uninit().  A bio returned from this function can
646  * be reused by calling bio_uninit() before calling bio_init() again.
647  *
648  * Note that unlike bio_alloc() or bio_alloc_bioset(), allocations from this
649  * function are not backed by a mempool and can fail.  Do not use this function
650  * for allocations in the file system I/O path.
651  *
652  * Returns: Pointer to new bio on success, NULL on failure.
653  */
654 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
655 {
656 	struct bio *bio;
657 
658 	if (nr_vecs > BIO_MAX_INLINE_VECS)
659 		return NULL;
660 	return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec),
661 			gfp_mask);
662 }
663 EXPORT_SYMBOL(bio_kmalloc);
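
/*
 * Example (illustrative sketch, not part of this file): typical lifecycle of
 * a kmalloc'ed bio.  bdev and the surrounding error handling are assumptions
 * of this example.
 *
 *	struct bio *bio = bio_kmalloc(4, GFP_KERNEL);
 *
 *	if (!bio)
 *		return -ENOMEM;
 *	bio_init(bio, bdev, bio->bi_inline_vecs, 4, REQ_OP_READ);
 *	// ... add pages, submit and wait for the bio ...
 *	bio_uninit(bio);
 *	kfree(bio);
 */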
664 
665 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
666 {
667 	struct bio_vec bv;
668 	struct bvec_iter iter;
669 
670 	__bio_for_each_segment(bv, bio, iter, start)
671 		memzero_bvec(&bv);
672 }
673 EXPORT_SYMBOL(zero_fill_bio_iter);
674 
675 /**
676  * bio_truncate - truncate the bio to @new_size
677  * @bio:	the bio to be truncated
678  * @new_size:	new size for truncating the bio
679  *
680  * Description:
681  *   Truncate the bio to the new size @new_size. If bio_op(bio) is
682  *   REQ_OP_READ, zero the truncated part. This function should only
683  *   be used for handling corner cases, such as bio eod.
684  */
685 static void bio_truncate(struct bio *bio, unsigned new_size)
686 {
687 	struct bio_vec bv;
688 	struct bvec_iter iter;
689 	unsigned int done = 0;
690 	bool truncated = false;
691 
692 	if (new_size >= bio->bi_iter.bi_size)
693 		return;
694 
695 	if (bio_op(bio) != REQ_OP_READ)
696 		goto exit;
697 
698 	bio_for_each_segment(bv, bio, iter) {
699 		if (done + bv.bv_len > new_size) {
700 			size_t offset;
701 
702 			if (!truncated)
703 				offset = new_size - done;
704 			else
705 				offset = 0;
706 			memzero_page(bv.bv_page, bv.bv_offset + offset,
707 				  bv.bv_len - offset);
708 			truncated = true;
709 		}
710 		done += bv.bv_len;
711 	}
712 
713  exit:
714 	/*
715 	 * Don't touch the bvec table here and keep it effectively immutable, since
716 	 * an fs bio user has to retrieve all pages via bio_for_each_segment_all
717 	 * in its ->bi_end_io() callback.
718 	 *
719 	 * It is enough to truncate bio by updating .bi_size since we can make
720 	 * correct bvec with the updated .bi_size for drivers.
721 	 */
722 	bio->bi_iter.bi_size = new_size;
723 }
724 
725 /**
726  * guard_bio_eod - truncate a BIO to fit the block device
727  * @bio:	bio to truncate
728  *
729  * This allows us to do IO even on the odd last sectors of a device, even if the
730  * block size is some multiple of the physical sector size.
731  *
732  * We'll just truncate the bio to the size of the device, and clear the end of
733  * the buffer head manually.  Truly out-of-range accesses will turn into actual
734  * I/O errors, this only handles the "we need to be able to do I/O at the final
735  * sector" case.
736  */
737 void guard_bio_eod(struct bio *bio)
738 {
739 	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
740 
741 	if (!maxsector)
742 		return;
743 
744 	/*
745 	 * If the *whole* IO is past the end of the device,
746 	 * let it through, and the IO layer will turn it into
747 	 * an EIO.
748 	 */
749 	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
750 		return;
751 
752 	maxsector -= bio->bi_iter.bi_sector;
753 	if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
754 		return;
755 
756 	bio_truncate(bio, maxsector << 9);
757 }
758 
759 static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
760 				   unsigned int nr)
761 {
762 	unsigned int i = 0;
763 	struct bio *bio;
764 
765 	while ((bio = cache->free_list) != NULL) {
766 		cache->free_list = bio->bi_next;
767 		cache->nr--;
768 		bio_free(bio);
769 		if (++i == nr)
770 			break;
771 	}
772 	return i;
773 }
774 
775 static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
776 				  unsigned int nr)
777 {
778 	nr -= __bio_alloc_cache_prune(cache, nr);
779 	if (!READ_ONCE(cache->free_list)) {
780 		bio_alloc_irq_cache_splice(cache);
781 		__bio_alloc_cache_prune(cache, nr);
782 	}
783 }
784 
785 static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
786 {
787 	struct bio_set *bs;
788 
789 	bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
790 	if (bs->cache) {
791 		struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
792 
793 		bio_alloc_cache_prune(cache, -1U);
794 	}
795 	return 0;
796 }
797 
798 static void bio_alloc_cache_destroy(struct bio_set *bs)
799 {
800 	int cpu;
801 
802 	if (!bs->cache)
803 		return;
804 
805 	cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
806 	for_each_possible_cpu(cpu) {
807 		struct bio_alloc_cache *cache;
808 
809 		cache = per_cpu_ptr(bs->cache, cpu);
810 		bio_alloc_cache_prune(cache, -1U);
811 	}
812 	free_percpu(bs->cache);
813 	bs->cache = NULL;
814 }
815 
816 static inline void bio_put_percpu_cache(struct bio *bio)
817 {
818 	struct bio_alloc_cache *cache;
819 
820 	cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
821 	if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX)
822 		goto out_free;
823 
824 	if (in_task()) {
825 		bio_uninit(bio);
826 		bio->bi_next = cache->free_list;
827 		/* Not necessary but helps not to iopoll already freed bios */
828 		bio->bi_bdev = NULL;
829 		cache->free_list = bio;
830 		cache->nr++;
831 	} else if (in_hardirq()) {
832 		lockdep_assert_irqs_disabled();
833 
834 		bio_uninit(bio);
835 		bio->bi_next = cache->free_list_irq;
836 		cache->free_list_irq = bio;
837 		cache->nr_irq++;
838 	} else {
839 		goto out_free;
840 	}
841 	put_cpu();
842 	return;
843 out_free:
844 	put_cpu();
845 	bio_free(bio);
846 }
847 
848 /**
849  * bio_put - release a reference to a bio
850  * @bio:   bio to release reference to
851  *
852  * Description:
853  *   Put a reference to a &struct bio, either one you have gotten with
854  *   bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
855  **/
856 void bio_put(struct bio *bio)
857 {
858 	if (unlikely(bio_flagged(bio, BIO_REFFED))) {
859 		BUG_ON(!atomic_read(&bio->__bi_cnt));
860 		if (!atomic_dec_and_test(&bio->__bi_cnt))
861 			return;
862 	}
863 	if (bio->bi_opf & REQ_ALLOC_CACHE)
864 		bio_put_percpu_cache(bio);
865 	else
866 		bio_free(bio);
867 }
868 EXPORT_SYMBOL(bio_put);
869 
870 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
871 {
872 	bio_set_flag(bio, BIO_CLONED);
873 	bio->bi_ioprio = bio_src->bi_ioprio;
874 	bio->bi_write_hint = bio_src->bi_write_hint;
875 	bio->bi_write_stream = bio_src->bi_write_stream;
876 	bio->bi_iter = bio_src->bi_iter;
877 
878 	if (bio->bi_bdev) {
879 		if (bio->bi_bdev == bio_src->bi_bdev &&
880 		    bio_flagged(bio_src, BIO_REMAPPED))
881 			bio_set_flag(bio, BIO_REMAPPED);
882 		bio_clone_blkg_association(bio, bio_src);
883 	}
884 
885 	if (bio_crypt_clone(bio, bio_src, gfp) < 0)
886 		return -ENOMEM;
887 	if (bio_integrity(bio_src) &&
888 	    bio_integrity_clone(bio, bio_src, gfp) < 0)
889 		return -ENOMEM;
890 	return 0;
891 }
892 
893 /**
894  * bio_alloc_clone - clone a bio that shares the original bio's biovec
895  * @bdev: block_device to clone onto
896  * @bio_src: bio to clone from
897  * @gfp: allocation priority
898  * @bs: bio_set to allocate from
899  *
900  * Allocate a new bio that is a clone of @bio_src. The caller owns the returned
901  * bio, but not the actual data it points to.
902  *
903  * The caller must ensure that the returned bio is not freed before @bio_src.
904  */
905 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
906 		gfp_t gfp, struct bio_set *bs)
907 {
908 	struct bio *bio;
909 
910 	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
911 	if (!bio)
912 		return NULL;
913 
914 	if (__bio_clone(bio, bio_src, gfp) < 0) {
915 		bio_put(bio);
916 		return NULL;
917 	}
918 	bio->bi_io_vec = bio_src->bi_io_vec;
919 
920 	return bio;
921 }
922 EXPORT_SYMBOL(bio_alloc_clone);
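
/*
 * Example (illustrative sketch, not part of this file): the usual stacking
 * driver pattern of cloning an incoming bio, remapping it and sending it to
 * a lower device.  lower_bdev, my_bio_set, my_clone_end_io() and
 * remap_sector() are hypothetical.
 *
 *	struct bio *clone = bio_alloc_clone(lower_bdev, bio, GFP_NOIO,
 *					    &my_bio_set);
 *
 *	if (!clone)
 *		goto requeue;	// hypothetical error path
 *	clone->bi_iter.bi_sector = remap_sector(bio->bi_iter.bi_sector);
 *	clone->bi_end_io = my_clone_end_io;
 *	clone->bi_private = bio;
 *	submit_bio_noacct(clone);
 */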
923 
924 /**
925  * bio_init_clone - clone a bio that shares the original bio's biovec
926  * @bdev: block_device to clone onto
927  * @bio: bio to clone into
928  * @bio_src: bio to clone from
929  * @gfp: allocation priority
930  *
931  * Initialize a new bio in caller provided memory that is a clone of @bio_src.
932  * The caller owns the returned bio, but not the actual data it points to.
933  *
934  * The caller must ensure that @bio_src is not freed before @bio.
935  */
936 int bio_init_clone(struct block_device *bdev, struct bio *bio,
937 		struct bio *bio_src, gfp_t gfp)
938 {
939 	int ret;
940 
941 	bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
942 	ret = __bio_clone(bio, bio_src, gfp);
943 	if (ret)
944 		bio_uninit(bio);
945 	return ret;
946 }
947 EXPORT_SYMBOL(bio_init_clone);
948 
949 /**
950  * bio_full - check if the bio is full
951  * @bio:	bio to check
952  * @len:	length of one segment to be added
953  *
954  * Return true if @bio is full and one segment with @len bytes can't be
955  * added to the bio, otherwise return false
956  */
957 static inline bool bio_full(struct bio *bio, unsigned len)
958 {
959 	if (bio->bi_vcnt >= bio->bi_max_vecs)
960 		return true;
961 	if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
962 		return true;
963 	return false;
964 }
965 
966 static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
967 		unsigned int len, unsigned int off)
968 {
969 	size_t bv_end = bv->bv_offset + bv->bv_len;
970 	phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
971 	phys_addr_t page_addr = page_to_phys(page);
972 
973 	if (vec_end_addr + 1 != page_addr + off)
974 		return false;
975 	if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
976 		return false;
977 
978 	if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) {
979 		if (IS_ENABLED(CONFIG_KMSAN))
980 			return false;
981 		if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
982 			return false;
983 	}
984 
985 	bv->bv_len += len;
986 	return true;
987 }
988 
989 /*
990  * Try to merge a page into a segment, while obeying the hardware segment
991  * size limit.
992  *
993  * This is kept around for the integrity metadata, which still tries
994  * to build the initial bio to the hardware limit and doesn't have proper
995  * helpers to split.  Hopefully this will go away soon.
996  */
997 bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
998 		struct page *page, unsigned len, unsigned offset)
999 {
1000 	unsigned long mask = queue_segment_boundary(q);
1001 	phys_addr_t addr1 = bvec_phys(bv);
1002 	phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
1003 
1004 	if ((addr1 | mask) != (addr2 | mask))
1005 		return false;
1006 	if (len > queue_max_segment_size(q) - bv->bv_len)
1007 		return false;
1008 	return bvec_try_merge_page(bv, page, len, offset);
1009 }
1010 
1011 /**
1012  * __bio_add_page - add page(s) to a bio in a new segment
1013  * @bio: destination bio
1014  * @page: start page to add
1015  * @len: length of the data to add, may cross pages
1016  * @off: offset of the data relative to @page, may cross pages
1017  *
1018  * Add the data at @page + @off to @bio as a new bvec.  The caller must ensure
1019  * that @bio has space for another bvec.
1020  */
1021 void __bio_add_page(struct bio *bio, struct page *page,
1022 		unsigned int len, unsigned int off)
1023 {
1024 	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1025 	WARN_ON_ONCE(bio_full(bio, len));
1026 
1027 	if (is_pci_p2pdma_page(page))
1028 		bio->bi_opf |= REQ_NOMERGE;
1029 
1030 	bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
1031 	bio->bi_iter.bi_size += len;
1032 	bio->bi_vcnt++;
1033 }
1034 EXPORT_SYMBOL_GPL(__bio_add_page);
1035 
1036 /**
1037  * bio_add_virt_nofail - add data in the direct kernel mapping to a bio
1038  * @bio: destination bio
1039  * @vaddr: data to add
1040  * @len: length of the data to add, may cross pages
1041  *
1042  * Add the data at @vaddr to @bio.  The caller must have ensured that a segment
1043  * is available for the added data.  No merging into an existing segment
1044  * will be performed.
1045  */
1046 void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
1047 {
1048 	__bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
1049 }
1050 EXPORT_SYMBOL_GPL(bio_add_virt_nofail);
1051 
1052 /**
1053  *	bio_add_page	-	attempt to add page(s) to bio
1054  *	@bio: destination bio
1055  *	@page: start page to add
1056  *	@len: vec entry length, may cross pages
1057  *	@offset: vec entry offset relative to @page, may cross pages
1058  *
1059  *	Attempt to add page(s) to the bio_vec maplist. This will only fail
1060  *	if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1061  */
1062 int bio_add_page(struct bio *bio, struct page *page,
1063 		 unsigned int len, unsigned int offset)
1064 {
1065 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1066 		return 0;
1067 	if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
1068 		return 0;
1069 
1070 	if (bio->bi_vcnt > 0) {
1071 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
1072 
1073 		if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
1074 			return 0;
1075 
1076 		if (bvec_try_merge_page(bv, page, len, offset)) {
1077 			bio->bi_iter.bi_size += len;
1078 			return len;
1079 		}
1080 	}
1081 
1082 	if (bio->bi_vcnt >= bio->bi_max_vecs)
1083 		return 0;
1084 	__bio_add_page(bio, page, len, offset);
1085 	return len;
1086 }
1087 EXPORT_SYMBOL(bio_add_page);
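
/*
 * Example (illustrative sketch, not part of this file): fill a bio with as
 * many pages as fit, submitting and starting a new bio when it runs full.
 * The pages[] array, nr_pages and alloc_next_bio() are assumptions of this
 * example.
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE) {
 *			submit_bio(bio);
 *			bio = alloc_next_bio();	// hypothetical helper
 *			__bio_add_page(bio, pages[i], PAGE_SIZE, 0);
 *		}
 *	}
 *	submit_bio(bio);
 */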
1088 
1089 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1090 			  size_t off)
1091 {
1092 	unsigned long nr = off / PAGE_SIZE;
1093 
1094 	WARN_ON_ONCE(len > BIO_MAX_SIZE);
1095 	__bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
1096 }
1097 EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
1098 
1099 /**
1100  * bio_add_folio - Attempt to add part of a folio to a bio.
1101  * @bio: BIO to add to.
1102  * @folio: Folio to add.
1103  * @len: How many bytes from the folio to add.
1104  * @off: First byte in this folio to add.
1105  *
1106  * Filesystems that use folios can call this function instead of calling
1107  * bio_add_page() for each page in the folio.  If @off is bigger than
1108  * PAGE_SIZE, this function can create a bio_vec that starts in a page
1109  * after the bv_page.  BIOs do not support folios that are 4GiB or larger.
1110  *
1111  * Return: Whether the addition was successful.
1112  */
1113 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1114 		   size_t off)
1115 {
1116 	unsigned long nr = off / PAGE_SIZE;
1117 
1118 	if (len > BIO_MAX_SIZE)
1119 		return false;
1120 	return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
1121 }
1122 EXPORT_SYMBOL(bio_add_folio);
1123 
1124 /**
1125  * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio
1126  * @bio: destination bio
1127  * @vaddr: vmalloc address to add
1128  * @len: total length in bytes of the data to add
1129  *
1130  * Add data starting at @vaddr to @bio and return how many bytes were added.
1131  * This may be less than the amount originally asked for.  Returns 0 if no data
1132  * could be added to @bio.
1133  *
1134  * This helper calls flush_kernel_vmap_range() for the range added.  For reads
1135  * the caller still needs to manually call invalidate_kernel_vmap_range() in
1136  * the completion handler.
1137  */
1138 unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
1139 {
1140 	unsigned int offset = offset_in_page(vaddr);
1141 
1142 	len = min(len, PAGE_SIZE - offset);
1143 	if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
1144 		return 0;
1145 	if (op_is_write(bio_op(bio)))
1146 		flush_kernel_vmap_range(vaddr, len);
1147 	return len;
1148 }
1149 EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk);
1150 
1151 /**
1152  * bio_add_vmalloc - add a vmalloc region to a bio
1153  * @bio: destination bio
1154  * @vaddr: vmalloc address to add
1155  * @len: total length in bytes of the data to add
1156  *
1157  * Add data starting at @vaddr to @bio.  Return %true on success or %false if
1158  * @bio does not have enough space for the payload.
1159  *
1160  * This helper calls flush_kernel_vmap_range() for the range added.  For reads
1161  * the caller still needs to manually call invalidate_kernel_vmap_range() in
1162  * the completion handler.
1163  */
1164 bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
1165 {
1166 	do {
1167 		unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);
1168 
1169 		if (!added)
1170 			return false;
1171 		vaddr += added;
1172 		len -= added;
1173 	} while (len);
1174 
1175 	return true;
1176 }
1177 EXPORT_SYMBOL_GPL(bio_add_vmalloc);
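
/*
 * Example (illustrative sketch, not part of this file): write out a
 * vmalloc()ed buffer, assuming @bio was allocated with enough bio_vecs to
 * cover @len (one per page in the worst case).
 *
 *	if (!bio_add_vmalloc(bio, buf, len))
 *		return -EIO;	// hypothetical error handling
 *	error = submit_bio_wait(bio);
 *
 * For a read the caller would additionally call
 * invalidate_kernel_vmap_range(buf, len) before touching the data, as noted
 * above.
 */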
1178 
1179 void __bio_release_pages(struct bio *bio, bool mark_dirty)
1180 {
1181 	struct folio_iter fi;
1182 
1183 	bio_for_each_folio_all(fi, bio) {
1184 		size_t nr_pages;
1185 
1186 		if (mark_dirty) {
1187 			folio_lock(fi.folio);
1188 			folio_mark_dirty(fi.folio);
1189 			folio_unlock(fi.folio);
1190 		}
1191 		nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
1192 			   fi.offset / PAGE_SIZE + 1;
1193 		unpin_user_folio(fi.folio, nr_pages);
1194 	}
1195 }
1196 EXPORT_SYMBOL_GPL(__bio_release_pages);
1197 
1198 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
1199 {
1200 	WARN_ON_ONCE(bio->bi_max_vecs);
1201 
1202 	bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1203 	bio->bi_iter.bi_idx = 0;
1204 	bio->bi_iter.bi_bvec_done = iter->iov_offset;
1205 	bio->bi_iter.bi_size = iov_iter_count(iter);
1206 	bio_set_flag(bio, BIO_CLONED);
1207 }
1208 
1209 /*
1210  * Aligns the bio size to the len_align_mask, releasing excessive bio vecs that
1211  * __bio_iov_iter_get_pages may have inserted, and reverts the trimmed length
1212  * for the next iteration.
1213  */
1214 static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
1215 			    unsigned len_align_mask)
1216 {
1217 	size_t nbytes = bio->bi_iter.bi_size & len_align_mask;
1218 
1219 	if (!nbytes)
1220 		return 0;
1221 
1222 	iov_iter_revert(iter, nbytes);
1223 	bio->bi_iter.bi_size -= nbytes;
1224 	do {
1225 		struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
1226 
1227 		if (nbytes < bv->bv_len) {
1228 			bv->bv_len -= nbytes;
1229 			break;
1230 		}
1231 
1232 		if (bio_flagged(bio, BIO_PAGE_PINNED))
1233 			unpin_user_page(bv->bv_page);
1234 
1235 		bio->bi_vcnt--;
1236 		nbytes -= bv->bv_len;
1237 	} while (nbytes);
1238 
1239 	if (!bio->bi_vcnt)
1240 		return -EFAULT;
1241 	return 0;
1242 }
1243 
1244 /**
1245  * bio_iov_iter_get_pages - add user or kernel pages to a bio
1246  * @bio: bio to add pages to
1247  * @iter: iov iterator describing the region to be added
1248  * @len_align_mask: the mask to align the total size to, 0 for any length
1249  *
1250  * This takes either an iterator pointing to user memory, or one pointing to
1251  * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1252  * map them into the kernel. On IO completion, the caller should put those
1253  * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1254  * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1255  * to ensure the bvecs and pages stay referenced until the submitted I/O is
1256  * completed by a call to ->ki_complete() or returns with an error other than
1257  * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1258  * on IO completion. If it isn't, then pages should be released.
1259  *
1260  * The function tries, but does not guarantee, to pin as many pages as
1261  * fit into the bio, or are requested in @iter, whatever is smaller. If
1262  * MM encounters an error pinning the requested pages, it stops. Error
1263  * is returned only if 0 pages could be pinned.
1264  */
1265 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
1266 			   unsigned len_align_mask)
1267 {
1268 	iov_iter_extraction_t flags = 0;
1269 
1270 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1271 		return -EIO;
1272 
1273 	if (iov_iter_is_bvec(iter)) {
1274 		bio_iov_bvec_set(bio, iter);
1275 		iov_iter_advance(iter, bio->bi_iter.bi_size);
1276 		return 0;
1277 	}
1278 
1279 	if (iov_iter_extract_will_pin(iter))
1280 		bio_set_flag(bio, BIO_PAGE_PINNED);
1281 	if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1282 		flags |= ITER_ALLOW_P2PDMA;
1283 
1284 	do {
1285 		ssize_t ret;
1286 
1287 		ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec,
1288 				BIO_MAX_SIZE - bio->bi_iter.bi_size,
1289 				&bio->bi_vcnt, bio->bi_max_vecs, flags);
1290 		if (ret <= 0) {
1291 			if (!bio->bi_vcnt)
1292 				return ret;
1293 			break;
1294 		}
1295 		bio->bi_iter.bi_size += ret;
1296 	} while (iov_iter_count(iter) && !bio_full(bio, 0));
1297 
1298 	if (is_pci_p2pdma_page(bio->bi_io_vec->bv_page))
1299 		bio->bi_opf |= REQ_NOMERGE;
1300 	return bio_iov_iter_align_down(bio, iter, len_align_mask);
1301 }
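
/*
 * Example (illustrative sketch, not part of this file): a minimal direct I/O
 * style submission path using bio_iov_iter_get_pages().  bdev, pos,
 * my_dio_end_io() and passing 0 as @len_align_mask are assumptions of this
 * example.
 *
 *	bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
 *			REQ_OP_READ, GFP_KERNEL);
 *	bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 *	ret = bio_iov_iter_get_pages(bio, iter, 0);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 *	bio->bi_end_io = my_dio_end_io;	// must call bio_release_pages()
 *	submit_bio(bio);
 */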
1302 
1303 static struct folio *folio_alloc_greedy(gfp_t gfp, size_t *size)
1304 {
1305 	struct folio *folio;
1306 
1307 	while (*size > PAGE_SIZE) {
1308 		folio = folio_alloc(gfp | __GFP_NORETRY, get_order(*size));
1309 		if (folio)
1310 			return folio;
1311 		*size = rounddown_pow_of_two(*size - 1);
1312 	}
1313 
1314 	return folio_alloc(gfp, get_order(*size));
1315 }
1316 
1317 static void bio_free_folios(struct bio *bio)
1318 {
1319 	struct bio_vec *bv;
1320 	int i;
1321 
1322 	bio_for_each_bvec_all(bv, bio, i) {
1323 		struct folio *folio = page_folio(bv->bv_page);
1324 
1325 		if (!is_zero_folio(folio))
1326 			folio_put(folio);
1327 	}
1328 }
1329 
1330 static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter)
1331 {
1332 	size_t total_len = iov_iter_count(iter);
1333 
1334 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1335 		return -EINVAL;
1336 	if (WARN_ON_ONCE(bio->bi_iter.bi_size))
1337 		return -EINVAL;
1338 	if (WARN_ON_ONCE(bio->bi_vcnt >= bio->bi_max_vecs))
1339 		return -EINVAL;
1340 
1341 	do {
1342 		size_t this_len = min(total_len, SZ_1M);
1343 		struct folio *folio;
1344 
1345 		if (this_len > PAGE_SIZE * 2)
1346 			this_len = rounddown_pow_of_two(this_len);
1347 
1348 		if (bio->bi_iter.bi_size > BIO_MAX_SIZE - this_len)
1349 			break;
1350 
1351 		folio = folio_alloc_greedy(GFP_KERNEL, &this_len);
1352 		if (!folio)
1353 			break;
1354 		bio_add_folio_nofail(bio, folio, this_len, 0);
1355 
1356 		if (copy_from_iter(folio_address(folio), this_len, iter) !=
1357 				this_len) {
1358 			bio_free_folios(bio);
1359 			return -EFAULT;
1360 		}
1361 
1362 		total_len -= this_len;
1363 	} while (total_len && bio->bi_vcnt < bio->bi_max_vecs);
1364 
1365 	if (!bio->bi_iter.bi_size)
1366 		return -ENOMEM;
1367 	return 0;
1368 }
1369 
1370 static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter)
1371 {
1372 	size_t len = min(iov_iter_count(iter), SZ_1M);
1373 	struct folio *folio;
1374 
1375 	folio = folio_alloc_greedy(GFP_KERNEL, &len);
1376 	if (!folio)
1377 		return -ENOMEM;
1378 
1379 	do {
1380 		ssize_t ret;
1381 
1382 		ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec + 1, len,
1383 				&bio->bi_vcnt, bio->bi_max_vecs - 1, 0);
1384 		if (ret <= 0) {
1385 			if (!bio->bi_vcnt)
1386 				return ret;
1387 			break;
1388 		}
1389 		len -= ret;
1390 		bio->bi_iter.bi_size += ret;
1391 	} while (len && bio->bi_vcnt < bio->bi_max_vecs - 1);
1392 
1393 	/*
1394 	 * Set the folio directly here.  The above loop has already calculated
1395 	 * the correct bi_size, and we use bi_vcnt for the user buffers.  That
1396 	 * is safe as bi_vcnt is only used by the submitter and not the actual
1397 	 * I/O path.
1398 	 */
1399 	bvec_set_folio(&bio->bi_io_vec[0], folio, bio->bi_iter.bi_size, 0);
1400 	if (iov_iter_extract_will_pin(iter))
1401 		bio_set_flag(bio, BIO_PAGE_PINNED);
1402 	return 0;
1403 }
1404 
1405 /**
1406  * bio_iov_iter_bounce - bounce buffer data from an iter into a bio
1407  * @bio:	bio to send
1408  * @iter:	iter to read from / write into
1409  *
1410  * Helper for direct I/O implementations that need a bounce buffer because
1411  * we need to checksum the data or perform other operations that require
1412  * consistency.  Allocates folios to back the bounce buffer, and for writes
1413  * copies the data into it.  Needs to be paired with bio_iov_iter_unbounce()
1414  * called on completion.
1415  */
1416 int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter)
1417 {
1418 	if (op_is_write(bio_op(bio)))
1419 		return bio_iov_iter_bounce_write(bio, iter);
1420 	return bio_iov_iter_bounce_read(bio, iter);
1421 }
1422 
1423 static void bvec_unpin(struct bio_vec *bv, bool mark_dirty)
1424 {
1425 	struct folio *folio = page_folio(bv->bv_page);
1426 	size_t nr_pages = (bv->bv_offset + bv->bv_len - 1) / PAGE_SIZE -
1427 			bv->bv_offset / PAGE_SIZE + 1;
1428 
1429 	if (mark_dirty)
1430 		folio_mark_dirty_lock(folio);
1431 	unpin_user_folio(folio, nr_pages);
1432 }
1433 
1434 static void bio_iov_iter_unbounce_read(struct bio *bio, bool is_error,
1435 		bool mark_dirty)
1436 {
1437 	unsigned int len = bio->bi_io_vec[0].bv_len;
1438 
1439 	if (likely(!is_error)) {
1440 		void *buf = bvec_virt(&bio->bi_io_vec[0]);
1441 		struct iov_iter to;
1442 
1443 		iov_iter_bvec(&to, ITER_DEST, bio->bi_io_vec + 1, bio->bi_vcnt,
1444 				len);
1445 		/* copying to pinned pages should always work */
1446 		WARN_ON_ONCE(copy_to_iter(buf, len, &to) != len);
1447 	} else {
1448 		/* No need to mark folios dirty if never copied to them */
1449 		mark_dirty = false;
1450 	}
1451 
1452 	if (bio_flagged(bio, BIO_PAGE_PINNED)) {
1453 		int i;
1454 
1455 		for (i = 0; i < bio->bi_vcnt; i++)
1456 			bvec_unpin(&bio->bi_io_vec[1 + i], mark_dirty);
1457 	}
1458 
1459 	folio_put(page_folio(bio->bi_io_vec[0].bv_page));
1460 }
1461 
1462 /**
1463  * bio_iov_iter_unbounce - finish a bounce buffer operation
1464  * @bio:	completed bio
1465  * @is_error:	%true if an I/O error occurred and data should not be copied
1466  * @mark_dirty:	If %true, folios will be marked dirty.
1467  *
1468  * Helper for direct I/O implementations that need a bounce buffer because
1469  * we need to checksum the data or perform other operations that require
1470  * consistency.  Called to complete a bio set up by bio_iov_iter_bounce().
1471  * Copies data back for reads, and marks the original folios dirty if
1472  * requested and then frees the bounce buffer.
1473  */
1474 void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty)
1475 {
1476 	if (op_is_write(bio_op(bio)))
1477 		bio_free_folios(bio);
1478 	else
1479 		bio_iov_iter_unbounce_read(bio, is_error, mark_dirty);
1480 }
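
/*
 * Example (illustrative sketch, not part of this file): a synchronous
 * bounce-buffered direct I/O, where mark_dirty reflects whether the
 * destination folios of a read need to be dirtied.
 *
 *	ret = bio_iov_iter_bounce(bio, iter);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 *	ret = submit_bio_wait(bio);
 *	bio_iov_iter_unbounce(bio, ret != 0, mark_dirty);
 *	bio_put(bio);
 */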
1481 
1482 static void submit_bio_wait_endio(struct bio *bio)
1483 {
1484 	complete(bio->bi_private);
1485 }
1486 
1487 /**
1488  * submit_bio_wait - submit a bio, and wait until it completes
1489  * @bio: The &struct bio which describes the I/O
1490  *
1491  * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1492  * bio_endio() on failure.
1493  *
1494  * WARNING: Unlike how submit_bio() is usually used, this function does not
1495  * consume the bio reference. The caller must drop that reference
1496  * on their own.
1497  */
1498 int submit_bio_wait(struct bio *bio)
1499 {
1500 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1501 			bio->bi_bdev->bd_disk->lockdep_map);
1502 
1503 	bio->bi_private = &done;
1504 	bio->bi_end_io = submit_bio_wait_endio;
1505 	bio->bi_opf |= REQ_SYNC;
1506 	submit_bio(bio);
1507 	blk_wait_io(&done);
1508 
1509 	return blk_status_to_errno(bio->bi_status);
1510 }
1511 EXPORT_SYMBOL(submit_bio_wait);
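
/*
 * Example (illustrative sketch, not part of this file): synchronously read
 * one page using an on-stack bio.  bdev, page and sector are assumptions of
 * this example.
 *
 *	struct bio_vec bvec;
 *	struct bio bio;
 *
 *	bio_init(&bio, bdev, &bvec, 1, REQ_OP_READ);
 *	bio.bi_iter.bi_sector = sector;
 *	__bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	error = submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */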
1512 
1513 /**
1514  * bdev_rw_virt - synchronously read into / write from kernel mapping
1515  * @bdev:	block device to access
1516  * @sector:	sector to access
1517  * @data:	data to read/write
1518  * @len:	length in byte to read/write
1519  * @op:		operation (e.g. REQ_OP_READ/REQ_OP_WRITE)
1520  *
1521  * Performs synchronous I/O to @bdev for @data/@len.  @data must be in
1522  * the kernel direct mapping and not a vmalloc address.
1523  */
1524 int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
1525 		size_t len, enum req_op op)
1526 {
1527 	struct bio_vec bv;
1528 	struct bio bio;
1529 	int error;
1530 
1531 	if (WARN_ON_ONCE(is_vmalloc_addr(data)))
1532 		return -EIO;
1533 
1534 	bio_init(&bio, bdev, &bv, 1, op);
1535 	bio.bi_iter.bi_sector = sector;
1536 	bio_add_virt_nofail(&bio, data, len);
1537 	error = submit_bio_wait(&bio);
1538 	bio_uninit(&bio);
1539 	return error;
1540 }
1541 EXPORT_SYMBOL_GPL(bdev_rw_virt);
1542 
1543 static void bio_wait_end_io(struct bio *bio)
1544 {
1545 	complete(bio->bi_private);
1546 	bio_put(bio);
1547 }
1548 
1549 /*
1550  * bio_await_chain - ends @bio and waits for every chained bio to complete
1551  */
1552 void bio_await_chain(struct bio *bio)
1553 {
1554 	DECLARE_COMPLETION_ONSTACK_MAP(done,
1555 			bio->bi_bdev->bd_disk->lockdep_map);
1556 
1557 	bio->bi_private = &done;
1558 	bio->bi_end_io = bio_wait_end_io;
1559 	bio_endio(bio);
1560 	blk_wait_io(&done);
1561 }
1562 
1563 void __bio_advance(struct bio *bio, unsigned bytes)
1564 {
1565 	if (bio_integrity(bio))
1566 		bio_integrity_advance(bio, bytes);
1567 
1568 	bio_crypt_advance(bio, bytes);
1569 	bio_advance_iter(bio, &bio->bi_iter, bytes);
1570 }
1571 EXPORT_SYMBOL(__bio_advance);
1572 
1573 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1574 			struct bio *src, struct bvec_iter *src_iter)
1575 {
1576 	while (src_iter->bi_size && dst_iter->bi_size) {
1577 		struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1578 		struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1579 		unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1580 		void *src_buf = bvec_kmap_local(&src_bv);
1581 		void *dst_buf = bvec_kmap_local(&dst_bv);
1582 
1583 		memcpy(dst_buf, src_buf, bytes);
1584 
1585 		kunmap_local(dst_buf);
1586 		kunmap_local(src_buf);
1587 
1588 		bio_advance_iter_single(src, src_iter, bytes);
1589 		bio_advance_iter_single(dst, dst_iter, bytes);
1590 	}
1591 }
1592 EXPORT_SYMBOL(bio_copy_data_iter);
1593 
1594 /**
1595  * bio_copy_data - copy contents of data buffers from one bio to another
1596  * @src: source bio
1597  * @dst: destination bio
1598  *
1599  * Stops when it reaches the end of either @src or @dst - that is, copies
1600  * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1601  */
1602 void bio_copy_data(struct bio *dst, struct bio *src)
1603 {
1604 	struct bvec_iter src_iter = src->bi_iter;
1605 	struct bvec_iter dst_iter = dst->bi_iter;
1606 
1607 	bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1608 }
1609 EXPORT_SYMBOL(bio_copy_data);
1610 
1611 void bio_free_pages(struct bio *bio)
1612 {
1613 	struct bio_vec *bvec;
1614 	struct bvec_iter_all iter_all;
1615 
1616 	bio_for_each_segment_all(bvec, bio, iter_all)
1617 		__free_page(bvec->bv_page);
1618 }
1619 EXPORT_SYMBOL(bio_free_pages);
1620 
1621 /*
1622  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1623  * for performing direct-IO in BIOs.
1624  *
1625  * The problem is that we cannot run folio_mark_dirty() from interrupt context
1626  * because the required locks are not interrupt-safe.  So what we can do is to
1627  * mark the pages dirty _before_ performing IO.  And in interrupt context,
1628  * check that the pages are still dirty.   If so, fine.  If not, redirty them
1629  * in process context.
1630  *
1631  * Note that this code is very hard to test under normal circumstances because
1632  * direct-io pins the pages with get_user_pages().  This makes
1633  * is_page_cache_freeable return false, and the VM will not clean the pages.
1634  * But other code (eg, flusher threads) could clean the pages if they are mapped
1635  * pagecache.
1636  *
1637  * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1638  * deferred bio dirtying paths.
1639  */
1640 
1641 /*
1642  * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1643  */
1644 void bio_set_pages_dirty(struct bio *bio)
1645 {
1646 	struct folio_iter fi;
1647 
1648 	bio_for_each_folio_all(fi, bio) {
1649 		folio_lock(fi.folio);
1650 		folio_mark_dirty(fi.folio);
1651 		folio_unlock(fi.folio);
1652 	}
1653 }
1654 EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1655 
1656 /*
1657  * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1658  * If they are, then fine.  If, however, some pages are clean then they must
1659  * have been written out during the direct-IO read.  So we take another ref on
1660  * the BIO and re-dirty the pages in process context.
1661  *
1662  * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1663  * here on.  It will unpin each page and will run one bio_put() against the
1664  * BIO.
1665  */
1666 
1667 static void bio_dirty_fn(struct work_struct *work);
1668 
1669 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1670 static DEFINE_SPINLOCK(bio_dirty_lock);
1671 static struct bio *bio_dirty_list;
1672 
1673 /*
1674  * This runs in process context
1675  */
1676 static void bio_dirty_fn(struct work_struct *work)
1677 {
1678 	struct bio *bio, *next;
1679 
1680 	spin_lock_irq(&bio_dirty_lock);
1681 	next = bio_dirty_list;
1682 	bio_dirty_list = NULL;
1683 	spin_unlock_irq(&bio_dirty_lock);
1684 
1685 	while ((bio = next) != NULL) {
1686 		next = bio->bi_private;
1687 
1688 		bio_release_pages(bio, true);
1689 		bio_put(bio);
1690 	}
1691 }
1692 
1693 void bio_check_pages_dirty(struct bio *bio)
1694 {
1695 	struct folio_iter fi;
1696 	unsigned long flags;
1697 
1698 	bio_for_each_folio_all(fi, bio) {
1699 		if (!folio_test_dirty(fi.folio))
1700 			goto defer;
1701 	}
1702 
1703 	bio_release_pages(bio, false);
1704 	bio_put(bio);
1705 	return;
1706 defer:
1707 	spin_lock_irqsave(&bio_dirty_lock, flags);
1708 	bio->bi_private = bio_dirty_list;
1709 	bio_dirty_list = bio;
1710 	spin_unlock_irqrestore(&bio_dirty_lock, flags);
1711 	schedule_work(&bio_dirty_work);
1712 }
1713 EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
1714 
1715 static inline bool bio_remaining_done(struct bio *bio)
1716 {
1717 	/*
1718 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1719 	 * we always end io on the first invocation.
1720 	 */
1721 	if (!bio_flagged(bio, BIO_CHAIN))
1722 		return true;
1723 
1724 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1725 
1726 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1727 		bio_clear_flag(bio, BIO_CHAIN);
1728 		return true;
1729 	}
1730 
1731 	return false;
1732 }
1733 
1734 /**
1735  * bio_endio - end I/O on a bio
1736  * @bio:	bio
1737  *
1738  * Description:
1739  *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1740  *   way to end I/O on a bio. No one should call bi_end_io() directly on a
1741  *   bio unless they own it and thus know that it has an end_io function.
1742  *
1743  *   bio_endio() can be called several times on a bio that has been chained
1744  *   using bio_chain().  The ->bi_end_io() function will only be called the
1745  *   last time.
1746  **/
1747 void bio_endio(struct bio *bio)
1748 {
1749 again:
1750 	if (!bio_remaining_done(bio))
1751 		return;
1752 	if (!bio_integrity_endio(bio))
1753 		return;
1754 
1755 	blk_zone_bio_endio(bio);
1756 
1757 	rq_qos_done_bio(bio);
1758 
1759 	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1760 		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1761 		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1762 	}
1763 
1764 	/*
1765 	 * Need to have a real endio function for chained bios, otherwise
1766 	 * various corner cases will break (like stacking block devices that
1767 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1768 	 * recursion and blowing the stack. Tail call optimization would
1769 	 * handle this, but compiling with frame pointers also disables
1770 	 * gcc's sibling call optimization.
1771 	 */
1772 	if (bio->bi_end_io == bio_chain_endio) {
1773 		bio = __bio_chain_endio(bio);
1774 		goto again;
1775 	}
1776 
1777 #ifdef CONFIG_BLK_CGROUP
1778 	/*
1779 	 * Release cgroup info.  We shouldn't have to do this here, but quite
1780 	 * a few callers of bio_init fail to call bio_uninit, so we cover up
1781 	 * for that here at least for now.
1782 	 */
1783 	if (bio->bi_blkg) {
1784 		blkg_put(bio->bi_blkg);
1785 		bio->bi_blkg = NULL;
1786 	}
1787 #endif
1788 
1789 	if (bio->bi_end_io)
1790 		bio->bi_end_io(bio);
1791 }
1792 EXPORT_SYMBOL(bio_endio);
1793 
1794 /**
1795  * bio_split - split a bio
1796  * @bio:	bio to split
1797  * @sectors:	number of sectors to split from the front of @bio
1798  * @gfp:	gfp mask
1799  * @bs:		bio set to allocate from
1800  *
1801  * Allocates and returns a new bio which represents @sectors from the start of
1802  * @bio, and updates @bio to represent the remaining sectors.
1803  *
1804  * Unless this is a discard request the newly allocated bio will point
1805  * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1806  * neither @bio nor @bs are freed before the split bio.
1807  */
1808 struct bio *bio_split(struct bio *bio, int sectors,
1809 		      gfp_t gfp, struct bio_set *bs)
1810 {
1811 	struct bio *split;
1812 
1813 	if (WARN_ON_ONCE(sectors <= 0))
1814 		return ERR_PTR(-EINVAL);
1815 	if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
1816 		return ERR_PTR(-EINVAL);
1817 
1818 	/* Zone append commands cannot be split */
1819 	if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1820 		return ERR_PTR(-EINVAL);
1821 
1822 	/* atomic writes cannot be split */
1823 	if (bio->bi_opf & REQ_ATOMIC)
1824 		return ERR_PTR(-EINVAL);
1825 
1826 	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1827 	if (!split)
1828 		return ERR_PTR(-ENOMEM);
1829 
1830 	split->bi_iter.bi_size = sectors << 9;
1831 
1832 	if (bio_integrity(split))
1833 		bio_integrity_trim(split);
1834 
1835 	bio_advance(bio, split->bi_iter.bi_size);
1836 
1837 	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1838 		bio_set_flag(split, BIO_TRACE_COMPLETION);
1839 
1840 	return split;
1841 }
1842 EXPORT_SYMBOL(bio_split);
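
/*
 * Example (illustrative sketch, not part of this file): split a large bio
 * into max_sectors sized pieces, chaining each piece to the original so the
 * original completes only once every piece has.
 *
 *	while (bio_sectors(bio) > max_sectors) {
 *		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
 *					      &fs_bio_set);
 *
 *		if (IS_ERR(split))
 *			break;	// hypothetical error handling
 *		bio_chain(split, bio);
 *		submit_bio_noacct(split);
 *	}
 *	submit_bio_noacct(bio);
 */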
1843 
1844 /**
1845  * bio_trim - trim a bio
1846  * @bio:	bio to trim
1847  * @offset:	number of sectors to trim from the front of @bio
1848  * @size:	size we want to trim @bio to, in sectors
1849  *
1850  * This function is typically used for bios that are cloned and submitted
1851  * to the underlying device in parts.
1852  */
1853 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1854 {
1855 	/* We should never trim an atomic write */
1856 	if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size))
1857 		return;
1858 
1859 	if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1860 			 offset + size > bio_sectors(bio)))
1861 		return;
1862 
1863 	size <<= 9;
1864 	if (offset == 0 && size == bio->bi_iter.bi_size)
1865 		return;
1866 
1867 	bio_advance(bio, offset << 9);
1868 	bio->bi_iter.bi_size = size;
1869 
1870 	if (bio_integrity(bio))
1871 		bio_integrity_trim(bio);
1872 }
1873 EXPORT_SYMBOL_GPL(bio_trim);
1874 
1875 /*
1876  * Create memory pools for biovecs in a bio_set.
1877  * Use the global biovec slabs created for general use.
1878  */
1879 int biovec_init_pool(mempool_t *pool, int pool_entries)
1880 {
1881 	struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1882 
1883 	return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1884 }
1885 
1886 /*
1887  * bioset_exit - exit a bioset initialized with bioset_init()
1888  *
1889  * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1890  * kzalloc()).
1891  */
1892 void bioset_exit(struct bio_set *bs)
1893 {
1894 	bio_alloc_cache_destroy(bs);
1895 	if (bs->rescue_workqueue)
1896 		destroy_workqueue(bs->rescue_workqueue);
1897 	bs->rescue_workqueue = NULL;
1898 
1899 	mempool_exit(&bs->bio_pool);
1900 	mempool_exit(&bs->bvec_pool);
1901 
1902 	if (bs->bio_slab)
1903 		bio_put_slab(bs);
1904 	bs->bio_slab = NULL;
1905 }
1906 EXPORT_SYMBOL(bioset_exit);
1907 
1908 /**
1909  * bioset_init - Initialize a bio_set
1910  * @bs:		pool to initialize
1911  * @pool_size:	Number of bio and bio_vecs to cache in the mempool
1912  * @front_pad:	Number of bytes to allocate in front of the returned bio
1913  * @flags:	Flags to modify behavior, currently %BIOSET_NEED_BVECS
1914  *              and %BIOSET_NEED_RESCUER
1915  *
1916  * Description:
1917  *    Set up a bio_set to be used with bio_alloc_bioset(). Allows the caller
1918  *    to ask for a number of bytes to be allocated in front of the bio.
1919  *    Front pad allocation is useful for embedding the bio inside
1920  *    another structure, to avoid allocating extra data to go with the bio.
1921  *    Note that the bio must be embedded at the END of that structure always,
1922  *    or things will break badly.
1923  *    If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1924  *    for allocating iovecs.  This pool is not needed e.g. for bio_init_clone().
1925  *    If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
1926  *    to dispatch queued requests when the mempool runs out of space.
1927  *
1928  */
1929 int bioset_init(struct bio_set *bs,
1930 		unsigned int pool_size,
1931 		unsigned int front_pad,
1932 		int flags)
1933 {
1934 	bs->front_pad = front_pad;
1935 	if (flags & BIOSET_NEED_BVECS)
1936 		bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1937 	else
1938 		bs->back_pad = 0;
1939 
1940 	spin_lock_init(&bs->rescue_lock);
1941 	bio_list_init(&bs->rescue_list);
1942 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1943 
1944 	bs->bio_slab = bio_find_or_create_slab(bs);
1945 	if (!bs->bio_slab)
1946 		return -ENOMEM;
1947 
1948 	if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1949 		goto bad;
1950 
1951 	if ((flags & BIOSET_NEED_BVECS) &&
1952 	    biovec_init_pool(&bs->bvec_pool, pool_size))
1953 		goto bad;
1954 
1955 	if (flags & BIOSET_NEED_RESCUER) {
1956 		bs->rescue_workqueue = alloc_workqueue("bioset",
1957 							WQ_MEM_RECLAIM, 0);
1958 		if (!bs->rescue_workqueue)
1959 			goto bad;
1960 	}
1961 	if (flags & BIOSET_PERCPU_CACHE) {
1962 		bs->cache = alloc_percpu(struct bio_alloc_cache);
1963 		if (!bs->cache)
1964 			goto bad;
1965 		cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1966 	}
1967 
1968 	return 0;
1969 bad:
1970 	bioset_exit(bs);
1971 	return -ENOMEM;
1972 }
1973 EXPORT_SYMBOL(bioset_init);
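
/*
 * Example (illustrative sketch, not part of this file): a driver sizing
 * front_pad so that every bio allocated from my_bio_set sits at the end of a
 * per-I/O structure.  struct my_io and my_bio_set are hypothetical.
 *
 *	struct my_io {
 *		sector_t	orig_sector;
 *		struct bio	bio;	// must be the last member
 *	};
 *	static struct bio_set my_bio_set;
 *
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 */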
1974 
1975 static int __init init_bio(void)
1976 {
1977 	int i;
1978 
1979 	BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1980 
1981 	for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1982 		struct biovec_slab *bvs = bvec_slabs + i;
1983 
1984 		bvs->slab = kmem_cache_create(bvs->name,
1985 				bvs->nr_vecs * sizeof(struct bio_vec), 0,
1986 				SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1987 	}
1988 
1989 	cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1990 					bio_cpu_dead);
1991 
1992 	if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
1993 			BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
1994 		panic("bio: can't allocate bios\n");
1995 
1996 	return 0;
1997 }
1998 subsys_initcall(init_bio);
1999