1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4 */
5 #include <linux/mm.h>
6 #include <linux/swap.h>
7 #include <linux/bio-integrity.h>
8 #include <linux/blkdev.h>
9 #include <linux/uio.h>
10 #include <linux/iocontext.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/kernel.h>
14 #include <linux/export.h>
15 #include <linux/mempool.h>
16 #include <linux/workqueue.h>
17 #include <linux/cgroup.h>
18 #include <linux/highmem.h>
19 #include <linux/blk-crypto.h>
20 #include <linux/xarray.h>
21 #include <linux/kmemleak.h>
22
23 #include <trace/events/block.h>
24 #include "blk.h"
25 #include "blk-rq-qos.h"
26 #include "blk-cgroup.h"
27
28 #define ALLOC_CACHE_THRESHOLD 16
29 #define ALLOC_CACHE_MAX 256
30
31 struct bio_alloc_cache {
32 struct bio *free_list;
33 struct bio *free_list_irq;
34 unsigned int nr;
35 unsigned int nr_irq;
36 };
37
38 #define BIO_INLINE_VECS 4
39
40 static struct biovec_slab {
41 int nr_vecs;
42 char *name;
43 struct kmem_cache *slab;
44 } bvec_slabs[] __read_mostly = {
45 { .nr_vecs = 16, .name = "biovec-16" },
46 { .nr_vecs = 64, .name = "biovec-64" },
47 { .nr_vecs = 128, .name = "biovec-128" },
48 { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" },
49 };
50
51 static struct biovec_slab *biovec_slab(unsigned short nr_vecs)
52 {
53 switch (nr_vecs) {
54 /* smaller bios use inline vecs */
55 case 5 ... 16:
56 return &bvec_slabs[0];
57 case 17 ... 64:
58 return &bvec_slabs[1];
59 case 65 ... 128:
60 return &bvec_slabs[2];
61 case 129 ... BIO_MAX_VECS:
62 return &bvec_slabs[3];
63 default:
64 BUG();
65 return NULL;
66 }
67 }
68
69 /*
70 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
71 * IO code that does not need private memory pools.
72 */
73 struct bio_set fs_bio_set;
74 EXPORT_SYMBOL(fs_bio_set);
75
76 /*
77 * Our slab pool management
78 */
79 struct bio_slab {
80 struct kmem_cache *slab;
81 unsigned int slab_ref;
82 unsigned int slab_size;
83 char name[12];
84 };
85 static DEFINE_MUTEX(bio_slab_lock);
86 static DEFINE_XARRAY(bio_slabs);
87
88 static struct bio_slab *create_bio_slab(unsigned int size)
89 {
90 struct bio_slab *bslab = kzalloc_obj(*bslab);
91
92 if (!bslab)
93 return NULL;
94
95 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size);
96 bslab->slab = kmem_cache_create(bslab->name, size,
97 ARCH_KMALLOC_MINALIGN,
98 SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL);
99 if (!bslab->slab)
100 goto fail_alloc_slab;
101
102 bslab->slab_ref = 1;
103 bslab->slab_size = size;
104
105 if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL)))
106 return bslab;
107
108 kmem_cache_destroy(bslab->slab);
109
110 fail_alloc_slab:
111 kfree(bslab);
112 return NULL;
113 }
114
115 static inline unsigned int bs_bio_slab_size(struct bio_set *bs)
116 {
117 return bs->front_pad + sizeof(struct bio) + bs->back_pad;
118 }
119
120 static inline void *bio_slab_addr(struct bio *bio)
121 {
122 return (void *)bio - bio->bi_pool->front_pad;
123 }
124
125 static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs)
126 {
127 unsigned int size = bs_bio_slab_size(bs);
128 struct bio_slab *bslab;
129
130 mutex_lock(&bio_slab_lock);
131 bslab = xa_load(&bio_slabs, size);
132 if (bslab)
133 bslab->slab_ref++;
134 else
135 bslab = create_bio_slab(size);
136 mutex_unlock(&bio_slab_lock);
137
138 if (bslab)
139 return bslab->slab;
140 return NULL;
141 }
142
143 static void bio_put_slab(struct bio_set *bs)
144 {
145 struct bio_slab *bslab = NULL;
146 unsigned int slab_size = bs_bio_slab_size(bs);
147
148 mutex_lock(&bio_slab_lock);
149
150 bslab = xa_load(&bio_slabs, slab_size);
151 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
152 goto out;
153
154 WARN_ON_ONCE(bslab->slab != bs->bio_slab);
155
156 WARN_ON(!bslab->slab_ref);
157
158 if (--bslab->slab_ref)
159 goto out;
160
161 xa_erase(&bio_slabs, slab_size);
162
163 kmem_cache_destroy(bslab->slab);
164 kfree(bslab);
165
166 out:
167 mutex_unlock(&bio_slab_lock);
168 }
169
170 /*
171 * Make the first allocation restricted and don't dump info on allocation
172 * failures, since we'll fall back to the mempool in case of failure.
173 */
174 static inline gfp_t try_alloc_gfp(gfp_t gfp)
175 {
176 return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) |
177 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
178 }
179
180 void bio_uninit(struct bio *bio)
181 {
182 #ifdef CONFIG_BLK_CGROUP
183 if (bio->bi_blkg) {
184 blkg_put(bio->bi_blkg);
185 bio->bi_blkg = NULL;
186 }
187 #endif
188 if (bio_integrity(bio))
189 bio_integrity_free(bio);
190
191 bio_crypt_free_ctx(bio);
192 }
193 EXPORT_SYMBOL(bio_uninit);
194
195 static void bio_free(struct bio *bio)
196 {
197 struct bio_set *bs = bio->bi_pool;
198 void *p = bio;
199
200 WARN_ON_ONCE(!bs);
201 WARN_ON_ONCE(bio->bi_max_vecs > BIO_MAX_VECS);
202
203 bio_uninit(bio);
204 if (bio->bi_max_vecs == BIO_MAX_VECS)
205 mempool_free(bio->bi_io_vec, &bs->bvec_pool);
206 else if (bio->bi_max_vecs > BIO_INLINE_VECS)
207 kmem_cache_free(biovec_slab(bio->bi_max_vecs)->slab,
208 bio->bi_io_vec);
209 mempool_free(p - bs->front_pad, &bs->bio_pool);
210 }
211
212 /*
213 * Users of this function have their own bio allocation. Subsequently,
214 * they must remember to pair any call to bio_init() with bio_uninit()
215 * when IO has completed, or when the bio is released.
216 */
217 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
218 unsigned short max_vecs, blk_opf_t opf)
219 {
220 bio->bi_next = NULL;
221 bio->bi_bdev = bdev;
222 bio->bi_opf = opf;
223 bio->bi_flags = 0;
224 bio->bi_ioprio = 0;
225 bio->bi_write_hint = 0;
226 bio->bi_write_stream = 0;
227 bio->bi_status = 0;
228 bio->bi_bvec_gap_bit = 0;
229 bio->bi_iter.bi_sector = 0;
230 bio->bi_iter.bi_size = 0;
231 bio->bi_iter.bi_idx = 0;
232 bio->bi_iter.bi_bvec_done = 0;
233 bio->bi_end_io = NULL;
234 bio->bi_private = NULL;
235 #ifdef CONFIG_BLK_CGROUP
236 bio->bi_blkg = NULL;
237 bio->issue_time_ns = 0;
238 if (bdev)
239 bio_associate_blkg(bio);
240 #ifdef CONFIG_BLK_CGROUP_IOCOST
241 bio->bi_iocost_cost = 0;
242 #endif
243 #endif
244 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
245 bio->bi_crypt_context = NULL;
246 #endif
247 #ifdef CONFIG_BLK_DEV_INTEGRITY
248 bio->bi_integrity = NULL;
249 #endif
250 bio->bi_vcnt = 0;
251
252 atomic_set(&bio->__bi_remaining, 1);
253 atomic_set(&bio->__bi_cnt, 1);
254 bio->bi_cookie = BLK_QC_T_NONE;
255
256 bio->bi_max_vecs = max_vecs;
257 bio->bi_io_vec = table;
258 bio->bi_pool = NULL;
259 }
260 EXPORT_SYMBOL(bio_init);
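
/*
 * Illustrative sketch of the on-stack bio_init()/bio_uninit() pairing
 * described above, for a small synchronous read. Assumes the caller holds a
 * reference on @bdev and that @page is a regular page; compare bdev_rw_virt()
 * further down in this file:
 *
 *	struct bio_vec bv;
 *	struct bio bio;
 *	int ret;
 *
 *	bio_init(&bio, bdev, &bv, 1, REQ_OP_READ);
 *	bio.bi_iter.bi_sector = sector;
 *	__bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(&bio);
 *	bio_uninit(&bio);
 */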
261
262 /**
263 * bio_reset - reinitialize a bio
264 * @bio: bio to reset
265 * @bdev: block device to use the bio for
266 * @opf: operation and flags for bio
267 *
268 * Description:
269 * After calling bio_reset(), @bio will be in the same state as a freshly
270 * allocated bio returned by bio_alloc_bioset() - the only fields that are
271 * preserved are the ones that are initialized by bio_alloc_bioset(). See
272 * comment in struct bio.
273 */
274 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)
275 {
276 struct bio_vec *bv = bio->bi_io_vec;
277
278 bio_uninit(bio);
279 memset(bio, 0, BIO_RESET_BYTES);
280 atomic_set(&bio->__bi_remaining, 1);
281 bio->bi_io_vec = bv;
282 bio->bi_bdev = bdev;
283 if (bio->bi_bdev)
284 bio_associate_blkg(bio);
285 bio->bi_opf = opf;
286 }
287 EXPORT_SYMBOL(bio_reset);
288
289 /**
290 * bio_reuse - reuse a bio with the payload left intact
291 * @bio: bio to reuse
292 * @opf: operation and flags for the next I/O
293 *
294 * Allow reusing an existing bio for another operation with all set up
295 * fields including the payload, device and end_io handler left intact.
296 *
297 * Typically used when @bio is first used to read data which is then written
298 * to another location without modification. @bio must not be in-flight and
299 * owned by the caller. Can't be used for cloned bios.
300 *
301 * Note: Can't be used when @bio has integrity or blk-crypto contexts for now.
302 * Feel free to add that support when you need it, though.
303 */
304 void bio_reuse(struct bio *bio, blk_opf_t opf)
305 {
306 unsigned short vcnt = bio->bi_vcnt, i;
307 bio_end_io_t *end_io = bio->bi_end_io;
308 void *private = bio->bi_private;
309
310 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
311 WARN_ON_ONCE(bio_integrity(bio));
312 WARN_ON_ONCE(bio_has_crypt_ctx(bio));
313
314 bio_reset(bio, bio->bi_bdev, opf);
315 for (i = 0; i < vcnt; i++)
316 bio->bi_iter.bi_size += bio->bi_io_vec[i].bv_len;
317 bio->bi_vcnt = vcnt;
318 bio->bi_private = private;
319 bio->bi_end_io = end_io;
320 }
321 EXPORT_SYMBOL_GPL(bio_reuse);
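
/*
 * Illustrative sketch of the read-then-write pattern bio_reuse() targets;
 * src_sector and dst_sector are hypothetical and error handling is elided:
 *
 *	bio->bi_iter.bi_sector = src_sector;
 *	ret = submit_bio_wait(bio);		// bio was set up for REQ_OP_READ
 *	if (!ret) {
 *		bio_reuse(bio, REQ_OP_WRITE);	// payload and bdev kept intact
 *		bio->bi_iter.bi_sector = dst_sector;
 *		ret = submit_bio_wait(bio);
 *	}
 */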
322
323 static struct bio *__bio_chain_endio(struct bio *bio)
324 {
325 struct bio *parent = bio->bi_private;
326
327 if (bio->bi_status && !parent->bi_status)
328 parent->bi_status = bio->bi_status;
329 bio_put(bio);
330 return parent;
331 }
332
333 /*
334 * This function should only be used as a flag and must never be called.
335 * If execution reaches here, it indicates a serious programming error.
336 */
337 static void bio_chain_endio(struct bio *bio)
338 {
339 BUG();
340 }
341
342 /**
343 * bio_chain - chain bio completions
344 * @bio: the target bio
345 * @parent: the parent bio of @bio
346 *
347 * The caller won't have a bi_end_io called when @bio completes - instead,
348 * @parent's bi_end_io won't be called until both @parent and @bio have
349 * completed; the chained bio will also be freed when it completes.
350 *
351 * The caller must not set bi_private or bi_end_io in @bio.
352 */
353 void bio_chain(struct bio *bio, struct bio *parent)
354 {
355 BUG_ON(bio->bi_private || bio->bi_end_io);
356
357 bio->bi_private = parent;
358 bio->bi_end_io = bio_chain_endio;
359 bio_inc_remaining(parent);
360 }
361 EXPORT_SYMBOL(bio_chain);
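
/*
 * Illustrative sketch: splitting work off to a child bio whose completion is
 * folded into @parent, as described above. The child must not have bi_end_io
 * or bi_private set before bio_chain():
 *
 *	child = bio_alloc(bdev, nr_vecs, parent->bi_opf, GFP_NOIO);
 *	bio_chain(child, parent);
 *	// ... fill in the child's sector and payload ...
 *	submit_bio(child);
 */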
362
363 /**
364 * bio_chain_and_submit - submit a bio after chaining it to another one
365 * @prev: bio to chain and submit
366 * @new: bio to chain to
367 *
368 * If @prev is non-NULL, chain it to @new and submit it.
369 *
370 * Return: @new.
371 */
372 struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new)
373 {
374 if (prev) {
375 bio_chain(prev, new);
376 submit_bio(prev);
377 }
378 return new;
379 }
380
381 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
382 unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)
383 {
384 return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));
385 }
386 EXPORT_SYMBOL_GPL(blk_next_bio);
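
/*
 * Illustrative sketch of the allocate-chain-submit loop blk_next_bio() is
 * meant for, e.g. when a long operation has to be carved into many bios.
 * Sizing each bio is elided; the last bio is left for the caller to submit
 * (and usually wait on):
 *
 *	struct bio *bio = NULL;
 *
 *	while (nr_sects) {
 *		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp);
 *		bio->bi_iter.bi_sector = sector;
 *		// ... set bi_size, advance sector and nr_sects ...
 *	}
 */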
387
388 static void bio_alloc_rescue(struct work_struct *work)
389 {
390 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
391 struct bio *bio;
392
393 while (1) {
394 spin_lock(&bs->rescue_lock);
395 bio = bio_list_pop(&bs->rescue_list);
396 spin_unlock(&bs->rescue_lock);
397
398 if (!bio)
399 break;
400
401 submit_bio_noacct(bio);
402 }
403 }
404
405 /*
406 * submit_bio_noacct() converts recursion to iteration; this means if we're
407 * running beneath it, any bios we allocate and submit will not be submitted
408 * (and thus freed) until after we return.
409 *
410 * This exposes us to a potential deadlock if we allocate multiple bios from the
411 * same bio_set while running underneath submit_bio_noacct(). If we were to
412 * allocate multiple bios (say a stacking block driver that was splitting bios),
413 * we would deadlock if we exhausted the mempool's reserve.
414 *
415 * We solve this, and guarantee forward progress by punting the bios on
416 * current->bio_list to a per bio_set rescuer workqueue before blocking to wait
417 * for elements being returned to the mempool.
418 */
419 static void punt_bios_to_rescuer(struct bio_set *bs)
420 {
421 struct bio_list punt, nopunt;
422 struct bio *bio;
423
424 if (!current->bio_list || !bs->rescue_workqueue)
425 return;
426 if (bio_list_empty(&current->bio_list[0]) &&
427 bio_list_empty(&current->bio_list[1]))
428 return;
429
430 /*
431 * In order to guarantee forward progress we must punt only bios that
432 * were allocated from this bio_set; otherwise, if there was a bio on
433 * there for a stacking driver higher up in the stack, processing it
434 * could require allocating bios from this bio_set, and doing that from
435 * our own rescuer would be bad.
436 *
437 * Since bio lists are singly linked, pop them all instead of trying to
438 * remove from the middle of the list:
439 */
440
441 bio_list_init(&punt);
442 bio_list_init(&nopunt);
443
444 while ((bio = bio_list_pop(&current->bio_list[0])))
445 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
446 current->bio_list[0] = nopunt;
447
448 bio_list_init(&nopunt);
449 while ((bio = bio_list_pop(&current->bio_list[1])))
450 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
451 current->bio_list[1] = nopunt;
452
453 spin_lock(&bs->rescue_lock);
454 bio_list_merge(&bs->rescue_list, &punt);
455 spin_unlock(&bs->rescue_lock);
456
457 queue_work(bs->rescue_workqueue, &bs->rescue_work);
458 }
459
460 static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache)
461 {
462 unsigned long flags;
463
464 /* cache->free_list must be empty */
465 if (WARN_ON_ONCE(cache->free_list))
466 return;
467
468 local_irq_save(flags);
469 cache->free_list = cache->free_list_irq;
470 cache->free_list_irq = NULL;
471 cache->nr += cache->nr_irq;
472 cache->nr_irq = 0;
473 local_irq_restore(flags);
474 }
475
476 static struct bio *bio_alloc_percpu_cache(struct bio_set *bs)
477 {
478 struct bio_alloc_cache *cache;
479 struct bio *bio;
480
481 cache = per_cpu_ptr(bs->cache, get_cpu());
482 if (!cache->free_list) {
483 if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD)
484 bio_alloc_irq_cache_splice(cache);
485 if (!cache->free_list) {
486 put_cpu();
487 return NULL;
488 }
489 }
490 bio = cache->free_list;
491 cache->free_list = bio->bi_next;
492 cache->nr--;
493 put_cpu();
494 bio->bi_pool = bs;
495
496 kmemleak_alloc(bio_slab_addr(bio),
497 kmem_cache_size(bs->bio_slab), 1, GFP_NOIO);
498 return bio;
499 }
500
501 /**
502 * bio_alloc_bioset - allocate a bio for I/O
503 * @bdev: block device to allocate the bio for (can be %NULL)
504 * @nr_vecs: number of bvecs to pre-allocate
505 * @opf: operation and flags for bio
506 * @gfp: the GFP_* mask given to the slab allocator
507 * @bs: the bio_set to allocate from.
508 *
509 * Allocate a bio from the mempools in @bs.
510 *
511 * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to
512 * allocate a bio. This is due to the mempool guarantees. To make this work,
513 * callers must never allocate more than 1 bio at a time from the general pool.
514 * Callers that need to allocate more than 1 bio must always submit the
515 * previously allocated bio for IO before attempting to allocate a new one.
516 * Failure to do so can cause deadlocks under memory pressure.
517 *
518 * Note that when running under submit_bio_noacct() (i.e. any block driver),
519 * bios are not submitted until after you return - see the code in
520 * submit_bio_noacct() that converts recursion into iteration, to prevent
521 * stack overflows.
522 *
523 * This would normally mean allocating multiple bios under submit_bio_noacct()
524 * would be susceptible to deadlocks, but we have
525 * deadlock avoidance code that resubmits any blocked bios from a rescuer
526 * thread.
527 *
528 * However, we do not guarantee forward progress for allocations from other
529 * mempools. Doing multiple allocations from the same mempool under
530 * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad
531 * for per bio allocations.
532 *
533 * Returns: Pointer to new bio on success, NULL on failure.
534 */
535 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
536 blk_opf_t opf, gfp_t gfp, struct bio_set *bs)
537 {
538 struct bio_vec *bvecs = NULL;
539 struct bio *bio = NULL;
540 gfp_t saved_gfp = gfp;
541 void *p;
542
543 /* should not use nobvec bioset for nr_vecs > 0 */
544 if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0))
545 return NULL;
546
547 if (saved_gfp & __GFP_DIRECT_RECLAIM)
548 gfp = try_alloc_gfp(gfp);
549 if (bs->cache && nr_vecs <= BIO_INLINE_VECS) {
550 /*
551 * Set REQ_ALLOC_CACHE even if no cached bio is available to
552 * return the allocated bio to the percpu cache when done.
553 */
554 opf |= REQ_ALLOC_CACHE;
555 bio = bio_alloc_percpu_cache(bs);
556 } else {
557 opf &= ~REQ_ALLOC_CACHE;
558 p = kmem_cache_alloc(bs->bio_slab, gfp);
559 if (p)
560 bio = p + bs->front_pad;
561 }
562
563 if (bio && nr_vecs > BIO_INLINE_VECS) {
564 struct biovec_slab *bvs = biovec_slab(nr_vecs);
565
566 /*
567 * Upgrade nr_vecs to take full advantage of the allocation.
568 * We also rely on this in bio_free().
569 */
570 nr_vecs = bvs->nr_vecs;
571 bvecs = kmem_cache_alloc(bvs->slab, gfp);
572 if (unlikely(!bvecs)) {
573 kmem_cache_free(bs->bio_slab, p);
574 bio = NULL;
575 }
576 }
577
578 if (unlikely(!bio)) {
579 /*
580 * Give up if we are not allowed to sleep, as non-blocking mempool
581 * allocations just go back to the slab allocator.
582 */
583 if (!(saved_gfp & __GFP_DIRECT_RECLAIM))
584 return NULL;
585
586 punt_bios_to_rescuer(bs);
587
588 /*
589 * Don't rob the mempools by returning to the per-CPU cache if
590 * we're tight on memory.
591 */
592 opf &= ~REQ_ALLOC_CACHE;
593
594 p = mempool_alloc(&bs->bio_pool, saved_gfp);
595 bio = p + bs->front_pad;
596 if (nr_vecs > BIO_INLINE_VECS) {
597 nr_vecs = BIO_MAX_VECS;
598 bvecs = mempool_alloc(&bs->bvec_pool, saved_gfp);
599 }
600 }
601
602 if (nr_vecs && nr_vecs <= BIO_INLINE_VECS)
603 bio_init_inline(bio, bdev, nr_vecs, opf);
604 else
605 bio_init(bio, bdev, bvecs, nr_vecs, opf);
606 bio->bi_pool = bs;
607 return bio;
608 }
609 EXPORT_SYMBOL(bio_alloc_bioset);
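
/*
 * Illustrative sketch of the front_pad usage recommended above: a driver
 * embeds its per-I/O state in front of the bio so a single mempool-backed
 * allocation covers both. struct my_io and my_bio_set are hypothetical:
 *
 *	struct my_io {
 *		void		*private;
 *		struct bio	bio;	// must be the last member
 *	};
 *	static struct bio_set my_bio_set;
 *
 *	// once, at driver init:
 *	ret = bioset_init(&my_bio_set, BIO_POOL_SIZE,
 *			  offsetof(struct my_io, bio), BIOSET_NEED_BVECS);
 *
 *	// per I/O:
 *	bio = bio_alloc_bioset(bdev, nr_vecs, REQ_OP_WRITE, GFP_NOIO,
 *			       &my_bio_set);
 *	io = container_of(bio, struct my_io, bio);
 */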
610
611 /**
612 * bio_kmalloc - kmalloc a bio
613 * @nr_vecs: number of bio_vecs to allocate
614 * @gfp_mask: the GFP_* mask given to the slab allocator
615 *
616 * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized
617 * using bio_init() before use. To free a bio returned from this function use
618 * kfree() after calling bio_uninit(). A bio returned from this function can
619 * be reused by calling bio_uninit() before calling bio_init() again.
620 *
621 * Note that unlike bio_alloc() or bio_alloc_bioset(), allocations from this
622 * function are not backed by a mempool and can fail. Do not use this function
623 * for allocations in the file system I/O path.
624 *
625 * Returns: Pointer to new bio on success, NULL on failure.
626 */
627 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask)
628 {
629 struct bio *bio;
630
631 if (nr_vecs > BIO_MAX_INLINE_VECS)
632 return NULL;
633 return kmalloc(sizeof(*bio) + nr_vecs * sizeof(struct bio_vec),
634 gfp_mask);
635 }
636 EXPORT_SYMBOL(bio_kmalloc);
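
/*
 * Illustrative sketch of the lifecycle described above, assuming the bio's
 * inline bvec array (bi_inline_vecs), which the allocation size accounts for,
 * is used as the bvec table:
 *
 *	bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
 *	// ... add pages, submit and wait ...
 *	bio_uninit(bio);
 *	kfree(bio);
 */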
637
638 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
639 {
640 struct bio_vec bv;
641 struct bvec_iter iter;
642
643 __bio_for_each_segment(bv, bio, iter, start)
644 memzero_bvec(&bv);
645 }
646 EXPORT_SYMBOL(zero_fill_bio_iter);
647
648 /**
649 * bio_truncate - truncate the bio to the smaller size @new_size
650 * @bio: the bio to be truncated
651 * @new_size: new size for truncating the bio
652 *
653 * Description:
654 * Truncate the bio to the new size @new_size. If bio_op(bio) is
655 * REQ_OP_READ, zero the truncated part. This function should only
656 * be used for handling corner cases, such as bio eod.
657 */
658 static void bio_truncate(struct bio *bio, unsigned new_size)
659 {
660 struct bio_vec bv;
661 struct bvec_iter iter;
662 unsigned int done = 0;
663 bool truncated = false;
664
665 if (new_size >= bio->bi_iter.bi_size)
666 return;
667
668 if (bio_op(bio) != REQ_OP_READ)
669 goto exit;
670
671 bio_for_each_segment(bv, bio, iter) {
672 if (done + bv.bv_len > new_size) {
673 size_t offset;
674
675 if (!truncated)
676 offset = new_size - done;
677 else
678 offset = 0;
679 memzero_page(bv.bv_page, bv.bv_offset + offset,
680 bv.bv_len - offset);
681 truncated = true;
682 }
683 done += bv.bv_len;
684 }
685
686 exit:
687 /*
688 * Don't touch bvec table here and make it really immutable, since
689 * fs bio user has to retrieve all pages via bio_for_each_segment_all
690 * in its .end_bio() callback.
691 *
692 * It is enough to truncate bio by updating .bi_size since we can make
693 * correct bvec with the updated .bi_size for drivers.
694 */
695 bio->bi_iter.bi_size = new_size;
696 }
697
698 /**
699 * guard_bio_eod - truncate a BIO to fit the block device
700 * @bio: bio to truncate
701 *
702 * This allows us to do IO even on the odd last sectors of a device, even if the
703 * block size is some multiple of the physical sector size.
704 *
705 * We'll just truncate the bio to the size of the device, and clear the end of
706 * the buffer head manually. Truly out-of-range accesses will turn into actual
707 * I/O errors, this only handles the "we need to be able to do I/O at the final
708 * sector" case.
709 */
710 void guard_bio_eod(struct bio *bio)
711 {
712 sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
713
714 if (!maxsector)
715 return;
716
717 /*
718 * If the *whole* IO is past the end of the device,
719 * let it through, and the IO layer will turn it into
720 * an EIO.
721 */
722 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
723 return;
724
725 maxsector -= bio->bi_iter.bi_sector;
726 if (likely((bio->bi_iter.bi_size >> 9) <= maxsector))
727 return;
728
729 bio_truncate(bio, maxsector << 9);
730 }
731
732 static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache,
733 unsigned int nr)
734 {
735 unsigned int i = 0;
736 struct bio *bio;
737
738 while ((bio = cache->free_list) != NULL) {
739 cache->free_list = bio->bi_next;
740 cache->nr--;
741 kmemleak_alloc(bio_slab_addr(bio),
742 kmem_cache_size(bio->bi_pool->bio_slab),
743 1, GFP_KERNEL);
744 bio_free(bio);
745 if (++i == nr)
746 break;
747 }
748 return i;
749 }
750
751 static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
752 unsigned int nr)
753 {
754 nr -= __bio_alloc_cache_prune(cache, nr);
755 if (!READ_ONCE(cache->free_list)) {
756 bio_alloc_irq_cache_splice(cache);
757 __bio_alloc_cache_prune(cache, nr);
758 }
759 }
760
761 static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node)
762 {
763 struct bio_set *bs;
764
765 bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead);
766 if (bs->cache) {
767 struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu);
768
769 bio_alloc_cache_prune(cache, -1U);
770 }
771 return 0;
772 }
773
774 static void bio_alloc_cache_destroy(struct bio_set *bs)
775 {
776 int cpu;
777
778 if (!bs->cache)
779 return;
780
781 cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
782 for_each_possible_cpu(cpu) {
783 struct bio_alloc_cache *cache;
784
785 cache = per_cpu_ptr(bs->cache, cpu);
786 bio_alloc_cache_prune(cache, -1U);
787 }
788 free_percpu(bs->cache);
789 bs->cache = NULL;
790 }
791
792 static inline void bio_put_percpu_cache(struct bio *bio)
793 {
794 struct bio_alloc_cache *cache;
795
796 cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
797 if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX)
798 goto out_free;
799
800 if (in_task()) {
801 bio_uninit(bio);
802 bio->bi_next = cache->free_list;
803 /* Not necessary but helps not to iopoll already freed bios */
804 bio->bi_bdev = NULL;
805 cache->free_list = bio;
806 cache->nr++;
807 kmemleak_free(bio_slab_addr(bio));
808 } else if (in_hardirq()) {
809 lockdep_assert_irqs_disabled();
810
811 bio_uninit(bio);
812 bio->bi_next = cache->free_list_irq;
813 cache->free_list_irq = bio;
814 cache->nr_irq++;
815 kmemleak_free(bio_slab_addr(bio));
816 } else {
817 goto out_free;
818 }
819 put_cpu();
820 return;
821 out_free:
822 put_cpu();
823 bio_free(bio);
824 }
825
826 /**
827 * bio_put - release a reference to a bio
828 * @bio: bio to release reference to
829 *
830 * Description:
831 * Put a reference to a &struct bio, either one you have gotten with
832 * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it.
833 **/
834 void bio_put(struct bio *bio)
835 {
836 if (unlikely(bio_flagged(bio, BIO_REFFED))) {
837 BUG_ON(!atomic_read(&bio->__bi_cnt));
838 if (!atomic_dec_and_test(&bio->__bi_cnt))
839 return;
840 }
841 if (bio->bi_opf & REQ_ALLOC_CACHE)
842 bio_put_percpu_cache(bio);
843 else
844 bio_free(bio);
845 }
846 EXPORT_SYMBOL(bio_put);
847
848 static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
849 {
850 bio_set_flag(bio, BIO_CLONED);
851 bio->bi_ioprio = bio_src->bi_ioprio;
852 bio->bi_write_hint = bio_src->bi_write_hint;
853 bio->bi_write_stream = bio_src->bi_write_stream;
854 bio->bi_iter = bio_src->bi_iter;
855
856 if (bio->bi_bdev) {
857 if (bio->bi_bdev == bio_src->bi_bdev &&
858 bio_flagged(bio_src, BIO_REMAPPED))
859 bio_set_flag(bio, BIO_REMAPPED);
860 bio_clone_blkg_association(bio, bio_src);
861 }
862
863 if (bio_crypt_clone(bio, bio_src, gfp) < 0)
864 return -ENOMEM;
865 if (bio_integrity(bio_src) &&
866 bio_integrity_clone(bio, bio_src, gfp) < 0)
867 return -ENOMEM;
868 return 0;
869 }
870
871 /**
872 * bio_alloc_clone - clone a bio that shares the original bio's biovec
873 * @bdev: block_device to clone onto
874 * @bio_src: bio to clone from
875 * @gfp: allocation priority
876 * @bs: bio_set to allocate from
877 *
878 * Allocate a new bio that is a clone of @bio_src. This reuses the bio_vecs
879 * pointed to by @bio_src->bi_io_vec, and clones the iterator pointing to
880 * the current position in it. The caller owns the returned bio, but not
881 * the bio_vecs, and must ensure the bio is freed before the memory
882 * pointed to by @bio_src->bi_io_vec.
883 */
884 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
885 gfp_t gfp, struct bio_set *bs)
886 {
887 struct bio *bio;
888
889 bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
890 if (!bio)
891 return NULL;
892
893 if (__bio_clone(bio, bio_src, gfp) < 0) {
894 bio_put(bio);
895 return NULL;
896 }
897 bio->bi_io_vec = bio_src->bi_io_vec;
898
899 return bio;
900 }
901 EXPORT_SYMBOL(bio_alloc_clone);
902
903 /**
904 * bio_init_clone - clone a bio that shares the original bio's biovec
905 * @bdev: block_device to clone onto
906 * @bio: bio to clone into
907 * @bio_src: bio to clone from
908 * @gfp: allocation priority
909 *
910 * Initialize a new bio in caller provided memory that is a clone of @bio_src.
911 * The same bio_vecs reuse and bio lifetime rules as bio_alloc_clone() apply.
912 */
913 int bio_init_clone(struct block_device *bdev, struct bio *bio,
914 struct bio *bio_src, gfp_t gfp)
915 {
916 int ret;
917
918 bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf);
919 ret = __bio_clone(bio, bio_src, gfp);
920 if (ret)
921 bio_uninit(bio);
922 return ret;
923 }
924 EXPORT_SYMBOL(bio_init_clone);
925
926 /**
927 * bio_full - check if the bio is full
928 * @bio: bio to check
929 * @len: length of one segment to be added
930 *
931 * Return true if @bio is full and one segment with @len bytes can't be
932 * added to the bio, otherwise return false
933 */
934 static inline bool bio_full(struct bio *bio, unsigned len)
935 {
936 if (bio->bi_vcnt >= bio->bi_max_vecs)
937 return true;
938 if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
939 return true;
940 return false;
941 }
942
943 static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page,
944 unsigned int len, unsigned int off)
945 {
946 size_t bv_end = bv->bv_offset + bv->bv_len;
947 phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
948 phys_addr_t page_addr = page_to_phys(page);
949
950 if (vec_end_addr + 1 != page_addr + off)
951 return false;
952 if (xen_domain() && !xen_biovec_phys_mergeable(bv, page))
953 return false;
954
955 if ((vec_end_addr & PAGE_MASK) != ((page_addr + off) & PAGE_MASK)) {
956 if (IS_ENABLED(CONFIG_KMSAN))
957 return false;
958 if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
959 return false;
960 }
961
962 bv->bv_len += len;
963 return true;
964 }
965
966 /*
967 * Try to merge a page into a segment, while obeying the hardware segment
968 * size limit.
969 *
970 * This is kept around for the integrity metadata, which still tries
971 * to build the initial bio to the hardware limit and doesn't have proper
972 * helpers to split. Hopefully this will go away soon.
973 */
974 bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
975 struct page *page, unsigned len, unsigned offset)
976 {
977 unsigned long mask = queue_segment_boundary(q);
978 phys_addr_t addr1 = bvec_phys(bv);
979 phys_addr_t addr2 = page_to_phys(page) + offset + len - 1;
980
981 if ((addr1 | mask) != (addr2 | mask))
982 return false;
983 if (len > queue_max_segment_size(q) - bv->bv_len)
984 return false;
985 return bvec_try_merge_page(bv, page, len, offset);
986 }
987
988 /**
989 * __bio_add_page - add page(s) to a bio in a new segment
990 * @bio: destination bio
991 * @page: start page to add
992 * @len: length of the data to add, may cross pages
993 * @off: offset of the data relative to @page, may cross pages
994 *
995 * Add the data at @page + @off to @bio as a new bvec. The caller must ensure
996 * that @bio has space for another bvec.
997 */
998 void __bio_add_page(struct bio *bio, struct page *page,
999 unsigned int len, unsigned int off)
1000 {
1001 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
1002 WARN_ON_ONCE(bio_full(bio, len));
1003
1004 if (is_pci_p2pdma_page(page))
1005 bio->bi_opf |= REQ_NOMERGE;
1006
1007 bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off);
1008 bio->bi_iter.bi_size += len;
1009 bio->bi_vcnt++;
1010 }
1011 EXPORT_SYMBOL_GPL(__bio_add_page);
1012
1013 /**
1014 * bio_add_virt_nofail - add data in the direct kernel mapping to a bio
1015 * @bio: destination bio
1016 * @vaddr: data to add
1017 * @len: length of the data to add, may cross pages
1018 *
1019 * Add the data at @vaddr to @bio. The caller must have ensured a segment
1020 * is available for the added data. No merging into an existing segment
1021 * will be performed.
1022 */
1023 void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len)
1024 {
1025 __bio_add_page(bio, virt_to_page(vaddr), len, offset_in_page(vaddr));
1026 }
1027 EXPORT_SYMBOL_GPL(bio_add_virt_nofail);
1028
1029 /**
1030 * bio_add_page - attempt to add page(s) to bio
1031 * @bio: destination bio
1032 * @page: start page to add
1033 * @len: vec entry length, may cross pages
1034 * @offset: vec entry offset relative to @page, may cross pages
1035 *
1036 * Attempt to add page(s) to the bio_vec maplist. This will only fail
1037 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
1038 */
1039 int bio_add_page(struct bio *bio, struct page *page,
1040 unsigned int len, unsigned int offset)
1041 {
1042 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1043 return 0;
1044 if (WARN_ON_ONCE(len == 0))
1045 return 0;
1046 if (bio->bi_iter.bi_size > BIO_MAX_SIZE - len)
1047 return 0;
1048
1049 if (bio->bi_vcnt > 0) {
1050 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
1051
1052 if (!zone_device_pages_compatible(bv->bv_page, page))
1053 return 0;
1054 if (zone_device_pages_have_same_pgmap(bv->bv_page, page) &&
1055 bvec_try_merge_page(bv, page, len, offset)) {
1056 bio->bi_iter.bi_size += len;
1057 return len;
1058 }
1059 }
1060
1061 if (bio->bi_vcnt >= bio->bi_max_vecs)
1062 return 0;
1063 __bio_add_page(bio, page, len, offset);
1064 return len;
1065 }
1066 EXPORT_SYMBOL(bio_add_page);
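
/*
 * Illustrative sketch: filling a bio from a caller-provided page array and
 * stopping once the bio is full (pages and nr_pages are hypothetical):
 *
 *	for (i = 0; i < nr_pages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
 *			break;	// no room left, submit and start a new bio
 */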
1067
1068 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
1069 size_t off)
1070 {
1071 unsigned long nr = off / PAGE_SIZE;
1072
1073 WARN_ON_ONCE(len > BIO_MAX_SIZE);
1074 __bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE);
1075 }
1076 EXPORT_SYMBOL_GPL(bio_add_folio_nofail);
1077
1078 /**
1079 * bio_add_folio - Attempt to add part of a folio to a bio.
1080 * @bio: BIO to add to.
1081 * @folio: Folio to add.
1082 * @len: How many bytes from the folio to add.
1083 * @off: First byte in this folio to add.
1084 *
1085 * Filesystems that use folios can call this function instead of calling
1086 * bio_add_page() for each page in the folio. If @off is bigger than
1087 * PAGE_SIZE, this function can create a bio_vec that starts in a page
1088 * after the bv_page. BIOs do not support folios that are 4GiB or larger.
1089 *
1090 * Return: Whether the addition was successful.
1091 */
1092 bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
1093 size_t off)
1094 {
1095 unsigned long nr = off / PAGE_SIZE;
1096
1097 if (len > BIO_MAX_SIZE)
1098 return false;
1099 return bio_add_page(bio, folio_page(folio, nr), len, off % PAGE_SIZE) > 0;
1100 }
1101 EXPORT_SYMBOL(bio_add_folio);
1102
1103 /**
1104 * bio_add_vmalloc_chunk - add a vmalloc chunk to a bio
1105 * @bio: destination bio
1106 * @vaddr: vmalloc address to add
1107 * @len: total length in bytes of the data to add
1108 *
1109 * Add data starting at @vaddr to @bio and return how many bytes were added.
1110 * This may be less than the amount originally asked for. Returns 0 if no data
1111 * could be added to @bio.
1112 *
1113 * This helper calls flush_kernel_vmap_range() for the range added. For reads
1114 * the caller still needs to manually call invalidate_kernel_vmap_range() in
1115 * the completion handler.
1116 */
1117 unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len)
1118 {
1119 unsigned int offset = offset_in_page(vaddr);
1120
1121 len = min(len, PAGE_SIZE - offset);
1122 if (bio_add_page(bio, vmalloc_to_page(vaddr), len, offset) < len)
1123 return 0;
1124 if (op_is_write(bio_op(bio)))
1125 flush_kernel_vmap_range(vaddr, len);
1126 return len;
1127 }
1128 EXPORT_SYMBOL_GPL(bio_add_vmalloc_chunk);
1129
1130 /**
1131 * bio_add_vmalloc - add a vmalloc region to a bio
1132 * @bio: destination bio
1133 * @vaddr: vmalloc address to add
1134 * @len: total length in bytes of the data to add
1135 *
1136 * Add data starting at @vaddr to @bio. Return %true on success or %false if
1137 * @bio does not have enough space for the payload.
1138 *
1139 * This helper calls flush_kernel_vmap_range() for the range added. For reads
1140 * the caller still needs to manually call invalidate_kernel_vmap_range() in
1141 * the completion handler.
1142 */
1143 bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len)
1144 {
1145 do {
1146 unsigned int added = bio_add_vmalloc_chunk(bio, vaddr, len);
1147
1148 if (!added)
1149 return false;
1150 vaddr += added;
1151 len -= added;
1152 } while (len);
1153
1154 return true;
1155 }
1156 EXPORT_SYMBOL_GPL(bio_add_vmalloc);
1157
1158 void __bio_release_pages(struct bio *bio, bool mark_dirty)
1159 {
1160 struct folio_iter fi;
1161
1162 bio_for_each_folio_all(fi, bio) {
1163 size_t nr_pages;
1164
1165 if (mark_dirty) {
1166 folio_lock(fi.folio);
1167 folio_mark_dirty(fi.folio);
1168 folio_unlock(fi.folio);
1169 }
1170 nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE -
1171 fi.offset / PAGE_SIZE + 1;
1172 unpin_user_folio(fi.folio, nr_pages);
1173 }
1174 }
1175 EXPORT_SYMBOL_GPL(__bio_release_pages);
1176
1177 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter)
1178 {
1179 WARN_ON_ONCE(bio->bi_max_vecs);
1180
1181 bio->bi_io_vec = (struct bio_vec *)iter->bvec;
1182 bio->bi_iter.bi_idx = 0;
1183 bio->bi_iter.bi_bvec_done = iter->iov_offset;
1184 bio->bi_iter.bi_size = iov_iter_count(iter);
1185 bio_set_flag(bio, BIO_CLONED);
1186 }
1187
1188 /*
1189 * Aligns the bio size to the len_align_mask, releasing any excess bio vecs that
1190 * __bio_iov_iter_get_pages may have inserted, and reverts the trimmed length
1191 * for the next iteration.
1192 */
1193 static int bio_iov_iter_align_down(struct bio *bio, struct iov_iter *iter,
1194 unsigned len_align_mask)
1195 {
1196 size_t nbytes = bio->bi_iter.bi_size & len_align_mask;
1197
1198 if (!nbytes)
1199 return 0;
1200
1201 iov_iter_revert(iter, nbytes);
1202 bio->bi_iter.bi_size -= nbytes;
1203 do {
1204 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
1205
1206 if (nbytes < bv->bv_len) {
1207 bv->bv_len -= nbytes;
1208 break;
1209 }
1210
1211 if (bio_flagged(bio, BIO_PAGE_PINNED))
1212 unpin_user_page(bv->bv_page);
1213
1214 bio->bi_vcnt--;
1215 nbytes -= bv->bv_len;
1216 } while (nbytes);
1217
1218 if (!bio->bi_vcnt)
1219 return -EFAULT;
1220 return 0;
1221 }
1222
1223 /**
1224 * bio_iov_iter_get_pages - add user or kernel pages to a bio
1225 * @bio: bio to add pages to
1226 * @iter: iov iterator describing the region to be added
1227 * @len_align_mask: the mask to align the total size to, 0 for any length
1228 *
1229 * This takes either an iterator pointing to user memory, or one pointing to
1230 * kernel pages (BVEC iterator). If we're adding user pages, we pin them and
1231 * map them into the kernel. On IO completion, the caller should put those
1232 * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided
1233 * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs
1234 * to ensure the bvecs and pages stay referenced until the submitted I/O is
1235 * completed by a call to ->ki_complete() or returns with an error other than
1236 * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF
1237 * on IO completion. If it isn't, then pages should be released.
1238 *
1239 * The function tries, but does not guarantee, to pin as many pages as
1240 * fit into the bio, or are requested in @iter, whatever is smaller. If
1241 * MM encounters an error pinning the requested pages, it stops. Error
1242 * is returned only if 0 pages could be pinned.
1243 */
1244 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter,
1245 unsigned len_align_mask)
1246 {
1247 iov_iter_extraction_t flags = 0;
1248
1249 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1250 return -EIO;
1251
1252 if (iov_iter_is_bvec(iter)) {
1253 bio_iov_bvec_set(bio, iter);
1254 iov_iter_advance(iter, bio->bi_iter.bi_size);
1255 return 0;
1256 }
1257
1258 if (iov_iter_extract_will_pin(iter))
1259 bio_set_flag(bio, BIO_PAGE_PINNED);
1260 if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue))
1261 flags |= ITER_ALLOW_P2PDMA;
1262
1263 do {
1264 ssize_t ret;
1265
1266 ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec,
1267 BIO_MAX_SIZE - bio->bi_iter.bi_size,
1268 &bio->bi_vcnt, bio->bi_max_vecs, flags);
1269 if (ret <= 0) {
1270 if (!bio->bi_vcnt)
1271 return ret;
1272 break;
1273 }
1274 bio->bi_iter.bi_size += ret;
1275 } while (iov_iter_count(iter) && !bio_full(bio, 0));
1276
1277 if (is_pci_p2pdma_page(bio->bi_io_vec->bv_page))
1278 bio->bi_opf |= REQ_NOMERGE;
1279 return bio_iov_iter_align_down(bio, iter, len_align_mask);
1280 }
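
/*
 * Illustrative sketch of a direct-I/O style loop built on the helper above;
 * opf, pos and the completion handling are the caller's (hypothetical), and
 * the page-release and pinning rules from the kernel-doc above still apply:
 *
 *	while (iov_iter_count(iter)) {
 *		bio = bio_alloc(bdev, bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
 *				opf, GFP_KERNEL);
 *		bio->bi_iter.bi_sector = pos >> SECTOR_SHIFT;
 *		ret = bio_iov_iter_get_pages(bio, iter, 0);
 *		if (ret) {
 *			bio_put(bio);
 *			break;
 *		}
 *		pos += bio->bi_iter.bi_size;
 *		submit_bio(bio);
 *	}
 */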
1281
1282 static struct folio *folio_alloc_greedy(gfp_t gfp, size_t *size)
1283 {
1284 struct folio *folio;
1285
1286 while (*size > PAGE_SIZE) {
1287 folio = folio_alloc(gfp | __GFP_NORETRY, get_order(*size));
1288 if (folio)
1289 return folio;
1290 *size = rounddown_pow_of_two(*size - 1);
1291 }
1292
1293 return folio_alloc(gfp, get_order(*size));
1294 }
1295
1296 static void bio_free_folios(struct bio *bio)
1297 {
1298 struct bio_vec *bv;
1299 int i;
1300
1301 bio_for_each_bvec_all(bv, bio, i) {
1302 struct folio *folio = page_folio(bv->bv_page);
1303
1304 if (!is_zero_folio(folio))
1305 folio_put(folio);
1306 }
1307 }
1308
1309 static int bio_iov_iter_bounce_write(struct bio *bio, struct iov_iter *iter,
1310 size_t maxlen)
1311 {
1312 size_t total_len = min(maxlen, iov_iter_count(iter));
1313
1314 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
1315 return -EINVAL;
1316 if (WARN_ON_ONCE(bio->bi_iter.bi_size))
1317 return -EINVAL;
1318 if (WARN_ON_ONCE(bio->bi_vcnt >= bio->bi_max_vecs))
1319 return -EINVAL;
1320
1321 do {
1322 size_t this_len = min(total_len, SZ_1M);
1323 struct folio *folio;
1324
1325 if (this_len > PAGE_SIZE * 2)
1326 this_len = rounddown_pow_of_two(this_len);
1327
1328 if (bio->bi_iter.bi_size > BIO_MAX_SIZE - this_len)
1329 break;
1330
1331 folio = folio_alloc_greedy(GFP_KERNEL, &this_len);
1332 if (!folio)
1333 break;
1334 bio_add_folio_nofail(bio, folio, this_len, 0);
1335
1336 if (copy_from_iter(folio_address(folio), this_len, iter) !=
1337 this_len) {
1338 bio_free_folios(bio);
1339 return -EFAULT;
1340 }
1341
1342 total_len -= this_len;
1343 } while (total_len && bio->bi_vcnt < bio->bi_max_vecs);
1344
1345 if (!bio->bi_iter.bi_size)
1346 return -ENOMEM;
1347 return 0;
1348 }
1349
1350 static int bio_iov_iter_bounce_read(struct bio *bio, struct iov_iter *iter,
1351 size_t maxlen)
1352 {
1353 size_t len = min3(iov_iter_count(iter), maxlen, SZ_1M);
1354 struct folio *folio;
1355
1356 folio = folio_alloc_greedy(GFP_KERNEL, &len);
1357 if (!folio)
1358 return -ENOMEM;
1359
1360 do {
1361 ssize_t ret;
1362
1363 ret = iov_iter_extract_bvecs(iter, bio->bi_io_vec + 1, len,
1364 &bio->bi_vcnt, bio->bi_max_vecs - 1, 0);
1365 if (ret <= 0) {
1366 if (!bio->bi_vcnt) {
1367 folio_put(folio);
1368 return ret;
1369 }
1370 break;
1371 }
1372 len -= ret;
1373 bio->bi_iter.bi_size += ret;
1374 } while (len && bio->bi_vcnt < bio->bi_max_vecs - 1);
1375
1376 /*
1377 * Set the folio directly here. The above loop has already calculated
1378 * the correct bi_size, and we use bi_vcnt for the user buffers. That
1379 * is safe as bi_vcnt is only used by the submitter and not the actual
1380 * I/O path.
1381 */
1382 bvec_set_folio(&bio->bi_io_vec[0], folio, bio->bi_iter.bi_size, 0);
1383 if (iov_iter_extract_will_pin(iter))
1384 bio_set_flag(bio, BIO_PAGE_PINNED);
1385 return 0;
1386 }
1387
1388 /**
1389 * bio_iov_iter_bounce - bounce buffer data from an iter into a bio
1390 * @bio: bio to send
1391 * @iter: iter to read from / write into
1392 * @maxlen: maximum size to bounce
1393 *
1394 * Helper for direct I/O implementations that need to bounce buffer because
1395 * we need to checksum the data or perform other operations that require
1396 * consistency. Allocates folios to back the bounce buffer, and for writes
1397 * copies the data into it. Needs to be paired with bio_iov_iter_unbounce()
1398 * called on completion.
1399 */
1400 int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter, size_t maxlen)
1401 {
1402 if (op_is_write(bio_op(bio)))
1403 return bio_iov_iter_bounce_write(bio, iter, maxlen);
1404 return bio_iov_iter_bounce_read(bio, iter, maxlen);
1405 }
1406
1407 static void bvec_unpin(struct bio_vec *bv, bool mark_dirty)
1408 {
1409 struct folio *folio = page_folio(bv->bv_page);
1410 size_t nr_pages = (bv->bv_offset + bv->bv_len - 1) / PAGE_SIZE -
1411 bv->bv_offset / PAGE_SIZE + 1;
1412
1413 if (mark_dirty)
1414 folio_mark_dirty_lock(folio);
1415 unpin_user_folio(folio, nr_pages);
1416 }
1417
1418 static void bio_iov_iter_unbounce_read(struct bio *bio, bool is_error,
1419 bool mark_dirty)
1420 {
1421 unsigned int len = bio->bi_io_vec[0].bv_len;
1422
1423 if (likely(!is_error)) {
1424 void *buf = bvec_virt(&bio->bi_io_vec[0]);
1425 struct iov_iter to;
1426
1427 iov_iter_bvec(&to, ITER_DEST, bio->bi_io_vec + 1, bio->bi_vcnt,
1428 len);
1429 /* copying to pinned pages should always work */
1430 WARN_ON_ONCE(copy_to_iter(buf, len, &to) != len);
1431 } else {
1432 /* No need to mark folios dirty if never copied to them */
1433 mark_dirty = false;
1434 }
1435
1436 if (bio_flagged(bio, BIO_PAGE_PINNED)) {
1437 int i;
1438
1439 for (i = 0; i < bio->bi_vcnt; i++)
1440 bvec_unpin(&bio->bi_io_vec[1 + i], mark_dirty);
1441 }
1442
1443 folio_put(page_folio(bio->bi_io_vec[0].bv_page));
1444 }
1445
1446 /**
1447 * bio_iov_iter_unbounce - finish a bounce buffer operation
1448 * @bio: completed bio
1449 * @is_error: %true if an I/O error occurred and data should not be copied
1450 * @mark_dirty: If %true, folios will be marked dirty.
1451 *
1452 * Helper for direct I/O implementations that need to bounce buffer because
1453 * we need to checksum the data or perform other operations that require
1454 * consistency. Called to complete a bio set up by bio_iov_iter_bounce().
1455 * Copies data back for reads, and marks the original folios dirty if
1456 * requested and then frees the bounce buffer.
1457 */
1458 void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty)
1459 {
1460 if (op_is_write(bio_op(bio)))
1461 bio_free_folios(bio);
1462 else
1463 bio_iov_iter_unbounce_read(bio, is_error, mark_dirty);
1464 }
1465
1466 static void bio_wait_end_io(struct bio *bio)
1467 {
1468 complete(bio->bi_private);
1469 }
1470
1471 /**
1472 * bio_await - call a function on a bio, and wait until it completes
1473 * @bio: the bio which describes the I/O
1474 * @submit: function called to submit the bio
1475 * @priv: private data passed to @submit
1476 *
1477 * Wait for the bio as well as any bio chained off it after executing the
1478 * passed in callback @submit. The wait for the bio is set up before calling
1479 * @submit to ensure that the completion is captured. If @submit is %NULL,
1480 * submit_bio() is used instead to submit the bio.
1481 *
1482 * Note: this overrides the bi_private and bi_end_io fields in the bio.
1483 */
1484 void bio_await(struct bio *bio, void *priv,
1485 void (*submit)(struct bio *bio, void *priv))
1486 {
1487 DECLARE_COMPLETION_ONSTACK_MAP(done,
1488 bio->bi_bdev->bd_disk->lockdep_map);
1489
1490 bio->bi_private = &done;
1491 bio->bi_end_io = bio_wait_end_io;
1492 bio->bi_opf |= REQ_SYNC;
1493 if (submit)
1494 submit(bio, priv);
1495 else
1496 submit_bio(bio);
1497 blk_wait_io(&done);
1498 }
1499 EXPORT_SYMBOL_GPL(bio_await);
1500
1501 /**
1502 * submit_bio_wait - submit a bio, and wait until it completes
1503 * @bio: The &struct bio which describes the I/O
1504 *
1505 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
1506 * bio_endio() on failure.
1507 *
1508 * WARNING: Unlike how submit_bio() is usually used, this function does not
1509 * consume the bio reference. The caller must drop the reference
1510 * on their own.
1511 */
1512 int submit_bio_wait(struct bio *bio)
1513 {
1514 bio_await(bio, NULL, NULL);
1515 return blk_status_to_errno(bio->bi_status);
1516 }
1517 EXPORT_SYMBOL(submit_bio_wait);
1518
1519 static void bio_endio_cb(struct bio *bio, void *priv)
1520 {
1521 bio_endio(bio);
1522 }
1523
1524 /*
1525 * Submit @bio synchronously, or call bio_endio on it if the current process
1526 * is being killed.
1527 */
1528 int bio_submit_or_kill(struct bio *bio, unsigned int flags)
1529 {
1530 if ((flags & BLKDEV_ZERO_KILLABLE) && fatal_signal_pending(current)) {
1531 bio_await(bio, NULL, bio_endio_cb);
1532 return -EINTR;
1533 }
1534
1535 return submit_bio_wait(bio);
1536 }
1537
1538 /**
1539 * bdev_rw_virt - synchronously read into / write from kernel mapping
1540 * @bdev: block device to access
1541 * @sector: sector to access
1542 * @data: data to read/write
1543 * @len: length in byte to read/write
1544 * @op: operation (e.g. REQ_OP_READ/REQ_OP_WRITE)
1545 *
1546 * Performs synchronous I/O to @bdev for @data/@len. @data must be in
1547 * the kernel direct mapping and not a vmalloc address.
1548 */
1549 int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data,
1550 size_t len, enum req_op op)
1551 {
1552 struct bio_vec bv;
1553 struct bio bio;
1554 int error;
1555
1556 if (WARN_ON_ONCE(is_vmalloc_addr(data)))
1557 return -EIO;
1558
1559 bio_init(&bio, bdev, &bv, 1, op);
1560 bio.bi_iter.bi_sector = sector;
1561 bio_add_virt_nofail(&bio, data, len);
1562 error = submit_bio_wait(&bio);
1563 bio_uninit(&bio);
1564 return error;
1565 }
1566 EXPORT_SYMBOL_GPL(bdev_rw_virt);
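
/*
 * Illustrative sketch: reading one logical block of on-disk metadata into a
 * kmalloc()ed buffer (which is in the direct mapping, as required above):
 *
 *	unsigned int bs = bdev_logical_block_size(bdev);
 *	void *buf = kmalloc(bs, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = bdev_rw_virt(bdev, 0, buf, bs, REQ_OP_READ);
 */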
1567
1568 void __bio_advance(struct bio *bio, unsigned bytes)
1569 {
1570 if (bio_integrity(bio))
1571 bio_integrity_advance(bio, bytes);
1572
1573 bio_crypt_advance(bio, bytes);
1574 bio_advance_iter(bio, &bio->bi_iter, bytes);
1575 }
1576 EXPORT_SYMBOL(__bio_advance);
1577
1578 void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
1579 struct bio *src, struct bvec_iter *src_iter)
1580 {
1581 while (src_iter->bi_size && dst_iter->bi_size) {
1582 struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
1583 struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
1584 unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
1585 void *src_buf = bvec_kmap_local(&src_bv);
1586 void *dst_buf = bvec_kmap_local(&dst_bv);
1587
1588 memcpy(dst_buf, src_buf, bytes);
1589
1590 kunmap_local(dst_buf);
1591 kunmap_local(src_buf);
1592
1593 bio_advance_iter_single(src, src_iter, bytes);
1594 bio_advance_iter_single(dst, dst_iter, bytes);
1595 }
1596 }
1597 EXPORT_SYMBOL(bio_copy_data_iter);
1598
1599 /**
1600 * bio_copy_data - copy contents of data buffers from one bio to another
1601 * @src: source bio
1602 * @dst: destination bio
1603 *
1604 * Stops when it reaches the end of either @src or @dst - that is, copies
1605 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
1606 */
1607 void bio_copy_data(struct bio *dst, struct bio *src)
1608 {
1609 struct bvec_iter src_iter = src->bi_iter;
1610 struct bvec_iter dst_iter = dst->bi_iter;
1611
1612 bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
1613 }
1614 EXPORT_SYMBOL(bio_copy_data);
1615
1616 void bio_free_pages(struct bio *bio)
1617 {
1618 struct bio_vec *bvec;
1619 struct bvec_iter_all iter_all;
1620
1621 bio_for_each_segment_all(bvec, bio, iter_all)
1622 __free_page(bvec->bv_page);
1623 }
1624 EXPORT_SYMBOL(bio_free_pages);
1625
1626 /*
1627 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1628 * for performing direct-IO in BIOs.
1629 *
1630 * The problem is that we cannot run folio_mark_dirty() from interrupt context
1631 * because the required locks are not interrupt-safe. So what we can do is to
1632 * mark the pages dirty _before_ performing IO. And in interrupt context,
1633 * check that the pages are still dirty. If so, fine. If not, redirty them
1634 * in process context.
1635 *
1636 * Note that this code is very hard to test under normal circumstances because
1637 * direct-io pins the pages with get_user_pages(). This makes
1638 * is_page_cache_freeable return false, and the VM will not clean the pages.
1639 * But other code (eg, flusher threads) could clean the pages if they are mapped
1640 * pagecache.
1641 *
1642 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1643 * deferred bio dirtying paths.
1644 */
1645
1646 /*
1647 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1648 */
1649 void bio_set_pages_dirty(struct bio *bio)
1650 {
1651 struct folio_iter fi;
1652
1653 bio_for_each_folio_all(fi, bio) {
1654 folio_lock(fi.folio);
1655 folio_mark_dirty(fi.folio);
1656 folio_unlock(fi.folio);
1657 }
1658 }
1659 EXPORT_SYMBOL_GPL(bio_set_pages_dirty);
1660
1661 /*
1662 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1663 * If they are, then fine. If, however, some pages are clean then they must
1664 * have been written out during the direct-IO read. So we take another ref on
1665 * the BIO and re-dirty the pages in process context.
1666 *
1667 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1668 * here on. It will unpin each page and will run one bio_put() against the
1669 * BIO.
1670 */
1671
1672 static void bio_dirty_fn(struct work_struct *work);
1673
1674 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1675 static DEFINE_SPINLOCK(bio_dirty_lock);
1676 static struct bio *bio_dirty_list;
1677
1678 /*
1679 * This runs in process context
1680 */
1681 static void bio_dirty_fn(struct work_struct *work)
1682 {
1683 struct bio *bio, *next;
1684
1685 spin_lock_irq(&bio_dirty_lock);
1686 next = bio_dirty_list;
1687 bio_dirty_list = NULL;
1688 spin_unlock_irq(&bio_dirty_lock);
1689
1690 while ((bio = next) != NULL) {
1691 next = bio->bi_private;
1692
1693 bio_release_pages(bio, true);
1694 bio_put(bio);
1695 }
1696 }
1697
1698 void bio_check_pages_dirty(struct bio *bio)
1699 {
1700 struct folio_iter fi;
1701 unsigned long flags;
1702
1703 bio_for_each_folio_all(fi, bio) {
1704 if (!folio_test_dirty(fi.folio))
1705 goto defer;
1706 }
1707
1708 bio_release_pages(bio, false);
1709 bio_put(bio);
1710 return;
1711 defer:
1712 spin_lock_irqsave(&bio_dirty_lock, flags);
1713 bio->bi_private = bio_dirty_list;
1714 bio_dirty_list = bio;
1715 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1716 schedule_work(&bio_dirty_work);
1717 }
1718 EXPORT_SYMBOL_GPL(bio_check_pages_dirty);
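
/*
 * Illustrative sketch of how a direct-IO READ into user pages typically pairs
 * the two helpers above (the "should_dirty" bookkeeping is the caller's):
 *
 *	// before submission, in process context:
 *	if (should_dirty)
 *		bio_set_pages_dirty(bio);
 *	submit_bio(bio);
 *
 *	// in the bi_end_io handler:
 *	if (should_dirty) {
 *		bio_check_pages_dirty(bio);	// releases the pages and the bio
 *	} else {
 *		bio_release_pages(bio, false);
 *		bio_put(bio);
 *	}
 */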
1719
1720 static inline bool bio_remaining_done(struct bio *bio)
1721 {
1722 /*
1723 * If we're not chaining, then ->__bi_remaining is always 1 and
1724 * we always end io on the first invocation.
1725 */
1726 if (!bio_flagged(bio, BIO_CHAIN))
1727 return true;
1728
1729 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1730
1731 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1732 bio_clear_flag(bio, BIO_CHAIN);
1733 return true;
1734 }
1735
1736 return false;
1737 }
1738
1739 /**
1740 * bio_endio - end I/O on a bio
1741 * @bio: bio
1742 *
1743 * Description:
1744 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1745 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1746 * bio unless they own it and thus know that it has an end_io function.
1747 *
1748 * bio_endio() can be called several times on a bio that has been chained
1749 * using bio_chain(). The ->bi_end_io() function will only be called the
1750 * last time.
1751 **/
1752 void bio_endio(struct bio *bio)
1753 {
1754 again:
1755 if (!bio_remaining_done(bio))
1756 return;
1757 if (!bio_integrity_endio(bio))
1758 return;
1759
1760 blk_zone_bio_endio(bio);
1761
1762 rq_qos_done_bio(bio);
1763
1764 if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
1765 trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio);
1766 bio_clear_flag(bio, BIO_TRACE_COMPLETION);
1767 }
1768
1769 /*
1770 * Need to have a real endio function for chained bios, otherwise
1771 * various corner cases will break (like stacking block devices that
1772 * save/restore bi_end_io) - however, we want to avoid unbounded
1773 * recursion and blowing the stack. Tail call optimization would
1774 * handle this, but compiling with frame pointers also disables
1775 * gcc's sibling call optimization.
1776 */
1777 if (bio->bi_end_io == bio_chain_endio) {
1778 bio = __bio_chain_endio(bio);
1779 goto again;
1780 }
1781
1782 #ifdef CONFIG_BLK_CGROUP
1783 /*
1784 * Release cgroup info. We shouldn't have to do this here, but quite
1785 * a few callers of bio_init fail to call bio_uninit, so we cover up
1786 * for that here at least for now.
1787 */
1788 if (bio->bi_blkg) {
1789 blkg_put(bio->bi_blkg);
1790 bio->bi_blkg = NULL;
1791 }
1792 #endif
1793
1794 if (bio->bi_end_io)
1795 bio->bi_end_io(bio);
1796 }
1797 EXPORT_SYMBOL(bio_endio);
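
/*
 * A minimal illustrative sketch (not part of bio.c) of the chaining
 * semantics described above.  The "example_" names are hypothetical.
 */
static void example_chain_and_submit(struct bio *first, struct bio *second)
{
	/*
	 * Make @second the parent of @first: bio_chain() points
	 * @first->bi_end_io at bio_chain_endio() and bumps @second's
	 * __bi_remaining, so bio_endio() on @first folds into @second,
	 * and @second->bi_end_io only runs once both bios have completed.
	 */
	bio_chain(first, second);
	submit_bio(first);
	submit_bio(second);
}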
1798
1799 /**
1800 * bio_split - split a bio
1801 * @bio: bio to split
1802 * @sectors: number of sectors to split from the front of @bio
1803 * @gfp: gfp mask
1804 * @bs: bio set to allocate from
1805 *
1806 * Allocates and returns a new bio which represents @sectors from the start of
1807 * @bio, and updates @bio to represent the remaining sectors.
1808 *
1809 * Unless this is a discard request the newly allocated bio will point
1810 * to @bio's bi_io_vec. It is the caller's responsibility to ensure that
1811 * neither @bio nor @bs is freed before the split bio.
1812 */
1813 struct bio *bio_split(struct bio *bio, int sectors,
1814 gfp_t gfp, struct bio_set *bs)
1815 {
1816 struct bio *split;
1817
1818 if (WARN_ON_ONCE(sectors <= 0))
1819 return ERR_PTR(-EINVAL);
1820 if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))
1821 return ERR_PTR(-EINVAL);
1822
1823 /* Zone append commands cannot be split */
1824 if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
1825 return ERR_PTR(-EINVAL);
1826
1827 /* atomic writes cannot be split */
1828 if (bio->bi_opf & REQ_ATOMIC)
1829 return ERR_PTR(-EINVAL);
1830
1831 split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
1832 if (!split)
1833 return ERR_PTR(-ENOMEM);
1834
1835 split->bi_iter.bi_size = sectors << 9;
1836
1837 if (bio_integrity(split))
1838 bio_integrity_trim(split);
1839
1840 bio_advance(bio, split->bi_iter.bi_size);
1841
1842 if (bio_flagged(bio, BIO_TRACE_COMPLETION))
1843 bio_set_flag(split, BIO_TRACE_COMPLETION);
1844
1845 return split;
1846 }
1847 EXPORT_SYMBOL(bio_split);
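
/*
 * A minimal illustrative sketch (not part of bio.c) of the common
 * split-and-chain pattern for a driver that can only service
 * @max_sectors at a time.  The "example_" names are hypothetical.
 */
static void example_split_and_requeue(struct bio *bio,
				      unsigned int max_sectors,
				      struct bio_set *bs)
{
	struct bio *split;

	if (bio_sectors(bio) <= max_sectors) {
		submit_bio_noacct(bio);
		return;
	}

	split = bio_split(bio, max_sectors, GFP_NOIO, bs);
	if (IS_ERR(split)) {
		bio->bi_status = BLK_STS_RESOURCE;
		bio_endio(bio);
		return;
	}

	/*
	 * @split completes into @bio, so @bio's ->bi_end_io only runs once
	 * both the front part and the remainder are done.
	 */
	bio_chain(split, bio);
	submit_bio_noacct(split);
	submit_bio_noacct(bio);	/* resubmit the remainder */
}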
1848
1849 /**
1850 * bio_trim - trim a bio
1851 * @bio: bio to trim
1852 * @offset: number of sectors to trim from the front of @bio
1853 * @size: size we want to trim @bio to, in sectors
1854 *
1855 * This function is typically used for bios that are cloned and submitted
1856 * to the underlying device in parts.
1857 */
1858 void bio_trim(struct bio *bio, sector_t offset, sector_t size)
1859 {
1860 /* We should never trim an atomic write */
1861 if (WARN_ON_ONCE(bio->bi_opf & REQ_ATOMIC && size))
1862 return;
1863
1864 if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS ||
1865 offset + size > bio_sectors(bio)))
1866 return;
1867
1868 size <<= 9;
1869 if (offset == 0 && size == bio->bi_iter.bi_size)
1870 return;
1871
1872 bio_advance(bio, offset << 9);
1873 bio->bi_iter.bi_size = size;
1874
1875 if (bio_integrity(bio))
1876 bio_integrity_trim(bio);
1877 }
1878 EXPORT_SYMBOL_GPL(bio_trim);
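
/*
 * A minimal illustrative sketch (not part of bio.c): cloning a bio and
 * trimming the clone to a sub-range before redirecting it, as a stacking
 * driver might when a target covers only part of the original bio.  The
 * "example_" names are hypothetical.
 */
static struct bio *example_clone_subrange(struct bio *bio,
					  struct block_device *target,
					  sector_t offset, sector_t nr_sects,
					  struct bio_set *bs)
{
	struct bio *clone = bio_alloc_clone(target, bio, GFP_NOIO, bs);

	if (!clone)
		return NULL;

	/* Keep @nr_sects sectors, starting @offset sectors into @bio. */
	bio_trim(clone, offset, nr_sects);
	return clone;
}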
1879
1880 /*
1881 * create memory pools for biovec's in a bio_set.
1882 * use the global biovec slabs created for general use.
1883 */
1884 int biovec_init_pool(mempool_t *pool, int pool_entries)
1885 {
1886 struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1;
1887
1888 return mempool_init_slab_pool(pool, pool_entries, bp->slab);
1889 }
1890
1891 /*
1892 * bioset_exit - exit a bioset initialized with bioset_init()
1893 *
1894 * May be called on a zeroed but uninitialized bioset (i.e. allocated with
1895 * kzalloc()).
1896 */
1897 void bioset_exit(struct bio_set *bs)
1898 {
1899 bio_alloc_cache_destroy(bs);
1900 if (bs->rescue_workqueue)
1901 destroy_workqueue(bs->rescue_workqueue);
1902 bs->rescue_workqueue = NULL;
1903
1904 mempool_exit(&bs->bio_pool);
1905 mempool_exit(&bs->bvec_pool);
1906
1907 if (bs->bio_slab)
1908 bio_put_slab(bs);
1909 bs->bio_slab = NULL;
1910 }
1911 EXPORT_SYMBOL(bioset_exit);
1912
1913 /**
1914 * bioset_init - Initialize a bio_set
1915 * @bs: pool to initialize
1916 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1917 * @front_pad: Number of bytes to allocate in front of the returned bio
1918 * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS
1919 * and %BIOSET_NEED_RESCUER
1920 *
1921 * Description:
1922 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1923 * to ask for a number of bytes to be allocated in front of the bio.
1924 * Front pad allocation is useful for embedding the bio inside
1925 * another structure, to avoid allocating extra data to go with the bio.
1926 * Note that the bio must be embedded at the END of that structure always,
1927 * or things will break badly.
1928 * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated
1929 * for allocating iovecs. This pool is not needed e.g. for bio_init_clone().
1930 * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used
1931 * to dispatch queued requests when the mempool runs out of space.
1932 *
1933 */
1934 int bioset_init(struct bio_set *bs,
1935 unsigned int pool_size,
1936 unsigned int front_pad,
1937 int flags)
1938 {
1939 bs->front_pad = front_pad;
1940 if (flags & BIOSET_NEED_BVECS)
1941 bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1942 else
1943 bs->back_pad = 0;
1944
1945 spin_lock_init(&bs->rescue_lock);
1946 bio_list_init(&bs->rescue_list);
1947 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1948
1949 bs->bio_slab = bio_find_or_create_slab(bs);
1950 if (!bs->bio_slab)
1951 return -ENOMEM;
1952
1953 if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab))
1954 goto bad;
1955
1956 if ((flags & BIOSET_NEED_BVECS) &&
1957 biovec_init_pool(&bs->bvec_pool, pool_size))
1958 goto bad;
1959
1960 if (flags & BIOSET_NEED_RESCUER) {
1961 bs->rescue_workqueue = alloc_workqueue("bioset",
1962 WQ_MEM_RECLAIM | WQ_PERCPU, 0);
1963 if (!bs->rescue_workqueue)
1964 goto bad;
1965 }
1966 if (flags & BIOSET_PERCPU_CACHE) {
1967 bs->cache = alloc_percpu(struct bio_alloc_cache);
1968 if (!bs->cache)
1969 goto bad;
1970 cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead);
1971 }
1972
1973 return 0;
1974 bad:
1975 bioset_exit(bs);
1976 return -ENOMEM;
1977 }
1978 EXPORT_SYMBOL(bioset_init);
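
/*
 * A minimal illustrative sketch (not part of bio.c) of the @front_pad
 * usage described above: a driver embeds its per-IO state in front of
 * the bio, with the bio as the *last* member of the containing struct.
 * The "example_" names are hypothetical.
 */
struct example_io {
	void		*private_data;
	struct bio	bio;		/* must come last */
};

static struct bio_set example_bio_set;

static int example_init(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_alloc(struct block_device *bdev,
					unsigned short nr_vecs, blk_opf_t opf)
{
	struct bio *bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOIO,
					   &example_bio_set);

	if (!bio)
		return NULL;
	/* The front pad sits directly in front of the returned bio. */
	return container_of(bio, struct example_io, bio);
}

static void example_exit(void)
{
	bioset_exit(&example_bio_set);
}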
1979
1980 static int __init init_bio(void)
1981 {
1982 int i;
1983
1984 BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags));
1985
1986 for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) {
1987 struct biovec_slab *bvs = bvec_slabs + i;
1988
1989 bvs->slab = kmem_cache_create(bvs->name,
1990 bvs->nr_vecs * sizeof(struct bio_vec), 0,
1991 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
1992 }
1993
1994 cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
1995 bio_cpu_dead);
1996
1997 if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
1998 BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
1999 panic("bio: can't allocate bios\n");
2000
2001 return 0;
2002 }
2003 subsys_initcall(init_bio);
2004