1 /* SPDX-License-Identifier: GPL-2.0 */ 2 /* 3 * Copyright (C) 2001 Jens Axboe <axboe@suse.de> 4 */ 5 #ifndef __LINUX_BIO_H 6 #define __LINUX_BIO_H 7 8 #include <linux/mempool.h> 9 /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ 10 #include <linux/blk_types.h> 11 #include <linux/uio.h> 12 13 #define BIO_MAX_VECS 256U 14 #define BIO_MAX_INLINE_VECS UIO_MAXIOV 15 16 struct queue_limits; 17 18 static inline unsigned int bio_max_segs(unsigned int nr_segs) 19 { 20 return min(nr_segs, BIO_MAX_VECS); 21 } 22 23 #define bio_iter_iovec(bio, iter) \ 24 bvec_iter_bvec((bio)->bi_io_vec, (iter)) 25 26 #define bio_iter_page(bio, iter) \ 27 bvec_iter_page((bio)->bi_io_vec, (iter)) 28 #define bio_iter_len(bio, iter) \ 29 bvec_iter_len((bio)->bi_io_vec, (iter)) 30 #define bio_iter_offset(bio, iter) \ 31 bvec_iter_offset((bio)->bi_io_vec, (iter)) 32 33 #define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter) 34 #define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter) 35 #define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter) 36 37 #define bvec_iter_sectors(iter) ((iter).bi_size >> 9) 38 #define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter))) 39 40 #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter) 41 #define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter) 42 43 /* 44 * Return the data direction, READ or WRITE. 45 */ 46 #define bio_data_dir(bio) \ 47 (op_is_write(bio_op(bio)) ? WRITE : READ) 48 49 static inline bool bio_flagged(const struct bio *bio, unsigned int bit) 50 { 51 return bio->bi_flags & (1U << bit); 52 } 53 54 static inline void bio_set_flag(struct bio *bio, unsigned int bit) 55 { 56 bio->bi_flags |= (1U << bit); 57 } 58 59 static inline void bio_clear_flag(struct bio *bio, unsigned int bit) 60 { 61 bio->bi_flags &= ~(1U << bit); 62 } 63 64 /* 65 * Check whether this bio carries any data or not. A NULL bio is allowed. 66 */ 67 static inline bool bio_has_data(struct bio *bio) 68 { 69 if (bio && 70 bio->bi_iter.bi_size && 71 bio_op(bio) != REQ_OP_DISCARD && 72 bio_op(bio) != REQ_OP_SECURE_ERASE && 73 bio_op(bio) != REQ_OP_WRITE_ZEROES) 74 return true; 75 76 return false; 77 } 78 79 static inline bool bio_no_advance_iter(const struct bio *bio) 80 { 81 return bio_op(bio) == REQ_OP_DISCARD || 82 bio_op(bio) == REQ_OP_SECURE_ERASE || 83 bio_op(bio) == REQ_OP_WRITE_ZEROES; 84 } 85 86 static inline void *bio_data(struct bio *bio) 87 { 88 if (bio_has_data(bio)) 89 return page_address(bio_page(bio)) + bio_offset(bio); 90 91 return NULL; 92 } 93 94 static inline bool bio_next_segment(const struct bio *bio, 95 struct bvec_iter_all *iter) 96 { 97 if (iter->idx >= bio->bi_vcnt) 98 return false; 99 100 bvec_advance(&bio->bi_io_vec[iter->idx], iter); 101 return true; 102 } 103 104 /* 105 * drivers should _never_ use the all version - the bio may have been split 106 * before it got to the driver and the driver won't own all of it 107 */ 108 #define bio_for_each_segment_all(bvl, bio, iter) \ 109 for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); ) 110 111 static inline void bio_advance_iter(const struct bio *bio, 112 struct bvec_iter *iter, unsigned int bytes) 113 { 114 iter->bi_sector += bytes >> 9; 115 116 if (bio_no_advance_iter(bio)) 117 iter->bi_size -= bytes; 118 else 119 bvec_iter_advance(bio->bi_io_vec, iter, bytes); 120 /* TODO: It is reasonable to complete bio with error here. */ 121 } 122 123 /* @bytes should be less or equal to bvec[i->bi_idx].bv_len */ 124 static inline void bio_advance_iter_single(const struct bio *bio, 125 struct bvec_iter *iter, 126 unsigned int bytes) 127 { 128 iter->bi_sector += bytes >> 9; 129 130 if (bio_no_advance_iter(bio)) 131 iter->bi_size -= bytes; 132 else 133 bvec_iter_advance_single(bio->bi_io_vec, iter, bytes); 134 } 135 136 void __bio_advance(struct bio *, unsigned bytes); 137 138 /** 139 * bio_advance - increment/complete a bio by some number of bytes 140 * @bio: bio to advance 141 * @nbytes: number of bytes to complete 142 * 143 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to 144 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will 145 * be updated on the last bvec as well. 146 * 147 * @bio will then represent the remaining, uncompleted portion of the io. 148 */ 149 static inline void bio_advance(struct bio *bio, unsigned int nbytes) 150 { 151 if (nbytes == bio->bi_iter.bi_size) { 152 bio->bi_iter.bi_size = 0; 153 return; 154 } 155 __bio_advance(bio, nbytes); 156 } 157 158 #define __bio_for_each_segment(bvl, bio, iter, start) \ 159 for (iter = (start); \ 160 (iter).bi_size && \ 161 ((bvl = bio_iter_iovec((bio), (iter))), 1); \ 162 bio_advance_iter_single((bio), &(iter), (bvl).bv_len)) 163 164 #define bio_for_each_segment(bvl, bio, iter) \ 165 __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) 166 167 #define __bio_for_each_bvec(bvl, bio, iter, start) \ 168 for (iter = (start); \ 169 (iter).bi_size && \ 170 ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \ 171 bio_advance_iter_single((bio), &(iter), (bvl).bv_len)) 172 173 /* iterate over multi-page bvec */ 174 #define bio_for_each_bvec(bvl, bio, iter) \ 175 __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) 176 177 /* 178 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for the 179 * same reasons as bio_for_each_segment_all(). 180 */ 181 #define bio_for_each_bvec_all(bvl, bio, i) \ 182 for (i = 0, bvl = bio_first_bvec_all(bio); \ 183 i < (bio)->bi_vcnt; i++, bvl++) 184 185 #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) 186 187 static inline unsigned bio_segments(struct bio *bio) 188 { 189 unsigned segs = 0; 190 struct bio_vec bv; 191 struct bvec_iter iter; 192 193 /* 194 * We special case discard/write same/write zeroes, because they 195 * interpret bi_size differently: 196 */ 197 198 switch (bio_op(bio)) { 199 case REQ_OP_DISCARD: 200 case REQ_OP_SECURE_ERASE: 201 case REQ_OP_WRITE_ZEROES: 202 return 0; 203 default: 204 break; 205 } 206 207 bio_for_each_segment(bv, bio, iter) 208 segs++; 209 210 return segs; 211 } 212 213 /* 214 * get a reference to a bio, so it won't disappear. the intended use is 215 * something like: 216 * 217 * bio_get(bio); 218 * submit_bio(rw, bio); 219 * if (bio->bi_flags ...) 220 * do_something 221 * bio_put(bio); 222 * 223 * without the bio_get(), it could potentially complete I/O before submit_bio 224 * returns. and then bio would be freed memory when if (bio->bi_flags ...) 225 * runs 226 */ 227 static inline void bio_get(struct bio *bio) 228 { 229 bio->bi_flags |= (1 << BIO_REFFED); 230 smp_mb__before_atomic(); 231 atomic_inc(&bio->__bi_cnt); 232 } 233 234 static inline void bio_cnt_set(struct bio *bio, unsigned int count) 235 { 236 if (count != 1) { 237 bio->bi_flags |= (1 << BIO_REFFED); 238 smp_mb(); 239 } 240 atomic_set(&bio->__bi_cnt, count); 241 } 242 243 static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) 244 { 245 WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); 246 return bio->bi_io_vec; 247 } 248 249 static inline struct page *bio_first_page_all(struct bio *bio) 250 { 251 return bio_first_bvec_all(bio)->bv_page; 252 } 253 254 static inline struct folio *bio_first_folio_all(struct bio *bio) 255 { 256 return page_folio(bio_first_page_all(bio)); 257 } 258 259 /** 260 * struct folio_iter - State for iterating all folios in a bio. 261 * @folio: The current folio we're iterating. NULL after the last folio. 262 * @offset: The byte offset within the current folio. 263 * @length: The number of bytes in this iteration (will not cross folio 264 * boundary). 265 */ 266 struct folio_iter { 267 struct folio *folio; 268 size_t offset; 269 size_t length; 270 /* private: for use by the iterator */ 271 struct folio *_next; 272 size_t _seg_count; 273 int _i; 274 }; 275 276 static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio, 277 int i) 278 { 279 struct bio_vec *bvec = bio_first_bvec_all(bio) + i; 280 281 if (unlikely(i >= bio->bi_vcnt)) { 282 fi->folio = NULL; 283 return; 284 } 285 286 fi->folio = page_folio(bvec->bv_page); 287 fi->offset = bvec->bv_offset + 288 PAGE_SIZE * folio_page_idx(fi->folio, bvec->bv_page); 289 fi->_seg_count = bvec->bv_len; 290 fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count); 291 fi->_next = folio_next(fi->folio); 292 fi->_i = i; 293 } 294 295 static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio) 296 { 297 fi->_seg_count -= fi->length; 298 if (fi->_seg_count) { 299 fi->folio = fi->_next; 300 fi->offset = 0; 301 fi->length = min(folio_size(fi->folio), fi->_seg_count); 302 fi->_next = folio_next(fi->folio); 303 } else { 304 bio_first_folio(fi, bio, fi->_i + 1); 305 } 306 } 307 308 /** 309 * bio_for_each_folio_all - Iterate over each folio in a bio. 310 * @fi: struct folio_iter which is updated for each folio. 311 * @bio: struct bio to iterate over. 312 */ 313 #define bio_for_each_folio_all(fi, bio) \ 314 for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio)) 315 316 void bio_trim(struct bio *bio, sector_t offset, sector_t size); 317 extern struct bio *bio_split(struct bio *bio, int sectors, 318 gfp_t gfp, struct bio_set *bs); 319 int bio_split_io_at(struct bio *bio, const struct queue_limits *lim, 320 unsigned *segs, unsigned max_bytes, unsigned len_align); 321 u8 bio_seg_gap(struct request_queue *q, struct bio *prev, struct bio *next, 322 u8 gaps_bit); 323 324 /** 325 * bio_next_split - get next @sectors from a bio, splitting if necessary 326 * @bio: bio to split 327 * @sectors: number of sectors to split from the front of @bio 328 * @gfp: gfp mask 329 * @bs: bio set to allocate from 330 * 331 * Return: a bio representing the next @sectors of @bio - if the bio is smaller 332 * than @sectors, returns the original bio unchanged. 333 */ 334 static inline struct bio *bio_next_split(struct bio *bio, int sectors, 335 gfp_t gfp, struct bio_set *bs) 336 { 337 if (sectors >= bio_sectors(bio)) 338 return bio; 339 340 return bio_split(bio, sectors, gfp, bs); 341 } 342 343 enum { 344 BIOSET_NEED_BVECS = BIT(0), 345 BIOSET_NEED_RESCUER = BIT(1), 346 BIOSET_PERCPU_CACHE = BIT(2), 347 }; 348 extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); 349 extern void bioset_exit(struct bio_set *); 350 extern int biovec_init_pool(mempool_t *pool, int pool_entries); 351 352 struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, 353 blk_opf_t opf, gfp_t gfp, struct bio_set *bs); 354 struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask); 355 extern void bio_put(struct bio *); 356 357 struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, 358 gfp_t gfp, struct bio_set *bs); 359 int bio_init_clone(struct block_device *bdev, struct bio *bio, 360 struct bio *bio_src, gfp_t gfp); 361 362 extern struct bio_set fs_bio_set; 363 364 static inline struct bio *bio_alloc(struct block_device *bdev, 365 unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask) 366 { 367 return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set); 368 } 369 370 void submit_bio(struct bio *bio); 371 372 extern void bio_endio(struct bio *); 373 374 static inline void bio_io_error(struct bio *bio) 375 { 376 bio->bi_status = BLK_STS_IOERR; 377 bio_endio(bio); 378 } 379 380 static inline void bio_wouldblock_error(struct bio *bio) 381 { 382 bio_set_flag(bio, BIO_QUIET); 383 bio->bi_status = BLK_STS_AGAIN; 384 bio_endio(bio); 385 } 386 387 /* 388 * Calculate number of bvec segments that should be allocated to fit data 389 * pointed by @iter. If @iter is backed by bvec it's going to be reused 390 * instead of allocating a new one. 391 */ 392 static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs) 393 { 394 if (iov_iter_is_bvec(iter)) 395 return 0; 396 return iov_iter_npages(iter, max_segs); 397 } 398 399 /** 400 * bio_iov_bounce_nr_vecs - calculate number of bvecs for a bounce bio 401 * @iter: iter to bounce from 402 * @op: REQ_OP_* for the bio 403 * 404 * Calculates how many bvecs are needed for the next bio to bounce from/to 405 * @iter. 406 */ 407 static inline unsigned short 408 bio_iov_bounce_nr_vecs(struct iov_iter *iter, blk_opf_t op) 409 { 410 /* 411 * We still need to bounce bvec iters, so don't special case them 412 * here unlike in bio_iov_vecs_to_alloc. 413 * 414 * For reads we need to use a vector for the bounce buffer, account 415 * for that here. 416 */ 417 if (op_is_write(op)) 418 return iov_iter_npages(iter, BIO_MAX_VECS); 419 return iov_iter_npages(iter, BIO_MAX_VECS - 1) + 1; 420 } 421 422 struct request_queue; 423 424 void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, 425 unsigned short max_vecs, blk_opf_t opf); 426 static inline void bio_init_inline(struct bio *bio, struct block_device *bdev, 427 unsigned short max_vecs, blk_opf_t opf) 428 { 429 bio_init(bio, bdev, bio_inline_vecs(bio), max_vecs, opf); 430 } 431 extern void bio_uninit(struct bio *); 432 void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf); 433 void bio_reuse(struct bio *bio, blk_opf_t opf); 434 void bio_chain(struct bio *, struct bio *); 435 void bio_await(struct bio *bio, void *priv, 436 void (*submit)(struct bio *bio, void *priv)); 437 438 int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len, 439 unsigned off); 440 bool __must_check bio_add_folio(struct bio *bio, struct folio *folio, 441 size_t len, size_t off); 442 void __bio_add_page(struct bio *bio, struct page *page, 443 unsigned int len, unsigned int off); 444 void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, 445 size_t off); 446 void bio_add_virt_nofail(struct bio *bio, void *vaddr, unsigned len); 447 448 /** 449 * bio_add_max_vecs - number of bio_vecs needed to add data to a bio 450 * @kaddr: kernel virtual address to add 451 * @len: length in bytes to add 452 * 453 * Calculate how many bio_vecs need to be allocated to add the kernel virtual 454 * address range in [@kaddr:@len] in the worse case. 455 */ 456 static inline unsigned int bio_add_max_vecs(void *kaddr, unsigned int len) 457 { 458 if (is_vmalloc_addr(kaddr)) 459 return DIV_ROUND_UP(offset_in_page(kaddr) + len, PAGE_SIZE); 460 return 1; 461 } 462 463 unsigned int bio_add_vmalloc_chunk(struct bio *bio, void *vaddr, unsigned len); 464 bool bio_add_vmalloc(struct bio *bio, void *vaddr, unsigned int len); 465 466 int submit_bio_wait(struct bio *bio); 467 int bdev_rw_virt(struct block_device *bdev, sector_t sector, void *data, 468 size_t len, enum req_op op); 469 470 int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter, 471 unsigned len_align_mask); 472 473 void bio_iov_bvec_set(struct bio *bio, const struct iov_iter *iter); 474 void __bio_release_pages(struct bio *bio, bool mark_dirty); 475 extern void bio_set_pages_dirty(struct bio *bio); 476 extern void bio_check_pages_dirty(struct bio *bio); 477 478 int bio_iov_iter_bounce(struct bio *bio, struct iov_iter *iter, size_t maxlen); 479 void bio_iov_iter_unbounce(struct bio *bio, bool is_error, bool mark_dirty); 480 481 extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, 482 struct bio *src, struct bvec_iter *src_iter); 483 extern void bio_copy_data(struct bio *dst, struct bio *src); 484 extern void bio_free_pages(struct bio *bio); 485 void guard_bio_eod(struct bio *bio); 486 void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter); 487 488 static inline void zero_fill_bio(struct bio *bio) 489 { 490 zero_fill_bio_iter(bio, bio->bi_iter); 491 } 492 493 static inline void bio_release_pages(struct bio *bio, bool mark_dirty) 494 { 495 if (bio_flagged(bio, BIO_PAGE_PINNED)) 496 __bio_release_pages(bio, mark_dirty); 497 } 498 499 #define bio_dev(bio) \ 500 disk_devt((bio)->bi_bdev->bd_disk) 501 502 #ifdef CONFIG_BLK_CGROUP 503 void bio_associate_blkg(struct bio *bio); 504 void bio_associate_blkg_from_css(struct bio *bio, 505 struct cgroup_subsys_state *css); 506 void bio_clone_blkg_association(struct bio *dst, struct bio *src); 507 void blkcg_punt_bio_submit(struct bio *bio); 508 #else /* CONFIG_BLK_CGROUP */ 509 static inline void bio_associate_blkg(struct bio *bio) { } 510 static inline void bio_associate_blkg_from_css(struct bio *bio, 511 struct cgroup_subsys_state *css) 512 { } 513 static inline void bio_clone_blkg_association(struct bio *dst, 514 struct bio *src) { } 515 static inline void blkcg_punt_bio_submit(struct bio *bio) 516 { 517 submit_bio(bio); 518 } 519 #endif /* CONFIG_BLK_CGROUP */ 520 521 static inline void bio_set_dev(struct bio *bio, struct block_device *bdev) 522 { 523 bio_clear_flag(bio, BIO_REMAPPED); 524 if (bio->bi_bdev != bdev) 525 bio_clear_flag(bio, BIO_BPS_THROTTLED); 526 bio->bi_bdev = bdev; 527 bio_associate_blkg(bio); 528 } 529 530 /* 531 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 532 * 533 * A bio_list anchors a singly-linked list of bios chained through the bi_next 534 * member of the bio. The bio_list also caches the last list member to allow 535 * fast access to the tail. 536 */ 537 struct bio_list { 538 struct bio *head; 539 struct bio *tail; 540 }; 541 542 static inline int bio_list_empty(const struct bio_list *bl) 543 { 544 return bl->head == NULL; 545 } 546 547 static inline void bio_list_init(struct bio_list *bl) 548 { 549 bl->head = bl->tail = NULL; 550 } 551 552 #define BIO_EMPTY_LIST { NULL, NULL } 553 554 #define bio_list_for_each(bio, bl) \ 555 for (bio = (bl)->head; bio; bio = bio->bi_next) 556 557 static inline unsigned bio_list_size(const struct bio_list *bl) 558 { 559 unsigned sz = 0; 560 struct bio *bio; 561 562 bio_list_for_each(bio, bl) 563 sz++; 564 565 return sz; 566 } 567 568 static inline void bio_list_add(struct bio_list *bl, struct bio *bio) 569 { 570 bio->bi_next = NULL; 571 572 if (bl->tail) 573 bl->tail->bi_next = bio; 574 else 575 bl->head = bio; 576 577 bl->tail = bio; 578 } 579 580 static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio) 581 { 582 bio->bi_next = bl->head; 583 584 bl->head = bio; 585 586 if (!bl->tail) 587 bl->tail = bio; 588 } 589 590 static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) 591 { 592 if (!bl2->head) 593 return; 594 595 if (bl->tail) 596 bl->tail->bi_next = bl2->head; 597 else 598 bl->head = bl2->head; 599 600 bl->tail = bl2->tail; 601 } 602 603 static inline void bio_list_merge_init(struct bio_list *bl, 604 struct bio_list *bl2) 605 { 606 bio_list_merge(bl, bl2); 607 bio_list_init(bl2); 608 } 609 610 static inline void bio_list_merge_head(struct bio_list *bl, 611 struct bio_list *bl2) 612 { 613 if (!bl2->head) 614 return; 615 616 if (bl->head) 617 bl2->tail->bi_next = bl->head; 618 else 619 bl->tail = bl2->tail; 620 621 bl->head = bl2->head; 622 } 623 624 static inline struct bio *bio_list_peek(struct bio_list *bl) 625 { 626 return bl->head; 627 } 628 629 static inline struct bio *bio_list_pop(struct bio_list *bl) 630 { 631 struct bio *bio = bl->head; 632 633 if (bio) { 634 bl->head = bl->head->bi_next; 635 if (!bl->head) 636 bl->tail = NULL; 637 638 bio->bi_next = NULL; 639 } 640 641 return bio; 642 } 643 644 static inline struct bio *bio_list_get(struct bio_list *bl) 645 { 646 struct bio *bio = bl->head; 647 648 bl->head = bl->tail = NULL; 649 650 return bio; 651 } 652 653 /* 654 * Increment chain count for the bio. Make sure the CHAIN flag update 655 * is visible before the raised count. 656 */ 657 static inline void bio_inc_remaining(struct bio *bio) 658 { 659 bio_set_flag(bio, BIO_CHAIN); 660 smp_mb__before_atomic(); 661 atomic_inc(&bio->__bi_remaining); 662 } 663 664 /* 665 * bio_set is used to allow other portions of the IO system to 666 * allocate their own private memory pools for bio and iovec structures. 667 * These memory pools in turn all allocate from the bio_slab 668 * and the bvec_slabs[]. 669 */ 670 #define BIO_POOL_SIZE 2 671 672 struct bio_set { 673 struct kmem_cache *bio_slab; 674 unsigned int front_pad; 675 676 /* 677 * per-cpu bio alloc cache 678 */ 679 struct bio_alloc_cache __percpu *cache; 680 681 mempool_t bio_pool; 682 mempool_t bvec_pool; 683 684 unsigned int back_pad; 685 /* 686 * Deadlock avoidance for stacking block drivers: see comments in 687 * bio_alloc_bioset() for details 688 */ 689 spinlock_t rescue_lock; 690 struct bio_list rescue_list; 691 struct work_struct rescue_work; 692 struct workqueue_struct *rescue_workqueue; 693 694 /* 695 * Hot un-plug notifier for the per-cpu cache, if used 696 */ 697 struct hlist_node cpuhp_dead; 698 }; 699 700 static inline bool bioset_initialized(struct bio_set *bs) 701 { 702 return bs->bio_slab != NULL; 703 } 704 705 /* 706 * Mark a bio as polled. Note that for async polled IO, the caller must 707 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources). 708 * We cannot block waiting for requests on polled IO, as those completions 709 * must be found by the caller. This is different than IRQ driven IO, where 710 * it's safe to wait for IO to complete. 711 */ 712 static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) 713 { 714 bio->bi_opf |= REQ_POLLED; 715 if (kiocb->ki_flags & IOCB_NOWAIT) 716 bio->bi_opf |= REQ_NOWAIT; 717 } 718 719 static inline void bio_clear_polled(struct bio *bio) 720 { 721 bio->bi_opf &= ~REQ_POLLED; 722 } 723 724 /** 725 * bio_is_zone_append - is this a zone append bio? 726 * @bio: bio to check 727 * 728 * Check if @bio is a zone append operation. Core block layer code and end_io 729 * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check 730 * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if 731 * it is not natively supported. 732 */ 733 static inline bool bio_is_zone_append(struct bio *bio) 734 { 735 if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) 736 return false; 737 return bio_op(bio) == REQ_OP_ZONE_APPEND || 738 bio_flagged(bio, BIO_EMULATES_ZONE_APPEND); 739 } 740 741 struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, 742 unsigned int nr_pages, blk_opf_t opf, gfp_t gfp); 743 struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new); 744 745 struct bio *blk_alloc_discard_bio(struct block_device *bdev, 746 sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask); 747 748 #endif /* __LINUX_BIO_H */ 749