/linux/block/

blk-merge.c
    122   split->bi_opf |= REQ_NOMERGE;   in bio_submit_split()
    200   bool is_atomic = bio->bi_opf & REQ_ATOMIC;   in get_max_io_size()
    348   if (bio->bi_opf & REQ_ATOMIC)   in bio_split_rw_at()
    355   if (bio->bi_opf & REQ_NOWAIT)   in bio_split_rw_at()
    786   WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&   in blk_rq_set_mixed_merge()
    787   (bio->bi_opf & REQ_FAILFAST_MASK) != ff);   in blk_rq_set_mixed_merge()
    788   bio->bi_opf |= ff;   in blk_rq_set_mixed_merge()
    795   if (bio->bi_opf & REQ_RAHEAD)   in bio_failfast()
    798   return bio->bi_opf & REQ_FAILFAST_MASK;   in bio_failfast()
    810   if (bio->bi_opf & REQ_RAHEAD)   in blk_update_mixed_merge()
    [all …]
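A minimal sketch of the bio_failfast() pattern in the hits above: readahead bios are best effort, so any failfast bits they carry are ignored. The helper name is illustrative, not the kernel's function.

    #include <linux/blk_types.h>

    /* Readahead I/O should not inherit failfast semantics. */
    static blk_opf_t example_effective_failfast(const struct bio *bio)
    {
            if (bio->bi_opf & REQ_RAHEAD)
                    return 0;
            return bio->bi_opf & REQ_FAILFAST_MASK;
    }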
blk-core.c
    338   if (bio->bi_opf & REQ_NOWAIT) {   in __bio_queue_enter()
    519   if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))   in bio_check_ro()
    559   current->comm, bio->bi_bdev, bio->bi_opf,   in bio_check_eod()
    613   bio->bi_opf |= REQ_NOMERGE;   in blk_check_zone_append()
    777   if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))   in submit_bio_noacct()
    795   if (op_is_flush(bio->bi_opf)) {   in submit_bio_noacct()
    800   bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);   in submit_bio_noacct()
    809   (bio->bi_opf & REQ_POLLED)) {   in submit_bio_noacct()
    818   if (bio->bi_opf & REQ_ATOMIC) {   in submit_bio_noacct()
    1264  sizeof_field(struct bio, bi_opf));   in blk_dev_init()
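A minimal sketch of two checks the submit_bio_noacct() hits illustrate: fail REQ_NOWAIT bios early when the device cannot honour them, and strip PREFLUSH/FUA when the device reports no volatile write cache. The helper is hypothetical and omits the other checks the real function performs.

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static bool example_prep_submit(struct bio *bio)
    {
            struct block_device *bdev = bio->bi_bdev;

            if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev)) {
                    bio_wouldblock_error(bio);      /* completes with BLK_STS_AGAIN */
                    return false;
            }

            if (op_is_flush(bio->bi_opf) && !bdev_write_cache(bdev))
                    bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);

            return true;
    }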
blk-throttle.h
    177   blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,   in blk_should_throtl()
    180   blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);   in blk_should_throtl()
fops.c
    77    bio.bi_opf |= REQ_ATOMIC;   in __blkdev_direct_IO_simple()
    88    bio.bi_opf |= REQ_NOWAIT;   in __blkdev_direct_IO_simple()
    230   bio->bi_opf |= REQ_NOWAIT;   in __blkdev_direct_IO()
    350   bio->bi_opf |= REQ_ATOMIC;   in __blkdev_direct_IO_async()
    353   bio->bi_opf |= REQ_NOWAIT;   in __blkdev_direct_IO_async()
    356   bio->bi_opf |= REQ_POLLED;   in __blkdev_direct_IO_async()
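A minimal sketch of how a direct-I/O write path composes bi_opf from iocb flags, in the spirit of the fops.c hits above. The function name is illustrative; the IOCB_* to REQ_* mapping follows the hits but is not copied from the kernel.

    #include <linux/blk_types.h>
    #include <linux/fs.h>

    static blk_opf_t example_dio_write_opf(const struct kiocb *iocb)
    {
            blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

            if (iocb->ki_flags & IOCB_NOWAIT)
                    opf |= REQ_NOWAIT;      /* do not sleep waiting for resources */
            if (iocb->ki_flags & IOCB_HIPRI)
                    opf |= REQ_POLLED;      /* completion will be polled */
            if (iocb->ki_flags & IOCB_ATOMIC)
                    opf |= REQ_ATOMIC;      /* torn-write protection */
            return opf;
    }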
blk-mq-sched.h
    33    return !(bio->bi_opf & REQ_NOMERGE_FLAGS);   in bio_mergeable()
blk-wbt.c
    604   if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==   in wbt_should_throttle()
    625   if (bio->bi_opf & REQ_SWAP)   in bio_to_wbt_flags()
    658   __wbt_wait(rwb, flags, bio->bi_opf);   in wbt_wait()
bio-integrity.c
    45    bio->bi_opf &= ~REQ_INTEGRITY;   in bio_integrity_free()
    95    bio->bi_opf |= REQ_INTEGRITY;   in bio_integrity_alloc()

/linux/fs/btrfs/
bio.c
    321   else if (!(bio->bi_opf & REQ_RAHEAD))   in btrfs_log_dev_io_error()
    323   if (bio->bi_opf & REQ_PREFLUSH)   in btrfs_log_dev_io_error()
    330   if (bio->bi_opf & REQ_META)   in btrfs_end_io_wq()
    452   __func__, bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,   in btrfs_submit_dev_bio()
    456   if (bio->bi_opf & REQ_BTRFS_CGROUP_PUNT)   in btrfs_submit_dev_bio()
    517   if (bbio->bio.bi_opf & REQ_META)   in btrfs_bio_csum()
    585   bio->bi_opf |= REQ_BTRFS_CGROUP_PUNT;   in run_one_async_done()
    611   if (op_is_sync(bbio->bio.bi_opf))   in should_async_write()
    615   if ((bbio->bio.bi_opf & REQ_META) && btrfs_is_zoned(bbio->fs_info))   in should_async_write()
    724   bio->bi_opf &= ~REQ_OP_WRITE;   in btrfs_submit_chunk()
    [all …]
/linux/drivers/md/bcache/

writeback.h
    123   return (op_is_sync(bio->bi_opf) ||   in should_writeback()
    124   bio->bi_opf & (REQ_META|REQ_PRIO) ||   in should_writeback()
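A minimal sketch of the kind of predicate the should_writeback() hits show: sync, metadata, and high-priority bios are treated as latency sensitive. Illustrative only, not bcache's actual policy.

    #include <linux/blk_types.h>

    static bool example_bio_is_latency_sensitive(const struct bio *bio)
    {
            return op_is_sync(bio->bi_opf) ||
                   (bio->bi_opf & (REQ_META | REQ_PRIO));
    }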
io.c
    68    if (bio->bi_opf & REQ_RAHEAD) {   in bch_count_backing_io_errors()

/linux/include/trace/events/
bcache.h
    31    blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
    105   blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
    140   blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
    171   blk_fill_rwbs(__entry->rwbs, bio->bi_opf);
    241   blk_fill_rwbs(__entry->rwbs, bio->bi_opf);

/linux/drivers/md/
dm-raid1.c
    268   .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,   in mirror_flush()
    547   .bi_opf = REQ_OP_READ,   in read_async_bio()
    659   blk_opf_t op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH);   in do_write()
    661   .bi_opf = REQ_OP_WRITE | op_flags,   in do_write()
    670   io_req.bi_opf = REQ_OP_DISCARD | op_flags;   in do_write()
    708   if ((bio->bi_opf & REQ_PREFLUSH) ||   in do_writes()
    1222  if (bio->bi_opf & REQ_RAHEAD)   in mirror_map()
    1259  if (!(bio->bi_opf & REQ_PREFLUSH) &&   in mirror_end_io()
    1268  if (bio->bi_opf & REQ_RAHEAD)   in mirror_end_io()
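A minimal sketch of issuing an empty flush through the dm-io API, after the mirror_flush()-style initializer above. It assumes the dm_io() variant that takes an I/O priority argument (as the dm-io.c hits below suggest) and IOPRIO_DEFAULT; names and error handling are simplified.

    #include <linux/dm-io.h>
    #include <linux/ioprio.h>
    #include <linux/blk_types.h>

    static int example_issue_flush(struct dm_io_client *client,
                                   struct block_device *bdev)
    {
            struct dm_io_region region = {
                    .bdev = bdev,
                    .sector = 0,
                    .count = 0,                     /* empty flush: no payload */
            };
            struct dm_io_request io_req = {
                    .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
                    .mem.type = DM_IO_KMEM,
                    .mem.ptr.addr = NULL,
                    .client = client,               /* .notify.fn left NULL: synchronous */
            };

            return dm_io(&io_req, 1, &region, NULL, IOPRIO_DEFAULT);
    }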
dm-io.c
    482   if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {   in dp_init()
    507   if (num_regions > 1 && !op_is_write(io_req->bi_opf)) {   in dm_io()
    518   io_req->bi_opf, &dp, sync_error_bits, ioprio);   in dm_io()
    520   async_io(io_req->client, num_regions, where, io_req->bi_opf, &dp,   in dm_io()
raid1.c
    470   (bio->bi_opf & MD_FAILFAST) &&   in raid1_end_write_request()
    1323  const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;   in raid1_read_request()
    1340  bio->bi_opf & REQ_NOWAIT)) {   in raid1_read_request()
    1411  read_bio->bi_opf = op | do_sync;   in raid1_read_request()
    1414  read_bio->bi_opf |= MD_FAILFAST;   in raid1_read_request()
    1447  if (bio->bi_opf & REQ_NOWAIT)   in wait_blocked_rdev()
    1478  if (bio->bi_opf & REQ_NOWAIT) {   in raid1_write_request()
    1500  bio->bi_opf & REQ_NOWAIT)) {   in raid1_write_request()
    1583  if (bio->bi_opf & REQ_ATOMIC) {   in raid1_write_request()
    1674  mbio->bi_opf = bio_op(bio) |   in raid1_write_request()
    [all …]
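A minimal sketch of cloning a bio for one mirror leg and rebuilding bi_opf from the original opcode plus selected flags, as the raid1_read_request() hits do. The MD_FAILFAST handling is omitted and the helper name is illustrative.

    #include <linux/bio.h>

    static struct bio *example_clone_for_leg(struct bio *orig,
                                             struct block_device *leg,
                                             struct bio_set *bs)
    {
            struct bio *clone = bio_alloc_clone(leg, orig, GFP_NOIO, bs);

            if (!clone)
                    return NULL;
            /* keep the opcode, carry over only the sync hint */
            clone->bi_opf = bio_op(orig) | (orig->bi_opf & REQ_SYNC);
            return clone;
    }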
raid10.c
    492   (bio->bi_opf & MD_FAILFAST)) {   in raid10_end_write_request()
    1127  if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {   in regular_request_wait()
    1135  if (bio->bi_opf & REQ_NOWAIT) {   in regular_request_wait()
    1155  const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;   in raid10_read_request()
    1236  read_bio->bi_opf = op | do_sync;   in raid10_read_request()
    1239  read_bio->bi_opf |= MD_FAILFAST;   in raid10_read_request()
    1256  const blk_opf_t do_sync = bio->bi_opf & REQ_SYNC;   in raid10_write_one_disk()
    1257  const blk_opf_t do_fua = bio->bi_opf & REQ_FUA;   in raid10_write_one_disk()
    1258  const blk_opf_t do_atomic = bio->bi_opf & REQ_ATOMIC;   in raid10_write_one_disk()
    1277  mbio->bi_opf = op | do_sync | do_fua | do_atomic;   in raid10_write_one_disk()
    [all …]
dm-flakey.c
    22    (((bio)->bi_opf & (fc)->corrupt_bio_flags) == (fc)->corrupt_bio_flags)
    359   (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_opf,   in corrupt_bio_common()
    429   bio_init(clone, fc->dev->bdev, bio->bi_inline_vecs, nr_iovecs, bio->bi_opf);   in clone_bio()
dm-stripe.c
    278   if (bio->bi_opf & REQ_PREFLUSH) {   in stripe_map()
    414   if (bio->bi_opf & REQ_RAHEAD)   in stripe_end_io()
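A minimal sketch of the flush special case seen in stripe_map(): device-mapper hands targets flushes as empty bios, so the map callback only has to pick a device and remap. The target structure and names are hypothetical, not dm-stripe's.

    #include <linux/bio.h>
    #include <linux/device-mapper.h>

    struct example_target {
            struct dm_dev *dev;             /* single backing device for the sketch */
    };

    static int example_map(struct dm_target *ti, struct bio *bio)
    {
            struct example_target *et = ti->private;

            if (bio->bi_opf & REQ_PREFLUSH) {
                    /* flush bios carry no data; just redirect them */
                    bio_set_dev(bio, et->dev->bdev);
                    return DM_MAPIO_REMAPPED;
            }

            /* a real target would also remap bi_sector here */
            bio_set_dev(bio, et->dev->bdev);
            return DM_MAPIO_REMAPPED;
    }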
/linux/drivers/md/dm-vdo/

vio.h
    122   blk_opf_t bi_opf, physical_block_number_t pbn);
    125   blk_opf_t bi_opf, physical_block_number_t pbn);
/linux/drivers/block/

brd.c
    251   if (unlikely(op_is_discard(bio->bi_opf))) {   in brd_submit_bio()
    266   bio->bi_opf, sector);   in brd_submit_bio()
    268   if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {   in brd_submit_bio()
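A minimal sketch of the error mapping the brd.c hits show: an allocation failure on a REQ_NOWAIT bio is reported as "would block" rather than as a hard I/O error. The helper is illustrative.

    #include <linux/bio.h>

    static void example_complete(struct bio *bio, int err)
    {
            if (err == -ENOMEM && (bio->bi_opf & REQ_NOWAIT))
                    bio_wouldblock_error(bio);      /* caller may retry without NOWAIT */
            else if (err)
                    bio_io_error(bio);
            else
                    bio_endio(bio);
    }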
/linux/include/linux/

dm-io.h
    62    blk_opf_t bi_opf;   /* Request type and flags */   (member)
blk_types.h
    217   blk_opf_t bi_opf;   /* bottom bits REQ_OP, top bits   (member)
    433   return bio->bi_opf & REQ_OP_MASK;   in bio_op()
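A minimal sketch of the bi_opf encoding these blk_types.h hits describe: the low bits hold the REQ_OP_* opcode and the high bits hold REQ_* flags, with bio_op() recovering the opcode via REQ_OP_MASK. The function is illustrative.

    #include <linux/blk_types.h>
    #include <linux/bug.h>

    static void example_opf_roundtrip(struct bio *bio)
    {
            bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;

            WARN_ON(bio_op(bio) != REQ_OP_WRITE);   /* opcode bits */
            WARN_ON(!(bio->bi_opf & REQ_FUA));      /* flag bits */
    }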
/linux/fs/bcachefs/

io_write.c
    422   n->bio.bi_opf = wbio->bio.bi_opf;   in bch2_submit_wbio_replicas()
    735   wbio->bio.bi_opf = src->bi_opf;   in bch2_write_bio_alloc()
    1075  dst->bi_opf = src->bi_opf;   in bch2_write_extent()
    1316  bio->bi_opf = op->wbio.bio.bi_opf;   in bch2_nocow_write()
    1326  bio->bi_opf |= REQ_OP_WRITE;   in bch2_nocow_write()
    1480  bio->bi_opf |= REQ_OP_WRITE;   in __bch2_write()
/linux/fs/xfs/

xfs_bio_io.c
    42    prev->bi_opf, GFP_KERNEL);   in xfs_rw_bdev()
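A minimal sketch of chaining a continuation bio while preserving the original operation and flags, similar in spirit to the xfs_rw_bdev() hit above. The helper name is illustrative and error handling is omitted.

    #include <linux/bio.h>

    static struct bio *example_chain_next(struct bio *prev)
    {
            struct bio *next;

            next = bio_alloc(prev->bi_bdev, BIO_MAX_VECS, prev->bi_opf,
                             GFP_KERNEL);
            next->bi_iter.bi_sector = bio_end_sector(prev);
            bio_chain(prev, next);          /* next completes only after prev does */
            submit_bio(prev);
            return next;
    }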
/linux/drivers/nvme/host/

multipath.c
    108   if (bio->bi_opf & REQ_POLLED) {   in nvme_failover_req()
    109   bio->bi_opf &= ~REQ_POLLED;   in nvme_failover_req()
    119   bio->bi_opf &= ~REQ_NOWAIT;   in nvme_failover_req()
    468   bio->bi_opf |= REQ_NVME_MPATH;   in nvme_ns_head_submit_bio()
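A minimal sketch of the flag sanitising visible in nvme_failover_req(): a bio being requeued on another path should not keep REQ_POLLED or REQ_NOWAIT, since the new submission context cannot honour them. Illustrative helper, not the driver's code.

    #include <linux/blk_types.h>

    static void example_prepare_requeue(struct bio *bio)
    {
            bio->bi_opf &= ~(REQ_POLLED | REQ_NOWAIT);
    }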
/linux/Documentation/admin-guide/device-mapper/

dm-flakey.rst
    67    Perform the replacement only if bio->bi_opf has all the