/linux/block/

blk.h
    150  if (req_op(rq) == REQ_OP_FLUSH)  in rq_mergeable()
    153  if (req_op(rq) == REQ_OP_WRITE_ZEROES)  in rq_mergeable()
    156  if (req_op(rq) == REQ_OP_ZONE_APPEND)  in rq_mergeable()
    177  if (req_op(req) == REQ_OP_DISCARD &&  in blk_discard_mergable()
    185  if (req_op(rq) == REQ_OP_DISCARD)  in blk_rq_get_max_segments()
    193  enum req_op op = req_op(rq);  in blk_queue_get_max_sectors()
    473  if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio))  in blk_zone_update_request_bio()
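The rq_mergeable() hits above show the block core refusing to merge flush,
write-zeroes, and zone-append requests. A minimal sketch of that check,
using the hypothetical name op_allows_merging() (the real rq_mergeable()
also tests request flags such as RQF_NOMERGE_FLAGS):

    /* Sketch: operations the block layer never merges. */
    static bool op_allows_merging(struct request *rq)
    {
            switch (req_op(rq)) {
            case REQ_OP_FLUSH:
            case REQ_OP_WRITE_ZEROES:
            case REQ_OP_ZONE_APPEND:
                    return false;
            default:
                    return true;
            }
    }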
blk-merge.c
    636  req_op(rq) == REQ_OP_DISCARD ||  in blk_rq_get_max_sectors()
    637  req_op(rq) == REQ_OP_SECURE_ERASE)  in blk_rq_get_max_sectors()
    653  if (req_op(req) == REQ_OP_DISCARD)  in ll_new_hw_segment()
    824  part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);  in blk_account_io_merge_request()
    826  in_flight[op_is_write(req_op(req))]);  in blk_account_io_merge_request()
    864  if (req_op(req) != req_op(next))  in attempt_merge()
    979  if (req_op(rq) != bio_op(bio))  in blk_rq_merge_ok()
   1013  part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);  in blk_account_io_merge_bio()
blk-map.c
    160  bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));  in bio_copy_user_iov()
    267  bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));  in blk_rq_map_bio_alloc()
    784  bio->bi_opf |= req_op(rq);  in blk_rq_map_kern()
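Both bio_init() call sites pass req_op(rq) as the opf argument, so a bio
built to service a request inherits the request's operation; the
blk_rq_map_kern() hit does the same by ORing the op into bi_opf. A hedged
sketch of the idiom with bio_alloc() (the helper name is hypothetical):

    /* Sketch: allocate a bio that carries its parent request's op. */
    static struct bio *sketch_bio_for_rq(struct request *rq,
                                         unsigned short nr_vecs, gfp_t gfp)
    {
            return bio_alloc(NULL, nr_vecs, req_op(rq), gfp);
    }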
blk-core.c
    124  inline const char *blk_op_str(enum req_op op)  in blk_op_str()
   1023  unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,  in bdev_start_io_acct()
   1047  void bdev_end_io_acct(struct block_device *bdev, enum req_op op,  in bdev_end_io_acct()
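blk_op_str() maps an enum req_op to a printable name; blk_print_req_error()
in blk-mq.c below is its main in-tree consumer. A small usage sketch,
assuming a driver-side error path:

    /* Sketch: log a failed request with a human-readable op name. */
    pr_err("%s: %s request failed at sector %llu\n",
           rq->q->disk->disk_name, blk_op_str(req_op(rq)),
           (unsigned long long)blk_rq_pos(rq));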
blk-wbt.c
    745  const enum req_op op = req_op(rq);  in wbt_data_dir()
blk-mq.c
    826  const int sgrp = op_stat_group(req_op(req));  in blk_account_io_completion()
    841  blk_rq_pos(req), (__force u32)req_op(req),  in blk_print_req_error()
    842  blk_op_str(req_op(req)),  in blk_print_req_error()
    863  if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)  in blk_complete_request()
    932  if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ &&  in blk_update_request()
   1044  const int sgrp = op_stat_group(req_op(req));  in blk_account_io_done()
   1051  in_flight[op_is_write(req_op(req))]);  in blk_account_io_done()
   1111  part_stat_local_inc(req->part, in_flight[op_is_write(req_op(req))]);  in blk_account_io_start()
   1353  if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE)  in blk_mq_start_request()
   2607  } else if (req_op(rq) == REQ_OP_FLUSH) {  in blk_mq_insert_request()
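The accounting hits pair op_stat_group(), which buckets an op into the
read/write/discard/flush statistics rows, with op_is_write(), which indexes
the two-slot in_flight counters. A sketch of the completion-side pattern,
assuming the part_stat helpers from linux/part_stat.h:

    /* Sketch: per-op accounting, as in the blk_account_io_* helpers. */
    const int sgrp = op_stat_group(req_op(req));

    part_stat_lock();
    part_stat_inc(req->part, ios[sgrp]);
    part_stat_add(req->part, sectors[sgrp], blk_rq_sectors(req));
    part_stat_local_dec(req->part, in_flight[op_is_write(req_op(req))]);
    part_stat_unlock();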
/linux/drivers/block/null_blk/

null_blk.h
    131  blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op,
    140  blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
    159  enum req_op op, sector_t sector, sector_t nr_sectors)  in null_process_zoned_cmd()
/linux/include/linux/

blk_types.h
    325  enum req_op {
    431  static inline enum req_op bio_op(const struct bio *bio)  in bio_op()
    472  static inline bool op_is_zone_mgmt(enum req_op op)  in op_is_zone_mgmt()
    485  static inline int op_stat_group(enum req_op op)  in op_stat_group()
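enum req_op is laid out so that every operation that moves data to the
device (WRITE, DISCARD, WRITE_ZEROES, ZONE_APPEND, ...) has an odd value,
which makes op_is_write() a single-bit test and lets op_stat_group() build
on it. A sketch under that assumption (exact enum values differ between
kernel versions, so the names below are illustrative re-implementations):

    /* Sketch: bit 0 of the op encodes data direction. */
    static inline bool sketch_op_is_write(enum req_op op)
    {
            return op & 1;
    }

    /* Sketch: bucket an op for the disk statistics arrays. */
    static inline int sketch_op_stat_group(enum req_op op)
    {
            if (op == REQ_OP_DISCARD)
                    return STAT_DISCARD;
            return sketch_op_is_write(op);  /* STAT_READ is 0, STAT_WRITE is 1 */
    }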
blk-mq.h
    210  static inline enum req_op req_op(const struct request *req)  in req_op()
    227  #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ)
    230  (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
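The definition behind most hits in this listing is a plain mask: the
request's combined flag word keeps the operation in its low bits and
req_op() extracts it. Modulo sparse __force annotations it reads:

    /* From the hit at line 210: mask the op out of cmd_flags. */
    static inline enum req_op req_op(const struct request *req)
    {
            return req->cmd_flags & REQ_OP_MASK;
    }

rq_data_dir() and rq_dma_dir() then reduce that op to a direction via
op_is_write().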
blkdev.h
    420  int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
    888  extern const char *blk_op_str(enum req_op op);
   1571  unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
   1573  void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
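bdev_start_io_acct() and bdev_end_io_acct() are the accounting pair for
bio-based drivers, both keyed by enum req_op. A hedged sketch of their use,
assuming the current signatures in which start takes a timestamp and
returns the value that end later consumes along with the sector count:

    /* Sketch: bracket a bio's service time for diskstats. */
    static void sketch_submit_bio(struct bio *bio)
    {
            unsigned long start = bdev_start_io_acct(bio->bi_bdev,
                                                     bio_op(bio), jiffies);

            /* ... drive the hardware, wait for completion ... */

            bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
                             bio_sectors(bio), start);
    }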
/linux/fs/

direct-io.c
    169  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in dio_refill_pages()
    246  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in dio_complete()
    336  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in dio_bio_end_aio()
    427  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in dio_bio_submit()
    502  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in dio_bio_complete()
    606  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in get_more_blocks()
    789  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in submit_page_section()
    906  const enum req_op dio_op = dio->opf & REQ_OP_MASK;  in do_direct_IO()
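Every direct-io.c hit recovers the bare operation from dio->opf, a
blk_opf_t that packs the op together with REQ_* modifier flags. The masking
idiom in isolation:

    blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC;   /* op plus modifier flags */
    enum req_op op = opf & REQ_OP_MASK;        /* yields REQ_OP_WRITE */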
/linux/arch/um/drivers/

ubd_kern.c
    451  if (req_op(io_req->req) == REQ_OP_DISCARD)  in ubd_end_request()
    453  else if (req_op(io_req->req) == REQ_OP_WRITE_ZEROES)  in ubd_end_request()
   1185  if (req_op(req->req) == REQ_OP_READ) {  in cowify_req()
   1205  enum req_op op = req_op(req);  in ubd_map_req()
   1268  enum req_op op = req_op(req);  in ubd_submit_request()
   1305  switch (req_op(req)) {  in ubd_queue_rq()
   1423  if (req_op(req->req) == REQ_OP_FLUSH) {  in do_io()
   1444  switch (req_op(req->req)) {  in do_io()
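The ubd hits show the usual driver shape: queue and completion paths that
switch on req_op() and handle each operation the device advertises. A
generic sketch with hypothetical handler names:

    /* Sketch: per-op dispatch in a blk-mq driver's ->queue_rq(). */
    switch (req_op(req)) {
    case REQ_OP_READ:
    case REQ_OP_WRITE:
            return sketch_do_rw(req);
    case REQ_OP_FLUSH:
            return sketch_do_flush(req);
    case REQ_OP_DISCARD:
    case REQ_OP_WRITE_ZEROES:
            return sketch_do_trim(req);
    default:
            return BLK_STS_NOTSUPP;
    }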
/linux/fs/zonefs/

trace.h
     24  enum req_op op),
     29  __field(enum req_op, op)
zonefs.h
    260  int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
/linux/drivers/block/rnbd/

rnbd-proto.h
    261  switch (req_op(rq)) {  in rq_to_rnbd_flags()
    282  (__force u32)req_op(rq),  in rq_to_rnbd_flags()
/linux/drivers/block/

ublk_drv.c
    404  switch (req_op(req)) {  in ublk_setup_iod_zoned()
    900  return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;  in ublk_need_map_req()
    906  (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_DRV_IN);  in ublk_need_unmap_req()
    985  enum req_op op = req_op(req);  in ublk_setup_iod()
    992  switch (req_op(req)) {  in ublk_setup_iod()
   1049  if (!io->res && req_op(req) == REQ_OP_READ)  in __ublk_complete_rq()
   1063  if (req_op(req) != REQ_OP_READ && req_op(req) != REQ_OP_WRITE &&  in __ublk_complete_rq()
   1064  req_op(req) != REQ_OP_DRV_IN)  in __ublk_complete_rq()
   1420  if (req_op(req) == REQ_OP_ZONE_APPEND)  in ublk_commit_completion()
   1842  req_op(req) == REQ_OP_READ))  in __ublk_ch_uring_cmd()
   [all …]
xen-blkfront.c
    566  if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)  in blkif_queue_discard_req()
    771  BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);  in blkif_queue_rw_req()
    783  if (req_op(req) == REQ_OP_FLUSH ||  in blkif_queue_rw_req()
    784  (req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_FUA))) {  in blkif_queue_rw_req()
    876  if (unlikely(req_op(req) == REQ_OP_DISCARD ||  in blkif_queue_request()
    877  req_op(req) == REQ_OP_SECURE_ERASE))  in blkif_queue_request()
    915  if (unlikely(req_op(qd->rq) == REQ_OP_FLUSH && !info->feature_flush))  in blkif_queue_rq()
   2086  if (req_op(shadow[j].request) == REQ_OP_FLUSH ||  in blkfront_resume()
   2087  req_op(shadow[j].request) == REQ_OP_DISCARD ||  in blkfront_resume()
   2088  req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||  in blkfront_resume()
ps3disk.c
    168  switch (req_op(req)) {  in ps3disk_do_request()
    231  if (req_op(req) == REQ_OP_FLUSH) {  in ps3disk_interrupt()
/linux/include/trace/events/

nilfs2.h
    195  enum req_op mode),
    205  * bitwise type enum req_op.
    207  __field_struct(enum req_op, mode)
/linux/drivers/mmc/core/

queue.c
     46  switch (req_op(req)) {  in mmc_cqe_issue_type()
     67  if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)  in mmc_issue_type()
/linux/fs/xfs/

xfs_bio_io.c
     18  enum req_op op)  in xfs_rw_bdev()
xfs_linux.h
    217  char *data, enum req_op op);
/linux/drivers/md/

dm-rq.c
    218  if (req_op(clone) == REQ_OP_DISCARD &&  in dm_done()
    221  else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&  in dm_done()
dm-ebs-target.c
     65  static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,  in __ebs_rw_bvec()
    122  static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)  in __ebs_rw_bio()
/linux/drivers/nvme/target/

passthru.c
    272  ARRAY_SIZE(req->inline_bvec), req_op(rq));  in nvmet_passthru_map_sg()
    274  bio = bio_alloc(NULL, bio_max_segs(req->sg_cnt), req_op(rq),  in nvmet_passthru_map_sg()