Cross-reference matches for the identifier "opf" in the Linux source tree. Numbers are source line numbers; "[all …]" marks entries truncated by the search tool.

/linux/arch/sparc/kernel/

visemul.c
    294  static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)   [in edge(), argument]
    307  switch (opf) {   [in edge()]
    352  switch (opf) {   [in edge()]
    372  static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)   [in array(), argument]
    394  switch (opf) {   [in array()]
    477  static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)   [in pformat(), argument]
    483  scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f);   [in pformat()]
    484  switch (opf) {   [in pformat()]
    589  static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)   [in pmul(), argument]
    594  switch (opf) {   [in pmul()]
    [all …]

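In this file opf is not the block-layer blk_opf_t at all: it is the SPARC VIS opcode-function field that every emulation helper switches on. A small user-space sketch of that decode-and-dispatch shape, assuming the 9-bit opf field sits at bits 13:5 of the instruction word; EDGE8_OPF's value and the handler bodies are illustrative (FPACK16_OPF is the name the listing itself uses):

```c
#include <stdint.h>
#include <stdio.h>

#define FPACK16_OPF 0x03b  /* name from visemul.c; value per the VIS spec */
#define EDGE8_OPF   0x000  /* illustrative only */

/* Extract the 9-bit opf field, assumed to be bits 13:5 of a VIS FPop insn. */
static unsigned int vis_opf(uint32_t insn)
{
	return (insn >> 5) & 0x1ff;
}

static void dispatch(uint32_t insn)
{
	switch (vis_opf(insn)) {
	case EDGE8_OPF:
		puts("edge handling");    /* visemul.c would call edge() */
		break;
	case FPACK16_OPF:
		puts("pixel formatting"); /* visemul.c would call pformat() */
		break;
	default:
		puts("unhandled opf");
	}
}

int main(void)
{
	dispatch(FPACK16_OPF << 5); /* an insn whose opf field is FPACK16 */
	return 0;
}
```
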
/linux/drivers/md/

dm-io.c
    306  static void do_region(const blk_opf_t opf, unsigned int region,   [in do_region(), argument]
    319  const enum req_op op = opf & REQ_OP_MASK;   [in do_region()]
    353  bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,   [in do_region()]
    385  static void dispatch_io(blk_opf_t opf, unsigned int num_regions,   [in dispatch_io(), argument]
    400  if (where[i].count || (opf & REQ_PREFLUSH))   [in dispatch_io()]
    401  do_region(opf, i, where + i, dp, io, ioprio);   [in dispatch_io()]
    412  struct dm_io_region *where, blk_opf_t opf,   [in async_io(), argument]
    428  dispatch_io(opf, num_regions, where, dp, io, ioprio);   [in async_io()]
    445  struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,   [in sync_io(), argument]
    452  async_io(client, num_regions, where, opf | REQ_SYNC, dp,   [in sync_io()]

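The pattern running through this file: a single blk_opf_t carries both the request operation (low bits, REQ_OP_*) and its modifier flags (REQ_SYNC, REQ_PREFLUSH, ...), and callers recover the operation by masking. A minimal sketch of that split using only the public helpers from <linux/blk_types.h> (the function itself is illustrative, not dm-io's):

```c
#include <linux/blk_types.h>
#include <linux/printk.h>

/* Split an opf into operation and flags, as do_region() does at entry. */
static void inspect_opf(blk_opf_t opf)
{
	const enum req_op op = opf & REQ_OP_MASK;    /* low bits: REQ_OP_* */
	const blk_opf_t flags = opf & ~REQ_OP_MASK;  /* high bits: REQ_* */

	if (op == REQ_OP_WRITE && (flags & REQ_PREFLUSH))
		pr_info("write preceded by a cache flush\n");
}
```
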
dm-snap-persistent.c
    232  static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,   [in chunk_io(), argument]
    241  .bi_opf = opf,   [in chunk_io()]
    288  static int area_io(struct pstore *ps, blk_opf_t opf)   [in area_io(), argument]
    292  return chunk_io(ps, ps->area, chunk, opf, 0);   [in area_io()]

/linux/block/

blk-cgroup-rwstat.h
     62  blk_opf_t opf, uint64_t val)   [in blkg_rwstat_add(), argument]
     66  if (op_is_discard(opf))   [in blkg_rwstat_add()]
     68  else if (op_is_write(opf))   [in blkg_rwstat_add()]
     75  if (op_is_sync(opf))   [in blkg_rwstat_add()]

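blkg_rwstat_add() buckets an opf with the op_is_*() predicates, and the order matters: discard is numerically a "write" operation, so op_is_write() would also claim it. A sketch of the same classification (the enum and function are illustrative):

```c
#include <linux/blk_types.h>

enum rw_bucket { BUCKET_READ, BUCKET_WRITE, BUCKET_DISCARD };

/* Test discard first: op_is_write() also returns true for discards. */
static enum rw_bucket opf_bucket(blk_opf_t opf)
{
	if (op_is_discard(opf))
		return BUCKET_DISCARD;
	if (op_is_write(opf))
		return BUCKET_WRITE;
	return BUCKET_READ;
}
```
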
blk-wbt.c
    533  static inline unsigned int get_limit(struct rq_wb *rwb, blk_opf_t opf)   [in get_limit(), argument]
    537  if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)   [in get_limit()]
    548  if ((opf & REQ_HIPRIO) || wb_recent_wait(rwb))   [in get_limit()]
    550  else if ((opf & REQ_BACKGROUND) || close_io(rwb)) {   [in get_limit()]
    565  blk_opf_t opf;   [member]
    571  return rq_wait_inc_below(rqw, get_limit(data->rwb, data->opf));   [in wbt_inflight_cb()]
    585  blk_opf_t opf)   [in __wbt_wait(), argument]
    591  .opf = opf,   [in __wbt_wait()]

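get_limit() grades an opf into queue-depth tiers: discards get a shallow fixed budget, high-priority I/O the full window, background writeback less than normal. REQ_HIPRIO is a convenience mask local to blk-wbt.c; its expansion in the sketch below is an assumption, and the divisors are illustrative rather than wbt's actual scaling:

```c
#include <linux/blk_types.h>

/* Simplified tiering in the spirit of get_limit(); depths illustrative. */
static unsigned int pick_limit(blk_opf_t opf, unsigned int max_depth)
{
	if ((opf & REQ_OP_MASK) == REQ_OP_DISCARD)
		return max_depth / 4;                   /* discards: shallow */
	if (opf & (REQ_SYNC | REQ_META | REQ_PRIO))     /* assumed REQ_HIPRIO */
		return max_depth;                       /* latency-sensitive */
	if (opf & REQ_BACKGROUND)
		return max_depth / 2;                   /* background writeback */
	return 3 * max_depth / 4;
}
```
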
bio.c
    246  unsigned short max_vecs, blk_opf_t opf)   [in bio_init(), argument]
    250  bio->bi_opf = opf;   [in bio_init()]
    300  void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)   [in bio_reset(), argument]
    308  bio->bi_opf = opf;   [in bio_reset()]
    367  unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)   [in blk_next_bio(), argument]
    369  return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));   [in blk_next_bio()]
    444  unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,   [in bio_alloc_percpu_cache(), argument]
    464  bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf);   [in bio_alloc_percpu_cache()]
    504  blk_opf_t opf, gfp_t gfp_mask,   [in bio_alloc_bioset(), argument]
    515  if (opf & REQ_ALLOC_CACHE) {   [in bio_alloc_bioset()]
    [all …]

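Every constructor here takes the opf at creation time, so a bio never exists without an operation. A minimal consumer sketch that allocates, fills, and synchronously submits a one-page read (the function name and error handling are illustrative):

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

/* Read the first page of bdev into @page; returns 0 or a negative errno. */
static int read_first_page(struct block_device *bdev, struct page *page)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
	if (!bio)
		return -ENOMEM;
	bio->bi_iter.bi_sector = 0;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(bio);  /* synchronous: returns the bio's status */
	bio_put(bio);
	return ret;
}
```
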
fops.c
     30  blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;   [in dio_bio_write_op(), local]
     34  opf |= REQ_FUA;   [in dio_bio_write_op()]
     35  return opf;   [in dio_bio_write_op()]
    170  blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);   [in __blkdev_direct_IO(), local]
    175  opf |= REQ_ALLOC_CACHE;   [in __blkdev_direct_IO()]
    176  bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,   [in __blkdev_direct_IO()]
    249  bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);   [in __blkdev_direct_IO()]
    305  blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);   [in __blkdev_direct_IO_async(), local]
    312  opf |= REQ_ALLOC_CACHE;   [in __blkdev_direct_IO_async()]
    313  bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,   [in __blkdev_direct_IO_async()]

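dio_bio_write_op() composes the write opf once and reuses it for every bio in the direct-I/O loop; REQ_FUA is added for dsync writes so completion needs no separate flush. A close paraphrase of that pattern, assuming the iocb_is_dsync() helper from <linux/fs.h> (the function name is illustrative):

```c
#include <linux/blk_types.h>
#include <linux/fs.h>

/* Build the opf for a direct-I/O write: sync and latency-sensitive by
 * default, with FUA added when the iocb wants O_DSYNC semantics. */
static blk_opf_t build_write_opf(struct kiocb *iocb)
{
	blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

	if (iocb_is_dsync(iocb))
		opf |= REQ_FUA;  /* durable on completion, no flush needed */
	return opf;
}
```
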
bfq-cgroup.c
    223  blk_opf_t opf)   [in bfqg_stats_update_io_add(), argument]
    225  blkg_rwstat_add(&bfqg->stats.queued, opf, 1);   [in bfqg_stats_update_io_add()]
    231  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)   [in bfqg_stats_update_io_remove(), argument]
    233  blkg_rwstat_add(&bfqg->stats.queued, opf, -1);   [in bfqg_stats_update_io_remove()]
    236  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)   [in bfqg_stats_update_io_merged(), argument]
    238  blkg_rwstat_add(&bfqg->stats.merged, opf, 1);   [in bfqg_stats_update_io_merged()]
    242  u64 io_start_time_ns, blk_opf_t opf)   [in bfqg_stats_update_completion(), argument]
    248  blkg_rwstat_add(&stats->service_time, opf,   [in bfqg_stats_update_completion()]
    251  blkg_rwstat_add(&stats->wait_time, opf,   [in bfqg_stats_update_completion()]
    257  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }   [in bfqg_stats_update_io_remove(), argument]
    [all …]

bfq-iosched.h
   1068  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
   1069  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
   1071  u64 io_start_time_ns, blk_opf_t opf);
   1079  blk_opf_t opf);

kyber-iosched.c
    196  static unsigned int kyber_sched_domain(blk_opf_t opf)   [in kyber_sched_domain(), argument]
    198  switch (opf & REQ_OP_MASK) {   [in kyber_sched_domain()]
    554  static void kyber_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)   [in kyber_limit_depth(), argument]
    560  if (!op_is_sync(opf)) {   [in kyber_limit_depth()]

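kyber_sched_domain() buckets requests purely by the operation bits; the flags above REQ_OP_MASK only matter later, when kyber_limit_depth() throttles async (not op_is_sync()) allocations. A sketch of the same op-only dispatch with an illustrative domain enum:

```c
#include <linux/blk_types.h>

enum sched_domain { DOM_READ, DOM_WRITE, DOM_DISCARD, DOM_OTHER };

/* Map an opf to a scheduling domain by operation alone; flag bits
 * above REQ_OP_MASK do not influence the choice. */
static enum sched_domain domain_for(blk_opf_t opf)
{
	switch (opf & REQ_OP_MASK) {
	case REQ_OP_READ:
		return DOM_READ;
	case REQ_OP_WRITE:
		return DOM_WRITE;
	case REQ_OP_DISCARD:
		return DOM_DISCARD;
	default:
		return DOM_OTHER;
	}
}
```
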
blk-mq.c
    579  blk_opf_t opf,   [in blk_mq_rq_cache_fill(), argument]
    585  .cmd_flags = opf,   [in blk_mq_rq_cache_fill()]
    603  blk_opf_t opf,   [in blk_mq_alloc_cached_request(), argument]
    615  rq = blk_mq_rq_cache_fill(q, plug, opf, flags);   [in blk_mq_alloc_cached_request()]
    623  if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)   [in blk_mq_alloc_cached_request()]
    625  if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))   [in blk_mq_alloc_cached_request()]
    632  rq->cmd_flags = opf;   [in blk_mq_alloc_cached_request()]
    637  struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,   [in blk_mq_alloc_request(), argument]
    642  rq = blk_mq_alloc_cached_request(q, opf, flags);   [in blk_mq_alloc_request()]
    647  .cmd_flags = opf,   [in blk_mq_alloc_request()]
    [all …]

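blk_mq_alloc_cached_request() shows why the opf matters at allocation time: a plugged, cached request is only reusable when its hctx type and flush-ness match the new opf. A minimal consumer sketch that allocates a passthrough request with the opf up front (assuming the current blk_execute_rq() signature; the function name is illustrative):

```c
#include <linux/blk-mq.h>

/* Allocate a driver-private request, run it synchronously, free it.
 * REQ_OP_DRV_IN marks a device-to-host passthrough command. */
static int issue_drv_cmd(struct request_queue *q)
{
	struct request *rq;
	blk_status_t status;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	/* ... fill driver-specific payload here ... */
	status = blk_execute_rq(rq, false);  /* false: queue at the tail */
	blk_mq_free_request(rq);
	return blk_status_to_errno(status);
}
```
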
/linux/drivers/nvme/target/

io-cmd-bdev.c
    251  blk_opf_t opf;   [in nvmet_bdev_execute_rw(), local]
    266  opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;   [in nvmet_bdev_execute_rw()]
    268  opf |= REQ_FUA;   [in nvmet_bdev_execute_rw()]
    271  opf = REQ_OP_READ;   [in nvmet_bdev_execute_rw()]
    276  opf |= REQ_NOMERGE;   [in nvmet_bdev_execute_rw()]
    283  ARRAY_SIZE(req->inline_bvec), opf);   [in nvmet_bdev_execute_rw()]
    285  bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,   [in nvmet_bdev_execute_rw()]
    312  opf, GFP_KERNEL);   [in nvmet_bdev_execute_rw()]

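Line 283 passes the opf to bio_init() on a bio embedded in the request, avoiding an allocation for small transfers. A sketch of that embedded-bio pattern; the struct, its sizes, and the function are illustrative, while the bio_init() signature matches the declaration in the bio.c/bio.h entries above:

```c
#include <linux/bio.h>

#define INLINE_BVECS 8  /* illustrative capacity */

struct my_req {
	struct bio inline_bio;
	struct bio_vec inline_bvec[INLINE_BVECS];
};

/* For small transfers, initialize the embedded bio over a fixed bvec
 * table instead of allocating one; the opf is supplied up front just
 * as with bio_alloc(). */
static struct bio *setup_inline_bio(struct my_req *req,
				    struct block_device *bdev, blk_opf_t opf)
{
	bio_init(&req->inline_bio, bdev, req->inline_bvec,
		 ARRAY_SIZE(req->inline_bvec), opf);
	return &req->inline_bio;
}
```
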
/linux/fs/

direct-io.c
    118  blk_opf_t opf;   /* request operation type and flags */   [member]
    169  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in dio_refill_pages()]
    246  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in dio_complete()]
    336  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in dio_bio_end_aio()]
    404  bio = bio_alloc(bdev, nr_vecs, dio->opf, GFP_KERNEL);   [in dio_bio_alloc()]
    427  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in dio_bio_submit()]
    502  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in dio_bio_complete()]
    606  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in get_more_blocks()]
    789  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in submit_page_section()]
    906  const enum req_op dio_op = dio->opf & REQ_OP_MASK;   [in do_direct_IO()]
    [all …]

/linux/drivers/scsi/device_handler/

scsi_dh_hp_sw.c
     83  blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |   [in hp_sw_tur(), local]
    103  res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,   [in hp_sw_tur()]
    132  blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |   [in hp_sw_start_stop(), local]
    157  res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,   [in hp_sw_start_stop()]

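The device handlers build the opf from REQ_OP_DRV_IN plus the fail-fast flags, so a fading path errors out quickly instead of being retried by the midlayer. A sketch of that shape issuing TEST UNIT READY, assuming the scsi_execute_cmd() API shown in the listing; the timeout and retry count are illustrative, not the handlers' values:

```c
#include <linux/blk_types.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

/* Probe a path with TEST UNIT READY; fail-fast flags keep a dead path
 * from stalling in midlayer retries. Returns 0 when the unit is ready. */
static int probe_path(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY };
	blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
			REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;

	return scsi_execute_cmd(sdev, cmd, opf, NULL, 0, 30 * HZ, 3, NULL);
}
```
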
scsi_dh_emc.c
    242  blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |   [in send_trespass_cmd(), local]
    269  err = scsi_execute_cmd(sdev, cdb, opf, csdev->buffer, len,   [in send_trespass_cmd()]

/linux/drivers/target/

target_core_iblock.c
    361  blk_opf_t opf)   [in iblock_get_bio(), argument]
    370  bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,   [in iblock_get_bio()]
    740  blk_opf_t opf;   [in iblock_execute_rw(), local]
    753  opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;   [in iblock_execute_rw()]
    761  opf |= REQ_FUA;   [in iblock_execute_rw()]
    763  opf |= REQ_FUA;   [in iblock_execute_rw()]
    766  opf = REQ_OP_READ;   [in iblock_execute_rw()]
    781  bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);   [in iblock_execute_rw()]
    814  bio = iblock_get_bio(cmd, block_lba, sg_num, opf);   [in iblock_execute_rw()]

/linux/include/linux/

bio.h
    359  blk_opf_t opf, gfp_t gfp_mask,
    372  unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)   [in bio_alloc(), argument]
    374  return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);   [in bio_alloc()]
    410  unsigned short max_vecs, blk_opf_t opf);
    412  void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
    696  unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);

blktrace_api.h
    113  void blk_fill_rwbs(char *rwbs, blk_opf_t opf);

/linux/kernel/trace/

blktrace.c
    216  const blk_opf_t opf, u32 what, int error,   [in __blk_add_trace(), argument]
    230  const enum req_op op = opf & REQ_OP_MASK;   [in __blk_add_trace()]
    236  what |= MASK_TC_BIT(opf, SYNC);   [in __blk_add_trace()]
    237  what |= MASK_TC_BIT(opf, RAHEAD);   [in __blk_add_trace()]
    238  what |= MASK_TC_BIT(opf, META);   [in __blk_add_trace()]
    239  what |= MASK_TC_BIT(opf, PREFLUSH);   [in __blk_add_trace()]
    240  what |= MASK_TC_BIT(opf, FUA);   [in __blk_add_trace()]
   1868  * @opf: request operation type (REQ_OP_XXX) and flags for the tracepoint
   1875  void blk_fill_rwbs(char *rwbs, blk_opf_t opf)   [blk_fill_rwbs(), argument]
   1879  if (opf …   [in blk_fill_rwbs()]
    [all …]

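blk_fill_rwbs() flattens an opf into the familiar trace string: a flush marker and the op letter first, then F/A/S/M for FUA, readahead, sync, and metadata. A minimal caller, assuming RWBS_LEN from <linux/blktrace_api.h>:

```c
#include <linux/blktrace_api.h>
#include <linux/printk.h>

/* Render an opf the way the block tracepoints print it; for this opf
 * the buffer should read "WFS" (write, FUA, sync). */
static void show_rwbs(void)
{
	char rwbs[RWBS_LEN];

	blk_fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_FUA);
	pr_info("rwbs=%s\n", rwbs);
}
```
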
/linux/drivers/block/

brd.c
    193  unsigned int len, unsigned int off, blk_opf_t opf,   [in brd_do_bvec(), argument]
    199  if (op_is_write(opf)) {   [in brd_do_bvec()]
    204  gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;   [in brd_do_bvec()]
    212  if (!op_is_write(opf)) {   [in brd_do_bvec()]

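brd picks its allocation mode straight off the opf: REQ_NOWAIT callers would rather see failure than sleep, and everyone else gets GFP_NOIO so page allocation cannot recurse into the block layer during reclaim. An illustrative distillation of that one-liner:

```c
#include <linux/blk_types.h>
#include <linux/gfp.h>

/* Choose an allocation mode for work done on behalf of this opf. */
static gfp_t gfp_for_opf(blk_opf_t opf)
{
	/* REQ_NOWAIT: fail fast, never sleep; otherwise avoid I/O recursion. */
	return (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO;
}
```
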
/linux/fs/nilfs2/

btnode.c
     88  sector_t pblocknr, blk_opf_t opf,   [in nilfs_btnode_submit_block(), argument]
    121  if (opf & REQ_RAHEAD) {   [in nilfs_btnode_submit_block()]
    139  submit_bh(opf, bh);   [in nilfs_btnode_submit_block()]

mdt.c
    116  nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf,   [in nilfs_mdt_submit_block(), argument]
    131  if (opf & REQ_RAHEAD) {   [in nilfs_mdt_submit_block()]
    153  submit_bh(opf, bh);   [in nilfs_mdt_submit_block()]
    157  opf & REQ_OP_MASK);   [in nilfs_mdt_submit_block()]

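Both nilfs paths treat REQ_RAHEAD as permission to give up early: a readahead that cannot get the buffer lock is simply dropped, while a mandatory read waits for it. A sketch of that shape with the standard buffer_head API (the function name is illustrative; assumes the caller holds a reference on bh):

```c
#include <linux/buffer_head.h>

/* Kick off an async read of a metadata buffer; with readahead the
 * request is best-effort and skipped on lock contention. */
static void start_read(struct buffer_head *bh, bool readahead)
{
	blk_opf_t opf = REQ_OP_READ;

	if (readahead) {
		opf |= REQ_RAHEAD;
		if (!trylock_buffer(bh))
			return;          /* contended: drop the hint */
	} else {
		lock_buffer(bh);
	}
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);       /* already in memory, nothing to do */
		return;
	}
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);                      /* reference dropped by end_io */
	submit_bh(opf, bh);
}
```
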
/linux/fs/btrfs/

bio.h
    102  struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,

/linux/fs/gfs2/

lops.h
     20  void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);

/linux/fs/iomap/

direct-io.c
     63  struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)   [in iomap_dio_alloc_bio(), argument]
     66  return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,   [in iomap_dio_alloc_bio()]
     68  return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);   [in iomap_dio_alloc_bio()]

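iomap chooses between a dedicated bio_set and the shared fs_bio_set at allocation time; either way the opf is handed over unchanged. A sketch of wiring up such a private pool, which guarantees forward progress for a subsystem's bio allocations (the names, pool size, and padding are illustrative; tear down with bioset_exit()):

```c
#include <linux/bio.h>

static struct bio_set my_bioset;

/* One-time setup: a mempool of 64 bios with bvecs, no front padding. */
static int my_bioset_setup(void)
{
	return bioset_init(&my_bioset, 64, 0, BIOSET_NEED_BVECS);
}

/* Allocate from the private pool; same opf contract as bio_alloc(). */
static struct bio *my_bio_alloc(struct block_device *bdev,
				unsigned short nr_vecs, blk_opf_t opf)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, GFP_KERNEL, &my_bioset);
}
```
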