| /linux/arch/sparc/kernel/ |
| visemul.c |
    294  static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)  in edge() argument
    307  switch (opf) {  in edge()
    352  switch (opf) {  in edge()
    372  static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)  in array() argument
    394  switch (opf) {  in array()
    477  static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)  in pformat() argument
    483  scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f);  in pformat()
    484  switch (opf) {  in pformat()
    589  static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)  in pmul() argument
    594  switch (opf) {  in pmul()
    [all …]
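
One caveat before the block-layer hits below: in visemul.c, opf is not the block
layer's blk_opf_t at all. It is the 9-bit "opf" sub-opcode field of a SPARC FPop
instruction, which the VIS emulator extracts from the trapped instruction word
and switches on. A minimal standalone sketch of that decode step, assuming the
SPARC V9 encoding (opf in bits 13..5); the FPACK16_OPF value is illustrative,
not checked against visemul.c:

#include <stdint.h>
#include <stdio.h>

#define FPACK16_OPF 0x03b   /* illustrative; the real defines live in visemul.c */

/* SPARC FPop format: the opf sub-opcode occupies bits 13..5 of the
 * 32-bit instruction word. */
static unsigned int decode_opf(uint32_t insn)
{
        return (insn >> 5) & 0x1ff;
}

int main(void)
{
        uint32_t insn = 0x81b00760;     /* made-up instruction word */

        switch (decode_opf(insn)) {
        case FPACK16_OPF:
                printf("would emulate FPACK16\n");
                break;
        default:
                printf("opf 0x%x not handled here\n", decode_opf(insn));
        }
        return 0;
}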
|
| /linux/drivers/nvme/target/ |
| io-cmd-bdev.c |
    248  blk_opf_t opf;  in nvmet_bdev_execute_rw() local
    263  opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;  in nvmet_bdev_execute_rw()
    265  opf |= REQ_FUA;  in nvmet_bdev_execute_rw()
    268  opf = REQ_OP_READ;  in nvmet_bdev_execute_rw()
    273  opf |= REQ_FAILFAST_DEV;  in nvmet_bdev_execute_rw()
    276  opf |= REQ_NOMERGE;  in nvmet_bdev_execute_rw()
    283  ARRAY_SIZE(req->inline_bvec), opf);  in nvmet_bdev_execute_rw()
    285  bio = bio_alloc(req->ns->bdev, bio_max_segs(sg_cnt), opf,  in nvmet_bdev_execute_rw()
    312  opf, GFP_KERNEL);  in nvmet_bdev_execute_rw()
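
The lines above show the idiom almost every block-layer hit in this list
follows: blk_opf_t packs the request operation (REQ_OP_*) into its low bits and
modifier flags (REQ_SYNC, REQ_FUA, REQ_NOMERGE, ...) above them, so a driver
builds the whole value with OR and hands it to bio_alloc() once. The same
composition appears in zns.c below, in block/fops.c, and in
target_core_iblock.c. A standalone sketch of the pattern; the typedef and the
flag bit positions are illustrative stand-ins, not the kernel's actual layout
from blk_types.h:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_READ   0u
#define REQ_OP_WRITE  1u
#define REQ_OP_MASK   0xffu

#define REQ_SYNC      (1u << 11)
#define REQ_NOMERGE   (1u << 14)
#define REQ_IDLE      (1u << 15)
#define REQ_FUA       (1u << 16)

static blk_opf_t build_write_opf(int fua, int nomerge)
{
        blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;

        if (fua)
                opf |= REQ_FUA;         /* force unit access: hit stable media */
        if (nomerge)
                opf |= REQ_NOMERGE;     /* keep the bio out of merging */
        return opf;
}

int main(void)
{
        blk_opf_t opf = build_write_opf(1, 0);

        printf("op=%u flags=0x%x\n", opf & REQ_OP_MASK, opf & ~REQ_OP_MASK);
        return 0;
}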
|
| zns.c |
    536  const blk_opf_t opf = REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE;  in nvmet_bdev_execute_zone_append() local
    575  ARRAY_SIZE(req->inline_bvec), opf);  in nvmet_bdev_execute_zone_append()
    577  bio = bio_alloc(req->ns->bdev, req->sg_cnt, opf, GFP_KERNEL);  in nvmet_bdev_execute_zone_append()
|
| /linux/drivers/scsi/device_handler/ |
| scsi_dh_hp_sw.c |
    83  blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |  in hp_sw_tur() local
    103  res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,  in hp_sw_tur()
    132  blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |  in hp_sw_start_stop() local
    157  res = scsi_execute_cmd(sdev, cmd, opf, NULL, 0, HP_SW_TIMEOUT,  in hp_sw_start_stop()
|
| scsi_dh_emc.c |
    242  blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |  in send_trespass_cmd() local
    269  err = scsi_execute_cmd(sdev, cdb, opf, csdev->buffer, len,  in send_trespass_cmd()
|
| scsi_dh_alua.c |
    130  blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |  in submit_rtpg() local
    145  return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen,  in submit_rtpg()
    163  blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |  in submit_stpg() local
    180  return scsi_execute_cmd(sdev, cdb, opf, stpg_data,  in submit_stpg()
|
| scsi_dh_rdac.c |
    513  blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |  in send_mode_select() local
    573  rc = scsi_execute_cmd(sdev, cdb, opf, &h->ctlr->mode_select, data_size,  in send_mode_select()
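
All four device handlers above build opf the same way: a driver-private
passthrough op (REQ_OP_DRV_IN when the command reads data from the device,
REQ_OP_DRV_OUT when it sends data) combined with the REQ_FAILFAST_* bits, so
path-probing commands error out quickly instead of being retried on a path that
may be dead. A sketch of that selection, using stand-in values (the real
definitions live in blk_types.h):

#include <stdint.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_DRV_IN           34u
#define REQ_OP_DRV_OUT          35u
#define REQ_FAILFAST_DEV        (1u << 8)
#define REQ_FAILFAST_TRANSPORT  (1u << 9)
#define REQ_FAILFAST_DRIVER     (1u << 10)

/* Path-checker commands must not hang in lengthy retries, so every
 * failfast class is set; the op only encodes the data direction. */
blk_opf_t dh_passthrough_opf(int write)
{
        return (write ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN) |
               REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
               REQ_FAILFAST_DRIVER;
}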
|
| /linux/block/ |
| bio.c |
    246  unsigned short max_vecs, blk_opf_t opf)  in bio_init() argument
    250  bio->bi_opf = opf;  in bio_init()
    302  void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf)  in bio_reset() argument
    310  bio->bi_opf = opf;  in bio_reset()
    373  unsigned int nr_pages, blk_opf_t opf, gfp_t gfp)  in blk_next_bio() argument
    375  return bio_chain_and_submit(bio, bio_alloc(bdev, nr_pages, opf, gfp));  in blk_next_bio()
    450  unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp,  in bio_alloc_percpu_cache() argument
    471  bio_init_inline(bio, bdev, nr_vecs, opf);  in bio_alloc_percpu_cache()
    473  bio_init(bio, bdev, NULL, nr_vecs, opf);  in bio_alloc_percpu_cache()
    513  blk_opf_t opf, gfp_t gfp_mask,  in bio_alloc_bioset() argument
    [all …]
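
bio.c is where every opf ends up: bio_init(), bio_reset(), and the allocators
all finish by storing the caller's value into bio->bi_opf, and the rest of the
stack reads it back through small helpers. A toy version of that round trip;
struct bio is reduced to the one field this listing is about, and op_is_write()
relies on the real encoding's convention that write-style ops are odd-numbered:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t blk_opf_t;
#define REQ_OP_MASK 0xffu

struct bio {
        blk_opf_t bi_opf;       /* bottom bits: req_op; rest: req_flag_bits */
};

void bio_init_opf(struct bio *bio, blk_opf_t opf)
{
        bio->bi_opf = opf;      /* what bio_init()/bio_reset() boil down to */
}

unsigned int bio_op_of(const struct bio *bio)
{
        return bio->bi_opf & REQ_OP_MASK;
}

/* WRITE=1, DISCARD=3, ...: direction is a single AND on bit 0. */
bool op_is_write(blk_opf_t op)
{
        return op & 1;
}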
|
| blk-mq.h |
    90  static inline enum hctx_type blk_mq_get_hctx_type(blk_opf_t opf)  in blk_mq_get_hctx_type() argument
    97  if (opf & REQ_POLLED)  in blk_mq_get_hctx_type()
    99  else if ((opf & REQ_OP_MASK) == REQ_OP_READ)  in blk_mq_get_hctx_type()
    109  static inline struct blk_mq_hw_ctx *blk_mq_map_queue(blk_opf_t opf,  in blk_mq_map_queue() argument
    112  return ctx->hctxs[blk_mq_get_hctx_type(opf)];  in blk_mq_map_queue()
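
blk_mq_get_hctx_type() is the dispatcher that turns an opf into a hardware
queue class: polled I/O goes to the poll map, reads to the read map, and
everything else to the default map. A simplified standalone version (the real
inline also checks which maps the queue actually set up); flag position
illustrative:

#include <stdint.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_MASK  0xffu
#define REQ_OP_READ  0u
#define REQ_POLLED   (1u << 19)

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL };

enum hctx_type get_hctx_type(blk_opf_t opf)
{
        if (opf & REQ_POLLED)
                return HCTX_TYPE_POLL;
        if ((opf & REQ_OP_MASK) == REQ_OP_READ)
                return HCTX_TYPE_READ;
        return HCTX_TYPE_DEFAULT;
}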
|
| bfq-cgroup.c |
    223  blk_opf_t opf)  in bfqg_stats_update_io_add() argument
    225  blkg_rwstat_add(&bfqg->stats.queued, opf, 1);  in bfqg_stats_update_io_add()
    231  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)  in bfqg_stats_update_io_remove() argument
    233  blkg_rwstat_add(&bfqg->stats.queued, opf, -1);  in bfqg_stats_update_io_remove()
    236  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)  in bfqg_stats_update_io_merged() argument
    238  blkg_rwstat_add(&bfqg->stats.merged, opf, 1);  in bfqg_stats_update_io_merged()
    242  u64 io_start_time_ns, blk_opf_t opf)  in bfqg_stats_update_completion() argument
    248  blkg_rwstat_add(&stats->service_time, opf,  in bfqg_stats_update_completion()
    251  blkg_rwstat_add(&stats->wait_time, opf,  in bfqg_stats_update_completion()
    257  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }  in bfqg_stats_update_io_remove() argument
    258  bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)  in bfqg_stats_update_io_merged() argument
    260  bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns, u64 io_start_time_ns, blk_opf_t opf)  in bfqg_stats_update_completion() argument
    [all …]
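
BFQ's cgroup stats never look at opf directly; they pass it through to
blkg_rwstat_add(), which buckets each sample by direction (read, write, or
discard) and synchronicity, with +1 on queue and -1 on remove keeping a live
count. A sketch of that bucketing, with stand-in flag values:

#include <stdint.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_MASK     0xffu
#define REQ_OP_DISCARD  3u
#define REQ_SYNC        (1u << 11)   /* illustrative bit position */

enum { RW_READ, RW_WRITE, RW_SYNC, RW_ASYNC, RW_DISCARD, RW_NR };

struct rwstat {
        int64_t cnt[RW_NR];
};

void rwstat_add(struct rwstat *s, blk_opf_t opf, int64_t val)
{
        unsigned int op = opf & REQ_OP_MASK;

        if (op == REQ_OP_DISCARD)
                s->cnt[RW_DISCARD] += val;
        else if (op & 1)                /* write-style ops are odd */
                s->cnt[RW_WRITE] += val;
        else
                s->cnt[RW_READ] += val;

        s->cnt[(opf & REQ_SYNC) ? RW_SYNC : RW_ASYNC] += val;
}
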
| fops.c |
    31  blk_opf_t opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;  in dio_bio_write_op() local
    35  opf |= REQ_FUA;  in dio_bio_write_op()
    36  return opf;  in dio_bio_write_op()
    183  blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);  in __blkdev_direct_IO() local
    187  bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,  in __blkdev_direct_IO()
    263  bio = bio_alloc(bdev, nr_pages, opf, GFP_KERNEL);  in __blkdev_direct_IO()
    328  blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);  in __blkdev_direct_IO_async() local
    334  bio = bio_alloc_bioset(bdev, nr_pages, opf, GFP_KERNEL,  in __blkdev_direct_IO_async()
|
| bfq-iosched.h |
    1067  void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
    1068  void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
    1070  u64 io_start_time_ns, blk_opf_t opf);
    1078  blk_opf_t opf);
|
| blk-mq.c |
    587  blk_opf_t opf,  in blk_mq_rq_cache_fill() argument
    594  .cmd_flags = opf,  in blk_mq_rq_cache_fill()
    615  blk_opf_t opf,  in blk_mq_alloc_cached_request() argument
    627  rq = blk_mq_rq_cache_fill(q, plug, opf, flags);  in blk_mq_alloc_cached_request()
    635  if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)  in blk_mq_alloc_cached_request()
    637  if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))  in blk_mq_alloc_cached_request()
    644  rq->cmd_flags = opf;  in blk_mq_alloc_cached_request()
    649  struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,  in blk_mq_alloc_request() argument
    654  rq = blk_mq_alloc_cached_request(q, opf, flags);  in blk_mq_alloc_request()
    660  .cmd_flags = opf,  in blk_mq_alloc_request()
    689  blk_mq_alloc_request_hctx(struct request_queue *q, blk_opf_t opf, blk_mq_req_flags_t flags, unsigned int hctx_idx)  in blk_mq_alloc_request_hctx() argument
    3071  blk_mq_peek_cached_request(struct blk_plug *plug, struct request_queue *q, blk_opf_t opf)  in blk_mq_peek_cached_request() argument
    [all …]
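
The interesting hits here are lines 635 and 637: before blk-mq recycles a
request cached on the current plug, the new opf must map to the same hardware
queue type and agree with the old cmd_flags on flush-ness, since flush requests
take a separate completion path. A standalone rendering of that gate, reusing
the simplified get_hctx_type() from the blk-mq.h sketch above and stand-in flag
bits:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_MASK   0xffu
#define REQ_OP_READ   0u
#define REQ_FUA       (1u << 16)   /* illustrative bit positions */
#define REQ_PREFLUSH  (1u << 17)
#define REQ_POLLED    (1u << 19)

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL };

struct request {
        blk_opf_t cmd_flags;
        enum hctx_type hctx_type;
};

static enum hctx_type get_hctx_type(blk_opf_t opf)
{
        if (opf & REQ_POLLED)
                return HCTX_TYPE_POLL;
        return (opf & REQ_OP_MASK) == REQ_OP_READ ?
               HCTX_TYPE_READ : HCTX_TYPE_DEFAULT;
}

static bool op_is_flush(blk_opf_t opf)
{
        return opf & (REQ_FUA | REQ_PREFLUSH);
}

/* The two checks from blk_mq_alloc_cached_request(), condensed. */
bool cached_rq_usable(const struct request *rq, blk_opf_t opf)
{
        return get_hctx_type(opf) == rq->hctx_type &&
               op_is_flush(rq->cmd_flags) == op_is_flush(opf);
}
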
| /linux/drivers/target/ |
| target_core_iblock.c |
    356  blk_opf_t opf)  in iblock_get_bio() argument
    365  bio = bio_alloc_bioset(ib_dev->ibd_bd, bio_max_segs(sg_num), opf,  in iblock_get_bio()
    753  blk_opf_t opf;  in iblock_execute_rw() local
    766  opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;  in iblock_execute_rw()
    774  opf |= REQ_FUA;  in iblock_execute_rw()
    776  opf |= REQ_FUA;  in iblock_execute_rw()
    780  opf |= REQ_ATOMIC;  in iblock_execute_rw()
    782  opf = REQ_OP_READ;  in iblock_execute_rw()
    797  bio = iblock_get_bio(cmd, block_lba, sgl_nents, opf);  in iblock_execute_rw()
    830  bio = iblock_get_bio(cmd, block_lba, sg_num, opf);  in iblock_execute_rw()
|
| /linux/kernel/trace/ |
| blktrace.c |
    311  const blk_opf_t opf, u64 what, int error,  in __blk_add_trace() argument
    324  const enum req_op op = opf & REQ_OP_MASK;  in __blk_add_trace()
    331  what |= MASK_TC_BIT(opf, SYNC);  in __blk_add_trace()
    332  what |= MASK_TC_BIT(opf, RAHEAD);  in __blk_add_trace()
    333  what |= MASK_TC_BIT(opf, META);  in __blk_add_trace()
    334  what |= MASK_TC_BIT(opf, PREFLUSH);  in __blk_add_trace()
    335  what |= MASK_TC_BIT(opf, FUA);  in __blk_add_trace()
    2127  void blk_fill_rwbs(char *rwbs, blk_opf_t opf)  in blk_fill_rwbs() argument
    2131  if (opf & REQ_PREFLUSH)  in blk_fill_rwbs()
    2134  switch (opf & REQ_OP_MASK) {  in blk_fill_rwbs()
    [all …]
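
blktrace consumes opf twice: __blk_add_trace() folds the flag bits into the
trace action word via MASK_TC_BIT(), and blk_fill_rwbs() renders them as the
short letter codes ("WSM", "RA", ...) seen in blktrace/blkparse output. A
simplified self-contained take on the latter; op values match the kernel's
low-bit encoding, flag positions are illustrative, and the real function
handles more ops:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_MASK     0xffu
#define REQ_OP_READ     0u
#define REQ_OP_WRITE    1u
#define REQ_OP_FLUSH    2u
#define REQ_OP_DISCARD  3u
#define REQ_SYNC        (1u << 11)
#define REQ_META        (1u << 12)
#define REQ_FUA         (1u << 16)
#define REQ_PREFLUSH    (1u << 17)
#define REQ_RAHEAD      (1u << 18)

static void fill_rwbs(char *rwbs, blk_opf_t opf)
{
        int i = 0;

        if (opf & REQ_PREFLUSH)
                rwbs[i++] = 'F';

        switch (opf & REQ_OP_MASK) {
        case REQ_OP_WRITE:      rwbs[i++] = 'W'; break;
        case REQ_OP_DISCARD:    rwbs[i++] = 'D'; break;
        case REQ_OP_FLUSH:      rwbs[i++] = 'F'; break;
        case REQ_OP_READ:       rwbs[i++] = 'R'; break;
        default:                rwbs[i++] = 'N'; break;
        }

        if (opf & REQ_FUA)      rwbs[i++] = 'F';
        if (opf & REQ_RAHEAD)   rwbs[i++] = 'A';
        if (opf & REQ_SYNC)     rwbs[i++] = 'S';
        if (opf & REQ_META)     rwbs[i++] = 'M';
        rwbs[i] = '\0';
}

int main(void)
{
        char rwbs[8];

        fill_rwbs(rwbs, REQ_OP_WRITE | REQ_SYNC | REQ_META);
        printf("%s\n", rwbs);   /* prints "WSM" */
        return 0;
}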
|
| /linux/fs/nilfs2/ |
| btnode.c |
    88  sector_t pblocknr, blk_opf_t opf,  in nilfs_btnode_submit_block() argument
    121  if (opf & REQ_RAHEAD) {  in nilfs_btnode_submit_block()
    139  submit_bh(opf, bh);  in nilfs_btnode_submit_block()
|
| mdt.c |
    116  nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, blk_opf_t opf,  in nilfs_mdt_submit_block() argument
    131  if (opf & REQ_RAHEAD) {  in nilfs_mdt_submit_block()
    153  submit_bh(opf, bh);  in nilfs_mdt_submit_block()
    157  opf & REQ_OP_MASK);  in nilfs_mdt_submit_block()
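
Both nilfs2 submit paths above treat REQ_RAHEAD as "optional work": a readahead
request may simply be dropped if the buffer is contended, so they trylock
instead of sleeping, and only then fall through to submit_bh(opf, bh). A toy
single-threaded sketch of that branch; the real lock_buffer() sleeps on
contention, and these stand-ins only model the success/failure shape:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t blk_opf_t;
#define REQ_RAHEAD (1u << 18)   /* illustrative bit position */

struct buffer_head {
        int locked;
};

static bool trylock_buffer(struct buffer_head *bh)
{
        if (bh->locked)
                return false;
        bh->locked = 1;
        return true;
}

static void lock_buffer(struct buffer_head *bh)
{
        bh->locked = 1;         /* toy stand-in: real code waits here */
}

/* Returns false when a readahead submission should be silently skipped. */
bool lock_for_submit(struct buffer_head *bh, blk_opf_t opf)
{
        if (opf & REQ_RAHEAD)
                return trylock_buffer(bh);
        lock_buffer(bh);
        return true;
}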
|
| /linux/drivers/md/ |
| dm-snap-persistent.c |
    232  static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,  in chunk_io() argument
    241  .bi_opf = opf,  in chunk_io()
    288  static int area_io(struct pstore *ps, blk_opf_t opf)  in area_io() argument
    292  return chunk_io(ps, ps->area, chunk, opf, 0);  in area_io()
|
| /linux/fs/ |
| mpage.c |
    168  blk_opf_t opf = REQ_OP_READ;  in do_mpage_readpage() local
    174  opf |= REQ_RAHEAD;  in do_mpage_readpage()
    287  args->bio = bio_alloc(bdev, bio_max_segs(args->nr_pages), opf,  in do_mpage_readpage()
|
| buffer.c |
    57  static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,
    2780  static void submit_bh_wbc(blk_opf_t opf, struct buffer_head *bh,  in submit_bh_wbc() argument
    2784  const enum req_op op = opf & REQ_OP_MASK;  in submit_bh_wbc()
    2800  opf |= REQ_META;  in submit_bh_wbc()
    2802  opf |= REQ_PRIO;  in submit_bh_wbc()
    2804  bio = bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO);  in submit_bh_wbc()
    2827  void submit_bh(blk_opf_t opf, struct buffer_head *bh)  in submit_bh() argument
    2829  submit_bh_wbc(opf, bh, WRITE_LIFE_NOT_SET, NULL);  in submit_bh()
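
buffer.c shows the flags flowing in the other direction: the buffer_head itself
remembers that it holds metadata or priority data, and submit_bh_wbc() ORs
REQ_META and REQ_PRIO into whatever opf the caller passed before allocating the
single-segment bio, so callers never need to remember the tagging. A reduced
sketch; the bitfield stands in for the kernel's buffer_meta()/buffer_prio()
state tests, and flag positions are illustrative:

#include <stdint.h>

typedef uint32_t blk_opf_t;

#define REQ_META (1u << 12)
#define REQ_PRIO (1u << 13)

struct buffer_head {
        unsigned meta : 1;      /* buffer_meta(bh) in the kernel */
        unsigned prio : 1;      /* buffer_prio(bh) */
};

blk_opf_t fold_bh_flags(const struct buffer_head *bh, blk_opf_t opf)
{
        if (bh->meta)
                opf |= REQ_META;
        if (bh->prio)
                opf |= REQ_PRIO;
        return opf;     /* then: bio_alloc(bh->b_bdev, 1, opf, GFP_NOIO) */
}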
|
| /linux/fs/iomap/ |
| direct-io.c |
    52  struct iomap_dio *dio, unsigned short nr_vecs, blk_opf_t opf)  in iomap_dio_alloc_bio() argument
    55  return bio_alloc_bioset(iter->iomap.bdev, nr_vecs, opf,  in iomap_dio_alloc_bio()
    57  return bio_alloc(iter->iomap.bdev, nr_vecs, opf, GFP_KERNEL);  in iomap_dio_alloc_bio()
|
| /linux/include/scsi/ |
| scsi_cmnd.h |
    397  struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf,
|
| /linux/kernel/power/ |
| swap.c |
    264  static int hib_submit_io_sync(blk_opf_t opf, pgoff_t page_off, void *addr)  in hib_submit_io_sync() argument
    267  page_off * (PAGE_SIZE >> 9), addr, PAGE_SIZE, opf);  in hib_submit_io_sync()
    270  static int hib_submit_io_async(blk_opf_t opf, pgoff_t page_off, void *addr,  in hib_submit_io_async() argument
    275  bio = bio_alloc(file_bdev(hib_resume_bdev_file), 1, opf,  in hib_submit_io_async()
|
| /linux/drivers/md/dm-vdo/ |
| data-vio.c |
    1562  blk_opf_t opf = ((data_vio->user_bio->bi_opf & PASSTHROUGH_FLAGS) | REQ_OP_READ);  in read_block() local
    1565  result = vio_reset_bio(vio, vio->data, read_endio, opf,  in read_block()
    1569  bio_reset(vio->bio, vio->bio->bi_bdev, opf);  in read_block()
    1574  vdo_set_bio_properties(vio->bio, vio, read_endio, opf,  in read_block()
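
dm-vdo's read_block() illustrates a stacked-driver idiom: when re-issuing I/O
on a user's behalf, keep only the hint-style bits of the user bio's bi_opf and
supply your own operation. A sketch of that masking; the flag set chosen for
PASSTHROUGH_FLAGS here is a guess for illustration, not necessarily vdo's, and
the bit positions are stand-ins:

#include <stdint.h>

typedef uint32_t blk_opf_t;

#define REQ_OP_READ  0u
#define REQ_SYNC     (1u << 11)
#define REQ_META     (1u << 12)
#define REQ_PRIO     (1u << 13)
#define REQ_RAHEAD   (1u << 18)

/* Hint bits worth preserving across the re-issue (illustrative set). */
#define PASSTHROUGH_FLAGS (REQ_PRIO | REQ_META | REQ_SYNC | REQ_RAHEAD)

blk_opf_t passthrough_read_opf(blk_opf_t user_opf)
{
        return (user_opf & PASSTHROUGH_FLAGS) | REQ_OP_READ;
}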
|
| /linux/include/linux/ |
| blk-mq.h |
    763  struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
    766  blk_opf_t opf, blk_mq_req_flags_t flags,
|