
Searched refs:iod (Results 1 – 20 of 20) sorted by relevance

/linux/drivers/pinctrl/ti/
pinctrl-ti-iodelay.c
209 static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod, in ti_iodelay_pinconf_set() argument
212 const struct ti_iodelay_reg_data *reg = iod->reg_data; in ti_iodelay_pinconf_set()
213 struct ti_iodelay_reg_values *ival = &iod->reg_init_conf_values; in ti_iodelay_pinconf_set()
214 struct device *dev = iod->dev; in ti_iodelay_pinconf_set()
267 r = regmap_update_bits(iod->regmap, cfg->offset, reg_mask, reg_val); in ti_iodelay_pinconf_set()
284 struct ti_iodelay_device *iod = data; in ti_iodelay_pinconf_deinit_dev() local
285 const struct ti_iodelay_reg_data *reg = iod->reg_data; in ti_iodelay_pinconf_deinit_dev()
288 regmap_update_bits(iod->regmap, reg->reg_global_lock_offset, in ti_iodelay_pinconf_deinit_dev()
300 static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod) in ti_iodelay_pinconf_init_dev() argument
302 const struct ti_iodelay_reg_data *reg = iod->reg_data; in ti_iodelay_pinconf_init_dev()
[all …]
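
The pinctrl-ti-iodelay.c matches center on the regmap_update_bits() call at line 267. A minimal sketch of that read-modify-write idiom follows; the names (my_set_delay, MY_DELAY_MASK) are hypothetical, not the driver's actual register layout:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/regmap.h>

#define MY_DELAY_MASK	GENMASK(7, 0)	/* hypothetical delay field */

static int my_set_delay(struct regmap *map, unsigned int offset, u8 delay)
{
	/* Rewrites only the masked bits; the rest of the register is kept. */
	return regmap_update_bits(map, offset, MY_DELAY_MASK,
				  FIELD_PREP(MY_DELAY_MASK, delay));
}
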
/linux/drivers/soc/rockchip/
io-domain.c
65 struct rockchip_iodomain *iod; member
74 void (*init)(struct rockchip_iodomain *iod);
88 struct rockchip_iodomain *iod = supply->iod; in rk3568_iodomain_write() local
102 regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val0); in rk3568_iodomain_write()
103 regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val1); in rk3568_iodomain_write()
117 regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL0, val0); in rk3568_iodomain_write()
118 regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL1, val1); in rk3568_iodomain_write()
130 struct rockchip_iodomain *iod = supply->iod; in rockchip_iodomain_write() local
141 ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val); in rockchip_iodomain_write()
143 dev_err(iod->dev, "Couldn't write to GRF\n"); in rockchip_iodomain_write()
[all …]
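
rk3568_iodomain_write() relies on the Rockchip GRF convention where the upper 16 bits of a write act as a per-bit write-enable mask, which is why two back-to-back regmap_write() calls can update different bits of the same VSEL register without a read-modify-write cycle. A sketch of the idiom, with illustrative names:

#include <linux/bits.h>
#include <linux/regmap.h>

static int grf_set_bit(struct regmap *grf, u32 reg, unsigned int bit, bool set)
{
	/* BIT(16 + bit) arms the write for bit <bit>; unarmed bits are
	 * left untouched by the hardware, so no prior read is needed.
	 */
	u32 val = BIT(16 + bit) | (set ? BIT(bit) : 0);

	return regmap_write(grf, reg, val);
}
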
/linux/drivers/nvme/target/
pci-epf.c
667 static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod) in nvmet_pci_epf_iod_name() argument
669 return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode); in nvmet_pci_epf_iod_name()
678 struct nvmet_pci_epf_iod *iod; in nvmet_pci_epf_alloc_iod() local
680 iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL); in nvmet_pci_epf_alloc_iod()
681 if (unlikely(!iod)) in nvmet_pci_epf_alloc_iod()
684 memset(iod, 0, sizeof(*iod)); in nvmet_pci_epf_alloc_iod()
685 iod->req.cmd = &iod->cmd; in nvmet_pci_epf_alloc_iod()
686 iod->req.cqe = &iod->cqe; in nvmet_pci_epf_alloc_iod()
687 iod->req.port = ctrl->port; in nvmet_pci_epf_alloc_iod()
688 iod->ctrl = ctrl; in nvmet_pci_epf_alloc_iod()
[all …]
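
nvmet_pci_epf_alloc_iod() shows the usual mempool pattern: objects come back recycled, so they are zeroed before the self-referencing fields (req.cmd, req.cqe) are wired up. A reduced sketch with a hypothetical struct:

#include <linux/mempool.h>
#include <linux/string.h>

struct my_iod {
	void *cmd;
	void *ctrl;
};

static struct my_iod *my_alloc_iod(mempool_t *pool, void *ctrl)
{
	struct my_iod *iod = mempool_alloc(pool, GFP_KERNEL);

	if (unlikely(!iod))
		return NULL;

	/* mempool objects are recycled, so clear stale state first. */
	memset(iod, 0, sizeof(*iod));
	iod->ctrl = ctrl;
	return iod;
}
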
fc.c
105 struct nvmet_fc_ls_iod *iod; member
251 struct nvmet_fc_ls_iod *iod);
528 struct nvmet_fc_ls_iod *iod; in nvmet_fc_alloc_ls_iodlist() local
531 iod = kzalloc_objs(struct nvmet_fc_ls_iod, NVMET_LS_CTX_COUNT); in nvmet_fc_alloc_ls_iodlist()
532 if (!iod) in nvmet_fc_alloc_ls_iodlist()
535 tgtport->iod = iod; in nvmet_fc_alloc_ls_iodlist()
537 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) { in nvmet_fc_alloc_ls_iodlist()
538 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); in nvmet_fc_alloc_ls_iodlist()
539 iod in nvmet_fc_alloc_ls_iodlist()
577 struct nvmet_fc_ls_iod *iod = tgtport->iod; nvmet_fc_free_ls_iodlist() local
593 struct nvmet_fc_ls_iod *iod; nvmet_fc_alloc_ls_iod() local
608 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod) nvmet_fc_free_ls_iod() argument
1595 struct nvmet_fc_ls_iod *iod; nvmet_fc_free_pending_reqs() local
1664 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod) nvmet_fc_ls_create_association() argument
1754 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod) nvmet_fc_ls_create_connection() argument
1844 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod) nvmet_fc_ls_disconnect() argument
1934 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; nvmet_fc_xmt_ls_rsp_done() local
1945 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod) nvmet_fc_xmt_ls_rsp() argument
1962 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport * tgtport,struct nvmet_fc_ls_iod * iod) nvmet_fc_handle_ls_rqst() argument
2010 struct nvmet_fc_ls_iod *iod = nvmet_fc_handle_ls_rqst_work() local
2043 struct nvmet_fc_ls_iod *iod; nvmet_fc_rcv_ls_req() local
[all …]
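
nvmet_fc_alloc_ls_iodlist() allocates NVMET_LS_CTX_COUNT contexts in one block and gives each a work item before handing the array to the target port. A sketch of that setup loop, written here with plain kcalloc() and placeholder names (NR_CTX, handler):

#include <linux/slab.h>
#include <linux/workqueue.h>

#define NR_CTX	4	/* stands in for NVMET_LS_CTX_COUNT */

struct my_ls_iod {
	struct work_struct work;
};

static struct my_ls_iod *my_alloc_iodlist(work_func_t handler)
{
	struct my_ls_iod *iod = kcalloc(NR_CTX, sizeof(*iod), GFP_KERNEL);
	int i;

	if (!iod)
		return NULL;

	for (i = 0; i < NR_CTX; i++)
		INIT_WORK(&iod[i].work, handler);
	return iod;
}
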
loop.c
78 struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); in nvme_loop_complete_rq() local
80 sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT); in nvme_loop_complete_rq()
127 struct nvme_loop_iod *iod = in nvme_loop_execute_work() local
130 iod->req.execute(&iod->req); in nvme_loop_execute_work()
139 struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); in nvme_loop_queue_rq() local
151 iod->cmd.common.flags |= NVME_CMD_SGL_METABUF; in nvme_loop_queue_rq()
152 iod->req.port = queue->ctrl->port; in nvme_loop_queue_rq()
153 if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops)) in nvme_loop_queue_rq()
157 iod in nvme_loop_queue_rq()
178 struct nvme_loop_iod *iod = &ctrl->async_event_iod; nvme_loop_submit_async_event() local
194 nvme_loop_init_iod(struct nvme_loop_ctrl * ctrl,struct nvme_loop_iod * iod,unsigned int queue_idx) nvme_loop_init_iod() argument
208 struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req); nvme_loop_init_request() local
[all …]
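
nvme_loop_queue_rq() and nvme_loop_complete_rq() never allocate their iod: blk_mq_rq_to_pdu() returns per-request driver data reserved behind each struct request when the tag set was created. A sketch of the layout, with a hypothetical PDU:

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>

struct my_iod {
	struct sg_table sg_table;
	int nents;
};

/* The PDU lives directly after struct request; its size comes from
 * tag_set.cmd_size = sizeof(struct my_iod) at blk_mq_alloc_tag_set() time.
 */
static struct my_iod *req_to_iod(struct request *req)
{
	return blk_mq_rq_to_pdu(req);
}
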
/linux/tools/testing/selftests/ublk/
file_backed.c
5 static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int zc) in ublk_to_uring_op() argument
7 unsigned ublk_op = ublksrv_get_op(iod); in ublk_to_uring_op()
17 const struct ublksrv_io_desc *iod, int tag) in loop_queue_flush_io() argument
19 unsigned ublk_op = ublksrv_get_op(iod); in loop_queue_flush_io()
33 * index + offset from iod->addr and use the server's mmap of that in loop_queue_tgt_rw_io()
37 const struct ublksrv_io_desc *iod, int tag) in loop_queue_tgt_rw_io()
39 unsigned ublk_op = ublksrv_get_op(iod); in loop_queue_tgt_rw_io()
40 enum io_uring_op op = ublk_to_uring_op(iod, 0); in loop_queue_tgt_rw_io()
41 __u64 file_offset = iod->start_sector << 9; in loop_queue_tgt_rw_io()
42 __u32 len = iod in loop_queue_tgt_rw_io()
31 loop_queue_tgt_rw_io(struct ublk_thread * t,struct ublk_queue * q,const struct ublksrv_io_desc * iod,int tag) loop_queue_tgt_rw_io() argument
95 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); loop_queue_tgt_io() local
[all …]
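
Both `file_offset = iod->start_sector << 9` and the length computation rely on ublk describing I/O in 512-byte sectors, so `<< 9` converts sectors to bytes. A standalone illustration of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t start_sector = 2048;	/* 512-byte units */
	uint32_t nr_sectors = 8;

	assert((start_sector << 9) == 1048576);		/* byte offset: 1 MiB */
	assert(((uint64_t)nr_sectors << 9) == 4096);	/* byte length: 4 KiB */
	return 0;
}
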
stripe.c
34 const struct ublksrv_io_desc *iod) in calculate_nr_vec() argument
38 loff_t start = iod->start_sector; in calculate_nr_vec()
39 loff_t end = start + iod->nr_sectors; in calculate_nr_vec()
45 const struct ublksrv_io_desc *iod) in alloc_stripe_array() argument
47 unsigned nr_vecs = calculate_nr_vec(conf, iod); in alloc_stripe_array()
73 const struct ublksrv_io_desc *iod, struct stripe_array *s, void *base) in calculate_stripe_array() argument
78 off64_t start = iod->start_sector; in calculate_stripe_array()
79 off64_t end = start + iod->nr_sectors; in calculate_stripe_array()
115 const struct ublksrv_io_desc *iod, int zc) in stripe_to_uring_op() argument
117 unsigned ublk_op = ublksrv_get_op(iod); in stripe_to_uring_op()
[all …]
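
calculate_nr_vec() sizes the per-I/O stripe array by counting how many stripe units the range [start, end) can touch. A sketch of the arithmetic; note the result is an upper bound (it overcounts by one when end falls exactly on a unit boundary), which is harmless when sizing an allocation:

static unsigned int nr_units_spanned(unsigned long long start_sector,
				     unsigned long long nr_sectors,
				     unsigned int unit_sects)
{
	unsigned long long start = start_sector;
	unsigned long long end = start + nr_sectors;

	/* Index of the last unit minus index of the first, inclusive. */
	return (unsigned int)(end / unit_sects - start / unit_sects) + 1;
}
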
null.c
46 static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod, in __setup_nop_io()
49 unsigned ublk_op = ublksrv_get_op(iod); in __setup_nop_io()
55 sqe->len = iod->nr_sectors << 9; /* injected result */ in __setup_nop_io()
62 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); in null_queue_zc_io()
73 __setup_nop_io(tag, iod, sqe[1], q->q_id, buf_idx); in null_queue_zc_io()
86 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); in null_queue_auto_zc_io()
90 __setup_nop_io(tag, iod, sqe[0], q->q_id, ublk_io_buf_idx(t, q, tag)); in null_queue_auto_zc_io()
120 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); in ublk_null_queue_io()
125 if (auto_zc && !ublk_io_auto_zc_fallback(iod)) in ublk_null_queue_io()
130 ublk_complete_io(t, q, tag, iod in ublk_null_queue_io()
45 __setup_nop_io(int tag,const struct ublksrv_io_desc * iod,struct io_uring_sqe * sqe,int q_id) __setup_nop_io() argument
61 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); null_queue_zc_io() local
84 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); null_queue_auto_zc_io() local
118 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); ublk_null_queue_io() local
[all …]
fault_inject.c
85 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); in ublk_fault_inject_cmd_line()
94 sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1); in ublk_fault_inject_usage()
106 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
112 ublk_complete_io(t, q, tag, iod->nr_sectors << 9);
44 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); ublk_fault_inject_queue_io() local
64 const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag); ublk_fault_inject_tgt_io_done() local
kublk.h
323 static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod) in ublk_io_auto_zc_fallback()
325 return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF); in ublk_io_auto_zc_fallback()
317 ublk_io_auto_zc_fallback(const struct ublksrv_io_desc * iod) ublk_io_auto_zc_fallback() argument
kublk.c
658 const struct ublksrv_io_desc *iod = ublk_get_iod(q, io->tag); in ublk_user_copy()
660 __u8 ublk_op = ublksrv_get_op(iod); in ublk_user_copy()
661 __u32 len = iod->nr_sectors << 9; in ublk_user_copy()
683 if (!(iod->op_flags & UBLK_IO_F_INTEGRITY)) in ublk_user_copy()
686 len = ublk_integrity_len(q, iod->nr_sectors << 9); in ublk_user_copy()
657 const struct ublksrv_io_desc *iod = ublk_get_iod(q, io->tag); ublk_user_copy() local
/linux/drivers/nvme/host/
apple.c
368 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); in apple_nvme_iod_list() local
370 return (void **)(iod->sg + blk_rq_nr_phys_segments(req)); in apple_nvme_iod_list()
376 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); in apple_nvme_free_prps() local
377 dma_addr_t dma_addr = iod->first_dma; in apple_nvme_free_prps()
380 for (i = 0; i < iod->npages; i++) { in apple_nvme_free_prps()
391 struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req); in apple_nvme_unmap_data() local
393 if (iod->dma_len) { in apple_nvme_unmap_data()
394 dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len, in apple_nvme_unmap_data()
399 WARN_ON_ONCE(!iod->nents); in apple_nvme_unmap_data()
401 dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req)); in apple_nvme_unmap_data()
[all …]
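
apple_nvme_free_prps() walks a chain of PRP-list pages: per the NVMe PRP rules, the last 64-bit slot of each page holds the DMA address of the next one, so each page is freed while its successor's address is saved first. A sketch with illustrative names and a page-sized list:

#include <linux/dmapool.h>
#include <linux/mm.h>
#include <asm/byteorder.h>

static void my_free_prp_chain(struct dma_pool *pool, __le64 **lists,
			      dma_addr_t first_dma, int npages)
{
	const int last_slot = PAGE_SIZE / sizeof(__le64) - 1;
	dma_addr_t dma_addr = first_dma;
	int i;

	for (i = 0; i < npages; i++) {
		/* Save the link before returning the page to the pool. */
		dma_addr_t next = le64_to_cpu(lists[i][last_slot]);

		dma_pool_free(pool, lists[i], dma_addr);
		dma_addr = next;
	}
}
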
pci.c
396 /* bits for iod->flags */
665 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_init_request() local
668 nvme_req(req)->cmd = &iod->cmd; in nvme_pci_init_request()
797 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_pci_avg_seg_size() local
800 if (blk_rq_dma_map_coalesce(&iod->dma_state)) in nvme_pci_avg_seg_size()
808 struct nvme_iod *iod) in nvme_dma_pool() argument
810 if (iod->flags & IOD_SMALL_DESCRIPTOR) in nvme_dma_pool()
837 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); in nvme_free_descriptors() local
838 dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd); in nvme_free_descriptors()
841 if (iod in nvme_free_descriptors()
859 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_free_prps() local
894 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_unmap_metadata() local
930 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_unmap_data() local
967 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_pci_prp_save_mapping() local
1002 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_pci_setup_data_prp() local
1137 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_pci_setup_data_sgl() local
1180 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_pci_setup_data_simple() local
1218 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_map_data() local
1262 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_pci_setup_meta_iter() local
1343 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_pci_setup_meta_mptr() local
1360 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_map_metadata() local
1370 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_prep_rq() local
1411 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_queue_rq() local
1443 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_submit_cmds() local
1846 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); nvme_timeout() local
[all …]
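
nvme_dma_pool() picks a descriptor pool based on an iod flag: commands with few segments take a 256-byte descriptor from a small pool instead of burning a full page. A sketch of the selection; the flag and struct names here are illustrative stand-ins:

#include <linux/dmapool.h>

#define MY_IOD_SMALL_DESCRIPTOR	(1U << 0)	/* stands in for IOD_SMALL_DESCRIPTOR */

struct my_desc_pools {
	struct dma_pool *small;	/* 256-byte descriptors */
	struct dma_pool *large;	/* page-sized descriptors */
};

static struct dma_pool *my_pick_pool(struct my_desc_pools *pools,
				     unsigned int iod_flags)
{
	if (iod_flags & MY_IOD_SMALL_DESCRIPTOR)
		return pools->small;
	return pools->large;
}
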
/linux/drivers/clk/
clk-versaclock7.c
172 struct vc7_iod_data *iod; member
379 map->src.iod = &vc7->clk_iod[0]; in vc7_get_bank_clk()
383 map->src.iod = &vc7->clk_iod[1]; in vc7_get_bank_clk()
400 map->src.iod = &vc7->clk_iod[1]; in vc7_get_bank_clk()
437 map->src.iod = &vc7->clk_iod[2]; in vc7_get_bank_clk()
463 map->src.iod = &vc7->clk_iod[2]; in vc7_get_bank_clk()
467 map->src.iod = &vc7->clk_iod[3]; in vc7_get_bank_clk()
490 map->src.iod = &vc7->clk_iod[2]; in vc7_get_bank_clk()
494 map->src.iod = &vc7->clk_iod[3]; in vc7_get_bank_clk()
964 struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw); in vc7_iod_recalc_rate() local
[all …]
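
vc7_iod_recalc_rate() uses the standard clk-provider pattern: the framework passes in the embedded clk_hw, and container_of() recovers the driver's wrapper struct. A minimal sketch:

#include <linux/clk-provider.h>
#include <linux/container_of.h>

struct my_iod_data {
	struct clk_hw hw;	/* embedded; handed back by the clk core */
	unsigned long rate;
};

static unsigned long my_iod_recalc_rate(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct my_iod_data *iod = container_of(hw, struct my_iod_data, hw);

	return iod->rate;	/* a real driver derives this from registers */
}
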
/linux/include/uapi/linux/
ublk_cmd.h
549 static inline __u8 ublksrv_get_op(const struct ublksrv_io_desc *iod)
551 return iod->op_flags & 0xff;
554 static inline __u32 ublksrv_get_flags(const struct ublksrv_io_desc *iod)
556 return iod->op_flags >> 8;
435 ublksrv_get_op(const struct ublksrv_io_desc * iod) ublksrv_get_op() argument
440 ublksrv_get_flags(const struct ublksrv_io_desc * iod) ublksrv_get_flags() argument
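
The two accessors confirm the ublksrv_io_desc encoding: the opcode sits in the low byte of op_flags and the flags occupy the bits above it. A quick userspace check of that split, with arbitrary example values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t flags = 0x2100, op = 0x01;
	uint32_t op_flags = (flags << 8) | op;

	assert((op_flags & 0xff) == op);	/* ublksrv_get_op()    */
	assert((op_flags >> 8) == flags);	/* ublksrv_get_flags() */
	return 0;
}
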
/linux/Documentation/admin-guide/perf/
fujitsu_uncore_pmu.rst
10 mac_iod<iod>_mac<mac>_ch<ch>.
12 pci_iod<iod>_pci<pci>.
15 options in sysfs, see /sys/bus/event_sources/devices/mac_iod<iod>_mac<mac>_ch<ch>/
16 and /sys/bus/event_sources/devices/pci_iod<iod>_pci<pci>/.
/linux/arch/alpha/kernel/
io.c
661 u16 __iomem *iod = (u16 __iomem *) d; in scr_memcpyw() local
673 __raw_writew(tmp, iod++); in scr_memcpyw()
680 memcpy_toio(iod, s, count); in scr_memcpyw()
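
scr_memcpyw() copies VGA screen words into MMIO space; the word-by-word branch shown above uses __raw_writew() so each 16-bit character/attribute cell is stored atomically, where a plain byte copy could tear a cell. A sketch of that slow path:

#include <linux/io.h>

static void my_copy_words_toio(u16 __iomem *dst, const u16 *src, int count)
{
	/* One 16-bit MMIO store per cell; never splits a cell into bytes. */
	while (count-- > 0)
		__raw_writew(*src++, dst++);
}
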
/linux/drivers/block/
ublk_drv.c
655 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); in ublk_setup_iod_zoned()
686 iod->op_flags = ublk_op | ublk_req_build_flags(req); in ublk_setup_iod_zoned()
687 iod->nr_zones = desc->nr_zones;
688 iod->start_sector = desc->sector; in ublk_setup_iod_zoned()
700 iod->op_flags = ublk_op | ublk_req_build_flags(req);
701 iod->nr_sectors = blk_rq_sectors(req);
702 iod->start_sector = blk_rq_pos(req);
703 iod->addr = io->buf.addr;
1471 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); in ublk_setup_iod()
1498 iod in __ublk_complete_rq()
617 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); ublk_setup_iod_zoned() local
1433 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag); ublk_setup_iod() local
1591 struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag); ublk_auto_buf_reg_fallback() local
[all …]
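
ublk_setup_iod() and its zoned variant translate a struct request into the ublksrv_io_desc that the userspace server consumes: opcode and flags packed into op_flags, sectors and buffer address copied over. A reduced sketch; ublk_req_build_flags() is replaced here by a pre-shifted flags argument:

#include <linux/types.h>
#include <uapi/linux/ublk_cmd.h>

static void my_setup_iod(struct ublksrv_io_desc *iod, u32 op, u32 flags_shifted,
			 u32 nr_sectors, u64 start_sector, u64 buf_addr)
{
	iod->op_flags = op | flags_shifted;	/* op in the low byte */
	iod->nr_sectors = nr_sectors;
	iod->start_sector = start_sector;
	iod->addr = buf_addr;
}
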
/linux/tools/perf/util/
machine.c
1383 struct io_dir iod; in maps__set_modules_path_dir() local
1387 io_dir__init(&iod, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY)); in maps__set_modules_path_dir()
1388 if (iod.dirfd < 0) { in maps__set_modules_path_dir()
1396 while ((dent = io_dir__readdir(&iod)) != NULL) { in maps__set_modules_path_dir()
1397 if (io_dir__is_dir(&iod, dent)) { in maps__set_modules_path_dir()
1440 close(iod.dirfd); in maps__set_modules_path_dir()
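
maps__set_modules_path_dir() iterates directories through perf's io_dir helpers rather than opendir(); io_dir wraps a raw directory fd. The calls below follow the signatures visible in the excerpt; this is a hedged sketch of a perf-internal API, not documented kernel infrastructure:

#include <fcntl.h>
#include <unistd.h>
#include "util/io_dir.h"	/* perf-internal header, as in machine.c */

static void my_walk_dir(const char *path)
{
	struct io_dir iod;
	struct io_dirent64 *dent;

	io_dir__init(&iod, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
	if (iod.dirfd < 0)
		return;

	while ((dent = io_dir__readdir(&iod)) != NULL) {
		if (io_dir__is_dir(&iod, dent))
			;	/* recurse into dent->d_name here */
	}
	close(iod.dirfd);
}
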
synthetic-events.c
782 struct io_dir iod; in __event__synthesize_thread() local
817 io_dir__init(&iod, open(filename, O_CLOEXEC | O_DIRECTORY | O_RDONLY)); in __event__synthesize_thread()
818 if (iod.dirfd < 0) in __event__synthesize_thread()
821 while ((dent = io_dir__readdir(&iod)) != NULL) { in __event__synthesize_thread()
863 close(iod.dirfd); in __event__synthesize_thread()