| /linux/drivers/pinctrl/ti/ |
| pinctrl-ti-iodelay.c |
    209  static int ti_iodelay_pinconf_set(struct ti_iodelay_device *iod,  in ti_iodelay_pinconf_set() argument
    212  const struct ti_iodelay_reg_data *reg = iod->reg_data;  in ti_iodelay_pinconf_set()
    213  struct ti_iodelay_reg_values *ival = &iod->reg_init_conf_values;  in ti_iodelay_pinconf_set()
    214  struct device *dev = iod->dev;  in ti_iodelay_pinconf_set()
    267  r = regmap_update_bits(iod->regmap, cfg->offset, reg_mask, reg_val);  in ti_iodelay_pinconf_set()
    284  struct ti_iodelay_device *iod = data;  in ti_iodelay_pinconf_deinit_dev() local
    285  const struct ti_iodelay_reg_data *reg = iod->reg_data;  in ti_iodelay_pinconf_deinit_dev()
    288  regmap_update_bits(iod->regmap, reg->reg_global_lock_offset,  in ti_iodelay_pinconf_deinit_dev()
    300  static int ti_iodelay_pinconf_init_dev(struct ti_iodelay_device *iod)  in ti_iodelay_pinconf_init_dev() argument
    302  const struct ti_iodelay_reg_data *reg = iod->reg_data;  in ti_iodelay_pinconf_init_dev()
    [all …]
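
Every hit above funnels through regmap_update_bits(). A minimal sketch of what that call does under the hood (an illustrative helper, not the driver's code; assumes a valid regmap):

    #include <linux/regmap.h>

    /* Read-modify-write: only the bits covered by mask change, the rest
     * of the register is preserved. The real regmap_update_bits() also
     * serializes access and skips the write when nothing would change. */
    static int update_bits_by_hand(struct regmap *map, unsigned int reg,
                                   unsigned int mask, unsigned int val)
    {
            unsigned int cur;
            int ret;

            ret = regmap_read(map, reg, &cur);
            if (ret)
                    return ret;
            cur = (cur & ~mask) | (val & mask);  /* merge masked bits only */
            return regmap_write(map, reg, cur);
    }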
|
| /linux/drivers/soc/rockchip/ |
| io-domain.c |
    65   struct rockchip_iodomain *iod;  member
    74   void (*init)(struct rockchip_iodomain *iod);
    88   struct rockchip_iodomain *iod = supply->iod;  in rk3568_iodomain_write() local
    102  regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val0);  in rk3568_iodomain_write()
    103  regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL2, val1);  in rk3568_iodomain_write()
    117  regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL0, val0);  in rk3568_iodomain_write()
    118  regmap_write(iod->grf, RK3568_PMU_GRF_IO_VSEL1, val1);  in rk3568_iodomain_write()
    130  struct rockchip_iodomain *iod = supply->iod;  in rockchip_iodomain_write() local
    141  ret = regmap_write(iod->grf, iod->soc_data->grf_offset, val);  in rockchip_iodomain_write()
    143  dev_err(iod->dev, "Couldn't write to GRF\n");  in rockchip_iodomain_write()
    [all …]
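
The iodomain write hits use plain regmap_write() with no read back. That works because Rockchip GRF registers take a write-enable mask in the upper 16 bits; a sketch of that idiom (helper name and parameters are illustrative, not io-domain.c itself):

    #include <linux/bitops.h>
    #include <linux/regmap.h>

    /* GRF convention: bit n of the register only changes if bit n+16 is
     * set in the same write, so no read-modify-write is needed. */
    static int grf_set_bit(struct regmap *grf, unsigned int offset,
                           unsigned int bit, bool set)
    {
            u32 val = BIT(bit + 16);        /* write-enable for bit n */

            if (set)
                    val |= BIT(bit);        /* without it, bit n clears */
            return regmap_write(grf, offset, val);
    }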
|
| /linux/drivers/nvme/target/ |
| pci-epf.c |
    667   static inline const char *nvmet_pci_epf_iod_name(struct nvmet_pci_epf_iod *iod)  in nvmet_pci_epf_iod_name() argument
    669   return nvme_opcode_str(iod->sq->qid, iod->cmd.common.opcode);  in nvmet_pci_epf_iod_name()
    678   struct nvmet_pci_epf_iod *iod;  in nvmet_pci_epf_alloc_iod() local
    680   iod = mempool_alloc(&ctrl->iod_pool, GFP_KERNEL);  in nvmet_pci_epf_alloc_iod()
    681   if (unlikely(!iod))  in nvmet_pci_epf_alloc_iod()
    684   memset(iod, 0, sizeof(*iod));  in nvmet_pci_epf_alloc_iod()
    685   iod->req.cmd = &iod …  in nvmet_pci_epf_alloc_iod()
    702   nvmet_pci_epf_alloc_iod_data_segs(struct nvmet_pci_epf_iod *iod, int nsegs)  nvmet_pci_epf_alloc_iod_data_segs() argument
    720   nvmet_pci_epf_free_iod(struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_free_iod() argument
    735   nvmet_pci_epf_transfer_iod_data(struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_transfer_iod_data() argument
    791   nvmet_pci_epf_iod_parse_prp_list(struct nvmet_pci_epf_ctrl *ctrl, struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_iod_parse_prp_list() argument
    923   nvmet_pci_epf_iod_parse_prp_simple(struct nvmet_pci_epf_ctrl *ctrl, struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_iod_parse_prp_simple() argument
    972   nvmet_pci_epf_iod_parse_prps(struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_iod_parse_prps() argument
    1037  nvmet_pci_epf_iod_parse_sgl_segments(struct nvmet_pci_epf_ctrl *ctrl, struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_iod_parse_sgl_segments() argument
    1096  nvmet_pci_epf_iod_parse_sgls(struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_iod_parse_sgls() argument
    1113  nvmet_pci_epf_alloc_iod_data_buf(struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_alloc_iod_data_buf() argument
    1169  nvmet_pci_epf_complete_iod(struct nvmet_pci_epf_iod *iod)  nvmet_pci_epf_complete_iod() argument
    1194  struct nvmet_pci_epf_iod *iod;  nvmet_pci_epf_drain_queue() local
    1241  struct nvmet_pci_epf_iod *iod = …  nvmet_pci_epf_queue_response() local
    1596  struct nvmet_pci_epf_iod *iod = …  nvmet_pci_epf_exec_iod_work() local
    1668  struct nvmet_pci_epf_iod *iod;  nvmet_pci_epf_process_sq() local
    1762  struct nvmet_pci_epf_iod *iod;  nvmet_pci_epf_cq_work() local
    [all …]
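
The allocation path at lines 678-685 is the standard mempool pattern: a pre-populated pool guarantees that a per-command context can eventually be obtained even under memory pressure. A condensed sketch (the struct and reserve size are made up):

    #include <linux/mempool.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct my_iod { int state; };           /* hypothetical context */
    static mempool_t iod_pool;

    static int iod_pool_setup(void)
    {
            /* keep at least 16 contexts in reserve */
            return mempool_init_kmalloc_pool(&iod_pool, 16,
                                             sizeof(struct my_iod));
    }

    static struct my_iod *iod_get(void)
    {
            struct my_iod *iod = mempool_alloc(&iod_pool, GFP_KERNEL);

            /* pool memory is recycled, so clear it, as line 684 does */
            if (iod)
                    memset(iod, 0, sizeof(*iod));
            return iod;
    }
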
| fc.c |
    105   struct nvmet_fc_ls_iod *iod;  member
    251   struct nvmet_fc_ls_iod *iod);
    528   struct nvmet_fc_ls_iod *iod;  in nvmet_fc_alloc_ls_iodlist() local
    531   iod = kzalloc_objs(struct nvmet_fc_ls_iod, NVMET_LS_CTX_COUNT);  in nvmet_fc_alloc_ls_iodlist()
    532   if (!iod)  in nvmet_fc_alloc_ls_iodlist()
    535   tgtport->iod = iod;  in nvmet_fc_alloc_ls_iodlist()
    537   for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {  in nvmet_fc_alloc_ls_iodlist()
    538   INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);  in nvmet_fc_alloc_ls_iodlist()
    539   iod …  in nvmet_fc_alloc_ls_iodlist()
    578   struct nvmet_fc_ls_iod *iod = tgtport->iod;  nvmet_fc_free_ls_iodlist() local
    594   struct nvmet_fc_ls_iod *iod;  nvmet_fc_alloc_ls_iod() local
    609   nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod)  nvmet_fc_free_ls_iod() argument
    1596  struct nvmet_fc_ls_iod *iod;  nvmet_fc_free_pending_reqs() local
    1665  nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod)  nvmet_fc_ls_create_association() argument
    1755  nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod)  nvmet_fc_ls_create_connection() argument
    1845  nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod)  nvmet_fc_ls_disconnect() argument
    1935  struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;  nvmet_fc_xmt_ls_rsp_done() local
    1946  nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod)  nvmet_fc_xmt_ls_rsp() argument
    1963  nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport, struct nvmet_fc_ls_iod *iod)  nvmet_fc_handle_ls_rqst() argument
    2011  struct nvmet_fc_ls_iod *iod = …  nvmet_fc_handle_ls_rqst_work() local
    2044  struct nvmet_fc_ls_iod *iod;  nvmet_fc_rcv_ls_req() local
    [all …]
| loop.c |
    78   struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);  in nvme_loop_complete_rq() local
    80   sg_free_table_chained(&iod->sg_table, NVME_INLINE_SG_CNT);  in nvme_loop_complete_rq()
    127  struct nvme_loop_iod *iod = …  in nvme_loop_execute_work() local
    130  iod->req.execute(&iod->req);  in nvme_loop_execute_work()
    139  struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);  in nvme_loop_queue_rq() local
    151  iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;  in nvme_loop_queue_rq()
    152  iod->req.port = queue->ctrl->port;  in nvme_loop_queue_rq()
    153  if (!nvmet_req_init(&iod->req, &queue->nvme_sq, &nvme_loop_ops))  in nvme_loop_queue_rq()
    157  iod …  in nvme_loop_queue_rq()
    178  struct nvme_loop_iod *iod = &ctrl->async_event_iod;  nvme_loop_submit_async_event() local
    194  nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, struct nvme_loop_iod *iod, unsigned int queue_idx)  nvme_loop_init_iod() argument
    208  struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);  nvme_loop_init_request() local
    [all …]
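
The repeated blk_mq_rq_to_pdu() hits rely on blk-mq's per-request driver payload: the core allocates cmd_size extra bytes behind every struct request, so the iod needs no allocation in the I/O path. A sketch with a hypothetical payload type:

    #include <linux/blk-mq.h>

    struct my_iod { u32 flags; };           /* hypothetical per-request data */

    static void my_tagset_init(struct blk_mq_tag_set *set)
    {
            set->cmd_size = sizeof(struct my_iod);  /* reserved per request */
    }

    static struct my_iod *req_to_iod(struct request *req)
    {
            /* the payload sits directly after the request structure */
            return blk_mq_rq_to_pdu(req);
    }
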
| /linux/tools/testing/selftests/ublk/ |
| file_backed.c |
    5   static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int zc)  in ublk_to_uring_op() argument
    7   unsigned ublk_op = ublksrv_get_op(iod);  in ublk_to_uring_op()
    17  const struct ublksrv_io_desc *iod, int tag)  in loop_queue_flush_io() argument
    19  unsigned ublk_op = ublksrv_get_op(iod);  in loop_queue_flush_io()
    31  const struct ublksrv_io_desc *iod, int tag)  in loop_queue_tgt_rw_io() argument
    33  unsigned ublk_op = ublksrv_get_op(iod);  in loop_queue_tgt_rw_io()
    36  enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc);  in loop_queue_tgt_rw_io()
    38  __u64 offset = iod->start_sector << 9;  in loop_queue_tgt_rw_io()
    39  __u32 len = iod->nr_sectors << 9;  in loop_queue_tgt_rw_io()
    44  if (iod->op_flags & UBLK_IO_F_INTEGRITY) {  in loop_queue_tgt_rw_io()
    [all …]
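
Lines 38-39 are the key unit conversion: ublk descriptors always speak 512-byte sectors, whatever the device's logical block size, so byte offset and length are plain shifts by 9. A userspace sketch of the same math using the uapi struct (helper name is illustrative):

    #include <linux/ublk_cmd.h>

    /* Convert a ublk descriptor to the byte range used against the
     * backing file, exactly as lines 38-39 above do. */
    static void iod_byte_range(const struct ublksrv_io_desc *iod,
                               unsigned long long *off, unsigned int *len)
    {
            *off = (unsigned long long)iod->start_sector << 9;
            *len = iod->nr_sectors << 9;
    }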
|
| stripe.c |
    34   const struct ublksrv_io_desc *iod)  in calculate_nr_vec() argument
    38   loff_t start = iod->start_sector;  in calculate_nr_vec()
    39   loff_t end = start + iod->nr_sectors;  in calculate_nr_vec()
    45   const struct ublksrv_io_desc *iod)  in alloc_stripe_array() argument
    47   unsigned nr_vecs = calculate_nr_vec(conf, iod);  in alloc_stripe_array()
    73   const struct ublksrv_io_desc *iod, struct stripe_array *s, void *base)  in calculate_stripe_array() argument
    78   off64_t start = iod->start_sector;  in calculate_stripe_array()
    79   off64_t end = start + iod->nr_sectors;  in calculate_stripe_array()
    115  const struct ublksrv_io_desc *iod, int zc)  in stripe_to_uring_op() argument
    117  unsigned ublk_op = ublksrv_get_op(iod);  in stripe_to_uring_op()
    [all …]
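
calculate_nr_vec() has to know how many backing files a single request touches. Its internals are not shown above; assuming a power-of-two chunk size of (1 << shift) sectors, the count would be the number of chunks the range [start, end) overlaps, capped at the stripe width:

    /* Illustrative reconstruction under the stated assumption,
     * not a copy of stripe.c. */
    static unsigned int calc_nr_vecs(unsigned int shift, unsigned int nr_files,
                                     unsigned long long start,
                                     unsigned int nr_sectors)
    {
            unsigned long long end = start + nr_sectors;
            unsigned long long chunks = (end >> shift) - (start >> shift) +
                            ((end & ((1ULL << shift) - 1)) ? 1 : 0);

            return chunks < nr_files ? (unsigned int)chunks : nr_files;
    }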
|
| null.c |
    46   static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,  in __setup_nop_io()
    49   unsigned ublk_op = ublksrv_get_op(iod);  in __setup_nop_io()
    55   sqe->len = iod->nr_sectors << 9; /* injected result */  in __setup_nop_io()
    62   const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  in null_queue_zc_io()
    73   __setup_nop_io(tag, iod, sqe[1], q->q_id, buf_idx);  in null_queue_zc_io()
    86   const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  in null_queue_auto_zc_io()
    90   __setup_nop_io(tag, iod, sqe[0], q->q_id, ublk_io_buf_idx(t, q, tag));  in null_queue_auto_zc_io()
    120  const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  in ublk_null_queue_io()
    125  if (auto_zc && !ublk_io_auto_zc_fallback(iod))  in ublk_null_queue_io()
    130  ublk_complete_io(t, q, tag, iod …  in ublk_null_queue_io()
    45   __setup_nop_io(int tag, const struct ublksrv_io_desc *iod, struct io_uring_sqe *sqe, int q_id)  __setup_nop_io() argument
    61   const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  null_queue_zc_io() local
    84   const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  null_queue_auto_zc_io() local
    118  const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  ublk_null_queue_io() local
    [all …]
| fault_inject.c |
    45  const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  in ublk_fault_inject_queue_io()
    53  sqe->user_data = build_user_data(tag, ublksrv_get_op(iod), 0, q->q_id, 1);  in ublk_fault_inject_queue_io()
    65  const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  in ublk_fault_inject_tgt_io_done()
    71  ublk_complete_io(t, q, tag, iod->nr_sectors << 9);  in ublk_fault_inject_tgt_io_done()
    44  const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  ublk_fault_inject_queue_io() local
    64  const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);  ublk_fault_inject_tgt_io_done() local
|
| kublk.h |
    317  static inline int ublk_io_auto_zc_fallback(const struct ublksrv_io_desc *iod)  in ublk_io_auto_zc_fallback() argument
    319  return !!(iod->op_flags & UBLK_IO_F_NEED_REG_BUF);  in ublk_io_auto_zc_fallback()
|
| kublk.c |
    657  const struct ublksrv_io_desc *iod = ublk_get_iod(q, io->tag);  in ublk_user_copy() local
    659  __u8 ublk_op = ublksrv_get_op(iod);  in ublk_user_copy()
    660  __u32 len = iod->nr_sectors << 9;  in ublk_user_copy()
    682  if (!(iod->op_flags & UBLK_IO_F_INTEGRITY))  in ublk_user_copy()
    685  len = ublk_integrity_len(q, iod->nr_sectors << 9);  in ublk_user_copy()
|
| /linux/drivers/nvme/host/ |
| apple.c |
    368  struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);  in apple_nvme_iod_list() local
    370  return (void **)(iod->sg + blk_rq_nr_phys_segments(req));  in apple_nvme_iod_list()
    376  struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);  in apple_nvme_free_prps() local
    377  dma_addr_t dma_addr = iod->first_dma;  in apple_nvme_free_prps()
    380  for (i = 0; i < iod->npages; i++) {  in apple_nvme_free_prps()
    391  struct apple_nvme_iod *iod = blk_mq_rq_to_pdu(req);  in apple_nvme_unmap_data() local
    393  if (iod->dma_len) {  in apple_nvme_unmap_data()
    394  dma_unmap_page(anv->dev, iod->first_dma, iod->dma_len,  in apple_nvme_unmap_data()
    399  WARN_ON_ONCE(!iod->nents);  in apple_nvme_unmap_data()
    401  dma_unmap_sg(anv->dev, iod->sg, iod->nents, rq_dma_dir(req));  in apple_nvme_unmap_data()
    [all …]
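
Line 370 is a layout trick worth spelling out: the scatterlist and the list of PRP-page pointers share one allocation, and the pointer list simply begins where the SG entries end. A standalone userspace illustration (stand-in types, not the driver's):

    #include <stdlib.h>

    struct sg_entry { void *page; unsigned int len; };  /* stand-in */

    static void **list_after_sg(struct sg_entry *sg, int nseg)
    {
            /* pointer list starts right past the last SG entry */
            return (void **)(sg + nseg);
    }

    int main(void)
    {
            int nseg = 4, npages = 8;
            struct sg_entry *sg = malloc(nseg * sizeof(*sg) +
                                         npages * sizeof(void *));
            if (!sg)
                    return 1;
            list_after_sg(sg, nseg)[0] = sg;    /* first pointer slot */
            free(sg);
            return 0;
    }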
|
| pci.c |
    252   /* bits for iod->flags */
    521   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  in nvme_pci_init_request() local
    524   nvme_req(req)->cmd = &iod->cmd;  in nvme_pci_init_request()
    653   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  in nvme_pci_avg_seg_size() local
    656   if (blk_rq_dma_map_coalesce(&iod->dma_state))  in nvme_pci_avg_seg_size()
    664   struct nvme_iod *iod)  in nvme_dma_pool() argument
    666   if (iod->flags & IOD_SMALL_DESCRIPTOR)  in nvme_dma_pool()
    693   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  in nvme_free_descriptors() local
    694   dma_addr_t dma_addr = nvme_pci_first_desc_dma_addr(&iod->cmd);  in nvme_free_descriptors()
    697   if (iod …  in nvme_free_descriptors()
    715   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_free_prps() local
    750   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_unmap_metadata() local
    786   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_unmap_data() local
    823   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_pci_prp_save_mapping() local
    858   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_pci_setup_data_prp() local
    993   struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_pci_setup_data_sgl() local
    1036  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_pci_setup_data_simple() local
    1074  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_map_data() local
    1118  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_pci_setup_meta_iter() local
    1199  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_pci_setup_meta_mptr() local
    1216  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_map_metadata() local
    1226  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_prep_rq() local
    1267  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_queue_rq() local
    1299  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_submit_cmds() local
    1699  struct nvme_iod *iod = blk_mq_rq_to_pdu(req);  nvme_timeout() local
    [all …]
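
nvme_dma_pool() at line 666 picks between two descriptor pools based on IOD_SMALL_DESCRIPTOR. The point of a second pool is waste: short PRP/SGL lists don't need a whole page. A sketch of such a two-pool setup (the names and the 256-byte size are assumptions, not pci.c's values):

    #include <linux/dmapool.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    static int create_desc_pools(struct device *dev,
                                 struct dma_pool **small,
                                 struct dma_pool **large)
    {
            /* small pool for short descriptor lists, page pool for long */
            *small = dma_pool_create("desc-small", dev, 256, 256, 0);
            *large = dma_pool_create("desc-page", dev, PAGE_SIZE, PAGE_SIZE, 0);
            if (!*small || !*large)
                    return -ENOMEM;
            return 0;
    }
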
| /linux/drivers/clk/ |
| clk-versaclock7.c |
    172  struct vc7_iod_data *iod;  member
    379  map->src.iod = &vc7->clk_iod[0];  in vc7_get_bank_clk()
    383  map->src.iod = &vc7->clk_iod[1];  in vc7_get_bank_clk()
    400  map->src.iod = &vc7->clk_iod[1];  in vc7_get_bank_clk()
    437  map->src.iod = &vc7->clk_iod[2];  in vc7_get_bank_clk()
    463  map->src.iod = &vc7->clk_iod[2];  in vc7_get_bank_clk()
    467  map->src.iod = &vc7->clk_iod[3];  in vc7_get_bank_clk()
    490  map->src.iod = &vc7->clk_iod[2];  in vc7_get_bank_clk()
    494  map->src.iod = &vc7->clk_iod[3];  in vc7_get_bank_clk()
    964  struct vc7_iod_data *iod = container_of(hw, struct vc7_iod_data, hw);  in vc7_iod_recalc_rate() local
    [all …]
|
| /linux/include/uapi/linux/ |
| ublk_cmd.h |
    496  static inline __u8 ublksrv_get_op(const struct ublksrv_io_desc *iod)  in ublk_auto_buf_reg_to_sqe_addr()
    498  return iod->op_flags & 0xff;  in ublk_auto_buf_reg_to_sqe_addr()
    501  static inline __u32 ublksrv_get_flags(const struct ublksrv_io_desc *iod)
    503  return iod->op_flags >> 8;
    435  ublksrv_get_op(const struct ublksrv_io_desc *iod)  ublksrv_get_op() argument
    440  ublksrv_get_flags(const struct ublksrv_io_desc *iod)  ublksrv_get_flags() argument
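
These two accessors define the op_flags packing for the whole ublk uapi: opcode in the low byte, flags in the remaining 24 bits. A quick userspace check of the same decode (the values are invented for the demo):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t op_flags = (0x2aU << 8) | 0x01;  /* flags 0x2a, op 0x01 */

            printf("op=%u flags=%#x\n", op_flags & 0xff, op_flags >> 8);
            return 0;   /* prints: op=1 flags=0x2a */
    }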
|
| /linux/Documentation/admin-guide/perf/ |
| fujitsu_uncore_pmu.rst |
    10  mac_iod<iod>_mac<mac>_ch<ch>.
    12  pci_iod<iod>_pci<pci>.
    15  options in sysfs, see /sys/bus/event_sources/devices/mac_iod<iod>_mac<mac>_ch<ch>/
    16  and /sys/bus/event_sources/devices/pci_iod<iod>_pci<pci>/.
|
| /linux/arch/alpha/kernel/ |
| io.c |
    661  u16 __iomem *iod = (u16 __iomem *) d;  in scr_memcpyw() local
    673  __raw_writew(tmp, iod++);  in scr_memcpyw()
    680  memcpy_toio(iod, s, count);  in scr_memcpyw()
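
scr_memcpyw() mixes __raw_writew() and memcpy_toio() because screen memory must be written in properly sized, aligned accesses. A simplified sketch of that head/bulk split (not the actual alpha code, which also handles trailing words and overlap):

    #include <linux/align.h>
    #include <linux/io.h>

    static void copy_words_to_mmio(u16 __iomem *dst, const u16 *src,
                                   size_t count)
    {
            /* head: write word by word until dst is 32-bit aligned */
            while (count && !IS_ALIGNED((unsigned long)dst, sizeof(u32))) {
                    __raw_writew(*src++, dst++);
                    count--;
            }
            /* bulk: let memcpy_toio() move the aligned remainder */
            memcpy_toio(dst, src, count * sizeof(u16));
    }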
|
| /linux/drivers/block/ |
| ublk_drv.c |
    617   struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);  in ublk_setup_iod_zoned() local
    648   iod->op_flags = ublk_op | ublk_req_build_flags(req);  in ublk_setup_iod_zoned()
    649   iod->nr_zones = desc->nr_zones;  in ublk_setup_iod_zoned()
    650   iod->start_sector = desc->sector;  in ublk_setup_iod_zoned()
    662   iod->op_flags = ublk_op | ublk_req_build_flags(req);  in ublk_setup_iod_zoned()
    663   iod->nr_sectors = blk_rq_sectors(req);  in ublk_setup_iod_zoned()
    664   iod->start_sector = blk_rq_pos(req);  in ublk_setup_iod_zoned()
    665   iod->addr = io->buf.addr;  in ublk_setup_iod_zoned()
    1433  struct ublksrv_io_desc *iod = ublk_get_iod(ubq, req->tag);  in ublk_setup_iod() local
    1460  iod …  in ublk_setup_iod()
    1591  struct ublksrv_io_desc *iod = ublk_get_iod(ubq, tag);  ublk_auto_buf_reg_fallback() local
    [all …]
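
Lines 662-665 show the kernel-side half of the ublk contract: the blk-mq request is summarized into the shared ublksrv_io_desc that the userspace server reads, all in 512-byte sectors. A condensed sketch (flag plumbing simplified, helper name illustrative):

    #include <linux/blk-mq.h>
    #include <uapi/linux/ublk_cmd.h>

    static void fill_iod(struct ublksrv_io_desc *iod, struct request *req,
                         u32 ublk_op, u32 flags, u64 buf_addr)
    {
            iod->op_flags = ublk_op | flags;      /* op low byte, flags above */
            iod->nr_sectors = blk_rq_sectors(req);
            iod->start_sector = blk_rq_pos(req);
            iod->addr = buf_addr;                 /* userspace buffer */
    }
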
| /linux/tools/perf/util/ |
| machine.c |
    1383  struct io_dir iod;  in maps__set_modules_path_dir() local
    1387  io_dir__init(&iod, open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));  in maps__set_modules_path_dir()
    1388  if (iod.dirfd < 0) {  in maps__set_modules_path_dir()
    1396  while ((dent = io_dir__readdir(&iod)) != NULL) {  in maps__set_modules_path_dir()
    1397  if (io_dir__is_dir(&iod, dent)) {  in maps__set_modules_path_dir()
    1440  close(iod.dirfd);  in maps__set_modules_path_dir()
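
io_dir is a perf-internal directory iterator built around a raw fd (note the open(O_DIRECTORY) feeding io_dir__init() and the close(iod.dirfd) at the end). For comparison, the equivalent traversal in plain POSIX:

    #include <dirent.h>
    #include <fcntl.h>
    #include <unistd.h>

    static int walk_dir(const char *path)
    {
            int fd = open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY);
            DIR *dir;
            struct dirent *dent;

            if (fd < 0)
                    return -1;
            dir = fdopendir(fd);            /* takes ownership of fd */
            if (!dir) {
                    close(fd);
                    return -1;
            }
            while ((dent = readdir(dir)) != NULL) {
                    if (dent->d_type == DT_DIR)
                            ;  /* recurse here, as machine.c does */
            }
            closedir(dir);                  /* also closes fd */
            return 0;
    }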
|
| synthetic-events.c |
    777  struct io_dir iod;  in __event__synthesize_thread() local
    812  io_dir__init(&iod, open(filename, O_CLOEXEC | O_DIRECTORY | O_RDONLY));  in __event__synthesize_thread()
    813  if (iod.dirfd < 0)  in __event__synthesize_thread()
    816  while ((dent = io_dir__readdir(&iod)) != NULL) {  in __event__synthesize_thread()
    858  close(iod.dirfd);  in __event__synthesize_thread()
|