/linux/drivers/staging/greybus/

audio_apbridgea.c
    in gb_audio_apbridgea_set_config():
        16  struct audio_apbridgea_set_config_request req;   (local)
        18  req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
        19  req.hdr.i2s_port = cpu_to_le16(i2s_port);
        20  req.format = cpu_to_le32(format);
        21  req.rate = cpu_to_le32(rate);
        22  req.mclk_freq = cpu_to_le32(mclk_freq);
        24  return gb_hd_output(connection->hd, &req, sizeof(req),
    in gb_audio_apbridgea_register_cport():
        33  struct audio_apbridgea_register_cport_request req;   (local)
        36  req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
        37  req.hdr.i2s_port = cpu_to_le16(i2s_port);
    [all …]

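The pattern in this excerpt recurs throughout the Greybus drivers: fill a request struct whose multi-byte fields are explicitly little-endian (cpu_to_le16()/cpu_to_le32()), then hand it to the send path. Below is a minimal user-space sketch of the same packing discipline, using glibc's htole16()/htole32() in place of the kernel helpers; the struct layout, field names, and the 0x01 type value are invented for illustration, not the real Greybus wire format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <endian.h>   /* htole16()/htole32(), user-space analogue of cpu_to_leXX() */

    /* Hypothetical wire layout: every multi-byte field is little-endian. */
    struct set_config_req {
        uint8_t  type;
        uint16_t i2s_port;   /* __le16 on the wire */
        uint32_t format;     /* __le32 on the wire */
        uint32_t rate;
    } __attribute__((packed));

    /* Convert host-order values into the fixed wire layout before sending. */
    static void pack_set_config(struct set_config_req *req,
                                uint16_t i2s_port, uint32_t format, uint32_t rate)
    {
        memset(req, 0, sizeof(*req));
        req->type = 0x01;                   /* stand-in for the SET_CONFIG type */
        req->i2s_port = htole16(i2s_port);  /* matches cpu_to_le16() in the driver */
        req->format = htole32(format);
        req->rate = htole32(rate);
    }

    int main(void)
    {
        struct set_config_req req;

        pack_set_config(&req, 3, 0x2a, 48000);
        printf("first payload byte: %#x\n", req.type);
        return 0;
    }

Packing into a fixed, explicitly ordered layout keeps the message format independent of host byte order, which is why every assignment in the driver goes through a cpu_to_leXX() helper.
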
audio_gb.c
    in gb_audio_gb_get_control():
        50  struct gb_audio_get_control_request req;   (local)
        54  req.control_id = control_id;
        55  req.index = index;
        58  &req, sizeof(req), &resp, sizeof(resp));
    in gb_audio_gb_set_control():
        72  struct gb_audio_set_control_request req;   (local)
        74  req.control_id = control_id;
        75  req.index = index;
        76  memcpy(&req.value, value, sizeof(req.value));
        79  &req, sizeof(req), NULL, 0);
    in gb_audio_gb_enable_widget():
        86  struct gb_audio_enable_widget_request req;   (local)
    [all …]

/linux/drivers/nvme/target/

admin-cmd.c
    in nvmet_execute_delete_sq():
        15  static void nvmet_execute_delete_sq(struct nvmet_req *req)   (argument)
        17  struct nvmet_ctrl *ctrl = req->sq->ctrl;
        18  u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
        22  status = nvmet_report_invalid_opcode(req);
        38  nvmet_req_complete(req, status);
    in nvmet_execute_create_sq():
        41  static void nvmet_execute_create_sq(struct nvmet_req *req)   (argument)
        43  struct nvmet_ctrl *ctrl = req->sq->ctrl;
        44  struct nvme_command *cmd = req->cmd;
        53  status = nvmet_report_invalid_opcode(req);
        80  nvmet_req_complete(req, status);
    [all …]

io-cmd-file.c
    in nvmet_file_submit_bvec():
        76  static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,   (argument)
        79  struct kiocb *iocb = &req->f.iocb;
        84  if (req->cmd->rw.opcode == nvme_cmd_write) {
        85  if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
        87  call_iter = req->ns->file->f_op->write_iter;
        90  call_iter = req->ns->file->f_op->read_iter;
        94  iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
        97  iocb->ki_filp = req->ns->file;
    in nvmet_file_io_done():
        105  struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);   (local)
        108  if (req->f.bvec != req->inline_bvec) {
    [all …]

fabrics-cmd-auth.c
    in nvmet_auth_negotiate():
        34  static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)   (argument)
        36  struct nvmet_ctrl *ctrl = req->sq->ctrl;
        41  __func__, ctrl->cntlid, req->sq->qid,
        45  req->sq->dhchap_tid = le16_to_cpu(data->t_id);
        50  if (req->sq->qid)
        54  if (nvmet_queue_tls_keyid(req->sq))
        58  if (!nvmet_queue_tls_keyid(req->sq))
        88  __func__, ctrl->cntlid, req->sq->qid);
        92  __func__, ctrl->cntlid, req->sq->qid,
        116  __func__, ctrl->cntlid, req->sq->qid);
    [all …]

zns.c
    in nvmet_execute_identify_ctrl_zns():
        69  void nvmet_execute_identify_ctrl_zns(struct nvmet_req *req)   (argument)
        71  u8 zasl = req->sq->ctrl->subsys->zasl;
        72  struct nvmet_ctrl *ctrl = req->sq->ctrl;
        87  status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
        91  nvmet_req_complete(req, status);
    in nvmet_execute_identify_ns_zns():
        94  void nvmet_execute_identify_ns_zns(struct nvmet_req *req)   (argument)
        101  if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
        102  req->error_loc = offsetof(struct nvme_identify, nsid);
        113  status = nvmet_req_find_ns(req);
        117  if (nvmet_ns_revalidate(req->ns)) {
    [all …]

io-cmd-bdev.c
    in blk_to_nvme_status():
        130  u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)   (argument)
        145  req->error_loc = offsetof(struct nvme_rw_command, length);
        149  req->error_loc = offsetof(struct nvme_rw_command, slba);
        153  req->error_loc = offsetof(struct nvme_common_command, opcode);
        157  req->error_loc = offsetof(struct nvme_rw_command, nsid);
        162  req->error_loc = offsetof(struct nvme_common_command, opcode);
        165  switch (req->cmd->common.opcode) {
        168  req->error_slba = le64_to_cpu(req->cmd->rw.slba);
        171  req->error_slba =
        172  le64_to_cpu(req->cmd->write_zeroes.slba);
    [all …]

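blk_to_nvme_status() translates a block-layer completion status into an NVMe status code and, via offsetof(), records which command field the error should be attributed to. A sketch of that mapping shape follows; the status values and the simplified command layout are invented for illustration and are not the real NVMe encodings:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for blk_status_t and a (much simplified) command layout. */
    enum blk_status { BLK_STS_OK, BLK_STS_NOSPC, BLK_STS_TARGET, BLK_STS_NOTSUPP };

    struct rw_command {
        uint8_t  opcode;
        uint32_t nsid;
        uint64_t slba;
        uint16_t length;
    };

    struct req_ctx {
        struct rw_command cmd;
        uint16_t status;     /* protocol status to return */
        uint16_t error_loc;  /* byte offset of the offending command field */
    };

    /* Same idea as blk_to_nvme_status(): translate the transport error and
     * point error_loc at the command field that caused it. */
    static void map_status(struct req_ctx *req, enum blk_status sts)
    {
        switch (sts) {
        case BLK_STS_NOSPC:
            req->status = 0x80;   /* e.g. "range error", illustrative only */
            req->error_loc = offsetof(struct rw_command, length);
            break;
        case BLK_STS_TARGET:
            req->status = 0x80;
            req->error_loc = offsetof(struct rw_command, slba);
            break;
        case BLK_STS_NOTSUPP:
            req->status = 0x01;   /* e.g. "invalid opcode", illustrative only */
            req->error_loc = offsetof(struct rw_command, opcode);
            break;
        default:
            req->status = 0;
            req->error_loc = 0;
            break;
        }
    }

    int main(void)
    {
        struct req_ctx req = { .cmd = { .opcode = 0x02 } };

        map_status(&req, BLK_STS_TARGET);
        printf("status %#x at byte offset %u\n",
               (unsigned)req.status, (unsigned)req.error_loc);
        return 0;
    }
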
/linux/drivers/s390/scsi/

zfcp_fsf.c
    in zfcp_fsf_class_not_supp():
        80  static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)   (argument)
        82  dev_err(&req->adapter->ccw_device->dev, "FCP device not "
        84  zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
        85  req->status |= ZFCP_STATUS_FSFREQ_ERROR;
    in zfcp_fsf_req_free():
        90  * @req: pointer to struct zfcp_fsf_req
        92  void zfcp_fsf_req_free(struct zfcp_fsf_req *req)   (argument)
        94  if (likely(req->pool)) {
        95  if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
        96  mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
        97  mempool_free(req, req->pool);
    [all …]

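zfcp_fsf_req_free() shows a common kernel ownership idiom: a request remembers the mempool it came from (req->pool), and the free path returns it there, falling back to plain freeing otherwise. A user-space sketch of the same idea follows, with a toy fixed-size pool standing in for the kernel's mempool_free():

    #include <stdlib.h>

    /* Toy object pool: a small freelist, overflow falls through to free(). */
    struct pool { void *slots[16]; int n; };

    static void pool_put(struct pool *p, void *obj)
    {
        if (p->n < 16)
            p->slots[p->n++] = obj;
        else
            free(obj);
    }

    struct fsf_req {
        struct pool *pool;   /* NULL when the request was a plain malloc() */
        int payload;
    };

    /* Mirrors the shape of zfcp_fsf_req_free(): the request knows where it
     * was allocated from, so the free path can return it to the right place. */
    static void req_free(struct fsf_req *req)
    {
        if (req->pool)
            pool_put(req->pool, req);
        else
            free(req);
    }

    int main(void)
    {
        struct fsf_req *req = malloc(sizeof(*req));

        if (!req)
            return 1;
        req->pool = NULL;   /* heap-allocated, so req_free() uses free() */
        req_free(req);
        return 0;
    }
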
/linux/io_uring/

rw.c
    in io_file_supports_nowait():
        38  static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)   (argument)
        41  if (req->flags & REQ_F_SUPPORT_NOWAIT)
        44  if (io_file_can_poll(req)) {
        47  return vfs_poll(req->file, &pt) & mask;
    in io_iov_buffer_select_prep():
        64  static int io_iov_buffer_select_prep(struct io_kiocb *req)   (argument)
        68  struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        73  if (io_is_compat(req->ctx))
    in io_import_vec():
        83  static int io_import_vec(int ddir, struct io_kiocb *req,   (argument)
        100  io_is_compat(req->ctx));
        104  req
    further matches (line and enclosing function; req is an argument unless marked (local)):
        110 __io_import_rw_buffer(), 132 io_import_rw_buffer(), 147 io_rw_recycle(),
        162 io_req_rw_cleanup(), 197 io_rw_alloc_async(), 225 io_prep_rw_pi(),
        252 __io_prep_rw(), 307 io_rw_do_import(), 317 io_prep_rw(), 329 io_prep_read(),
        334 io_prep_write(), 339 io_prep_rwv(), 357 io_prep_readv(), 362 io_prep_writev(),
        367 io_init_rw_fixed(), 383 io_prep_read_fixed(), 388 io_prep_write_fixed(),
        393 io_rw_import_reg_vec(), 410 io_rw_prep_reg_vec(), 420 io_prep_readv_fixed(),
        430 io_prep_writev_fixed(), 444 io_read_mshot_prep(), 464 io_readv_writev_cleanup(),
        470 io_kiocb_update_pos(), 487 io_rw_should_reissue(), 516 io_req_end_write(),
        529 io_req_io_end(), 541 __io_complete_rw_common(), 553 io_fixup_rw_res(),
        567 io_req_rw_complete(), 590 io_complete_rw() (local),
        603 io_complete_rw_iopoll() (local), 618 io_rw_done(), 649 kiocb_done(),
        685 loop_rw_iter() (local), 762 io_async_buf_func() (local), 789 io_rw_should_retry(),
        837 need_complete_io(), 843 io_rw_init_file(), 907 __io_read(), 1022 io_read(),
        1036 io_read_mshot(), 1103 io_kiocb_start_write(), 1122 io_write(),
        1218 io_read_fixed(), 1229 io_write_fixed(), 1240 io_rw_fail(),
        1248 io_uring_classic_poll(), 1265 io_hybrid_iopoll_delay(),
        1299 io_uring_hybrid_poll(), 1335 io_do_iopoll() (local), 1369 io_do_iopoll() (local)
    [all...]

poll.c
    36  struct io_kiocb *req;   (member)
    in io_poll_get_ownership_slowpath():
        73  static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)   (argument)
        82  v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
        85  return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
    in io_poll_get_ownership():
        94  static inline bool io_poll_get_ownership(struct io_kiocb *req)   (argument)
        96  if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
        97  return io_poll_get_ownership_slowpath(req);
        98  return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
    in io_poll_mark_cancelled():
        101  static void io_poll_mark_cancelled(struct io_kiocb *req)   (argument)
        103  atomic_or(IO_POLL_CANCEL_FLAG, &req
    further matches (line and enclosing function; req is an argument unless marked (local)):
        106 io_poll_get_double(), 114 io_poll_get_single(), 121 io_poll_req_insert(),
        153 io_poll_remove_entries(), 193 __io_poll_execute(), 207 io_poll_execute(),
        223 io_poll_check_events(), 313 io_poll_task_func(), 358 io_poll_cancel_req(),
        367 io_pollfree_wake(), 395 io_poll_wake() (local), 430 io_poll_double_prepare(),
        459 __io_queue_proc() (local), 522 io_poll_can_finish_inline(), 528 io_poll_add_hash(),
        543 __io_arm_poll_handler(), 645 io_req_alloc_apoll(), 670 io_arm_apoll(),
        698 io_arm_poll_handler(), 731 io_poll_remove_all() (local), 754 io_poll_find() (local),
        776 io_poll_file_find() (local), 790 io_poll_disarm(), 803 __io_poll_cancel() (local),
        846 io_poll_remove_prep(), 876 io_poll_add_prep(), 893 io_poll_add(),
        909 io_poll_remove()
    [all...]

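io_poll_get_ownership() resolves races over a poll request by treating the low bits of poll_refs as a counter: the caller whose atomic increment starts from zero becomes the owner; everyone else has merely recorded that more events arrived for the owner to pick up. A sketch of that pattern with C11 atomics — the mask width below is illustrative, not IO_POLL_REF_MASK's actual value:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define POLL_REF_MASK ((1u << 20) - 1)   /* stand-in for IO_POLL_REF_MASK */

    /* fetch_add returns the previous value: zero means we were first in, so
     * we own the request; later callers just leave a pending count behind. */
    static bool poll_get_ownership(atomic_uint *poll_refs)
    {
        return !(atomic_fetch_add(poll_refs, 1) & POLL_REF_MASK);
    }

    int main(void)
    {
        atomic_uint refs = 0;

        printf("first caller owns:  %d\n", poll_get_ownership(&refs));  /* 1 */
        printf("second caller owns: %d\n", poll_get_ownership(&refs));  /* 0 */
        return 0;
    }

The owner is then responsible for re-checking the counter before dropping ownership, which is what the slowpath and IO_POLL_REF_BIAS handling in the excerpt are for.
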
uring_cmd.c
    in io_req_uring_cleanup():
        26  static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)   (argument)
        28  struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
        29  struct io_async_cmd *ac = req->async_data;
        38  if (io_alloc_cache_put(&req->ctx->cmd_cache, ac)) {
        40  io_req_async_data_clear(req, REQ_F_NEED_CLEANUP);
    in io_uring_cmd_cleanup():
        44  void io_uring_cmd_cleanup(struct io_kiocb *req)   (argument)
        46  io_req_uring_cleanup(req, 0);
    in io_uring_try_cancel_uring_cmd():
        53  struct io_kiocb *req;   (local)
        58  hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
        60  struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
    further matches (line and enclosing function; req is an argument unless marked (local)):
        25 io_req_uring_cleanup(), 80 io_uring_cmd_del_cancelable() (local),
        104 io_uring_cmd_mark_cancelable() (local), 116 io_uring_cmd_work(),
        132 __io_uring_cmd_do_in_task() (local), 143 io_req_set_cqe32_extra(),
        157 io_uring_cmd_done() (local), 185 io_uring_cmd_prep(), 209 io_uring_cmd_sqe_copy(),
        221 io_uring_cmd(), 272 io_uring_cmd_import_fixed() (local),
        287 io_uring_cmd_import_fixed_vec() (local), 305 io_uring_cmd_issue_blocking() (local),
        313 io_cmd_poll_multishot() (local), 330 io_uring_cmd_post_mshot_cqe32() (local)
    [all...]

timeout.c
    38  static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
    in io_is_timeout_noseq():
        41  static inline bool io_is_timeout_noseq(struct io_kiocb *req)   (argument)
        43  struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        44  struct io_timeout_data *data = req->async_data;
    in io_put_req():
        49  static inline void io_put_req(struct io_kiocb *req)   (argument)
        51  if (req_ref_put_and_test(req)) {
        52  io_queue_next(req);
        53  io_free_req(req);
    in io_timeout_complete():
        71  static void io_timeout_complete(struct io_kiocb *req, io_tw_token_t tw)   (argument)
        73  struct io_timeout *timeout = io_kiocb_to_cmd(req, struc
    further matches (line and enclosing function; req is an argument unless marked (local)):
        46 io_put_req(), 68 io_timeout_complete(), 95 io_flush_killed_timeouts() (local),
        108 io_kill_timeout(), 132 io_flush_timeouts() (local), 173 io_fail_links(),
        197 io_remove_next_linked(), 205 io_disarm_next(), 231 __io_disarm_linked_timeout(),
        253 io_timeout_fn() (local), 279 io_timeout_extract() (local),
        303 io_timeout_cancel() (local), 315 io_req_task_link_timeout(),
        345 io_link_timeout_fn() (local), 394 io_linked_timeout_update() (local),
        420 io_timeout_update() (local), 437 io_timeout_remove_prep(), 477 io_timeout_remove(),
        506 __io_timeout_prep(), 576 io_timeout_prep(), 581 io_link_timeout_prep(),
        586 io_timeout(), 636 io_queue_linked_timeout(), 662 io_match_task() (local),
        690 io_kill_timeouts() (local)
    [all...]

futex.c
    28  struct io_kiocb *req;   (member)
    in __io_futex_complete():
        44  static void __io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)   (argument)
        46  hlist_del_init(&req->hash_node);
        47  io_req_task_complete(req, tw);
    in io_futex_complete():
        50  static void io_futex_complete(struct io_kiocb *req, io_tw_token_t tw)   (argument)
        52  struct io_ring_ctx *ctx = req->ctx;
        55  io_cache_free(&ctx->futex_cache, req->async_data);
        56  io_req_async_data_clear(req, 0);
        57  __io_futex_complete(req, tw);
    in io_futexv_complete():
        60  static void io_futexv_complete(struct io_kiocb *req, io_tw_token_
    further matches (line and enclosing function; req is an argument unless marked (local)):
        51 io_futex_complete(), 88 __io_futex_cancel(), 123 io_futex_prep(),
        155 io_futex_wakev_fn() (local), 168 io_futexv_prep(), 207 io_futex_wake_fn() (local),
        217 io_futexv_wait(), 272 io_futex_wait(), 319 io_futex_wake()
    [all...]

net.c
    in io_shutdown_prep():
        124  int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)   (argument)
        126  struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
        133  req->flags |= REQ_F_FORCE_ASYNC;
    in io_shutdown():
        137  int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)   (argument)
        139  struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
        145  sock = sock_from_file(req->file);
        150  io_req_set_res(req, ret, 0);
    in io_netmsg_recycle():
        167  static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)   (argument)
        169  struct io_async_msghdr *hdr = req->async_data;
        182  if (io_alloc_cache_put(&req
    further matches (line and enclosing function; req is an argument unless marked (local)):
        123 io_shutdown_prep(), 136 io_shutdown(), 166 io_netmsg_recycle(),
        187 io_msg_alloc_async(), 202 io_mshot_prep_retry(), 213 io_net_import_vec(),
        240 io_compat_msg_copy_hdr(), 291 io_msg_copy_hdr(), 344 io_sendmsg_recvmsg_cleanup(),
        351 io_send_setup(), 392 io_sendmsg_setup(), 418 io_sendmsg_prep(),
        452 io_req_msg_cleanup(), 497 io_net_kbuf_recyle(), 506 io_send_finish(),
        540 io_sendmsg(), 589 io_send_select_buffer(), 638 io_send(),
        703 io_recvmsg_mshot_prep(), 727 io_recvmsg_copy_hdr(), 747 io_recvmsg_prep_setup(),
        778 io_recvmsg_prep(), 849 io_recv_finish(), 1016 io_recvmsg(),
        1103 io_recv_buf_select(), 1176 io_recv(), 1251 io_recvzc_prep(), 1280 io_recvzc(),
        1316 io_send_zc_cleanup(), 1333 io_send_zc_prep(), 1451 io_send_zc_import(),
        1464 io_send_zc(), 1533 io_sendmsg_zc(), 1603 io_sendrecv_fail(),
        1618 io_accept_prep(), 1652 io_accept(), 1713 io_socket_prep(), 1734 io_socket(),
        1767 io_connect_prep(), 1786 io_connect(), 1838 io_bind_prep(), 1856 io_bind(),
        1874 io_listen_prep(), 1885 io_listen()
    [all...]

/linux/tools/testing/selftests/net/tcp_ao/lib/

netlink.c
    in rtattr_pack():
        73  test_print("req buf is too small: %zu < %zu", req_sz, nl_size);
    in __add_veth():
        142  } req;   (local)
        146  memset(&req, 0, sizeof(req));
        147  req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
        148  req.nh.nlmsg_type = RTM_NEWLINK;
        149  req.nh.nlmsg_flags = flags;
        150  req.nh.nlmsg_seq = seq;
        151  req.info.ifi_family = AF_UNSPEC;
        152  req.info.ifi_change = 0xFFFFFFFF;
        154  if (rtattr_pack(&req.nh, sizeof(req), IFLA_IFNAME, name, strlen(name)))
    [all …]

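__add_veth() builds an RTM_NEWLINK request by filling an nlmsghdr + ifinfomsg pair and then appending attributes with rtattr_pack(), which refuses to overflow the caller's buffer (the "req buf is too small" message at line 73). A standalone sketch of that construction follows; the helper below approximates rtattr_pack() (the selftest's version may differ in detail) and omits the actual socket send:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/rtnetlink.h>

    /* Append one rtattr to a netlink message, refusing to overflow the
     * caller's buffer — the same guard rtattr_pack() applies. */
    static int pack_attr(struct nlmsghdr *nh, size_t buf_sz,
                         unsigned short type, const void *data, size_t len)
    {
        size_t need = RTA_ALIGN(nh->nlmsg_len) + RTA_SPACE(len);
        struct rtattr *rta;

        if (buf_sz < need) {
            fprintf(stderr, "req buf is too small: %zu < %zu\n", buf_sz, need);
            return -1;
        }
        rta = (struct rtattr *)((char *)nh + RTA_ALIGN(nh->nlmsg_len));
        rta->rta_type = type;
        rta->rta_len = RTA_LENGTH(len);
        memcpy(RTA_DATA(rta), data, len);
        nh->nlmsg_len = need;
        return 0;
    }

    /* Same request shape the selftest declares inline in __add_veth(). */
    static struct {
        struct nlmsghdr nh;
        struct ifinfomsg info;
        char attrs[128];
    } req;

    int main(void)
    {
        const char *name = "veth0";   /* illustrative interface name */

        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
        req.nh.nlmsg_type = RTM_NEWLINK;
        req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
        req.info.ifi_family = AF_UNSPEC;
        req.info.ifi_change = 0xFFFFFFFF;
        return pack_attr(&req.nh, sizeof(req), IFLA_IFNAME, name, strlen(name));
    }
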
/linux/drivers/peci/

request.c
    in peci_request_data_cc():
        88  static u8 peci_request_data_cc(struct peci_request *req)   (argument)
        90  return req->rx.buf[0];
    in peci_request_status():
        95  * @req: the PECI request that contains response data with completion code
        102  int peci_request_status(struct peci_request *req)   (argument)
        104  u8 cc = peci_request_data_cc(req);
        107  dev_dbg(&req->device->dev, "ret: %#02x\n", cc);
    in peci_request_xfer():
        133  static int peci_request_xfer(struct peci_request *req)   (argument)
        135  struct peci_device *device = req->device;
        140  ret = controller->ops->xfer(controller, device->addr, req);
    in peci_request_xfer_retry():
        146  static int peci_request_xfer_retry(struct peci_request *req)   (argument)
    [all …]

/linux/crypto/

ahash.c
    32  static int ahash_def_finup(struct ahash_request *req);
    in crypto_hash_walk_first():
        102  int crypto_hash_walk_first(struct ahash_request *req,   (argument)
        105  walk->total = req->nbytes;
        111  walk->flags = req->base.flags;
        113  if (ahash_request_isvirt(req)) {
        114  walk->data = req->svirt;
        116  return req->nbytes;
        119  walk->sg = req->src;
    in prepare_shash_desc():
        162  static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req,   (argument)
        165  struct shash_desc *desc = ahash_request_ctx(req);
    [all …]

/linux/drivers/block/drbd/

drbd_req.c
    in drbd_req_new():
        26  struct drbd_request *req;   (local)
        28  req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
        29  if (!req)
        31  memset(req, 0, sizeof(*req));
        33  req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
        36  req->device = device;
        37  req->master_bio = bio_src;
        38  req->epoch = 0;
        40  drbd_clear_interval(&req->i);
        41  req->i.sector = bio_src->bi_iter.bi_sector;
    [all …]

/linux/drivers/accel/ivpu/

ivpu_jsm_msg.c
    in ivpu_jsm_register_db():
        98  struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_REGISTER_DB };   (local)
        102  req.payload.register_db.db_idx = db_id;
        103  req.payload.register_db.jobq_base = jobq_base;
        104  req.payload.register_db.jobq_size = jobq_size;
        105  req.payload.register_db.host_ssid = ctx_id;
        107  ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_REGISTER_DB_DONE, &resp,
    in ivpu_jsm_unregister_db():
        117  struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_UNREGISTER_DB };   (local)
        121  req.payload.unregister_db.db_idx = db_id;
        123  ret = ivpu_ipc_send_receive(vdev, &req, VPU_JSM_MSG_UNREGISTER_DB_DONE, &resp,
    in ivpu_jsm_get_heartbeat():
        133  struct vpu_jsm_msg req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };   (local)
    [all …]

/linux/drivers/infiniband/hw/bnxt_re/

qplib_tlv.h
    in __get_cmdq_base_opcode():
        48  static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)   (argument)
        50  if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
        51  return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
        53  return req->opcode;
    in __set_cmdq_base_opcode():
        56  static inline void __set_cmdq_base_opcode(struct cmdq_base *req,   (argument)
        59  if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
        60  ((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
        62  req->opcode = val;
    in __get_cmdq_base_cookie():
        65  static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)   (argument)
        67  if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
    [all …]

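These accessors all follow one rule: if the command buffer carries a TLV envelope and is large enough to hold a payload behind it, read or write the field through GET_TLV_DATA(); otherwise treat the buffer as the bare command. A self-contained sketch of that dual-path accessor follows — the magic-number envelope is invented, since HAS_TLV_HEADER()'s real test isn't shown in this excerpt:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented stand-ins for the driver's TLV envelope and macros. */
    #define TLV_MAGIC 0x544c56u

    struct tlv_hdr  { uint32_t magic; };
    struct cmd_base { uint32_t hdr; uint8_t opcode; };
    struct tlv_cmd  { struct tlv_hdr tlv; struct cmd_base inner; };

    #define TLV_BYTES sizeof(struct tlv_hdr)

    static int has_tlv_header(const struct tlv_cmd *buf)
    {
        return buf->tlv.magic == TLV_MAGIC;
    }

    /* Same shape as __get_cmdq_base_opcode(): only trust the envelope when
     * the buffer is actually big enough to hold a command behind it. */
    static uint8_t get_opcode(const void *buf, uint32_t size)
    {
        if (size > TLV_BYTES && has_tlv_header(buf))
            return ((const struct tlv_cmd *)buf)->inner.opcode;
        return ((const struct cmd_base *)buf)->opcode;
    }

    int main(void)
    {
        struct tlv_cmd wrapped = { .tlv = { TLV_MAGIC }, .inner = { 0, 0x42 } };

        printf("opcode: %#x\n", get_opcode(&wrapped, sizeof(wrapped)));
        return 0;
    }

Keeping both paths in one inline accessor lets the rest of the driver stay oblivious to whether a given command was TLV-wrapped.
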
/linux/fs/nfs/

pagelist.c
    37  const struct nfs_page *req;   (member)
    in nfs_page_iter_page_init():
        42  const struct nfs_page *req)   (argument)
        44  i->req = req;
    in nfs_page_iter_page_advance():
        50  const struct nfs_page *req = i->req;   (local)
        53  i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
    in nfs_page_iter_page_get():
        58  const struct nfs_page *req = i->req;   (local)
        61  if (i->count != req->wb_bytes) {
        62  size_t base = i->count + req->wb_pgbase;
        65  page = nfs_page_to_page(req, base);
    in nfs_pgheader_init():
        102  hdr->req = nfs_list_entry(mirror->pg_list.next);
    [all …]

/linux/drivers/net/ethernet/marvell/prestera/

prestera_hw.c
    in prestera_hw_build_tests():
        781  /* structure that are part of req/resp fw messages */
    in prestera_hw_port_info_get():
        1026  struct prestera_msg_port_info_req req = {   (local)
        1033  &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
    in prestera_hw_switch_mac_set():
        1046  struct prestera_msg_switch_attr_req req = {   (local)
        1050  ether_addr_copy(req.param.mac, mac);
        1053  &req.cmd, sizeof(req));
    in prestera_hw_switch_init():
        1059  struct prestera_msg_common_req req;   (local)
        1067  &req.cmd, sizeof(req),
    in prestera_hw_switch_ageing_set():
        1094  struct prestera_msg_switch_attr_req req = {   (local)
        1102  &req.cmd, sizeof(req));
    [all …]

/linux/fs/smb/server/

transport_ipc.c
    in ipc_server_config_on_startup():
        304  static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)   (argument)
        308  ksmbd_set_fd_limit(req->file_max);
        309  server_conf.flags = req->flags;
        310  server_conf.signing = req->signing;
        311  server_conf.tcp_port = req->tcp_port;
        312  server_conf.ipc_timeout = req->ipc_timeout * HZ;
        313  if (check_mul_overflow(req->deadtime, SMB_ECHO_INTERVAL,
        318  server_conf.share_fake_fscaps = req->share_fake_fscaps;
        319  ksmbd_init_domain(req->sub_auth);
        321  if (req->smb2_max_read)
    [all …]

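Note the check_mul_overflow() guard at line 313: the user-supplied deadtime is scaled by SMB_ECHO_INTERVAL only if the multiplication cannot wrap. A sketch of the same guard using __builtin_mul_overflow(), the GCC/Clang primitive the kernel macro is built on — the interval value here is a stand-in, not the real SMB_ECHO_INTERVAL:

    #include <stdio.h>

    #define ECHO_INTERVAL 60u   /* stand-in for SMB_ECHO_INTERVAL */

    /* Reject a user-supplied deadtime whose scaling would overflow, as
     * ipc_server_config_on_startup() does with check_mul_overflow().
     * Returns 0 on success, -1 (think -EINVAL) on overflow. */
    static int set_deadtime(unsigned int deadtime, unsigned long *out)
    {
        unsigned long scaled;

        if (__builtin_mul_overflow(deadtime, ECHO_INTERVAL, &scaled))
            return -1;
        *out = scaled;
        return 0;
    }

    int main(void)
    {
        unsigned long v;

        return (set_deadtime(5, &v) == 0 && v == 300) ? 0 : 1;
    }

Validating the arithmetic before storing the result means a hostile or buggy userspace daemon can't smuggle a wrapped-around timeout into server_conf.
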
/linux/net/sunrpc/

backchannel_rqst.c
    in xprt_free_allocation():
        40  static void xprt_free_allocation(struct rpc_rqst *req)   (argument)
        44  dprintk("RPC: free allocations for req= %p\n", req);
        45  WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        46  xbufp = &req->rq_rcv_buf;
        48  xbufp = &req->rq_snd_buf;
        50  kfree(req);
    in xprt_alloc_bc_req():
        78  struct rpc_rqst *req;   (local)
        81  req = kzalloc(sizeof(*req), gfp_flags);
        82  if (req == NULL)
        85  req->rq_xprt = xprt;
    [all …]

/linux/drivers/net/ethernet/marvell/octeon_ep/

octep_ctrl_net.c
    in octep_send_mbox_req():
        66  cmd = d->data.req.hdr.s.cmd;
    in octep_ctrl_net_get_link_status():
        130  struct octep_ctrl_net_h2f_req *req = &d.data.req;   (local)
        133  init_send_req(&d.msg, (void *)req, state_sz, vfid);
        134  req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
        135  req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
    in octep_ctrl_net_set_link_status():
        147  struct octep_ctrl_net_h2f_req *req = &d.data.req;   (local)
        149  init_send_req(&d.msg, req, state_sz, vfid);
        150  req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
        151  req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
        152  req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
    [all …]