/linux/drivers/media/mc/

mc-request.c:39

static void media_request_clean(struct media_request *req)
…
	WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
	WARN_ON(req->updating_count);
	WARN_ON(req->access_count);
…
	list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
…
	req->updating_count = 0;
	req->access_count = 0;
	WARN_ON(req->num_incomplete_objects);
	req->num_incomplete_objects = 0;
	wake_up_interruptible_all(&req->poll_wait);
[all …]

/linux/drivers/s390/scsi/

zfcp_fsf.c:80

static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
…
	dev_err(&req->adapter->ccw_device->dev, "FCP device not "
…
	zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
	req->status |= ZFCP_STATUS_FSFREQ_ERROR;
…
void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
…
	if (likely(req->pool)) {
		if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
		mempool_free(req, req->pool);
…
	if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
[all …]
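
The free path above has a common shape: a pool-backed request returns its sub-buffer (here the QTCB) to that buffer's pool first, then returns itself. A minimal sketch of the same shape, with a hypothetical pool API — pool_free() and the struct layout are illustrative, not zfcp's:

	struct pool;					/* opaque stand-in */
	void pool_free(struct pool *p, void *obj);	/* assumed helper */

	struct request {
		struct pool *pool;	/* NULL when not pool-backed */
		struct pool *buf_pool;
		void *buf;		/* optional sub-buffer, like the QTCB */
	};

	static void request_free(struct request *req)
	{
		if (req->pool) {
			if (req->buf)
				pool_free(req->buf_pool, req->buf);
			pool_free(req->pool, req);	/* container freed last: it holds the pointers */
			return;
		}
		/* non-pool path elided */
	}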

/linux/drivers/nvme/target/

admin-cmd.c:15

static void nvmet_execute_delete_sq(struct nvmet_req *req)
…
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 sqid = le16_to_cpu(req->cmd->delete_queue.qid);
…
		status = nvmet_report_invalid_opcode(req);
…
	nvmet_req_complete(req, status);
…
static void nvmet_execute_create_sq(struct nvmet_req *req)
…
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_command *cmd = req->cmd;
…
		status = nvmet_report_invalid_opcode(req);
…
	nvmet_req_complete(req, status);
[all …]

io-cmd-file.c:76

static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
…
	struct kiocb *iocb = &req->f.iocb;
…
	if (req->cmd->rw.opcode == nvme_cmd_write) {
		if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
…
		call_iter = req->ns->file->f_op->write_iter;
…
		call_iter = req->ns->file->f_op->read_iter;
…
	iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
…
	iocb->ki_filp = req->ns->file;
…
	/* in nvmet_file_io_done(): */
	struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
…
	if (req->f.bvec != req->inline_bvec) {
[all …]

io-cmd-bdev.c:126

u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
…
		req->error_loc = offsetof(struct nvme_rw_command, length);
…
		req->error_loc = offsetof(struct nvme_rw_command, slba);
…
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		switch (req->cmd->common.opcode) {
…
		req->error_loc = offsetof(struct nvme_rw_command, nsid);
…
		req->error_loc = offsetof(struct nvme_common_command, opcode);
…
	switch (req->cmd->common.opcode) {
…
		req->error_slba = le64_to_cpu(req->cmd->rw.slba);
…
		req->error_slba =
[all …]

fabrics-cmd.c:10

static void nvmet_execute_prop_set(struct nvmet_req *req)
…
	u64 val = le64_to_cpu(req->cmd->prop_set.value);
…
	if (!nvmet_check_transfer_len(req, 0))
…
	if (req->cmd->prop_set.attrib & 1) {
		req->error_loc =
…
	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
…
		nvmet_update_cc(req->sq->ctrl, val);
…
		req->error_loc =
…
	nvmet_req_complete(req, status);
…
static void nvmet_execute_prop_get(struct nvmet_req *req)
[all …]
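
The dispatch shape above restates cleanly on its own: validate the command, reject unsupported attributes, then switch on the property offset. A minimal userspace model assuming only what the excerpt shows — REG_CC is the NVMe controller-configuration register offset (0x14); the errno-style return values are illustrative:

	#include <stdint.h>

	#define REG_CC 0x14	/* NVMe controller configuration register */

	static int prop_set(uint32_t attrib, uint32_t offset, uint64_t val,
			    uint32_t *cc)
	{
		if (attrib & 1)		/* matches the attrib check above */
			return -22;	/* like -EINVAL */

		switch (offset) {
		case REG_CC:
			*cc = (uint32_t)val;	/* mirrors nvmet_update_cc() */
			return 0;
		default:
			return -22;	/* unknown property offset */
		}
	}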

/linux/drivers/staging/greybus/

audio_apbridgea.c:16

	/* in gb_audio_apbridgea_set_config(): */
	struct audio_apbridgea_set_config_request req;
…
	req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
	req.hdr.i2s_port = cpu_to_le16(i2s_port);
	req.format = cpu_to_le32(format);
	req.rate = cpu_to_le32(rate);
	req.mclk_freq = cpu_to_le32(mclk_freq);
…
	return gb_hd_output(connection->hd, &req, sizeof(req),
…
	/* in gb_audio_apbridgea_register_cport(): */
	struct audio_apbridgea_register_cport_request req;
…
	req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
	req.hdr.i2s_port = cpu_to_le16(i2s_port);
[all …]
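
Both helpers follow one request-building pattern: fill a packed, fixed-endianness struct field by field, then hand the buffer to the transport. A self-contained userspace sketch of that pattern — the struct layout and opcode value are hypothetical, not the greybus wire format:

	#include <endian.h>
	#include <stdint.h>
	#include <string.h>

	struct set_config_req {
		uint8_t  type;
		uint16_t i2s_port;
		uint32_t format;
		uint32_t rate;
	} __attribute__((packed));

	static void build_set_config(struct set_config_req *req,
				     uint16_t i2s_port, uint32_t format,
				     uint32_t rate)
	{
		memset(req, 0, sizeof(*req));
		req->type = 0x01;		/* hypothetical opcode */
		req->i2s_port = htole16(i2s_port);
		req->format = htole32(format);
		req->rate = htole32(rate);
	}

Storing fields via htole16()/htole32() keeps the wire format stable regardless of host byte order, which is what the cpu_to_le*() calls do in the driver.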

audio_gb.c:50

	/* in gb_audio_gb_get_control(): */
	struct gb_audio_get_control_request req;
…
	req.control_id = control_id;
	req.index = index;
…
		&req, sizeof(req), &resp, sizeof(resp));
…
	/* in gb_audio_gb_set_control(): */
	struct gb_audio_set_control_request req;
…
	req.control_id = control_id;
	req.index = index;
	memcpy(&req.value, value, sizeof(req.value));
…
		&req, sizeof(req), NULL, 0);
…
	/* in gb_audio_gb_enable_widget(): */
	struct gb_audio_enable_widget_request req;
[all …]

/linux/drivers/block/drbd/

drbd_req.c:26

	/* in drbd_req_new(): */
	struct drbd_request *req;
…
	req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
	if (!req)
…
	memset(req, 0, sizeof(*req));
…
	req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
…
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;
…
	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
[all …]

/linux/io_uring/

rw.c:37

static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
…
	if (req->flags & REQ_F_SUPPORT_NOWAIT)
…
	if (io_file_can_poll(req)) {
…
		return vfs_poll(req->file, &pt) & mask;
…
static int io_iov_buffer_select_prep(struct io_kiocb *req)
…
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
…
	if (req->ctx->compat)
…
static int __io_import_iovec(int ddir, struct io_kiocb *req,
…
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
[all …]

timeout.c:38

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
…
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
…
static inline void io_put_req(struct io_kiocb *req)
…
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
…
static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
…
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
[all …]
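
io_put_req() shows the classic put-and-test idiom: whichever caller drops the last reference performs the teardown. A sketch of the same idiom with C11 atomics rather than io_uring's req_ref helpers:

	#include <stdatomic.h>
	#include <stdbool.h>

	static bool ref_put_and_test(atomic_int *refs)
	{
		/* fetch_sub returns the old value; 1 means we held the last ref */
		return atomic_fetch_sub_explicit(refs, 1,
						 memory_order_acq_rel) == 1;
	}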

futex.c:31

	struct io_kiocb *req;	/* struct member */
…
static void __io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts)
…
	req->async_data = NULL;
	hlist_del_init(&req->hash_node);
	io_req_task_complete(req, ts);
…
static void io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts)
…
	struct io_futex_data *ifd = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
…
	__io_futex_complete(req, ts);
…
static void io_futexv_complete(struct io_kiocb *req, struct io_tw_state *ts)
[all …]

/linux/drivers/net/ethernet/marvell/octeontx2/af/

mcs_rvu_if.c:21

	struct _req_type *req;					\
…
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(	\
…
	if (!req)						\
…
	req->hdr.sig = OTX2_MBOX_REQ_SIG;			\
	req->hdr.id = _id;					\
	return req;						\
…
	/* in rvu_mbox_handler_mcs_set_lmac_mode(): */
				struct mcs_set_lmac_mode *req,
…
	if (req->mcs_id >= rvu->mcs_blk_cnt)
…
	mcs = mcs_get_pdata(req->mcs_id);
…
	if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
[all …]
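
The first hit is the body of a code-generating macro: one macro stamps out a typed mailbox-message constructor per message id. A reduced model of the pattern — alloc_msg(), REQ_SIG, and the header layout are stand-ins, not the octeontx2 mbox API:

	struct msg_hdr { unsigned short sig; unsigned short id; };

	void *alloc_msg(unsigned long size, int id);	/* assumed helper */

	#define REQ_SIG 0xdead	/* placeholder signature */

	#define DEFINE_REQ_ALLOC(_fn, _req_type, _id)			\
	static struct _req_type *_fn(void)				\
	{								\
		struct _req_type *req = alloc_msg(sizeof(*req), _id);	\
		if (!req)						\
			return NULL;					\
		req->hdr.sig = REQ_SIG;					\
		req->hdr.id = _id;					\
		return req;						\
	}

	/* Example instantiation for a hypothetical message type: */
	struct mcs_req { struct msg_hdr hdr; int lmac_id; };
	DEFINE_REQ_ALLOC(alloc_mcs_req, mcs_req, 0x42)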

/linux/drivers/infiniband/hw/hfi1/

user_sdma.c:38

static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
…
static void user_sdma_free_request(struct user_sdma_request *req);
static int check_header_template(struct user_sdma_request *req,
…
static int set_txreq_header(struct user_sdma_request *req,
…
static int set_txreq_header_ahg(struct user_sdma_request *req,
…
	/* in hfi1_user_sdma_process_request(): */
	struct user_sdma_request *req;
…
	if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
…
		iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
…
	req = pq->reqs + info.comp_idx;
	req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
[all …]

/linux/crypto/

chacha20poly1305.c:42

	struct ahash_request req;	/* must be last member */
…
	struct skcipher_request req;	/* must be last member */
…
static inline void async_done_continue(struct aead_request *req, int err,
…
	struct chachapoly_req_ctx *rctx = aead_request_ctx(req);
…
		err = cont(req);
…
	aead_request_complete(req, err);
…
static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb)
…
	struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
…
	memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv,
…
static int poly_verify_tag(struct aead_request *req)
[all …]

seqiv.c:21

static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
…
	struct aead_request *subreq = aead_request_ctx(req);
…
	geniv = crypto_aead_reqtfm(req);
	memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));
…
	/* in seqiv_aead_encrypt_complete(): */
	struct aead_request *req = data;
…
	seqiv_aead_encrypt_complete2(req, err);
	aead_request_complete(req, err);
…
static int seqiv_aead_encrypt(struct aead_request *req)
…
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
…
	struct aead_request *subreq = aead_request_ctx(req);
[all …]

gcm.c:60

	int (*complete)(struct aead_request *req, u32 flags);	/* struct member */
…
static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
…
	/* in crypto_gcm_reqctx(): */
				struct aead_request *req)
…
	unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req));
…
	return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1);
…
	/* member of an on-stack struct in crypto_gcm_setkey(): */
		struct skcipher_request req;
…
	skcipher_request_set_tfm(&data->req, ctr);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
…
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
…
	err = crypto_wait_req(crypto_skcipher_encrypt(&data->req),
[all …]

/linux/tools/testing/selftests/net/tcp_ao/lib/

netlink.c:142

	/* in __add_veth(): */
	} req;
…
	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
	req.nh.nlmsg_type = RTM_NEWLINK;
	req.nh.nlmsg_flags = flags;
	req.nh.nlmsg_seq = seq;
	req.info.ifi_family = AF_UNSPEC;
	req.info.ifi_change = 0xFFFFFFFF;
…
	if (rtattr_pack(&req.nh, sizeof(req), IFLA_IFNAME, name, strlen(name)))
…
	if (rtattr_pack(&req.nh, sizeof(req), IFLA_NET_NS_FD, &ns_a, sizeof(ns_a)))
[all …]
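
rtattr_pack(), whose implementation is not shown in the excerpt, has a well-defined job: append one attribute after the current end of the message and grow nlmsg_len, honoring netlink's alignment rules. A sketch of such a helper using only the standard uapi macros (the function itself is illustrative, not the selftest's code):

	#include <string.h>
	#include <linux/netlink.h>
	#include <linux/rtnetlink.h>

	static int add_rtattr(struct nlmsghdr *nh, size_t maxlen,
			      unsigned short type, const void *data, size_t len)
	{
		struct rtattr *rta;

		if (NLMSG_ALIGN(nh->nlmsg_len) + RTA_SPACE(len) > maxlen)
			return -1;	/* no room left in the request buffer */

		rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));
		rta->rta_type = type;
		rta->rta_len = RTA_LENGTH(len);
		memcpy(RTA_DATA(rta), data, len);
		nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_SPACE(len);
		return 0;
	}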

/linux/drivers/peci/

request.c:88

static u8 peci_request_data_cc(struct peci_request *req)
…
	return req->rx.buf[0];
…
int peci_request_status(struct peci_request *req)
…
	u8 cc = peci_request_data_cc(req);
…
	dev_dbg(&req->device->dev, "ret: %#02x\n", cc);
…
static int peci_request_xfer(struct peci_request *req)
…
	struct peci_device *device = req->device;
…
	ret = controller->ops->xfer(controller, device->addr, req);
…
static int peci_request_xfer_retry(struct peci_request *req)
…
	struct peci_device *device = req->device;
[all …]
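
peci_request_data_cc() exposes the completion code carried in the first byte of the receive buffer, and peci_request_status() maps it to an errno-style result. A hedged sketch of that mapping — the code points and meanings below are placeholders, not values from the PECI spec tables:

	static int status_from_cc(unsigned char cc)
	{
		switch (cc) {
		case 0x40:		/* hypothetical "command passed" */
			return 0;
		case 0x80:		/* hypothetical "busy, retry later" */
			return -11;	/* mirrors -EAGAIN */
		default:
			return -5;	/* mirrors -EIO */
		}
	}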

/linux/net/sunrpc/

backchannel_rqst.c:40

static void xprt_free_allocation(struct rpc_rqst *req)
…
	dprintk("RPC: free allocations for req= %p\n", req);
	WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
	xbufp = &req->rq_rcv_buf;
…
	xbufp = &req->rq_snd_buf;
…
	kfree(req);
…
	/* in xprt_alloc_bc_req(): */
	struct rpc_rqst *req;
…
	req = kzalloc(sizeof(*req), gfp_flags);
	if (req == NULL)
…
	req->rq_xprt = xprt;
[all …]

/linux/drivers/clk/sunxi/

clk-sunxi.c:33

static void sun4i_get_pll1_factors(struct factors_request *req)
…
	div = req->rate / 6000000;
	req->rate = 6000000 * div;
…
	req->m = 0;
…
	if (req->rate >= 768000000 || req->rate == 42000000 ||
	    req->rate == 54000000)
		req->k = 1;
…
		req->k = 0;
…
		req->p = 3;
…
		req->p = 2;
[all …]
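
The visible part of the factor selection restates as a standalone function: quantize the requested rate to a 6 MHz step, clear m, and pick k from the thresholds shown. The conditions choosing p are elided in the excerpt, so this sketch leaves p to the caller:

	struct pll1_factors { unsigned char m, k; };

	static unsigned long pll1_round_and_pick(unsigned long rate,
						 struct pll1_factors *f)
	{
		rate = (rate / 6000000) * 6000000;	/* quantize to 6 MHz steps */

		f->m = 0;
		if (rate >= 768000000 || rate == 42000000 || rate == 54000000)
			f->k = 1;
		else
			f->k = 0;

		return rate;	/* the rate the factors will actually produce */
	}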

/linux/drivers/infiniband/hw/bnxt_re/

qplib_tlv.h:48

static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
…
	if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
		return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
…
	return req->opcode;
…
static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
…
	if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
		((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
…
		req->opcode = val;
…
static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
…
	if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
[all …]
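
All of these accessors share one indirection rule: if the command is wrapped in a TLV header, the real command block sits behind it, so every getter and setter first resolves the effective base pointer. A toy model with hypothetical types (the flag argument stands in for HAS_TLV_HEADER, and the fixed member offset for GET_TLV_DATA):

	struct cmd_base { unsigned char opcode; unsigned short cookie; };

	struct tlv_wrapped {
		unsigned short tlv_sig;	/* marks the wrapper */
		struct cmd_base cmd;	/* the real command behind it */
	};

	static unsigned char get_opcode(void *req, int has_tlv_header)
	{
		if (has_tlv_header)
			return ((struct tlv_wrapped *)req)->cmd.opcode;
		return ((struct cmd_base *)req)->opcode;
	}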

/linux/drivers/s390/cio/

ccwreq.c:43

	/* in ccwreq_next_path(): */
	struct ccw_request *req = &cdev->private->req;
…
	if (!req->singlepath) {
		req->mask = 0;
…
	req->retries = req->maxretries;
	req->mask = lpm_adjust(req->mask >> 1, req->lpm);
…
	return req->mask;
…
	/* in ccwreq_stop(): */
	struct ccw_request *req = &cdev->private->req;
…
	if (req->done)
…
	req->done = 1;
…
	if (rc && rc != -ENODEV && req->drc)
[all …]
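
ccwreq_next_path() rotates a single-bit path mask rightward and re-arms the retry budget for each new path; lpm_adjust() (not shown) skips bits absent from the logical-path mask. A standalone stand-in for that rotation, with illustrative names:

	static unsigned char next_path_mask(unsigned char mask, unsigned char lpm)
	{
		mask >>= 1;
		while (mask && !(mask & lpm))
			mask >>= 1;	/* skip paths not in the logical-path mask */
		return mask;		/* 0: all paths exhausted */
	}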

/linux/fs/nfs/

pagelist.c:37

	const struct nfs_page *req;	/* struct member */
…
	/* in nfs_page_iter_page_init(): */
			const struct nfs_page *req)
…
	i->req = req;
…
	/* in nfs_page_iter_page_advance(): */
	const struct nfs_page *req = i->req;
…
	i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
…
	/* in nfs_page_iter_page_get(): */
	const struct nfs_page *req = i->req;
…

Further matches in this file (signatures and locals only):

	nfs_page_set_headlock(struct nfs_page *req)
	nfs_page_clear_headlock(struct nfs_page *req)
	nfs_page_group_lock(struct nfs_page *req)
	nfs_page_group_unlock(struct nfs_page *req)
	nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
	nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
	nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
	nfs_page_group_destroy(): struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	nfs_page_create(): local struct nfs_page *req;
	nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
	nfs_page_assign_page(struct nfs_page *req, struct page *page)
	nfs_create_subreq(struct nfs_page *req, unsigned int pgbase, unsigned int offset, unsigned int count)
	nfs_unlock_request(struct nfs_page *req)
	nfs_unlock_and_release_request(struct nfs_page *req)
	nfs_clear_request(struct nfs_page *req)
	nfs_free_request(struct nfs_page *req)
	nfs_release_request(struct nfs_page *req)
	nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req)
	nfs_pgio_rpcsetup(): struct nfs_page *req = hdr->req;
	nfs_generic_pgio(): local struct nfs_page *req;
	nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
	nfs_page_is_contiguous(const struct nfs_page *prev, const struct nfs_page *req)
	nfs_coalesce_size(struct nfs_page *prev, struct nfs_page *req, struct nfs_pageio_descriptor *pgio)
	nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req)
	nfs_pageio_cleanup_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req)
	__nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req)
	nfs_do_recoalesce(): local struct nfs_page *req;
	nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc, struct nfs_page *req)
	nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req)
	nfs_pageio_resend(): struct nfs_page *req = nfs_list_entry(pages.next);
[all …]

/linux/drivers/macintosh/

via-pmu.c:197

static int pmu_send_request(struct adb_request *req, int sync);
…
int pmu_polled_request(struct adb_request *req);
…
	/* in init_pmu(): */
	struct adb_request req;
…
	pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
…
	while (!req.complete) {
…
	pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2);
	while (!req.complete)
…
	pmu_request(&req, NULL, 1, PMU_GET_VERSION);
	pmu_wait_complete(&req);
	if (req.reply_len > 0)
[all …]