/linux/drivers/staging/greybus/

    audio_apbridgea.c  (hits in gb_audio_apbridgea_set_config(), gb_audio_apbridgea_register_cport())
        struct audio_apbridgea_set_config_request req;
        req.hdr.type = AUDIO_APBRIDGEA_TYPE_SET_CONFIG;
        req.hdr.i2s_port = cpu_to_le16(i2s_port);
        req.format = cpu_to_le32(format);
        req.rate = cpu_to_le32(rate);
        req.mclk_freq = cpu_to_le32(mclk_freq);
        return gb_hd_output(connection->hd, &req, sizeof(req),
        struct audio_apbridgea_register_cport_request req;
        req.hdr.type = AUDIO_APBRIDGEA_TYPE_REGISTER_CPORT;
        req.hdr.i2s_port = cpu_to_le16(i2s_port);
        [all …]

    audio_gb.c  (hits in gb_audio_gb_get_control(), gb_audio_gb_set_control(), gb_audio_gb_enable_widget())
        struct gb_audio_get_control_request req;
        req.control_id = control_id;
        req.index = index;
        &req, sizeof(req), &resp, sizeof(resp));
        struct gb_audio_set_control_request req;
        req.control_id = control_id;
        req.index = index;
        memcpy(&req.value, value, sizeof(req.value));
        &req, sizeof(req), NULL, 0);
        struct gb_audio_enable_widget_request req;
        [all …]

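Note: both greybus audio files above follow the same request-marshalling pattern: fill a fixed-layout request struct on the stack, convert multi-byte fields to little-endian, and hand the buffer to the transport. A minimal userspace sketch of that step follows; the struct layout, field values and send_req() helper are illustrative, not the real greybus definitions.

    #define _DEFAULT_SOURCE        /* for htole16()/htole32() from <endian.h> */
    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical wire format; the real greybus requests are packed __le structs. */
    struct set_config_req {
    	uint8_t  type;
    	uint16_t i2s_port;
    	uint32_t format;
    	uint32_t rate;
    	uint32_t mclk_freq;
    } __attribute__((packed));

    /* Stand-in for the transport call (gb_hd_output() in the driver): dump the bytes. */
    static int send_req(const void *buf, size_t len)
    {
    	const uint8_t *p = buf;

    	for (size_t i = 0; i < len; i++)
    		printf("%02x ", p[i]);
    	printf("\n");
    	return 0;
    }

    int main(void)
    {
    	struct set_config_req req;

    	memset(&req, 0, sizeof(req));
    	req.type      = 0x07;              /* illustrative command id */
    	req.i2s_port  = htole16(0);
    	req.format    = htole32(0x2);      /* e.g. 16-bit PCM */
    	req.rate      = htole32(48000);
    	req.mclk_freq = htole32(12288000);

    	return send_req(&req, sizeof(req));
    }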
/linux/drivers/media/mc/

    mc-request.c  (hits in media_request_clean())
        static void media_request_clean(struct media_request *req)
        WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
        WARN_ON(req->updating_count);
        WARN_ON(req->access_count);
        list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
        req->updating_count = 0;
        req->access_count = 0;
        WARN_ON(req->num_incomplete_objects);
        req->num_incomplete_objects = 0;
        req->manual_completion = false;
        [all …]

/linux/drivers/s390/scsi/

    zfcp_fsf.c  (hits in zfcp_fsf_class_not_supp(), zfcp_fsf_req_free())
        static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req)
        dev_err(&req->adapter->ccw_device->dev, "FCP device not "
        zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1");
        req->status |= ZFCP_STATUS_FSFREQ_ERROR;
        * @req: pointer to struct zfcp_fsf_req
        void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
        if (likely(req->pool)) {
        if (likely(!zfcp_fsf_req_is_status_read_buffer(req)))
        mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
        mempool_free(req, req->pool);
        [all …]

/linux/drivers/nvme/target/

    io-cmd-file.c  (hits in nvmet_file_submit_bvec(), nvmet_file_io_done())
        static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
        struct kiocb *iocb = &req->f.iocb;
        if (req->cmd->rw.opcode == nvme_cmd_write) {
        if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA))
        call_iter = req->ns->file->f_op->write_iter;
        call_iter = req->ns->file->f_op->read_iter;
        iov_iter_bvec(&iter, rw, req->f.bvec, nr_segs, count);
        iocb->ki_filp = req->ns->file;
        struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
        if (req->f.bvec != req->inline_bvec) {
        [all …]

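Note: nvmet_file_io_done() above recovers the owning nvmet_req from the embedded kiocb via container_of(). A standalone sketch of that idiom, with made-up struct names standing in for nvmet_req and its kiocb:

    #include <stddef.h>
    #include <stdio.h>

    /* Classic container_of: walk back from a member pointer to its enclosing struct. */
    #define container_of(ptr, type, member) \
    	((type *)((char *)(ptr) - offsetof(type, member)))

    /* Hypothetical request with an embedded completion object. */
    struct completion_obj { int result; };

    struct request {
    	int id;
    	struct completion_obj iocb;   /* embedded member */
    };

    static void io_done(struct completion_obj *iocb)
    {
    	/* Only the member pointer is passed around; recover the request from it. */
    	struct request *req = container_of(iocb, struct request, iocb);

    	printf("request %d completed with %d\n", req->id, iocb->result);
    }

    int main(void)
    {
    	struct request req = { .id = 42 };

    	req.iocb.result = 0;
    	io_done(&req.iocb);
    	return 0;
    }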
    fabrics-cmd.c  (hits in nvmet_execute_prop_set(), nvmet_execute_prop_get())
        static void nvmet_execute_prop_set(struct nvmet_req *req)
        u64 val = le64_to_cpu(req->cmd->prop_set.value);
        if (!nvmet_check_transfer_len(req, 0))
        if (req->cmd->prop_set.attrib & 1) {
        req->error_loc =
        switch (le32_to_cpu(req->cmd->prop_set.offset)) {
        nvmet_update_cc(req->sq->ctrl, val);
        req->error_loc =
        nvmet_req_complete(req, status);
        static void nvmet_execute_prop_get(struct nvmet_req *req)
        [all …]

/linux/drivers/net/ethernet/marvell/octeontx2/af/

    mcs_rvu_if.c  (hits in the mbox allocation macro and rvu_mbox_handler_mcs_set_lmac_mode())
        struct _req_type *req; \
        req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
        if (!req) \
        req->hdr.sig = OTX2_MBOX_REQ_SIG; \
        req->hdr.id = _id; \
        return req; \
        struct mcs_set_lmac_mode *req,
        if (req->mcs_id >= rvu->mcs_blk_cnt)
        mcs = mcs_get_pdata(req->mcs_id);
        if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
        [all …]

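Note: the first block of hits above is a macro body; the driver expands it once per mailbox message id to generate a typed allocator that stamps the header signature and id. A reduced userspace sketch of the same code-generation trick; the message types, ids and the mbox_alloc() backing allocator are invented for illustration.

    #include <stdio.h>
    #include <stdlib.h>

    #define MBOX_REQ_SIG 0xdead

    struct msg_hdr      { unsigned short sig; unsigned short id; };
    struct port_cfg_req { struct msg_hdr hdr; int port;  };
    struct stats_req    { struct msg_hdr hdr; int clear; };

    /* Stand-in for otx2_mbox_alloc_msg_rsp(): just a zeroed heap buffer here. */
    static void *mbox_alloc(size_t req_sz)
    {
    	return calloc(1, req_sz);
    }

    /* One typed helper per message id, all generated from a single template. */
    #define MBOX_ALLOC_HELPER(_fn_name, _req_type, _id)             \
    static struct _req_type *_fn_name(void)                         \
    {                                                               \
    	struct _req_type *req = mbox_alloc(sizeof(*req));       \
    	if (!req)                                                \
    		return NULL;                                     \
    	req->hdr.sig = MBOX_REQ_SIG;                             \
    	req->hdr.id = (_id);                                     \
    	return req;                                              \
    }

    MBOX_ALLOC_HELPER(alloc_port_cfg_req, port_cfg_req, 0x100)
    MBOX_ALLOC_HELPER(alloc_stats_req,    stats_req,    0x101)

    int main(void)
    {
    	struct port_cfg_req *req = alloc_port_cfg_req();

    	if (req)
    		printf("id %#x sig %#x\n", req->hdr.id, req->hdr.sig);
    	free(req);
    	free(alloc_stats_req());
    	return 0;
    }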
/linux/io_uring/

    rw.c  (hits in io_file_supports_nowait(), io_iov_buffer_select_prep(), io_import_vec())
        static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
        if (req->flags & REQ_F_SUPPORT_NOWAIT)
        if (io_file_can_poll(req)) {
        return vfs_poll(req->file, &pt) & mask;
        static int io_iov_buffer_select_prep(struct io_kiocb *req)
        struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        if (io_is_compat(req->ctx))
        static int io_import_vec(int ddir, struct io_kiocb *req,
        io_is_compat(req->ctx));
        req->flags |= REQ_F_NEED_CLEANUP;
        [all …]

    poll.c  (hits in a struct member plus io_poll_get_ownership_slowpath(), io_poll_get_ownership(), io_poll_mark_cancelled())
        struct io_kiocb *req;
        static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
        v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
        static inline bool io_poll_get_ownership(struct io_kiocb *req)
        if (unlikely((unsigned int)atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
        return io_poll_get_ownership_slowpath(req);
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
        static void io_poll_mark_cancelled(struct io_kiocb *req)
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
        [all …]

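Note: io_poll_get_ownership() above claims a request by atomically incrementing poll_refs and treating a zero previous count (within the reference mask) as ownership. A userspace sketch of the same claim rule using C11 atomics; the mask and flag values are illustrative, not the io_uring constants.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define POLL_REF_MASK    0x000fffffu  /* low bits: reference count */
    #define POLL_CANCEL_FLAG 0x80000000u  /* high bits: state flags */

    struct poll_req {
    	atomic_uint refs;
    };

    /* Only the caller that moves the masked count from 0 to 1 gets ownership. */
    static bool poll_get_ownership(struct poll_req *req)
    {
    	return !(atomic_fetch_add(&req->refs, 1) & POLL_REF_MASK);
    }

    static void poll_mark_cancelled(struct poll_req *req)
    {
    	atomic_fetch_or(&req->refs, POLL_CANCEL_FLAG);
    }

    int main(void)
    {
    	struct poll_req req;

    	atomic_init(&req.refs, 0);
    	printf("first claim:  %d\n", poll_get_ownership(&req));  /* 1: got ownership */
    	printf("second claim: %d\n", poll_get_ownership(&req));  /* 0: already owned */
    	poll_mark_cancelled(&req);
    	return 0;
    }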
    timeout.c  (hits in __io_disarm_linked_timeout(), io_is_timeout_noseq(), io_put_req(), io_flush_killed_timeouts())
        static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
        static inline bool io_is_timeout_noseq(struct io_kiocb *req)
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data = req->async_data;
        static inline void io_put_req(struct io_kiocb *req)
        if (req_ref_put_and_test(req)) {
        io_queue_next(req);
        io_free_req(req);
        struct io_kiocb *req = tw_req.req;
        Further references: req is an argument of io_kill_timeout(), io_fail_links(), io_remove_next_linked(), io_disarm_next(), io_timeout_remove_prep(), io_timeout_remove(), __io_timeout_prep(), io_timeout_prep(), io_link_timeout_prep(), io_timeout() and io_queue_linked_timeout(), and a local in io_flush_killed_timeouts(), io_flush_timeouts(), io_timeout_fn(), io_timeout_extract(), io_timeout_cancel(), io_req_task_link_timeout(), io_link_timeout_fn(), io_linked_timeout_update(), io_timeout_update(), io_match_task() and io_kill_timeouts().
        [all …]

    futex.c  (hits in a struct member plus __io_futex_complete(), io_futex_complete(), io_futexv_complete())
        struct io_kiocb *req;
        hlist_del_init(&tw_req.req->hash_node);
        struct io_kiocb *req = tw_req.req;
        struct io_ring_ctx *ctx = req->ctx;
        io_cache_free(&ctx->futex_cache, req->async_data);
        io_req_async_data_clear(req, 0);
        struct io_kiocb *req = tw_req.req;
        struct io_futex *iof = io_kiocb_to_cmd(req, struct io_futex);
        struct io_futexv_data *ifd = req->async_data;
        io_tw_lock(req->ctx, tw);
        [all …]

    net.c  (hits in io_shutdown_prep(), io_shutdown(), io_netmsg_recycle())
        int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
        req->flags |= REQ_F_FORCE_ASYNC;
        int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
        struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
        sock = sock_from_file(req->file);
        io_req_set_res(req, ret, 0);
        static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
        struct io_async_msghdr *hdr = req->async_data;
        if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr))
        [all …]

/linux/tools/testing/selftests/net/tcp_ao/lib/

    netlink.c  (hits in rtattr_pack(), __add_veth())
        test_print("req buf is too small: %zu < %zu", req_sz, nl_size);
        } req;
        memset(&req, 0, sizeof(req));
        req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
        req.nh.nlmsg_type = RTM_NEWLINK;
        req.nh.nlmsg_flags = flags;
        req.nh.nlmsg_seq = seq;
        req.info.ifi_family = AF_UNSPEC;
        req.info.ifi_change = 0xFFFFFFFF;
        if (rtattr_pack(&req.nh, sizeof(req), IFLA_IFNAME, name, strlen(name)))
        [all …]

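Note: the selftest above builds its RTM_NEWLINK message by hand: an nlmsghdr followed by an ifinfomsg, with attributes appended by rtattr_pack(). The sketch below reconstructs that flow end to end; rtattr_pack() here is a guess at the selftest helper's shape, and the message is only printed, not sent.

    #include <linux/rtnetlink.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>

    struct link_req {
    	struct nlmsghdr  nh;
    	struct ifinfomsg info;
    	char             buf[256];   /* room for attributes */
    };

    /* Append one rtattr to the message; returns 0 on success, -1 if it won't fit. */
    static int rtattr_pack(struct nlmsghdr *nh, size_t req_sz,
    		       unsigned short type, const void *data, size_t len)
    {
    	size_t nl_size = NLMSG_ALIGN(nh->nlmsg_len) + RTA_LENGTH(len);
    	struct rtattr *rta;

    	if (req_sz < nl_size) {
    		fprintf(stderr, "req buf is too small: %zu < %zu\n", req_sz, nl_size);
    		return -1;
    	}

    	rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));
    	rta->rta_type = type;
    	rta->rta_len = RTA_LENGTH(len);
    	memcpy(RTA_DATA(rta), data, len);
    	nh->nlmsg_len = nl_size;
    	return 0;
    }

    int main(void)
    {
    	struct link_req req;
    	const char *name = "veth0";

    	memset(&req, 0, sizeof(req));
    	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
    	req.nh.nlmsg_type = RTM_NEWLINK;
    	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
    	req.nh.nlmsg_seq = 1;
    	req.info.ifi_family = AF_UNSPEC;
    	req.info.ifi_change = 0xFFFFFFFF;

    	if (rtattr_pack(&req.nh, sizeof(req), IFLA_IFNAME, name, strlen(name)))
    		return 1;

    	printf("message length: %u bytes\n", req.nh.nlmsg_len);
    	/* A real test would now send the buffer over a NETLINK_ROUTE socket. */
    	return 0;
    }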
/linux/drivers/peci/

    request.c  (hits in peci_request_data_cc(), peci_request_status(), peci_request_xfer(), peci_request_xfer_retry())
        static u8 peci_request_data_cc(struct peci_request *req)
        return req->rx.buf[0];
        * @req: the PECI request that contains response data with completion code
        int peci_request_status(struct peci_request *req)
        u8 cc = peci_request_data_cc(req);
        dev_dbg(&req->device->dev, "ret: %#02x\n", cc);
        static int peci_request_xfer(struct peci_request *req)
        struct peci_device *device = req->device;
        ret = controller->ops->xfer(controller, device->addr, req);
        static int peci_request_xfer_retry(struct peci_request *req)
        [all …]

/linux/net/handshake/

    request.c  (hits in handshake_req_hash_add(), handshake_req_destroy(), handshake_sk_destruct())
        static bool handshake_req_hash_add(struct handshake_req *req)
        &req->hr_rhash,
        static void handshake_req_destroy(struct handshake_req *req)
        if (req->hr_proto->hp_destroy)
        req->hr_proto->hp_destroy(req);
        rhashtable_remove_fast(&handshake_rhashtbl, &req->hr_rhash,
        kfree(req);
        struct handshake_req *req;
        req = handshake_req_hash_lookup(sk);
        if (!req)
        [all …]

/linux/drivers/infiniband/hw/hfi1/

    user_sdma.c  (hits in forward declarations and hfi1_user_sdma_process_request())
        static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
        static void user_sdma_free_request(struct user_sdma_request *req);
        static int check_header_template(struct user_sdma_request *req,
        static int set_txreq_header(struct user_sdma_request *req,
        static int set_txreq_header_ahg(struct user_sdma_request *req,
        struct user_sdma_request *req;
        if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) {
        iovec[idx].iov_len, sizeof(info) + sizeof(req->hdr));
        req = pq->reqs + info.comp_idx;
        req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */
        [all …]

/linux/drivers/block/drbd/

    drbd_req.c  (hits in drbd_req_new())
        struct drbd_request *req;
        req = mempool_alloc(&drbd_request_mempool, GFP_NOIO);
        if (!req)
        memset(req, 0, sizeof(*req));
        req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0)
        req->device = device;
        req->master_bio = bio_src;
        req->epoch = 0;
        drbd_clear_interval(&req->i);
        req->i.sector = bio_src->bi_iter.bi_sector;
        [all …]

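Note: drbd_req_new() above draws the request from a dedicated mempool, zeroes it, and derives the initial state from the bio. A plain-C sketch of that allocate-zero-initialise pattern, with a toy freelist standing in for the mempool; every name and constant here is illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define POOL_MAX 8
    #define RQ_WRITE 0x1

    /* Toy fixed-size object cache standing in for a kernel mempool. */
    struct pool {
    	void  *cache[POOL_MAX];
    	int    nr_free;
    	size_t obj_size;
    };

    static void *pool_alloc(struct pool *p)
    {
    	if (p->nr_free)
    		return p->cache[--p->nr_free];
    	return malloc(p->obj_size);        /* fall back to the general allocator */
    }

    static void pool_free(struct pool *p, void *obj)
    {
    	if (p->nr_free < POOL_MAX)
    		p->cache[p->nr_free++] = obj;
    	else
    		free(obj);
    }

    /* Illustrative request, loosely modelled on drbd_request. */
    struct request {
    	unsigned int  rq_state;
    	unsigned long sector;
    	unsigned int  size;
    };

    static struct request *req_new(struct pool *p, int is_write,
    			       unsigned long sector, unsigned int size)
    {
    	struct request *req = pool_alloc(p);

    	if (!req)
    		return NULL;
    	memset(req, 0, sizeof(*req));      /* start from a known-zero state */
    	req->rq_state = is_write ? RQ_WRITE : 0;
    	req->sector = sector;
    	req->size = size;
    	return req;
    }

    int main(void)
    {
    	struct pool p = { .nr_free = 0, .obj_size = sizeof(struct request) };
    	struct request *req = req_new(&p, 1, 2048, 4096);

    	if (!req)
    		return 1;
    	printf("state %#x sector %lu size %u\n", req->rq_state, req->sector, req->size);
    	pool_free(&p, req);
    	return 0;
    }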
/linux/drivers/crypto/inside-secure/

    safexcel_hash.c  (hits in safexcel_queued_len(), safexcel_context_control())
        static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
        return req->len - req->processed;
        struct safexcel_ahash_req *req,
        if (unlikely(req->digest == CONTEXT_CONTROL_DIGEST_XCM)) {
        if (req->xcbcmac)
        memcpy(ctx->base.ctxr->data, req->state, req->state_sz);
        if (!req->finish && req->xcbcmac)
        CONTEXT_CONTROL_SIZE(req->state_sz /
        CONTEXT_CONTROL_SIZE(req->state_sz /
        } else if (!req->processed) {
        [all …]

/linux/drivers/infiniband/hw/bnxt_re/

    qplib_tlv.h  (hits in __get_cmdq_base_opcode(), __set_cmdq_base_opcode(), __get_cmdq_base_cookie())
        static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
        if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
        return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
        return req->opcode;
        static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
        if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
        ((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
        req->opcode = val;
        static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
        if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
        [all …]

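Note: the qplib_tlv.h helpers above read or write a command field in one of two places: directly in the base command, or inside the payload when a TLV envelope is present. A reduced sketch of that dispatch; HAS_TLV_HEADER(), GET_TLV_DATA() and the sizes are stand-ins modelled on the driver's macros, not the real definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_FLAG_TLV  0x80
    #define TLV_BYTES     16          /* illustrative size of the TLV envelope */

    struct cmd_base {
    	uint8_t opcode;
    	uint8_t flags;
    	uint8_t payload[32];
    };

    /* Stand-ins for the driver's HAS_TLV_HEADER()/GET_TLV_DATA() macros. */
    #define HAS_TLV_HEADER(req)  ((req)->flags & CMD_FLAG_TLV)
    #define GET_TLV_DATA(req)    ((req)->payload)

    static uint8_t get_cmd_opcode(struct cmd_base *req, uint32_t size)
    {
    	/* With a TLV envelope, the real command sits in the payload. */
    	if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
    		return ((struct cmd_base *)GET_TLV_DATA(req))->opcode;

    	return req->opcode;
    }

    int main(void)
    {
    	struct cmd_base plain   = { .opcode = 0x11, .flags = 0 };
    	struct cmd_base wrapped = { .opcode = 0x00, .flags = CMD_FLAG_TLV };

    	((struct cmd_base *)wrapped.payload)->opcode = 0x22;

    	printf("plain:   %#x\n", get_cmd_opcode(&plain, sizeof(plain)));
    	printf("wrapped: %#x\n", get_cmd_opcode(&wrapped, sizeof(wrapped)));
    	return 0;
    }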
/linux/fs/nfs/

    pagelist.c  (hits in a struct member plus nfs_page_iter_page_init(), nfs_page_iter_page_advance(), nfs_page_iter_page_get(), nfs_pgheader_init())
        const struct nfs_page *req;
        const struct nfs_page *req)
        i->req = req;
        const struct nfs_page *req = i->req;
        i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
        const struct nfs_page *req = i->req;
        if (i->count != req->wb_bytes) {
        size_t base = i->count + req->wb_pgbase;
        page = nfs_page_to_page(req, base);
        hdr->req = nfs_list_entry(mirror->pg_list.next);
        [all …]

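Note: the nfs_page_iter_page_* helpers above translate a running byte offset within an nfs_page into page-index/offset pairs. A small standalone sketch of that arithmetic with simplified stand-in structures:

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Simplified stand-in for nfs_page: a byte range starting at pgbase. */
    struct page_req {
    	size_t pgbase;   /* offset of the data within its first page */
    	size_t bytes;    /* total length of the request */
    };

    struct page_iter {
    	const struct page_req *req;
    	size_t count;            /* bytes consumed so far */
    };

    static void iter_init(struct page_iter *i, const struct page_req *req)
    {
    	i->req = req;
    	i->count = 0;
    }

    /* Report page index, in-page offset and chunk length for the current position, then advance. */
    static int iter_next(struct page_iter *i, size_t *page_idx, size_t *offset, size_t *len)
    {
    	const struct page_req *req = i->req;
    	size_t base, remaining;

    	if (i->count >= req->bytes)
    		return 0;

    	base = i->count + req->pgbase;
    	*page_idx = base / PAGE_SIZE;
    	*offset = base % PAGE_SIZE;
    	remaining = req->bytes - i->count;
    	*len = PAGE_SIZE - *offset;
    	if (*len > remaining)
    		*len = remaining;
    	i->count += *len;
    	return 1;
    }

    int main(void)
    {
    	struct page_req req = { .pgbase = 3000, .bytes = 10000 };
    	struct page_iter it;
    	size_t idx, off, len;

    	iter_init(&it, &req);
    	while (iter_next(&it, &idx, &off, &len))
    		printf("page %zu offset %zu len %zu\n", idx, off, len);
    	return 0;
    }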
/linux/drivers/net/ethernet/marvell/prestera/

    prestera_hw.c  (hits in prestera_hw_build_tests(), prestera_hw_port_info_get(), prestera_hw_switch_mac_set(), prestera_hw_switch_init(), prestera_hw_switch_ageing_set())
        /* structure that are part of req/resp fw messages */
        struct prestera_msg_port_info_req req = {
        &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
        struct prestera_msg_switch_attr_req req = {
        ether_addr_copy(req.param.mac, mac);
        &req.cmd, sizeof(req));
        struct prestera_msg_common_req req;
        &req.cmd, sizeof(req),
        struct prestera_msg_switch_attr_req req = {
        &req.cmd, sizeof(req));
        [all …]

/linux/crypto/

    seqiv.c  (hits in seqiv_aead_encrypt_complete2(), seqiv_aead_encrypt_complete(), seqiv_aead_encrypt())
        static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err)
        struct aead_request *subreq = aead_request_ctx(req);
        geniv = crypto_aead_reqtfm(req);
        memcpy(req->iv, subreq->iv, crypto_aead_ivsize(geniv));
        struct aead_request *req = data;
        seqiv_aead_encrypt_complete2(req, err);
        aead_request_complete(req, err);
        static int seqiv_aead_encrypt(struct aead_request *req)
        struct crypto_aead *geniv = crypto_aead_reqtfm(req);
        struct aead_request *subreq = aead_request_ctx(req);
        [all …]

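Note: seqiv completes asynchronously: the lower layer calls back with an opaque data pointer, the callback recovers the original request, copies the IV back, then completes the caller's request. A generic plain-C sketch of that callback-with-context pattern; all types and names are invented.

    #include <stdio.h>
    #include <string.h>

    struct request;
    typedef void (*complete_fn)(struct request *req, int err);

    struct request {
    	char        iv[8];      /* caller-visible IV */
    	char        subiv[8];   /* IV used internally by the sub-request */
    	complete_fn complete;   /* caller's completion callback */
    };

    /* Post-processing done on the asynchronous path before completing. */
    static void encrypt_complete2(struct request *req, int err)
    {
    	if (!err)
    		memcpy(req->iv, req->subiv, sizeof(req->iv));
    }

    /* Callback the lower layer invokes with an opaque pointer and an error code. */
    static void encrypt_complete(void *data, int err)
    {
    	struct request *req = data;

    	encrypt_complete2(req, err);
    	req->complete(req, err);
    }

    static void caller_done(struct request *req, int err)
    {
    	printf("done, err=%d, iv[0]=%d\n", err, req->iv[0]);
    }

    int main(void)
    {
    	struct request req = { .complete = caller_done };

    	memset(req.subiv, 7, sizeof(req.subiv));
    	/* Pretend the hardware finished and invoked our callback. */
    	encrypt_complete(&req, 0);
    	return 0;
    }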
/linux/include/trace/events/

    io_uring.h  (hits in tracepoint definitions)
        * @req: pointer to a submitted request
        TP_PROTO(struct io_kiocb *req, int fd),
        TP_ARGS(req, fd),
        __field( void *, req )
        __entry->ctx = req->ctx;
        __entry->req = req;
        __entry->user_data = req->cqe.user_data;
        TP_printk("ring %p, req %p, user_data 0x%llx, fd %d",
        __entry->ctx, __entry->req, __entry->user_data, __entry->fd)
        * @req: pointer to a submitted request
        [all …]

/linux/net/sunrpc/

    backchannel_rqst.c  (hits in xprt_free_allocation(), xprt_alloc_bc_req())
        static void xprt_free_allocation(struct rpc_rqst *req)
        dprintk("RPC:        free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        xbufp = &req->rq_rcv_buf;
        xbufp = &req->rq_snd_buf;
        kfree(req);
        struct rpc_rqst *req;
        req = kzalloc_obj(*req, gfp_flags);
        if (req == NULL)
        req->rq_xprt = xprt;
        [all …]

/linux/drivers/net/ethernet/marvell/octeon_ep/

    octep_ctrl_net.c  (hits in octep_send_mbox_req(), octep_ctrl_net_get_link_status(), octep_ctrl_net_set_link_status())
        cmd = d->data.req.hdr.s.cmd;
        struct octep_ctrl_net_h2f_req *req = &d.data.req;
        init_send_req(&d.msg, (void *)req, state_sz, vfid);
        req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
        req->link.cmd = OCTEP_CTRL_NET_CMD_GET;
        struct octep_ctrl_net_h2f_req *req = &d.data.req;
        init_send_req(&d.msg, req, state_sz, vfid);
        req->hdr.s.cmd = OCTEP_CTRL_NET_H2F_CMD_LINK_STATUS;
        req->link.cmd = OCTEP_CTRL_NET_CMD_SET;
        req->link.state = (up) ? OCTEP_CTRL_NET_STATE_UP :
        [all …]