/linux/net/sunrpc/xprtrdma/
svc_rdma_recvfrom.c
     87: * from rqstp::rq_pages into ctxt::pages. The consumed elements of
    in svc_rdma_recv_ctxt_alloc():
        122: struct svc_rdma_recv_ctxt *ctxt;
        126: ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node);
        127: if (!ctxt)
        137: svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid);
        138: pcl_init(&ctxt->rc_call_pcl);
        139: pcl_init(&ctxt->rc_read_pcl);
        140: pcl_init(&ctxt->rc_write_pcl);
        141: pcl_init(&ctxt ...
    164: svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt)
    179: struct svc_rdma_recv_ctxt *ctxt;    (local in svc_rdma_recv_ctxts_destroy)
    196: struct svc_rdma_recv_ctxt *ctxt;    (local in svc_rdma_recv_ctxt_get)
    215: svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt)
    243: struct svc_rdma_recv_ctxt *ctxt = vctxt;    (local in svc_rdma_release_ctxt)
    255: struct svc_rdma_recv_ctxt *ctxt;    (local in svc_rdma_refresh_recvs)
    311: struct svc_rdma_recv_ctxt *ctxt;    (local in svc_rdma_post_recvs)
    332: struct svc_rdma_recv_ctxt *ctxt;    (local in svc_rdma_wc_receive)
    385: struct svc_rdma_recv_ctxt *ctxt;    (local in svc_rdma_flush_recv_queues)
    398: svc_rdma_build_arg_xdr(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *ctxt)
    599: svc_rdma_get_inv_rkey(struct svcxprt_rdma *rdma, struct svc_rdma_recv_ctxt *ctxt)
    779: svc_rdma_read_complete_one(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *ctxt)
    816: svc_rdma_read_complete_multiple(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *ctxt)
    835: svc_rdma_read_complete_pzrc(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *ctxt)
    849: svc_rdma_read_complete(struct svc_rqst *rqstp, struct svc_rdma_recv_ctxt *ctxt)
    921: struct svc_rdma_recv_ctxt *ctxt;    (local in svc_rdma_recvfrom)
    [more matches elided]
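The recvfrom matches sketch a single pattern: each receive context is allocated zeroed (kzalloc_node(), on the transport's NUMA node), its chunk lists are initialized with pcl_init(), and the context then circulates through get/put/destroy helpers. A minimal userspace model of that shape, with hypothetical stand-in types and plain calloc() in place of the kernel allocator:

    /* Stand-in model of the receive-context setup; none of these
     * types are the kernel's. calloc() models kzalloc_node(). */
    #include <stdio.h>
    #include <stdlib.h>

    struct pcl {                 /* stands in for struct svc_rdma_pcl */
        struct pcl *next;
    };

    struct recv_ctxt {           /* stands in for struct svc_rdma_recv_ctxt */
        struct pcl call_pcl, read_pcl, write_pcl, reply_pcl;
    };

    static void pcl_init(struct pcl *pcl)
    {
        pcl->next = NULL;        /* an empty chunk list */
    }

    static struct recv_ctxt *recv_ctxt_alloc(void)
    {
        struct recv_ctxt *ctxt = calloc(1, sizeof(*ctxt));

        if (!ctxt)
            return NULL;
        pcl_init(&ctxt->call_pcl);
        pcl_init(&ctxt->read_pcl);
        pcl_init(&ctxt->write_pcl);
        pcl_init(&ctxt->reply_pcl);
        return ctxt;
    }

    int main(void)
    {
        struct recv_ctxt *ctxt = recv_ctxt_alloc();

        printf("ctxt %s\n", ctxt ? "allocated" : "failed");
        free(ctxt);
        return 0;
    }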
svc_rdma_sendto.c
    in svc_rdma_send_ctxt_alloc():
        120: struct svc_rdma_send_ctxt *ctxt;
        125: ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges),
        127: if (!ctxt)
        137: svc_rdma_send_cid_init(rdma, &ctxt->sc_cid);
        139: ctxt->sc_rdma = rdma;
        140: ctxt->sc_send_wr.next = NULL;
        141: ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
        142: ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
        143: ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
        144: ctxt->sc_cqe.done = svc_rdma_wc_send;
    [more matches elided]
svc_rdma_rw.c
    in svc_rdma_get_rw_ctxt():
        59: struct svc_rdma_rw_ctxt *ctxt;
        66: ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node);
        68: ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, first_sgl_nents),
        70: if (!ctxt)
        73: INIT_LIST_HEAD(&ctxt->rw_list);
        74: ctxt->rw_first_sgl_nents = first_sgl_nents;
        77: ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl;
        78: if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges,
        79:                            ctxt->rw_sg_table.sgl,
        82: return ctxt;
    [more matches elided]
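svc_rdma_get_rw_ctxt() above shows the recycle-or-allocate idiom: pop a cached context off a lock-free llist first, and only fall back to kmalloc_node() (sized with struct_size() for the trailing SGL array) when the list is empty. A single-threaded sketch of the same shape, with stand-in types and an ordinary pointer chain instead of a real llist:

    #include <stddef.h>
    #include <stdlib.h>

    struct rw_ctxt {                   /* hypothetical stand-in type */
        struct rw_ctxt *free_next;     /* models the llist node */
        size_t first_sgl_nents;
    };

    static struct rw_ctxt *free_list;  /* single-threaded model of the llist */

    static struct rw_ctxt *get_rw_ctxt(size_t nents)
    {
        struct rw_ctxt *ctxt = free_list;

        if (ctxt) {
            /* fast path: reuse a cached context (the real code also
             * checks that its first SGL is large enough) */
            free_list = ctxt->free_next;
        } else {
            /* slow path: allocate fresh */
            ctxt = malloc(sizeof(*ctxt));
            if (!ctxt)
                return NULL;
            ctxt->first_sgl_nents = nents;
        }
        return ctxt;
    }

    static void put_rw_ctxt(struct rw_ctxt *ctxt)
    {
        ctxt->free_next = free_list;   /* recycle instead of freeing */
        free_list = ctxt;
    }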
/linux/drivers/net/ethernet/intel/ice/
ice_vsi_vlan_lib.c
    in ice_vsi_manage_vlan_insertion():
        94: struct ice_vsi_ctx *ctxt;
        97: ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        98: if (!ctxt)
        105: ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL;
        108: ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags &
        111: ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
        113: err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
        120: vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
        122: kfree(ctxt);
    134: struct ice_vsi_ctx *ctxt;    (local in ice_vsi_manage_vlan_stripping)
    [more matches elided]
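The ice driver's VLAN helpers all use one update idiom: allocate a scratch ice_vsi_ctx, fill in only the section being changed, mark that section in valid_sections, send it to firmware with ice_update_vsi(), and on success mirror the accepted values back into the driver's cached copy. A sketch of that flow under invented types and flag values (device_update_vsi() stands in for the firmware call):

    #include <stdint.h>
    #include <stdlib.h>

    #define PROP_VLAN_VALID  0x0001    /* hypothetical section bit */
    #define VLAN_TX_MODE_ALL 0x03      /* hypothetical flag value */

    struct vsi_info {
        uint16_t valid_sections;
        uint8_t  inner_vlan_flags;
    };

    struct vsi {
        struct vsi_info info;          /* driver's cached copy */
    };

    /* stand-in for ice_update_vsi(); assume 0 on success */
    static int device_update_vsi(struct vsi_info *info)
    {
        (void)info;
        return 0;
    }

    static int manage_vlan_insertion(struct vsi *vsi)
    {
        struct vsi_info *ctxt = calloc(1, sizeof(*ctxt));
        int err;

        if (!ctxt)
            return -1;
        ctxt->inner_vlan_flags = VLAN_TX_MODE_ALL;
        ctxt->valid_sections = PROP_VLAN_VALID;  /* only this section applies */
        err = device_update_vsi(ctxt);
        if (!err)                                /* mirror accepted values */
            vsi->info.inner_vlan_flags = ctxt->inner_vlan_flags;
        free(ctxt);
        return err;
    }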
ice_lib.c
    in ice_vsi_delete_from_hw():
        283: struct ice_vsi_ctx *ctxt;
        287: ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        288: if (!ctxt)
        292: ctxt->vf_num = vsi->vf->vf_id;
        293: ctxt->vsi_num = vsi->vsi_num;
        295: memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
        297: status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
        302: kfree(ctxt);
    932: static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
    936: memset(&ctxt->info, 0, sizeof(ctxt->info));    (in ice_set_dflt_vsi_ctx)
    [more matches elided]
/linux/kernel/printk/
nbcon.c
    202: static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq)
    in nbcon_seq_try_update():
        204: unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq);
        205: struct console *con = ctxt->console;
        209: ctxt->seq = new_seq;
        211: ctxt->seq = nbcon_seq_read(con);
    237: static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt,
    in nbcon_context_try_acquire_direct():
        241: struct console *con = ctxt->console;
        255: if (ctxt->prio <= cur->prio || ctxt->prio <= cur->req_prio)
        268: new.prio = ctxt->prio;
    335: static int nbcon_context_try_acquire_requested(struct nbcon_context *ctxt,
    [more matches elided]
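The check at line 255 is the heart of nbcon ownership handover: a context may take the console directly only when its priority strictly exceeds both the current owner's priority and any pending request. A stripped-down model of just that gate (the real code evaluates it inside a cmpxchg loop on the console state word):

    /* Hypothetical stand-in for the nbcon console state. */
    struct con_state {
        int prio;       /* current owner's priority */
        int req_prio;   /* priority of an outstanding handover request */
    };

    enum { PRIO_NORMAL = 0, PRIO_EMERGENCY = 1, PRIO_PANIC = 2 };

    static int try_acquire_direct(int my_prio, const struct con_state *cur)
    {
        /* equal or higher-priority owner/request wins; we must wait */
        if (my_prio <= cur->prio || my_prio <= cur->req_prio)
            return -1;
        return 0;       /* caller may now install itself as owner */
    }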
/linux/fs/bcachefs/
move.c
    in move_free():
        84: struct moving_context *ctxt = io->write.ctxt;
        91: mutex_lock(&ctxt->lock);
        93: wake_up(&ctxt->wait);
        94: mutex_unlock(&ctxt->lock);
    in move_write_done():
        102: struct moving_context *ctxt = io->write.ctxt;
        105: ctxt->write_error = true;
        107: atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
        108: atomic_dec(&io->write.ctxt->write_ios);
        110: closure_put(&ctxt->cl);
    129: closure_get(&io->write.ctxt->cl);    (in move_write)
    [more matches elided]
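The move.c matches trace one data-move IO's lifetime: the issue path takes a closure reference and bumps the in-flight sector and IO counters, and the completion path reverses both and drops the reference, waking waiters on the last put. A single-threaded model of that accounting, with plain integers standing in for bcachefs's closures and atomics:

    #include <assert.h>

    /* Hypothetical stand-in for struct moving_context. */
    struct moving_context {
        int refs;               /* models the closure refcount */
        long write_sectors;     /* sectors currently in flight */
        int write_ios;          /* IOs currently in flight */
    };

    static void move_write(struct moving_context *ctxt, long sectors)
    {
        ctxt->refs++;           /* closure_get() before issuing the IO */
        ctxt->write_sectors += sectors;
        ctxt->write_ios++;
    }

    static void move_write_done(struct moving_context *ctxt, long sectors)
    {
        ctxt->write_sectors -= sectors;
        ctxt->write_ios--;
        ctxt->refs--;           /* closure_put(); last put wakes waiters */
    }

    int main(void)
    {
        struct moving_context ctxt = { 0 };

        move_write(&ctxt, 128);
        move_write_done(&ctxt, 128);
        assert(ctxt.refs == 0 && ctxt.write_ios == 0);
        return 0;
    }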
movinggc.c
    111: static void move_buckets_wait(struct moving_context *ctxt,
    in move_buckets_wait():
        120: move_ctxt_wait_event(ctxt, !atomic_read(&i->count));
        138: bch2_trans_unlock_long(ctxt->trans);
    149: static int bch2_copygc_get_buckets(struct moving_context *ctxt,
    in bch2_copygc_get_buckets():
        153: struct btree_trans *trans = ctxt->trans;
        159: move_buckets_wait(ctxt, buckets_in_flight, false);
    207: static int bch2_copygc(struct moving_context *ctxt,
    in bch2_copygc():
        211: struct btree_trans *trans = ctxt->trans;
        218: u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
        221: ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
    [more matches elided]
rebalance.c
    196: static int do_rebalance_extent(struct moving_context *ctxt,
    in do_rebalance_extent():
        200: struct btree_trans *trans = ctxt->trans;
        209: ctxt->stats = &r->work_stats;
        223: atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
        232: ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
        236: bch2_move_ctxt_wait_for_io(ctxt);
    274: static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
    in do_rebalance_scan():
        276: struct btree_trans *trans = ctxt->trans;
        281: ctxt->stats = &r->scan_stats;
        293: ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
    [more matches elided]
/linux/drivers/net/wireless/intel/iwlwifi/mvm/
phy-ctxt.c
    69: static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
    in iwl_mvm_phy_ctxt_cmd_hdr():
        73: cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
        74:                                                     ctxt->color));
    79: struct iwl_mvm_phy_ctxt *ctxt,    (argument of iwl_mvm_phy_ctxt_set_rxchain)
    97: if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) {    (in iwl_mvm_phy_ctxt_set_rxchain)
    117: struct iwl_mvm_phy_ctxt *ctxt,    (argument of iwl_mvm_phy_ctxt_cmd_data_v1)
    128: iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info,    (in iwl_mvm_phy_ctxt_cmd_data_v1)
    138: struct iwl_mvm_phy_ctxt *ctxt,    (argument of iwl_mvm_phy_ctxt_cmd_data)
    151: iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info,    (in iwl_mvm_phy_ctxt_cmd_data)
    155: int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
    [more matches elided]
/linux/arch/x86/power/
cpu.c
    39: static void msr_save_context(struct saved_context *ctxt)
    in msr_save_context():
        41: struct saved_msr *msr = ctxt->saved_msrs.array;
        42: struct saved_msr *end = msr + ctxt->saved_msrs.num;
    51: static void msr_restore_context(struct saved_context *ctxt)
    in msr_restore_context():
        53: struct saved_msr *msr = ctxt->saved_msrs.array;
        54: struct saved_msr *end = msr + ctxt->saved_msrs.num;
    79: static void __save_processor_state(struct saved_context *ctxt)
    in __save_processor_state():
        89: store_idt(&ctxt->idt);
        97: ctxt->gdt_desc.size = GDT_SIZE - 1;
        98: ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());
    [more matches elided]
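msr_save_context() and msr_restore_context() walk a preregistered array of (MSR number, value) slots, reading each MSR at suspend and writing it back at resume. A sketch of the two loops with stand-in rdmsr()/wrmsr() stubs (the real helpers wrap privileged x86 instructions):

    #include <stdbool.h>
    #include <stdint.h>

    struct saved_msr {
        uint32_t msr_no;
        uint64_t value;
        bool     valid;
    };

    struct saved_msrs {
        struct saved_msr *array;
        int num;
    };

    /* stand-ins for the RDMSR/WRMSR wrappers */
    static uint64_t rdmsr(uint32_t no) { (void)no; return 0; }
    static void     wrmsr(uint32_t no, uint64_t v) { (void)no; (void)v; }

    static void msr_save(struct saved_msrs *s)
    {
        for (struct saved_msr *m = s->array; m < s->array + s->num; m++) {
            m->value = rdmsr(m->msr_no);    /* snapshot at suspend */
            m->valid = true;
        }
    }

    static void msr_restore(struct saved_msrs *s)
    {
        for (struct saved_msr *m = s->array; m < s->array + s->num; m++)
            if (m->valid)
                wrmsr(m->msr_no, m->value); /* replay at resume */
    }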
/linux/arch/x86/coco/sev/
core.c
    282: static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
    285: return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);    (in vc_fetch_insn_kernel)
    288: static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
    in __vc_decode_user_insn():
        293: insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
        296: ctxt->fi.vector = X86_TRAP_PF;
        297: ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
        298: ctxt->fi.cr2 = ctxt->regs->ip;
        302: ctxt->fi.vector = X86_TRAP_GP;
        303: ctxt->fi.error_code = 0;
        304: ctxt->fi.cr2 = 0;
    [more matches elided]
shared.c
    206: static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt,
    in vc_init_em_ctxt():
        212: memset(ctxt, 0, sizeof(*ctxt));
        213: ctxt->regs = regs;
        216: ret = vc_decode_insn(ctxt);
    221: static void vc_finish_insn(struct es_em_ctxt *ctxt)
    223: ctxt->regs->ip += ctxt->insn.length;    (in vc_finish_insn)
    226: static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
    in verify_exception_info():
        242: ctxt->fi.vector = v;
        245: ctxt->fi.error_code = info >> 32;
    338: struct es_em_ctxt ctxt;    (local in svsm_perform_ghcb_protocol)
    [more matches elided]
/linux/drivers/infiniband/hw/hfi1/
trace_ctxts.h
    25: __field(unsigned int, ctxt)
    37: __entry->ctxt = uctxt->ctxt;
    50: __entry->ctxt,
    66: TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt,
    69: TP_ARGS(dd, ctxt, subctxt, cinfo),
    71: __field(unsigned int, ctxt)
    80: __entry->ctxt = ctxt;
    90: __entry->ctxt,
    100: const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt);
    102: TP_PROTO(unsigned int ctxt),
    [more matches elided]
init.c
    111: if (rcd->ctxt == HFI1_CTRL_CTXT)    (in hfi1_create_kctxt)
    179: rcd->dd->rcd[rcd->ctxt] = NULL;    (in hfi1_rcd_free)
    in allocate_rcd_index():
        229: u16 ctxt;
        232: for (ctxt = 0; ctxt < dd->num_rcv_contexts; ctxt++)
        233: if (!dd->rcd[ctxt])
        236: if (ctxt < dd->num_rcv_contexts) {
        237: rcd->ctxt = ctxt;
        238: dd->rcd[ctxt] = rcd;
        243: if (ctxt >= dd->num_rcv_contexts)
        246: *index = ctxt;
    [more matches elided]
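allocate_rcd_index() is a first-free-slot allocator: scan the context table for an empty entry, publish the new context there, and return its index. The kernel version does this under a spinlock; a minimal single-threaded sketch with stand-in storage:

    #define NUM_CTXTS 16

    static void *rcd_table[NUM_CTXTS];   /* models dd->rcd[] */

    static int allocate_index(void *rcd, unsigned int *index)
    {
        unsigned int ctxt;

        /* linear scan for the first free slot */
        for (ctxt = 0; ctxt < NUM_CTXTS; ctxt++)
            if (!rcd_table[ctxt])
                break;
        if (ctxt >= NUM_CTXTS)
            return -1;                   /* table full */
        rcd_table[ctxt] = rcd;           /* claim the slot */
        *index = ctxt;
        return 0;
    }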
netdev_rx.c
    59: struct hfi1_ctxtdata **ctxt)    (argument of hfi1_netdev_allocate_ctxt)
    in hfi1_netdev_allocate_ctxt():
        85: dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
        86: *ctxt = uctxt;
    122: struct hfi1_ctxtdata **ctxt)    (argument of hfi1_netdev_allot_ctxt)
    in hfi1_netdev_allot_ctxt():
        127: rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
        133: rc = hfi1_netdev_setup_ctxt(rx, *ctxt);
        136: hfi1_netdev_deallocate_ctxt(dd, *ctxt);
        137: *ctxt = NULL;
    213: i, rxq->rcd->ctxt);    (in hfi1_netdev_rxq_init)
    271: rxq->rcd->ctxt);    (in enable_queues)
    [more matches elided]
file_ops.c
    131: #define HFI1_MMAP_TOKEN(type, ctxt, subctxt, addr) \
    134:     HFI1_MMAP_TOKEN_SET(CTXT, ctxt) | \
    279: trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);    (in hfi1_write_iter)
    307: static inline void mmap_cdbg(u16 ctxt, u8 subctxt, u8 type, u8 mapio, u8 vmf,
    313:     ctxt, subctxt, type, mapio, vmf, !!memdma,    (in mmap_cdbg)
    in hfi1_file_mmap():
        331: u16 ctxt;
        339: ctxt = HFI1_MMAP_TOKEN_GET(CTXT, token);
        342: if (ctxt != uctxt->ctxt || subctxt != fd->subctxt) {
        441: mmap_cdbg(ctxt, subctxt, type, mapio, vmf, memaddr,
        464: + (uctxt->ctxt * RXE_PER_CONTEXT_SIZE);
    [more matches elided]
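HFI1_MMAP_TOKEN packs the map type, context, subcontext, and an address cookie into the mmap offset, and hfi1_file_mmap() unpacks and validates them (the check at line 342). A sketch of the pack/validate round trip; the field widths below are invented for illustration, not the driver's real layout:

    #include <stdint.h>

    #define TOKEN_SET(val, shift, mask) (((uint64_t)(val) & (mask)) << (shift))
    #define TOKEN_GET(tok, shift, mask) (((tok) >> (shift)) & (mask))

    /* hypothetical layout: type[63:56] ctxt[55:40] subctxt[39:32] addr[31:0] */
    static uint64_t mmap_token(uint8_t type, uint16_t ctxt, uint8_t subctxt,
                               uint32_t addr)
    {
        return TOKEN_SET(type, 56, 0xff) |
               TOKEN_SET(ctxt, 40, 0xffff) |
               TOKEN_SET(subctxt, 32, 0xff) |
               TOKEN_SET(addr, 0, 0xffffffff);
    }

    static int token_matches(uint64_t token, uint16_t ctxt, uint8_t subctxt)
    {
        /* mirror of the line-342 check: reject another context's token */
        return TOKEN_GET(token, 40, 0xffff) == ctxt &&
               TOKEN_GET(token, 32, 0xff) == subctxt;
    }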
/linux/arch/x86/xen/
smp_pv.c
    in cpu_initialize_context():
        227: struct vcpu_guest_context *ctxt;
        234: ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        235: if (ctxt == NULL) {
        247: ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle;
        248: ctxt->flags = VGCF_IN_KERNEL;
        249: ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
        250: ctxt->user_regs.ds = __USER_DS;
        251: ctxt->user_regs.es = __USER_DS;
        252: ctxt->user_regs.ss = __KERNEL_DS;
        253: ctxt->user_regs.cs = __KERNEL_CS;
    [more matches elided]
pmu.c
    29: #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \
    30:                                   (uintptr_t)ctxt->field))
    in xen_intel_pmu_emulate():
        201: struct xen_pmu_intel_ctxt *ctxt;
        211: ctxt = &xenpmu_data->pmu.c.intel;
        215: reg = &ctxt->global_ovf_ctrl;
        218: reg = &ctxt->global_status;
        221: reg = &ctxt->global_ctrl;
        224: reg = &ctxt->fixed_ctrl;
        229: fix_counters = field_offset(ctxt, fixed_counters);
        233: arch_cntr_pair = field_offset(ctxt, arch_counters);
    [more matches elided]
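field_offset() works because the shared Xen PMU page stores byte offsets rather than pointers: each side rebuilds a usable pointer by adding the stored offset to the context's own base address, so the layout stays valid across address spaces. A self-contained demonstration with a hypothetical context type:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct pmu_ctxt {
        uint64_t counters_offset;   /* byte offset from &ctxt, not a pointer */
        uint64_t counters[4];
    };

    /* same shape as the kernel macro above */
    #define field_offset(ctxt, field) \
        ((void *)((uintptr_t)(ctxt) + (uintptr_t)(ctxt)->field))

    int main(void)
    {
        struct pmu_ctxt ctxt = {
            .counters_offset = offsetof(struct pmu_ctxt, counters),
        };
        uint64_t *counters = field_offset(&ctxt, counters_offset);

        counters[0] = 42;
        printf("%llu\n", (unsigned long long)ctxt.counters[0]);  /* 42 */
        return 0;
    }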
/linux/arch/arm64/kvm/hyp/nvhe/
ffa.c
    97: static void ffa_set_retval(struct kvm_cpu_context *ctxt,
    in ffa_set_retval():
        100: cpu_reg(ctxt, 0) = res->a0;
        101: cpu_reg(ctxt, 1) = res->a1;
        102: cpu_reg(ctxt, 2) = res->a2;
        103: cpu_reg(ctxt, 3) = res->a3;
    192: struct kvm_cpu_context *ctxt)    (argument of do_ffa_rxtx_map)
    in do_ffa_rxtx_map():
        194: DECLARE_REG(phys_addr_t, tx, ctxt, 1);
        195: DECLARE_REG(phys_addr_t, rx, ctxt, 2);
        196: DECLARE_REG(u32, npages, ctxt, 3);
    271: struct kvm_cpu_context *ctxt)    (argument of do_ffa_rxtx_unmap)
    [more matches elided]
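In the FF-A proxy, guest arguments arrive in numbered general-purpose registers and results go back the same way: DECLARE_REG() names an incoming register and ffa_set_retval() writes the result tuple. A sketch of that marshaling with a hypothetical register file in place of struct kvm_cpu_context:

    #include <stdint.h>

    struct cpu_context {
        uint64_t regs[31];   /* stand-in general-purpose register file */
    };

    #define cpu_reg(ctxt, n) ((ctxt)->regs[(n)])
    #define DECLARE_REG(type, name, ctxt, n) type name = (type)cpu_reg(ctxt, n)

    struct ffa_result { uint64_t a0, a1, a2, a3; };

    static void set_retval(struct cpu_context *ctxt, const struct ffa_result *res)
    {
        cpu_reg(ctxt, 0) = res->a0;
        cpu_reg(ctxt, 1) = res->a1;
        cpu_reg(ctxt, 2) = res->a2;
        cpu_reg(ctxt, 3) = res->a3;
    }

    static void handle_rxtx_map(struct cpu_context *ctxt)
    {
        DECLARE_REG(uint64_t, tx, ctxt, 1);      /* TX buffer address */
        DECLARE_REG(uint64_t, rx, ctxt, 2);      /* RX buffer address */
        DECLARE_REG(uint32_t, npages, ctxt, 3);  /* buffer size in pages */

        /* trivial validation; a real handler would map the buffers */
        struct ffa_result res = {
            .a0 = (tx && rx && npages) ? 0 : (uint64_t)-1,
        };
        set_retval(ctxt, &res);
    }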
/linux/fs/nilfs2/
btnode.c
    209: struct nilfs_btnode_chkey_ctxt *ctxt)    (argument of nilfs_btnode_prepare_change_key)
    in nilfs_btnode_prepare_change_key():
        213: __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        219: obh = ctxt->bh;
        220: ctxt->newbh = NULL;
        259: ctxt->newbh = nbh;
    285: struct nilfs_btnode_chkey_ctxt *ctxt)    (argument of nilfs_btnode_commit_change_key)
    in nilfs_btnode_commit_change_key():
        287: struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh;
        288: __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey;
        315: ctxt->bh = nbh;
    336: struct nilfs_btnode_chkey_ctxt *ctxt)    (argument of nilfs_btnode_abort_change_key)
    [more matches elided]
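The btnode key change is split into the classic three phases: prepare stages everything that can fail (here, obtaining the new buffer head into ctxt->newbh), commit performs the switch-over that must not fail, and abort releases whatever prepare staged. A toy model of the protocol, with malloc()ed blobs standing in for buffer heads:

    #include <stdint.h>
    #include <stdlib.h>

    /* stand-in for struct nilfs_btnode_chkey_ctxt */
    struct chkey_ctxt {
        uint64_t oldkey, newkey;
        void *bh;        /* current buffer */
        void *newbh;     /* staged by prepare, consumed by commit */
    };

    static int prepare_change_key(struct chkey_ctxt *ctxt)
    {
        ctxt->newbh = malloc(64);      /* models acquiring the new block */
        return ctxt->newbh ? 0 : -1;   /* only this phase may fail */
    }

    static void commit_change_key(struct chkey_ctxt *ctxt)
    {
        free(ctxt->bh);
        ctxt->bh = ctxt->newbh;        /* switch over; no failure path */
        ctxt->newbh = NULL;
    }

    static void abort_change_key(struct chkey_ctxt *ctxt)
    {
        free(ctxt->newbh);             /* drop staged state, keep old bh */
        ctxt->newbh = NULL;
    }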
/linux/arch/arm64/include/asm/
kvm_ptrauth.h
    105: #define __ptrauth_save_key(ctxt, key) \
    109:     ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val; \
    111:     ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val; \
    114: #define ptrauth_save_keys(ctxt) \
    116:     __ptrauth_save_key(ctxt, APIA); \
    117:     __ptrauth_save_key(ctxt, APIB); \
    118:     __ptrauth_save_key(ctxt, APDA); \
    119:     __ptrauth_save_key(ctxt, APDB); \
    120:     __ptrauth_save_key(ctxt, APGA); \
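__ptrauth_save_key() relies on token pasting: `key ## KEYLO_EL1` expands one macro body into the right pair of sysreg context fields per key. A compilable illustration of the same trick, with a stub in place of the real read_sysreg_s() and a cut-down two-key context:

    #include <stdint.h>

    /* hypothetical context holding the LO/HI halves of each key */
    struct key_regs {
        uint64_t APIAKEYLO_EL1, APIAKEYHI_EL1;
        uint64_t APIBKEYLO_EL1, APIBKEYHI_EL1;
    };

    /* stand-in for read_sysreg_s(); a real build reads the CPU register */
    static uint64_t read_sysreg_stub(const char *name) { (void)name; return 0; }

    #define save_key(ctxt, key)                                        \
        do {                                                           \
            (ctxt)->key ## KEYLO_EL1 = read_sysreg_stub(#key "LO");    \
            (ctxt)->key ## KEYHI_EL1 = read_sysreg_stub(#key "HI");    \
        } while (0)

    static void save_keys(struct key_regs *ctxt)
    {
        save_key(ctxt, APIA);   /* expands to APIAKEYLO_EL1/APIAKEYHI_EL1 */
        save_key(ctxt, APIB);
    }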
/linux/arch/x86/boot/compressed/
sev.c
    77: static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
    in vc_decode_insn():
        82: memcpy(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
        84: ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
    91: static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
    99: static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
    107: static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
    in do_boot_stage2_vc():
        338: struct es_em_ctxt ctxt;
        345: result = vc_init_em_ctxt(&ctxt, regs, exit_code);
        349: result = vc_check_opcode_bytes(&ctxt, exit_code);
        356: result = vc_handle_rdtsc(boot_ghcb, &ctxt, exit_code);
    [more matches elided]
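The boot-stage #VC handler above follows a fixed sequence: build an emulation context from the trap registers, decode the faulting instruction into ctxt->insn, dispatch on the hardware exit code (RDTSC, CPUID, I/O, ...), and finally advance the saved IP past the emulated instruction. A skeleton of that control flow with stand-in types:

    #include <stdint.h>

    enum es_result { ES_OK, ES_UNSUPPORTED, ES_DECODE_FAILED };

    struct regs { uint64_t ip; };
    struct insn { int length; };
    struct em_ctxt {              /* stands in for struct es_em_ctxt */
        struct regs *regs;
        struct insn insn;
    };

    static enum es_result decode_insn(struct em_ctxt *ctxt)
    {
        ctxt->insn.length = 2;    /* pretend we decoded e.g. RDTSC (0F 31) */
        return ES_OK;
    }

    static enum es_result init_em_ctxt(struct em_ctxt *ctxt, struct regs *regs)
    {
        ctxt->regs = regs;
        return decode_insn(ctxt); /* decode before any dispatch */
    }

    static void finish_insn(struct em_ctxt *ctxt)
    {
        ctxt->regs->ip += ctxt->insn.length;  /* skip the emulated insn */
    }

    static enum es_result handle_vc(struct regs *regs, unsigned long exit_code)
    {
        struct em_ctxt ctxt;
        enum es_result result = init_em_ctxt(&ctxt, regs);

        (void)exit_code;          /* dispatch on exit_code would go here */
        if (result != ES_OK)
            return result;
        finish_insn(&ctxt);
        return ES_OK;
    }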
/linux/fs/ocfs2/
xattr.c
    260: struct ocfs2_xattr_set_ctxt *ctxt);    (prototype)
    265: struct ocfs2_xattr_set_ctxt *ctxt);    (prototype)
    698: struct ocfs2_xattr_set_ctxt *ctxt)    (argument of ocfs2_xattr_extend_allocation)
    in ocfs2_xattr_extend_allocation():
        701: handle_t *handle = ctxt->handle;
        724:     ctxt->data_ac,
        725:     ctxt->meta_ac,
    763: struct ocfs2_xattr_set_ctxt *ctxt)    (argument of __ocfs2_remove_xattr_range)
    in __ocfs2_remove_xattr_range():
        767: handle_t *handle = ctxt->handle;
        779: ret = ocfs2_remove_extent(handle, &et, cpos, len, ctxt->meta_ac,
        780:                           &ctxt->dealloc);
    [more matches elided]
/linux/drivers/scsi/be2iscsi/
be_cmds.c
    in beiscsi_cmd_cq_create():
        784: void *ctxt = &req->context;
        798:     ctxt, coalesce_wm);
        799: AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
        800: AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
        802: AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
        803: AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
        804: AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
        805: AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
        806: AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
        807: AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
    [more matches elided]
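AMAP_SET_BITS() programs an opaque, byte-addressed controller context by shift-and-mask rather than C bitfields, which keeps the wire layout independent of compiler bitfield rules. A sketch of the idea; the field positions below are invented, not be2iscsi's real CQ context layout:

    #include <stdint.h>
    #include <string.h>

    struct cq_context { uint8_t bytes[16]; };   /* opaque device layout */

    /* set a field of up to 32 bits at a given bit offset (little-endian) */
    static void amap_set(void *ctxt, unsigned int bit_off, unsigned int width,
                         uint32_t value)
    {
        uint64_t word;
        unsigned int byte = bit_off / 8, shift = bit_off % 8;
        uint64_t mask = ((1ULL << width) - 1) << shift;

        /* read-modify-write through memcpy to avoid alignment traps */
        memcpy(&word, (uint8_t *)ctxt + byte, sizeof(word));
        word = (word & ~mask) | (((uint64_t)value << shift) & mask);
        memcpy((uint8_t *)ctxt + byte, &word, sizeof(word));
    }

    /* hypothetical field positions within the CQ context */
    enum { F_VALID = 0, F_EVENTABLE = 1, F_ARMED = 2, F_EQID = 16 };

    static void cq_context_init(struct cq_context *ctxt, uint16_t eq_id)
    {
        amap_set(ctxt, F_VALID, 1, 1);
        amap_set(ctxt, F_EVENTABLE, 1, 1);
        amap_set(ctxt, F_ARMED, 1, 1);
        amap_set(ctxt, F_EQID, 16, eq_id);      /* bind CQ to its EQ */
    }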