/linux/arch/x86/kvm/ |
H A D | emulate.c | 198 int (*execute)(struct x86_emulate_ctxt *ctxt); 207 int (*check_perm)(struct x86_emulate_ctxt *ctxt); 246 static void writeback_registers(struct x86_emulate_ctxt *ctxt) in writeback_registers() argument 248 unsigned long dirty = ctxt->regs_dirty; in writeback_registers() 252 ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]); in writeback_registers() 255 static void invalidate_registers(struct x86_emulate_ctxt *ctxt) in invalidate_registers() argument 257 ctxt->regs_dirty = 0; in invalidate_registers() 258 ctxt->regs_valid = 0; in invalidate_registers() 291 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop); 466 static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, in emulator_check_intercept() argument [all …]
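The writeback_registers() excerpt shows the emulator's dirty-tracking idiom: general-purpose registers are cached in ctxt->_regs[] and only those flagged in the regs_dirty bitmap are flushed back through the write_gpr callback. A minimal user-space sketch of the same idiom, with invented names (emu_ctx, flush_regs), not the kernel's code:

    #include <stdio.h>

    #define NR_REGS 16

    struct emu_ctx {
        unsigned long regs[NR_REGS];   /* cached register file */
        unsigned long regs_dirty;      /* bit n set => regs[n] was modified */
    };

    /* Write back only the registers whose dirty bit is set, then clear the map. */
    static void flush_regs(struct emu_ctx *ctx)
    {
        for (unsigned int reg = 0; reg < NR_REGS; reg++)
            if (ctx->regs_dirty & (1UL << reg))
                printf("write_gpr(%u) = %#lx\n", reg, ctx->regs[reg]);
        ctx->regs_dirty = 0;
    }

    int main(void)
    {
        struct emu_ctx ctx = { 0 };

        ctx.regs[3] = 0xdead;
        ctx.regs_dirty |= 1UL << 3;   /* only register 3 is dirty */
        flush_regs(&ctx);             /* flushes exactly one register */
        return 0;
    }

Skipping clean registers keeps the emulator's exit path cheap when an instruction touched only one or two GPRs.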
H A D | kvm_emulate.h | 99 void (*vm_bugged)(struct x86_emulate_ctxt *ctxt); 105 ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg); 112 void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val); 121 int (*read_std)(struct x86_emulate_ctxt *ctxt, 134 int (*write_std)(struct x86_emulate_ctxt *ctxt, 144 int (*fetch)(struct x86_emulate_ctxt *ctxt, 154 int (*read_emulated)(struct x86_emulate_ctxt *ctxt, 165 int (*write_emulated)(struct x86_emulate_ctxt *ctxt, 178 int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt, 184 void (*invlpg)(struct x86_emulate_ctxt *ctxt, ulong addr); [all …]
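kvm_emulate.h defines the emulator's host interface as a table of function pointers that all take the shared ctxt as their first argument, so the core never calls the host directly. A stripped-down sketch of that callback-table shape (names here are illustrative, not the kernel's):

    #include <stdio.h>

    struct emu_ctxt;   /* forward declaration; callbacks see it as opaque */

    struct emu_ops {
        unsigned long (*read_gpr)(struct emu_ctxt *ctxt, unsigned int reg);
        void (*write_gpr)(struct emu_ctxt *ctxt, unsigned int reg, unsigned long val);
    };

    struct emu_ctxt {
        const struct emu_ops *ops;   /* host services, filled in by the host */
        unsigned long gprs[16];
    };

    static unsigned long my_read_gpr(struct emu_ctxt *c, unsigned int r)
    {
        return c->gprs[r];
    }

    static void my_write_gpr(struct emu_ctxt *c, unsigned int r, unsigned long v)
    {
        c->gprs[r] = v;
    }

    static const struct emu_ops my_ops = {
        .read_gpr  = my_read_gpr,
        .write_gpr = my_write_gpr,
    };

    int main(void)
    {
        struct emu_ctxt ctxt = { .ops = &my_ops };

        /* Every operation is dispatched through the table, never called directly. */
        ctxt.ops->write_gpr(&ctxt, 0, 42);
        printf("%lu\n", ctxt.ops->read_gpr(&ctxt, 0));
        return 0;
    }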
/linux/arch/arm64/kvm/hyp/include/hyp/ |
H A D | sysreg-sr.h | 19 static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt); 21 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) in __sysreg_save_common_state() argument 23 ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1); in __sysreg_save_common_state() 26 if (ctxt_has_s1poe(ctxt)) in __sysreg_save_common_state() 27 ctxt_sys_reg(ctxt, POR_EL0) = read_sysreg_s(SYS_POR_EL0); in __sysreg_save_common_state() 30 static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt) in __sysreg_save_user_state() argument 32 ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0); in __sysreg_save_user_state() 33 ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0); in __sysreg_save_user_state() 36 static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt) in ctxt_to_vcpu() argument 38 struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu; in ctxt_to_vcpu() [all …]
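These common helpers save baseline state unconditionally but gate optional registers (POR_EL0 here) on a per-context feature test; the nvhe/vhe files later in this listing just compose them. The control flow, modeled in portable C with a stubbed register read:

    #include <stdbool.h>
    #include <stdio.h>

    struct cpu_ctxt {
        unsigned long mdscr_el1;
        unsigned long por_el0;
        bool has_s1poe;   /* feature support, fixed when the vcpu is set up */
    };

    /* Stand-in for read_sysreg(): returns a dummy hardware value. */
    static unsigned long read_sysreg_stub(const char *name)
    {
        (void)name;
        return 0x55;
    }

    static void sysreg_save_common(struct cpu_ctxt *ctxt)
    {
        ctxt->mdscr_el1 = read_sysreg_stub("mdscr_el1");   /* always saved */
        if (ctxt->has_s1poe)                               /* optional register */
            ctxt->por_el0 = read_sysreg_stub("por_el0");
    }

    int main(void)
    {
        struct cpu_ctxt ctxt = { .has_s1poe = true };
        sysreg_save_common(&ctxt);
        printf("%#lx %#lx\n", ctxt.mdscr_el1, ctxt.por_el0);
        return 0;
    }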
/linux/net/sunrpc/xprtrdma/ |
H A D | svc_rdma_recvfrom.c | 122 struct svc_rdma_recv_ctxt *ctxt; in svc_rdma_recv_ctxt_alloc() local 126 ctxt = kzalloc_node(sizeof(*ctxt), GFP_KERNEL, node); in svc_rdma_recv_ctxt_alloc() 127 if (!ctxt) in svc_rdma_recv_ctxt_alloc() 137 svc_rdma_recv_cid_init(rdma, &ctxt->rc_cid); in svc_rdma_recv_ctxt_alloc() 138 pcl_init(&ctxt->rc_call_pcl); in svc_rdma_recv_ctxt_alloc() 139 pcl_init(&ctxt->rc_read_pcl); in svc_rdma_recv_ctxt_alloc() 140 pcl_init(&ctxt->rc_write_pcl); in svc_rdma_recv_ctxt_alloc() 141 pcl_init(&ctxt->rc_reply_pcl); in svc_rdma_recv_ctxt_alloc() 143 ctxt->rc_recv_wr.next = NULL; in svc_rdma_recv_ctxt_alloc() 144 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; in svc_rdma_recv_ctxt_alloc() [all …]
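Note how the allocator points the work request's completion entry back into the context itself (ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe); the completion handler can then recover the enclosing ctxt from the embedded member. A user-space sketch of that container_of() round-trip, with invented names around the rc_cqe field:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cqe { void (*done)(struct cqe *cqe); };

    struct recv_ctxt {
        int id;
        struct cqe rc_cqe;   /* embedded completion entry */
    };

    static void recv_done(struct cqe *cqe)
    {
        /* Recover the enclosing context from the embedded member. */
        struct recv_ctxt *ctxt = container_of(cqe, struct recv_ctxt, rc_cqe);
        printf("completed ctxt %d\n", ctxt->id);
    }

    int main(void)
    {
        struct recv_ctxt *ctxt = calloc(1, sizeof(*ctxt));

        if (!ctxt)
            return 1;
        ctxt->id = 7;
        ctxt->rc_cqe.done = recv_done;
        ctxt->rc_cqe.done(&ctxt->rc_cqe);   /* the CQ would invoke this on completion */
        free(ctxt);
        return 0;
    }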
H A D | svc_rdma_sendto.c | 120 struct svc_rdma_send_ctxt *ctxt; in svc_rdma_send_ctxt_alloc() local 125 ctxt = kzalloc_node(struct_size(ctxt, sc_sges, rdma->sc_max_send_sges), in svc_rdma_send_ctxt_alloc() 127 if (!ctxt) in svc_rdma_send_ctxt_alloc() 137 svc_rdma_send_cid_init(rdma, &ctxt->sc_cid); in svc_rdma_send_ctxt_alloc() 139 ctxt->sc_rdma = rdma; in svc_rdma_send_ctxt_alloc() 140 ctxt->sc_send_wr.next = NULL; in svc_rdma_send_ctxt_alloc() 141 ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; in svc_rdma_send_ctxt_alloc() 142 ctxt->sc_send_wr.sg_list = ctxt->sc_sges; in svc_rdma_send_ctxt_alloc() 143 ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED; in svc_rdma_send_ctxt_alloc() 144 ctxt->sc_cqe.done = svc_rdma_wc_send; in svc_rdma_send_ctxt_alloc() [all …]
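svc_rdma_send_ctxt_alloc() sizes its allocation with struct_size(ctxt, sc_sges, rdma->sc_max_send_sges): one header plus a flexible array of SGEs in a single allocation. The equivalent open-coded pattern in standard C (struct_size additionally guards against multiplication overflow, which this sketch does not):

    #include <stdio.h>
    #include <stdlib.h>

    struct sge { void *addr; unsigned int length; };

    struct send_ctxt {
        unsigned int nr_sges;
        struct sge sges[];   /* flexible array member, sized at alloc time */
    };

    int main(void)
    {
        unsigned int max_sges = 8;
        struct send_ctxt *ctxt =
            calloc(1, sizeof(*ctxt) + max_sges * sizeof(struct sge));

        if (!ctxt)
            return 1;
        ctxt->nr_sges = max_sges;
        ctxt->sges[0].length = 4096;   /* the array lives in the same allocation */
        printf("%u sges\n", ctxt->nr_sges);
        free(ctxt);
        return 0;
    }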
H A D | svc_rdma_rw.c | 59 struct svc_rdma_rw_ctxt *ctxt; in svc_rdma_get_rw_ctxt() local 66 ctxt = llist_entry(node, struct svc_rdma_rw_ctxt, rw_node); in svc_rdma_get_rw_ctxt() 68 ctxt = kmalloc_node(struct_size(ctxt, rw_first_sgl, first_sgl_nents), in svc_rdma_get_rw_ctxt() 70 if (!ctxt) in svc_rdma_get_rw_ctxt() 73 INIT_LIST_HEAD(&ctxt->rw_list); in svc_rdma_get_rw_ctxt() 74 ctxt->rw_first_sgl_nents = first_sgl_nents; in svc_rdma_get_rw_ctxt() 77 ctxt->rw_sg_table.sgl = ctxt->rw_first_sgl; in svc_rdma_get_rw_ctxt() 78 if (sg_alloc_table_chained(&ctxt->rw_sg_table, sges, in svc_rdma_get_rw_ctxt() 79 ctxt->rw_sg_table.sgl, in svc_rdma_get_rw_ctxt() 82 return ctxt; in svc_rdma_get_rw_ctxt() [all …]
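svc_rdma_get_rw_ctxt() first tries to pop a recycled context off a free list and only allocates when the list is empty. A single-threaded model of that reuse pattern (the kernel version uses a lock-free llist and NUMA-aware allocation; names here are invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct rw_ctxt {
        struct rw_ctxt *next;   /* free-list linkage */
        int nents;
    };

    static struct rw_ctxt *free_list;

    /* Pop a cached context if one exists, otherwise allocate fresh. */
    static struct rw_ctxt *get_rw_ctxt(int nents)
    {
        struct rw_ctxt *ctxt = free_list;

        if (ctxt) {
            free_list = ctxt->next;
        } else {
            ctxt = calloc(1, sizeof(*ctxt));
            if (!ctxt)
                return NULL;
        }
        ctxt->nents = nents;
        return ctxt;
    }

    static void put_rw_ctxt(struct rw_ctxt *ctxt)
    {
        ctxt->next = free_list;   /* recycle instead of freeing */
        free_list = ctxt;
    }

    int main(void)
    {
        struct rw_ctxt *a = get_rw_ctxt(4);

        put_rw_ctxt(a);
        struct rw_ctxt *b = get_rw_ctxt(8);   /* reuses a's memory */
        printf("%s\n", a == b ? "recycled" : "fresh");
        put_rw_ctxt(b);
        return 0;
    }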
/linux/drivers/net/ethernet/intel/ice/ |
H A D | ice_vsi_vlan_lib.c | 94 struct ice_vsi_ctx *ctxt; in ice_vsi_manage_vlan_insertion() 97 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in ice_vsi_manage_vlan_insertion() 98 if (!ctxt) in ice_vsi_manage_vlan_insertion() 105 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL; in ice_vsi_manage_vlan_insertion() 108 ctxt->info.inner_vlan_flags |= (vsi->info.inner_vlan_flags & in ice_vsi_manage_vlan_insertion() 111 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID); in ice_vsi_manage_vlan_insertion() 113 err = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_manage_vlan_insertion() 120 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; in ice_vsi_manage_vlan_insertion() 122 kfree(ctxt); in ice_vsi_manage_vlan_insertion() 93 struct ice_vsi_ctx *ctxt; in ice_vsi_manage_vlan_insertion() local 133 struct ice_vsi_ctx *ctxt; in ice_vsi_manage_vlan_stripping() local 238 struct ice_vsi_ctx *ctxt; in __ice_vsi_set_inner_port_vlan() local 291 struct ice_vsi_ctx *ctxt; in ice_vsi_clear_inner_port_vlan() local 323 struct ice_vsi_ctx *ctxt; in ice_cfg_vlan_pruning() local 467 struct ice_vsi_ctx *ctxt; in ice_vsi_ena_outer_stripping() local 525 struct ice_vsi_ctx *ctxt; in ice_vsi_dis_outer_stripping() local 576 struct ice_vsi_ctx *ctxt; in ice_vsi_ena_outer_insertion() local 632 struct ice_vsi_ctx *ctxt; in ice_vsi_dis_outer_insertion() local 689 struct ice_vsi_ctx *ctxt; in __ice_vsi_set_outer_port_vlan() local 766 struct ice_vsi_ctx *ctxt; in ice_vsi_clear_outer_port_vlan() local [all …]
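The driver never pokes VSI state in place: it allocates a scratch ice_vsi_ctx, sets only the sections it marks valid, issues the update, and copies the result into the cached info only on success. A compact sketch of that read-modify-write shape, all names hypothetical and the firmware call stubbed:

    #include <stdio.h>
    #include <stdlib.h>

    struct vsi_info { unsigned short vlan_flags; };
    struct vsi { struct vsi_info info; };

    /* Stand-in for the admin-queue update command. */
    static int hw_update_vsi(struct vsi *vsi, const struct vsi_info *info)
    {
        (void)vsi; (void)info;
        return 0;   /* pretend the command succeeded */
    }

    static int set_vlan_flags(struct vsi *vsi, unsigned short flags)
    {
        struct vsi_info *scratch = calloc(1, sizeof(*scratch));
        int err;

        if (!scratch)
            return -1;
        *scratch = vsi->info;          /* start from the cached state */
        scratch->vlan_flags = flags;   /* modify only the field of interest */

        err = hw_update_vsi(vsi, scratch);
        if (!err)
            vsi->info = *scratch;      /* commit the cache only on success */
        free(scratch);
        return err;
    }

    int main(void)
    {
        struct vsi v = { 0 };

        printf("%d\n", set_vlan_flags(&v, 0x10));
        return 0;
    }

Keeping the cached copy untouched on failure means a rejected update leaves the driver's view consistent with the hardware's.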
/linux/kernel/printk/ |
H A D | nbcon.c | 193 * @ctxt: Pointer to an acquire context that contains in nbcon_seq_try_update() 197 * @ctxt->seq is updated to the new value of @con::nbcon_seq (expanded to in nbcon_seq_try_update() 202 static void nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq) in nbcon_seq_try_update() 204 unsigned long nbcon_seq = __u64seq_to_ulseq(ctxt->seq); 205 struct console *con = ctxt->console; 209 ctxt->seq = new_seq; 211 ctxt->seq = nbcon_seq_read(con); 217 * @ctxt: The context of the caller 237 static int nbcon_context_try_acquire_direct(struct nbcon_context *ctxt, in nbcon_context_try_acquire_direct() 241 struct console *con = ctxt->console; in nbcon_context_try_acquire_direct() 191 nbcon_seq_try_update(struct nbcon_context *ctxt, u64 new_seq) in nbcon_seq_try_update() argument 226 nbcon_context_try_acquire_direct(struct nbcon_context *ctxt, struct nbcon_state *cur) in nbcon_context_try_acquire_direct() argument 306 nbcon_context_try_acquire_requested(struct nbcon_context *ctxt, struct nbcon_state *cur) in nbcon_context_try_acquire_requested() argument 389 nbcon_context_try_acquire_handover(struct nbcon_context *ctxt, struct nbcon_state *cur) in nbcon_context_try_acquire_handover() argument 495 nbcon_context_try_acquire_hostile(struct nbcon_context *ctxt, struct nbcon_state *cur) in nbcon_context_try_acquire_hostile() argument 542 nbcon_context_try_acquire(struct nbcon_context *ctxt) in nbcon_context_try_acquire() argument 604 nbcon_context_release(struct nbcon_context *ctxt) in nbcon_context_release() argument 657 nbcon_context_can_proceed(struct nbcon_context *ctxt, struct nbcon_state *cur) in nbcon_context_can_proceed() argument 727 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); in nbcon_can_proceed() local 759 __nbcon_context_update_unsafe(struct nbcon_context *ctxt, bool unsafe) in __nbcon_context_update_unsafe() argument 801 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); in nbcon_enter_unsafe() local 821 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); in nbcon_exit_unsafe() local 846 struct nbcon_context *ctxt = &ACCESS_PRIVATE(wctxt, ctxt); in nbcon_emit_next_record() local [all …]
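The two assignments in the nbcon_seq_try_update() excerpt are the success and failure arms of a compare-and-exchange on con->nbcon_seq: on success ctxt->seq becomes the new value, on failure it is refreshed from whatever another context installed. The same handling with C11 atomics (a simplification; the real code packs 64-bit sequence numbers into unsigned long via __u64seq_to_ulseq()):

    #include <stdatomic.h>
    #include <stdio.h>

    struct console { atomic_ulong nbcon_seq; };
    struct nbcon_ctxt { struct console *con; unsigned long seq; };

    /* Try to advance the console sequence from ctxt->seq to new_seq. */
    static void seq_try_update(struct nbcon_ctxt *ctxt, unsigned long new_seq)
    {
        unsigned long expected = ctxt->seq;

        if (atomic_compare_exchange_strong(&ctxt->con->nbcon_seq, &expected, new_seq))
            ctxt->seq = new_seq;    /* we installed the new value */
        else
            ctxt->seq = expected;   /* lost a race; adopt the value that won */
    }

    int main(void)
    {
        struct console con = { .nbcon_seq = 5 };
        struct nbcon_ctxt ctxt = { .con = &con, .seq = 5 };

        seq_try_update(&ctxt, 6);
        printf("seq = %lu\n", ctxt.seq);   /* prints 6 */
        return 0;
    }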
/linux/fs/bcachefs/ |
H A D | move.c | 84 struct moving_context *ctxt = io->write.ctxt; in move_free() local 91 mutex_lock(&ctxt->lock); in move_free() 93 wake_up(&ctxt->wait); in move_free() 94 mutex_unlock(&ctxt->lock); in move_free() 102 struct moving_context *ctxt = io->write.ctxt; in move_write_done() local 105 ctxt->write_error = true; in move_write_done() 107 atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors); in move_write_done() 108 atomic_dec(&io->write.ctxt->write_ios); in move_write_done() 110 closure_put(&ctxt->cl); in move_write_done() 129 closure_get(&io->write.ctxt->cl); in move_write() [all …]
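move.c pairs each in-flight write with atomic counters on the moving_context (write_sectors, write_ios) and a closure reference; the completion path decrements both and wakes anyone parked in move_ctxt_wait_event(). A single-file model of the accounting half, names invented and the wakeup reduced to a print:

    #include <stdatomic.h>
    #include <stdio.h>

    struct moving_ctxt {
        atomic_int write_ios;       /* IOs still in flight */
        atomic_int write_sectors;   /* sectors still in flight */
    };

    static void io_submit(struct moving_ctxt *ctxt, int sectors)
    {
        atomic_fetch_add(&ctxt->write_ios, 1);
        atomic_fetch_add(&ctxt->write_sectors, sectors);
    }

    static void io_done(struct moving_ctxt *ctxt, int sectors)
    {
        atomic_fetch_sub(&ctxt->write_sectors, sectors);
        /* fetch_sub returns the old value: 1 means this was the last IO. */
        if (atomic_fetch_sub(&ctxt->write_ios, 1) == 1)
            printf("last IO done, wake waiters\n");   /* wake_up(&ctxt->wait) */
    }

    int main(void)
    {
        struct moving_ctxt ctxt = { 0 };

        io_submit(&ctxt, 8);
        io_done(&ctxt, 8);   /* prints the wake message */
        return 0;
    }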
H A D | movinggc.c | 105 static void move_buckets_wait(struct moving_context *ctxt, in move_buckets_wait() argument 114 move_ctxt_wait_event(ctxt, !atomic_read(&i->count)); in move_buckets_wait() 132 bch2_trans_unlock_long(ctxt->trans); in move_buckets_wait() 143 static int bch2_copygc_get_buckets(struct moving_context *ctxt, in bch2_copygc_get_buckets() argument 147 struct btree_trans *trans = ctxt->trans; in bch2_copygc_get_buckets() 153 move_buckets_wait(ctxt, buckets_in_flight, false); in bch2_copygc_get_buckets() 201 static int bch2_copygc(struct moving_context *ctxt, in bch2_copygc() argument 205 struct btree_trans *trans = ctxt->trans; in bch2_copygc() 212 u64 moved = atomic64_read(&ctxt->stats->sectors_moved); in bch2_copygc() 215 ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets); in bch2_copygc() [all …]
H A D | rebalance.c | 192 static int do_rebalance_extent(struct moving_context *ctxt, in do_rebalance_extent() argument 196 struct btree_trans *trans = ctxt->trans; in do_rebalance_extent() 205 ctxt->stats = &r->work_stats; in do_rebalance_extent() 219 atomic64_add(k.k->size, &ctxt->stats->sectors_seen); in do_rebalance_extent() 228 ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts); in do_rebalance_extent() 232 bch2_move_ctxt_wait_for_io(ctxt); in do_rebalance_extent() 269 static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie) in do_rebalance_scan() argument 271 struct btree_trans *trans = ctxt->trans; in do_rebalance_scan() 276 ctxt->stats = &r->scan_stats; in do_rebalance_scan() 288 ret = __bch2_move_data(ctxt, in do_rebalance_scan() 317 do_rebalance(struct moving_context *ctxt) in do_rebalance() argument 386 struct moving_context ctxt; in bch2_rebalance_thread() local [all …]
/linux/drivers/net/wireless/intel/iwlwifi/mvm/ |
H A D | phy-ctxt.c | 69 static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_hdr() argument 73 cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id, in iwl_mvm_phy_ctxt_cmd_hdr() 74 ctxt->color)); in iwl_mvm_phy_ctxt_cmd_hdr() 79 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_set_rxchain() argument 97 if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm, ctxt)) { in iwl_mvm_phy_ctxt_set_rxchain() 117 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_data_v1() argument 128 iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &tail->rxchain_info, in iwl_mvm_phy_ctxt_cmd_data_v1() 138 struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_ctxt_cmd_data() argument 151 iwl_mvm_phy_ctxt_set_rxchain(mvm, ctxt, &cmd->rxchain_info, in iwl_mvm_phy_ctxt_cmd_data() 155 int iwl_mvm_phy_send_rlc(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt, in iwl_mvm_phy_send_rlc() argument [all …]
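iwl_mvm_phy_ctxt_cmd_hdr() folds the context's id and color into one little-endian word via FW_CMD_ID_AND_COLOR(). A sketch of that kind of bit packing; the field widths below are assumptions for illustration, not the firmware's actual layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: id in bits 0-7, color in bits 8-15. */
    static uint32_t cmd_id_and_color(uint32_t id, uint32_t color)
    {
        return (id & 0xff) | ((color & 0xff) << 8);
    }

    int main(void)
    {
        /* cpu_to_le32() would additionally byte-swap on big-endian hosts. */
        printf("id_and_color = %#x\n", (unsigned int)cmd_id_and_color(3, 1));
        return 0;
    }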
/linux/arch/x86/power/ |
H A D | cpu.c | 39 static void msr_save_context(struct saved_context *ctxt) in msr_save_context() argument 41 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_save_context() 42 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_save_context() 51 static void msr_restore_context(struct saved_context *ctxt) in msr_restore_context() argument 53 struct saved_msr *msr = ctxt->saved_msrs.array; in msr_restore_context() 54 struct saved_msr *end = msr + ctxt->saved_msrs.num; in msr_restore_context() 79 static void __save_processor_state(struct saved_context *ctxt) in __save_processor_state() argument 89 store_idt(&ctxt->idt); in __save_processor_state() 97 ctxt->gdt_desc.size = GDT_SIZE - 1; in __save_processor_state() 98 ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id()); in __save_processor_state() [all …]
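The suspend path walks a pre-registered MSR table with the begin/end pointer idiom visible above: save fills each value from the hardware, restore replays it. A user-space model with rdmsr/wrmsr stubbed out:

    #include <stdio.h>

    struct saved_msr { unsigned int nr; unsigned long long value; };
    struct saved_ctxt { struct saved_msr *array; int num; };

    /* Capture the current value of every register in the table. */
    static void msr_save(struct saved_ctxt *ctxt)
    {
        struct saved_msr *msr = ctxt->array;
        struct saved_msr *end = msr + ctxt->num;

        for (; msr < end; msr++)
            msr->value = 0x1234;   /* rdmsr(msr->nr) in the real code */
    }

    /* Replay the captured values after resume. */
    static void msr_restore(struct saved_ctxt *ctxt)
    {
        struct saved_msr *msr = ctxt->array;
        struct saved_msr *end = msr + ctxt->num;

        for (; msr < end; msr++)
            printf("wrmsr(%#x, %#llx)\n", msr->nr, msr->value);
    }

    int main(void)
    {
        struct saved_msr table[2] = { { .nr = 0x10 }, { .nr = 0xc0000080 } };
        struct saved_ctxt ctxt = { .array = table, .num = 2 };

        msr_save(&ctxt);
        msr_restore(&ctxt);
        return 0;
    }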
/linux/arch/x86/coco/sev/ |
H A D | core.c | 306 static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt, in vc_fetch_insn_kernel() argument 309 return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE); in vc_fetch_insn_kernel() 312 static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt) in __vc_decode_user_insn() argument 317 insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer); in __vc_decode_user_insn() 320 ctxt->fi.vector = X86_TRAP_PF; in __vc_decode_user_insn() 321 ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER; in __vc_decode_user_insn() 322 ctxt->fi.cr2 = ctxt->regs->ip; in __vc_decode_user_insn() 326 ctxt->fi.vector = X86_TRAP_GP; in __vc_decode_user_insn() 327 ctxt->fi.error_code = 0; in __vc_decode_user_insn() 328 ctxt->fi.cr2 = 0; in __vc_decode_user_insn() [all …]
H A D | shared.c | 206 static enum es_result vc_init_em_ctxt(struct es_em_ctxt *ctxt, in vc_init_em_ctxt() argument 212 memset(ctxt, 0, sizeof(*ctxt)); in vc_init_em_ctxt() 213 ctxt->regs = regs; in vc_init_em_ctxt() 216 ret = vc_decode_insn(ctxt); in vc_init_em_ctxt() 221 static void vc_finish_insn(struct es_em_ctxt *ctxt) in vc_finish_insn() argument 223 ctxt->regs->ip += ctxt->insn.length; in vc_finish_insn() 226 static enum es_result verify_exception_info(struct ghcb *ghcb, struct es_em_ctxt *ctxt) in verify_exception_info() argument 242 ctxt->fi.vector = v; in verify_exception_info() 245 ctxt->fi.error_code = info >> 32; in verify_exception_info() 338 struct es_em_ctxt ctxt; in svsm_perform_ghcb_protocol() local [all …]
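shared.c shows the lifecycle of an emulation context: vc_init_em_ctxt() zeroes it, attaches the register file, and decodes the faulting instruction; vc_finish_insn() then advances the saved IP past it. Modeled minimally, with the decoder stubbed to a fixed length:

    #include <stdio.h>
    #include <string.h>

    struct regs { unsigned long ip; };
    struct insn { int length; };
    struct em_ctxt { struct regs *regs; struct insn insn; };

    /* Zero the context, attach the register file, decode one instruction. */
    static int init_em_ctxt(struct em_ctxt *ctxt, struct regs *regs)
    {
        memset(ctxt, 0, sizeof(*ctxt));
        ctxt->regs = regs;
        ctxt->insn.length = 3;   /* a real decoder would parse bytes at regs->ip */
        return 0;
    }

    /* After successful emulation, skip the emulated instruction. */
    static void finish_insn(struct em_ctxt *ctxt)
    {
        ctxt->regs->ip += ctxt->insn.length;
    }

    int main(void)
    {
        struct regs regs = { .ip = 0x1000 };
        struct em_ctxt ctxt;

        init_em_ctxt(&ctxt, &regs);
        finish_insn(&ctxt);
        printf("ip = %#lx\n", regs.ip);   /* 0x1003 */
        return 0;
    }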
/linux/arch/arm64/include/asm/ |
H A D | kvm_asm.h | 305 .macro get_vcpu_ptr vcpu, ctxt 306 get_host_ctxt \ctxt, \vcpu 307 ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] 310 .macro get_loaded_vcpu vcpu, ctxt 311 adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu 312 ldr \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] 315 .macro set_loaded_vcpu vcpu, ctxt, tmp 316 adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp 317 str \vcpu, [\ctxt, #HOST_CONTEXT_VCPU] 344 .macro save_callee_saved_regs ctxt [all …]
/linux/drivers/infiniband/hw/hfi1/ |
H A D | trace_ctxts.h | 25 __field(unsigned int, ctxt) 37 __entry->ctxt = uctxt->ctxt; 50 __entry->ctxt, 66 TP_PROTO(struct hfi1_devdata *dd, unsigned int ctxt, 69 TP_ARGS(dd, ctxt, subctxt, cinfo), 71 __field(unsigned int, ctxt) 80 __entry->ctxt = ctxt; 90 __entry->ctxt, 100 const char *hfi1_trace_print_rsm_hist(struct trace_seq *p, unsigned int ctxt); 102 TP_PROTO(unsigned int ctxt), [all …]
H A D | trace_rx.h | 29 __field(u32, ctxt) 38 __entry->ctxt = packet->rcd->ctxt; 46 "[%s] ctxt %d eflags 0x%llx etype %d,%s hlen %d tlen %d updegr %d etail %d", 48 __entry->ctxt, 62 __field(u32, ctxt) 67 __entry->ctxt = rcd->ctxt; 71 TP_printk("[%s] ctxt %d SlowPath: %d DmaRtail: %d", 73 __entry->ctxt, [all …]
H A D | trace_tx.h | 37 TP_printk("[%s] ctxt %u(%u) extra %d", 60 TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx", 170 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt), 171 TP_ARGS(dd, ctxt, subctxt), 173 __field(u16, ctxt) 177 __entry->ctxt = ctxt; 182 __entry->ctxt, 188 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, 190 TP_ARGS(dd, ctxt, subctxt [all …]
/linux/arch/arm64/kvm/hyp/nvhe/ |
H A D | sysreg-sr.c | 21 void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt) in __sysreg_save_state_nvhe() argument 23 __sysreg_save_el1_state(ctxt); in __sysreg_save_state_nvhe() 24 __sysreg_save_common_state(ctxt); in __sysreg_save_state_nvhe() 25 __sysreg_save_user_state(ctxt); in __sysreg_save_state_nvhe() 26 __sysreg_save_el2_return_state(ctxt); in __sysreg_save_state_nvhe() 29 void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt) in __sysreg_restore_state_nvhe() argument 31 __sysreg_restore_el1_state(ctxt); in __sysreg_restore_state_nvhe() 32 __sysreg_restore_common_state(ctxt); in __sysreg_restore_state_nvhe() 33 __sysreg_restore_user_state(ctxt); in __sysreg_restore_state_nvhe() 34 __sysreg_restore_el2_return_state(ctxt); in __sysreg_restore_state_nvhe()
H A D | ffa.c | 97 static void ffa_set_retval(struct kvm_cpu_context *ctxt, in ffa_set_retval() argument 100 cpu_reg(ctxt, 0) = res->a0; in ffa_set_retval() 101 cpu_reg(ctxt, 1) = res->a1; in ffa_set_retval() 102 cpu_reg(ctxt, 2) = res->a2; in ffa_set_retval() 103 cpu_reg(ctxt, 3) = res->a3; in ffa_set_retval() 192 struct kvm_cpu_context *ctxt) in do_ffa_rxtx_map() argument 194 DECLARE_REG(phys_addr_t, tx, ctxt, 1); in do_ffa_rxtx_map() 195 DECLARE_REG(phys_addr_t, rx, ctxt, 2); in do_ffa_rxtx_map() 196 DECLARE_REG(u32, npages, ctxt, 3); in do_ffa_rxtx_map() 271 struct kvm_cpu_context *ctxt) in do_ffa_rxtx_unmap() argument [all …]
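ffa.c reads hypercall arguments straight out of the guest's saved register file with DECLARE_REG(type, name, ctxt, n) and writes results back with cpu_reg(ctxt, 0..3). A host-side model of both halves; the macro body and register layout are simplified stand-ins for the hyp versions:

    #include <stdint.h>
    #include <stdio.h>

    struct cpu_ctxt { unsigned long regs[31]; };

    /* Model of DECLARE_REG(type, name, ctxt, n): read a typed argument
     * out of the caller's saved general-purpose register n. */
    #define DECLARE_REG(type, name, ctxt, n) \
        type name = (type)(ctxt)->regs[(n)]

    static void do_rxtx_map(struct cpu_ctxt *ctxt)
    {
        DECLARE_REG(uint64_t, tx, ctxt, 1);
        DECLARE_REG(uint64_t, rx, ctxt, 2);
        DECLARE_REG(uint32_t, npages, ctxt, 3);

        printf("tx=%#llx rx=%#llx npages=%u\n",
               (unsigned long long)tx, (unsigned long long)rx, npages);
        ctxt->regs[0] = 0;   /* like ffa_set_retval(): result returns in x0 */
    }

    int main(void)
    {
        struct cpu_ctxt ctxt = { .regs = { 0, 0x8000, 0x9000, 4 } };

        do_rxtx_map(&ctxt);
        return 0;
    }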
/linux/arch/arm64/kvm/hyp/vhe/ |
H A D | sysreg-sr.c | 28 void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_save_host_state_vhe() argument 30 __sysreg_save_common_state(ctxt); in sysreg_save_host_state_vhe() 34 void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_save_guest_state_vhe() argument 36 __sysreg_save_common_state(ctxt); in sysreg_save_guest_state_vhe() 37 __sysreg_save_el2_return_state(ctxt); in sysreg_save_guest_state_vhe() 41 void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_restore_host_state_vhe() argument 43 __sysreg_restore_common_state(ctxt); in sysreg_restore_host_state_vhe() 47 void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) in sysreg_restore_guest_state_vhe() argument 49 __sysreg_restore_common_state(ctxt); in sysreg_restore_guest_state_vhe() 50 __sysreg_restore_el2_return_state(ctxt); in sysreg_restore_guest_state_vhe() [all …]
/linux/arch/x86/xen/ |
H A D | smp_pv.c | 227 struct vcpu_guest_context *ctxt; in cpu_initialize_context() local 234 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); in cpu_initialize_context() 235 if (ctxt == NULL) { in cpu_initialize_context() 247 ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle; in cpu_initialize_context() 248 ctxt->flags = VGCF_IN_KERNEL; in cpu_initialize_context() 249 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ in cpu_initialize_context() 250 ctxt->user_regs.ds = __USER_DS; in cpu_initialize_context() 251 ctxt->user_regs.es = __USER_DS; in cpu_initialize_context() 252 ctxt->user_regs.ss = __KERNEL_DS; in cpu_initialize_context() 253 ctxt->user_regs.cs = __KERNEL_CS; in cpu_initialize_context() [all …]
H A D | pmu.c | 29 #define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \ 30 (uintptr_t)ctxt->field)) in field_offset() argument 201 struct xen_pmu_intel_ctxt *ctxt; in xen_intel_pmu_emulate() local 211 ctxt = &xenpmu_data->pmu.c.intel; in xen_intel_pmu_emulate() 215 reg = &ctxt->global_ovf_ctrl; in xen_intel_pmu_emulate() 218 reg = &ctxt->global_status; in xen_intel_pmu_emulate() 221 reg = &ctxt->global_ctrl; in xen_intel_pmu_emulate() 224 reg = &ctxt->fixed_ctrl; in xen_intel_pmu_emulate() 229 fix_counters = field_offset(ctxt, fixed_counters); in xen_intel_pmu_emulate() 233 arch_cntr_pair = field_offset(ctxt, arch_counters); in xen_intel_pmu_emulate() [all …]
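pmu.c's field_offset() macro is worth pausing on: the context shared with Xen stores member positions as byte offsets relative to the context itself, so both sides can map the page at different virtual addresses, and the macro rebases an offset into a usable pointer. A self-contained demonstration with an invented two-field layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Rebase an offset stored in 'field' against the start of the context. */
    #define field_offset(ctxt, field) \
        ((void *)((uintptr_t)(ctxt) + (uintptr_t)(ctxt)->field))

    struct pmu_ctxt {
        uint64_t fixed_counters;   /* byte offset of the counter array */
        uint64_t data[4];          /* the array itself lives here */
    };

    int main(void)
    {
        struct pmu_ctxt ctxt = { .fixed_counters = sizeof(uint64_t) };
        uint64_t *counters = field_offset(&ctxt, fixed_counters);

        counters[0] = 42;   /* same memory as ctxt.data[0] */
        printf("%llu\n", (unsigned long long)ctxt.data[0]);
        return 0;
    }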
/linux/fs/nilfs2/ |
H A D | btnode.c | 189 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_prepare_change_key() argument 193 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_prepare_change_key() 199 obh = ctxt->bh; in nilfs_btnode_prepare_change_key() 200 ctxt->newbh = NULL; in nilfs_btnode_prepare_change_key() 239 ctxt->newbh = nbh; in nilfs_btnode_prepare_change_key() 252 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_commit_change_key() argument 254 struct buffer_head *obh = ctxt->bh, *nbh = ctxt->newbh; in nilfs_btnode_commit_change_key() 255 __u64 oldkey = ctxt->oldkey, newkey = ctxt->newkey; in nilfs_btnode_commit_change_key() 282 ctxt->bh = nbh; in nilfs_btnode_commit_change_key() 292 struct nilfs_btnode_chkey_ctxt *ctxt) in nilfs_btnode_abort_change_key() argument [all …]
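btnode.c splits the key change into prepare/commit/abort, the classic pattern for making a multi-step update safe against failure: prepare takes every allocation that can fail, commit only flips pointers, abort releases what prepare took. Schematically, with the buffer-head machinery reduced to plain allocations:

    #include <stdlib.h>

    struct chkey_ctxt {
        unsigned long oldkey, newkey;
        char *bh, *newbh;   /* current and (optional) replacement buffer */
    };

    /* Phase 1: acquire every resource that could fail. */
    static int prepare_change_key(struct chkey_ctxt *ctxt)
    {
        ctxt->newbh = malloc(64);
        return ctxt->newbh ? 0 : -1;
    }

    /* Phase 2: flip state; by construction this can no longer fail. */
    static void commit_change_key(struct chkey_ctxt *ctxt)
    {
        free(ctxt->bh);
        ctxt->bh = ctxt->newbh;
        ctxt->newbh = NULL;
    }

    /* Phase 2': roll back as if prepare never happened. */
    static void abort_change_key(struct chkey_ctxt *ctxt)
    {
        free(ctxt->newbh);
        ctxt->newbh = NULL;
    }

    int main(void)
    {
        struct chkey_ctxt ctxt = { .oldkey = 1, .newkey = 2, .bh = malloc(64) };

        if (prepare_change_key(&ctxt) == 0)
            commit_change_key(&ctxt);
        else
            abort_change_key(&ctxt);
        free(ctxt.bh);
        return 0;
    }

Because commit cannot fail, a caller holding other locks can choose between commit and abort without needing an error path in between.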