/linux/tools/testing/selftests/kvm/x86/

hyperv_features.c
   37  static bool is_write_only_msr(uint32_t msr)
   39  	return msr == HV_X64_MSR_EOI;
   42  static void guest_msr(struct msr_data *msr)
   47  	GUEST_ASSERT(msr->idx);
   49  	if (msr->write)
   50  		vector = wrmsr_safe(msr->idx, msr->write_val);
   52  	if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
   53  		vector = rdmsr_safe(msr->idx, &msr_val);
   55  	if (msr->fault_expected)
   58  		     msr->write ? "WR" : "RD", msr->idx, ex_str(vector));
  [all …]

msrs_test.c
   23  #define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2, is_kvm) \
   25  	.index = msr, \
   35  #define __MSR_TEST(msr, str, val, rsvd, reset, feat) \
   36  	____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false)
   38  #define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat) \
   39  	__MSR_TEST(msr, #msr, val, rsvd, reset, feat)
   41  #define MSR_TEST(msr, val, rsvd, feat) \
   42  	__MSR_TEST(msr, #msr, val, rsvd, 0, feat)
   44  #define MSR_TEST2(msr, val, rsvd, feat, f2) \
   45  	____MSR_TEST(msr, #msr, val, rsvd, 0, feat, f2, false)
  [all …]

userspace_msr_exit_test.c
   80  static void deny_msr(uint8_t *bitmap, u32 msr)
   82  	u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);
  145  static noinline uint64_t test_rdmsr(uint32_t msr)
  152  		"=a"(a), "=d"(d) : "c"(msr) : "memory");
  161  static noinline void test_wrmsr(uint32_t msr, uint64_t value)
  169  		"a"(a), "d"(d), "c"(msr) : "memory");
  179  static noinline uint64_t test_em_rdmsr(uint32_t msr)
  186  		"=a"(a), "=d"(d) : "c"(msr) : "memory");
  195  static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
  203  		"a"(a), "d"(d), "c"(msr) : "memory");
  [all …]
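
The "=a"/"=d"/"c" constraints in test_rdmsr()/test_wrmsr() are the standard
RDMSR/WRMSR register convention: the MSR index goes in ECX and the 64-bit
value travels split across EDX:EAX. A minimal self-contained sketch of that
pattern (both instructions #GP outside ring 0, so code like this only runs in
kernel or guest context, as in the selftest above):

	#include <stdint.h>

	static inline uint64_t do_rdmsr(uint32_t msr)
	{
		uint32_t lo, hi;

		/* MSR index in ECX; result comes back split across EDX:EAX. */
		__asm__ volatile("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
		return ((uint64_t)hi << 32) | lo;
	}

	static inline void do_wrmsr(uint32_t msr, uint64_t value)
	{
		uint32_t lo = value, hi = value >> 32;

		/* Inverse convention: value in EDX:EAX, index in ECX. */
		__asm__ volatile("wrmsr" : : "c"(msr), "a"(lo), "d"(hi) : "memory");
	}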

/linux/arch/x86/kernel/cpu/

perfctr-watchdog.c
   45  static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
   51  	if (msr >= MSR_F15H_PERF_CTR)
   52  		return (msr - MSR_F15H_PERF_CTR) >> 1;
   53  	return msr - MSR_K7_PERFCTR0;
   56  	return msr - MSR_ARCH_PERFMON_PERFCTR0;
   60  	return msr - MSR_P6_PERFCTR0;
   62  	return msr - MSR_KNC_PERFCTR0;
   64  	return msr - MSR_P4_BPU_PERFCTR0;
   69  	return msr - MSR_ARCH_PERFMON_PERFCTR0;
   78  static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
  [all …]
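
The `>> 1` in the Fam15h branch exists because the PERF_CTLn/PERF_CTRn MSRs
are interleaved from one base address, so two consecutive MSR numbers describe
a single counter. A standalone sketch of that mapping (the base constant
matches arch/x86/include/asm/msr-index.h as I recall it; verify before relying
on it):

	#include <stdio.h>

	/* Illustrative: 0xc0010201 is MSR_F15H_PERF_CTR upstream. CTL/CTR
	 * pairs are interleaved, so counter N's CTR MSR is base + 2*N and
	 * halving the offset recovers N. */
	#define MSR_F15H_PERF_CTR 0xc0010201u

	static unsigned int f15h_perfctr_msr_to_bit(unsigned int msr)
	{
		return (msr - MSR_F15H_PERF_CTR) >> 1;
	}

	int main(void)
	{
		/* CTR2 lives at base + 4, which maps back to bit/counter 2. */
		printf("%u\n", f15h_perfctr_msr_to_bit(MSR_F15H_PERF_CTR + 4));
		return 0;
	}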

/linux/arch/powerpc/kvm/

book3s_hv_tm.c
   19  	u64 msr = vcpu->arch.shregs.msr;
   23  	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr))
   25  	if (msr & MSR_PR) {
   45  	u64 msr = vcpu->arch.shregs.msr;
   74  	WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
   78  	vcpu->arch.shregs.msr = newmsr;
   84  	if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
   96  	if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) {
  105  	WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) &&
  111  	msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
  [all …]

book3s_hv_tm_builtin.c
   23  	u64 newmsr, msr, bescr;
   45  	vcpu->arch.shregs.msr = newmsr;
   52  	msr = vcpu->arch.shregs.msr;
   53  	if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206))
   57  	    ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB)))
   67  	msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
   68  	vcpu->arch.shregs.msr = msr;
   77  	msr = vcpu->arch.shregs.msr;
   82  	newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE);
   84  	vcpu->arch.shregs.msr = newmsr;
  [all …]

book3s_hv_builtin.c
  507  void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
  510  	msr = (msr | MSR_ME) & ~MSR_HV;
  516  	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
  517  		msr &= ~MSR_TS_MASK;
  518  	__kvmppc_set_msr_hv(vcpu, msr);
  525  	unsigned long msr, pc, new_msr, new_pc;
  527  	msr = kvmppc_get_msr(vcpu);
  533  	if (MSR_TM_TRANSACTIONAL(msr))
  536  		new_msr |= msr & MSR_TS_MASK;
  548  	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
  [all …]
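
kvmppc_set_msr_hv() sanitizes a guest-supplied MSR image: machine-check enable
is forced on, hypervisor state is never granted, and the reserved
transaction-state encoding 0b11 is cleared. A compilable sketch with
approximate bit positions (the authoritative masks live in
arch/powerpc/include/asm/reg.h):

	#include <stdint.h>

	/* Bit positions are illustrative approximations of asm/reg.h:
	 * MSR_ME at bit 12, MSR_HV at bit 60, the two TS bits at 34:33. */
	#define MSR_ME		(1ull << 12)
	#define MSR_HV		(1ull << 60)
	#define MSR_TS_MASK	(3ull << 33)

	static uint64_t sanitize_guest_msr(uint64_t msr)
	{
		msr = (msr | MSR_ME) & ~MSR_HV;	/* force ME on, HV off */
		if ((msr & MSR_TS_MASK) == MSR_TS_MASK)	/* TS=0b11 reserved */
			msr &= ~MSR_TS_MASK;
		return msr;
	}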

/linux/arch/x86/include/asm/

msr-trace.h
    3  #define TRACE_SYSTEM msr
    6  #define TRACE_INCLUDE_FILE msr-trace
   22  	TP_PROTO(unsigned msr, u64 val, int failed),
   23  	TP_ARGS(msr, val, failed),
   25  		__field( unsigned, msr )
   30  		__entry->msr = msr;
   35  		__entry->msr,
   41  	TP_PROTO(unsigned msr, u64 val, int failed),
   42  	TP_ARGS(msr, val, failed)
   46  	TP_PROTO(unsigned msr, u64 val, int failed),
  [all …]

/linux/arch/m68k/bvme6000/

config.c
  166  	unsigned char msr;
  169  	msr = rtc->msr & 0xc0;
  170  	rtc->msr = msr | 0x20;	/* Ack the interrupt */
  191  	unsigned char msr = rtc->msr & 0xc0;
  193  	rtc->msr = 0;		/* Ensure timer registers accessible */
  203  	rtc->msr = 0x40;	/* Access int.cntrl, etc */
  208  	rtc->msr = 0;		/* Access timer 1 control */
  211  	rtc->msr = msr;
  233  	unsigned char msr, msb;
  239  	msr = rtc->msr & 0xc0;
  [all …]
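
Note that `msr` in these m68k files is not a CPU model-specific register:
rtc->msr is the memory-mapped mode/status register of the board's RTC chip,
whose top two bits carry status and whose written value selects a register
bank. A sketch of the recurring save/select/restore pattern (illustrative,
not the driver's actual helper):

	#include <stdint.h>

	/* Preserve the status bits (7:6) of the RTC mode/status register,
	 * switch banks for the access, then restore the previous mode. */
	static inline void rtc_bank_access(volatile uint8_t *msr_reg, uint8_t bank)
	{
		uint8_t saved = *msr_reg & 0xc0;	/* keep status bits */

		*msr_reg = bank;	/* e.g. 0x40 exposes int. control regs */
		/* ... touch the banked timer/control registers here ... */
		*msr_reg = saved;	/* back to the previous bank/mode */
	}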

rtc.c
   42  	unsigned char msr;
   52  	msr = rtc->msr & 0xc0;
   53  	rtc->msr = 0x40;
   66  	rtc->msr = msr;
  108  	msr = rtc->msr & 0xc0;
  109  	rtc->msr = 0x40;
  123  	rtc->msr = msr;

/linux/arch/x86/kvm/svm/

pmu.c
   38  static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
   47  	switch (msr) {
   55  		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
   56  		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
   62  		idx = msr - MSR_K7_EVNTSEL0;
   67  		idx = msr - MSR_K7_PERFCTR0;
   93  static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
   98  	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
   99  	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
  104  static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
  [all …]

/linux/arch/microblaze/kernel/

process.c
   45  		regs->msr, regs->ear, regs->esr, regs->fsr);
   72  		local_save_flags(childregs->msr);
   73  		ti->cpu_context.msr = childregs->msr & ~MSR_IE;
   83  		childregs->msr |= MSR_UMS;
   95  	childregs->msr &= ~MSR_EIP;
   96  	childregs->msr |= MSR_IE;
   97  	childregs->msr &= ~MSR_VM;
   98  	childregs->msr |= MSR_VMS;
   99  	childregs->msr |= MSR_EE;	/* exceptions will be enabled */
  101  	ti->cpu_context.msr = (childregs->msr | MSR_VM);
  [all …]

/linux/arch/powerpc/kernel/

signal_64.c
  130  	unsigned long msr = regs->msr;
  147  		msr |= MSR_VEC;
  163  	msr &= ~MSR_VSX;
  176  		msr |= MSR_VSX;
  181  	unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out);
  210  			       unsigned long msr)
  229  	BUG_ON(!MSR_TM_ACTIVE(msr));
  237  	msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
  251  	if (msr & MSR_VEC)
  263  		msr |= MSR_VEC;
  [all …]

signal_32.c
  271  	unsigned long msr = regs->msr;
  283  		msr |= MSR_VEC;
  302  	msr &= ~MSR_VSX;
  312  		msr |= MSR_VSX;
  322  		msr |= MSR_SPE;
  331  	unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
  370  			    struct mcontext __user *tm_frame, unsigned long msr)
  382  	unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
  388  	if (msr & MSR_VEC)
  400  		msr |= MSR_VEC;
  [all …]

cpu_setup_power.c
   19  	u64 msr;
   21  	msr = mfmsr();
   22  	if (msr & MSR_HV)
  153  	u64 msr;
  155  	msr = mfmsr();
  156  	if (!(msr & MSR_HV))
  185  	u64 msr;
  191  	msr = mfmsr();
  192  	if (!(msr & MSR_HV))
  225  	u64 msr;
  [all …]

process.c
   92  	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
   95  		      tsk->thread.regs->msr);
  153  	unsigned long msr;
  156  	msr = tsk->thread.regs->msr;
  157  	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
  159  		msr &= ~MSR_VSX;
  160  	regs_set_return_msr(tsk->thread.regs, msr);
  189  	if (tsk->thread.regs->msr & MSR_FP) {
  213  	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
  223  	    MSR_TM_ACTIVE(current->thread.regs->msr))
  [all …]

/linux/arch/arm64/kvm/hyp/nvhe/

hyp-init.S
  100  	msr	mair_el2, x1
  112  	msr	tpidr_el2, x0
  122  	msr	tpidr_el2, x1
  125  	msr	vttbr_el2, x1
  128  	msr	vtcr_el2, x1
  135  	msr	ttbr0_el2, x2
  138  	msr	tcr_el2, x0
  160  	msr	sctlr_el2, x0
  165  	msr	vbar_el2, x0
  211  2:	msr	SPsel, #1		// We want to use SP_EL{1,2}
  [all …]
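
In the arm64 files, `msr` is the AArch64 instruction (move to system
register), not a register index; each hit above writes an EL2 control
register during hypervisor setup. From C the same operation is spelled as
inline assembly, e.g. (compiles for AArch64 targets only; the register choice
is illustrative, and most system registers require a privileged exception
level to write):

	/* Write the EL1 software thread-ID register from C. */
	static inline void write_tpidr_el1(unsigned long val)
	{
		__asm__ volatile("msr tpidr_el1, %0" : : "r"(val));
	}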

/linux/tools/power/x86/turbostat/

turbostat.c
  564  int get_msr(int cpu, off_t offset, unsigned long long *msr);
  619  	unsigned long long msr = 3;
  623  	if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
  626  	i = msr & 0xf;
 1346  	unsigned long long msr[NUM_RAPL_COUNTERS];
 1364  	unsigned long long msr;
 1379  		.msr = MSR_PKG_ENERGY_STATUS,
 1392  		.msr = MSR_PKG_ENERGY_STATUS,
 1405  		.msr = MSR_PKG_ENERGY_STAT,
 1418  		.msr = MSR_PKG_ENERGY_STAT,
  [all …]
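
slm_bclk() turns MSR_FSB_FREQ into the Silvermont bus-clock value by using
the MSR's low nibble as an index into a fixed frequency table. A sketch of
that decode (the table values follow turbostat's slm_freq_table as I recall
them; treat them as illustrative):

	/* Low nibble of MSR_FSB_FREQ indexes a fixed BCLK table on
	 * Silvermont; values in MHz, illustrative only. */
	static const double slm_freq_table_mhz[] = {
		83.3, 100.0, 133.3, 116.7, 80.0
	};

	static double slm_bclk_mhz(unsigned long long msr_fsb_freq)
	{
		unsigned int i = msr_fsb_freq & 0xf;

		if (i >= sizeof(slm_freq_table_mhz) / sizeof(slm_freq_table_mhz[0]))
			return 100.0;	/* fall back to the common default */
		return slm_freq_table_mhz[i];
	}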

/linux/arch/powerpc/kernel/ptrace/

ptrace-tm.c
   34  	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
   37  static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
   39  	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
   40  	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
   63  	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
   89  	struct membuf to_msr = membuf_at(&to, offsetof(struct pt_regs, msr));
   97  	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  144  	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  164  			 offsetof(struct pt_regs, msr) + sizeof(long));
  205  	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
  [all …]
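
set_user_ckpt_msr() is a whitelist read-modify-write: only bits inside
MSR_DEBUGCHANGE may be altered from userspace, and every other bit of the
checkpointed MSR is preserved. The generic shape of the pattern, with a
hypothetical mask value:

	#include <stdint.h>

	/* Merge user-supplied bits into a register image while protecting
	 * everything outside an allowed mask; MSR_DEBUGCHANGE plays the
	 * role of ALLOWED_MASK upstream. The value here is illustrative. */
	#define ALLOWED_MASK 0x600ull	/* hypothetical */

	static uint64_t apply_user_bits(uint64_t current_msr, uint64_t user_msr)
	{
		current_msr &= ~ALLOWED_MASK;		/* drop changeable bits */
		current_msr |= user_msr & ALLOWED_MASK;	/* adopt allowed bits */
		return current_msr;
	}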

/linux/tools/power/x86/x86_energy_perf_policy/

x86_energy_perf_policy.c
  688  int get_msr(int cpu, int offset, unsigned long long *msr)
  701  	retval = pread(fd, msr, sizeof(*msr), offset);
  702  	if (retval != sizeof(*msr)) {
  708  		fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr);
  790  	unsigned long long msr;
  792  	get_msr(cpu, msr_offset, &msr);
  794  	cap->highest = msr_perf_2_ratio(HWP_HIGHEST_PERF(msr));
  795  	cap->guaranteed = msr_perf_2_ratio(HWP_GUARANTEED_PERF(msr));
  796  	cap->efficient = msr_perf_2_ratio(HWP_MOSTEFFICIENT_PERF(msr));
  797  	cap->lowest = msr_perf_2_ratio(HWP_LOWEST_PERF(msr));
  [all …]
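
get_msr() here goes through the msr driver, which exposes each logical CPU's
MSR space as the character device /dev/cpu/N/msr with the MSR index serving
as the file offset. A self-contained userspace sketch of the same idea
(requires root, and typically `modprobe msr` first):

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Read one MSR of one CPU via the msr character device. */
	static int read_msr(int cpu, uint32_t index, uint64_t *val)
	{
		char path[64];
		ssize_t n;
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		n = pread(fd, val, sizeof(*val), index);	/* offset = MSR index */
		close(fd);
		return n == sizeof(*val) ? 0 : -1;
	}

	int main(void)
	{
		uint64_t tsc;

		if (read_msr(0, 0x10 /* IA32_TIME_STAMP_COUNTER */, &tsc))
			perror("read_msr");
		else
			printf("TSC: %llu\n", (unsigned long long)tsc);
		return 0;
	}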

/linux/arch/arm64/kernel/

hyp-stub.S
   51  	msr	vbar_el2, x1
  112  	msr	tpidr_el2, x0
  116  	msr	cpacr_el1, x0
  118  	msr	vbar_el1, x0
  124  	msr	mdcr_el2, x0
  128  	msr	tcr_el1, x0
  130  	msr	ttbr0_el1, x0
  132  	msr	ttbr1_el1, x0
  134  	msr	mair_el1, x0
  139  	msr	REG_TCR2_EL1, x0
  [all …]

/linux/arch/x86/kvm/vmx/

pmu_intel.c
  144  static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
  149  	return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
  186  static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
  192  	switch (msr) {
  207  		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
  208  		      get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
  209  		      get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
  210  		      intel_pmu_is_valid_lbr_msr(vcpu, msr);
  217  static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
  222  	pmc = get_fixed_pmc(pmu, msr);
  [all …]

/linux/arch/x86/kvm/

pmu.h
   29  	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
   31  	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
  135  static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
  138  	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
  139  		u32 index = array_index_nospec(msr - base,
  149  static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
  153  	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
  154  		u32 index = array_index_nospec(msr - base,
  220  bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
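
The array_index_nospec() call that follows the range check in get_gp_pmc() is
the kernel's Spectre-v1 mitigation: even if the bounds branch is speculatively
mispredicted, the clamped index cannot reach past the array. A kernel-context
sketch of the pattern:

	#include <linux/nospec.h>
	#include <linux/types.h>

	/* Validate architecturally, then clamp the index so a mispredicted
	 * bounds check cannot drive a speculative out-of-bounds load that
	 * leaks data through the cache. */
	static u64 read_counter(u64 *counters, u32 nr, u32 idx)
	{
		if (idx >= nr)
			return 0;
		idx = array_index_nospec(idx, nr);
		return counters[idx];
	}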

hyperv.c
  245  static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
  251  	hv_vcpu->exit.u.synic.msr = msr;
  260  			 u32 msr, u64 data, bool host)
  268  	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
  271  	switch (msr) {
  275  		synic_exit(synic, msr);
  294  		synic_exit(synic, msr);
  306  		synic_exit(synic, msr);
  319  		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
  340  	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
  [all …]

/linux/tools/power/cpupower/debug/i386/

centrino-decode.c
   28  static int rdmsr(unsigned int cpu, unsigned int msr,
   47  	if (lseek(fd, msr, SEEK_CUR) == -1)
   63  static void decode(unsigned int msr)
   68  	multiplier = ((msr >> 8) & 0xFF);
   70  	mv = (((msr & 0xFF) * 16) + 700);
   72  	printf("0x%x means multiplier %d @ %d mV\n", msr, multiplier, mv);
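
The decode is plain fixed-point arithmetic: bits 15:8 of the PERF_STATUS
value hold the bus multiplier, and bits 7:0 encode the core voltage in 16 mV
steps above a 700 mV floor. A standalone version with a worked example:

	#include <stdio.h>

	/* Same arithmetic as decode() above. Worked example: 0x1326 gives
	 * multiplier 0x13 = 19 and voltage 0x26 * 16 + 700 = 1308 mV. */
	static void decode(unsigned int msr)
	{
		unsigned int multiplier = (msr >> 8) & 0xff;
		unsigned int mv = (msr & 0xff) * 16 + 700;

		printf("0x%x means multiplier %u @ %u mV\n", msr, multiplier, mv);
	}

	int main(void)
	{
		decode(0x1326);
		return 0;
	}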