
Searched full:msr (Results 1 – 25 of 680) sorted by relevance


/linux/tools/testing/selftests/kvm/x86/
hyperv_features.c
37 static bool is_write_only_msr(uint32_t msr) in is_write_only_msr() argument
39 return msr == HV_X64_MSR_EOI; in is_write_only_msr()
42 static void guest_msr(struct msr_data *msr) in guest_msr() argument
47 GUEST_ASSERT(msr->idx); in guest_msr()
49 if (msr->write) in guest_msr()
50 vector = wrmsr_safe(msr->idx, msr->write_val); in guest_msr()
52 if (!vector && (!msr->write || !is_write_only_msr(msr->idx))) in guest_msr()
53 vector = rdmsr_safe(msr->idx, &msr_val); in guest_msr()
55 if (msr->fault_expected) in guest_msr()
58 msr->write ? "WR" : "RD", msr->idx, ex_str(vector)); in guest_msr()
[all …]
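
The probe pattern above is worth spelling out: wrmsr_safe()/rdmsr_safe() are KVM selftest helpers that return the exception vector raised by the access (0 on success), so the guest can assert whether an MSR faults. A minimal sketch of the same logic; GP_VECTOR and GUEST_ASSERT() come from the selftest headers, and probe_msr() is a hypothetical wrapper, not a function in this file:

```c
/* Sketch of the guest-side probe: try a write, then a read, and check
 * the resulting exception vector against the expectation.
 * wrmsr_safe()/rdmsr_safe() return 0 or the vector of the fault. */
static void probe_msr(uint32_t idx, uint64_t wval, bool fault_expected)
{
	uint64_t rval;
	uint8_t vector;

	vector = wrmsr_safe(idx, wval);
	if (!vector)
		vector = rdmsr_safe(idx, &rval);

	if (fault_expected)
		GUEST_ASSERT(vector == GP_VECTOR);	/* access must #GP */
	else
		GUEST_ASSERT(!vector);			/* access must succeed */
}
```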
msrs_test.c
2 #include <asm/msr-index.h>
23 #define ____MSR_TEST(msr, str, val, rsvd, reset, feat, f2, is_kvm) \ argument
25 .index = msr, \
35 #define __MSR_TEST(msr, str, val, rsvd, reset, feat) \ argument
36 ____MSR_TEST(msr, str, val, rsvd, reset, feat, feat, false)
38 #define MSR_TEST_NON_ZERO(msr, val, rsvd, reset, feat) \ argument
39 __MSR_TEST(msr, #msr, val, rsvd, reset, feat)
41 #define MSR_TEST(msr, val, rsvd, feat) \ argument
42 __MSR_TEST(msr, #msr, val, rsvd, 0, feat)
44 #define MSR_TEST2(msr, val, rsvd, feat, f2) \ argument
[all …]
userspace_msr_exit_test.c
24 /* Test an MSR the kernel knows about. */
31 /* Test an MSR the kernel doesn't know about. */
38 /* Test a fabricated MSR that no one knows about. */
80 static void deny_msr(uint8_t *bitmap, u32 msr) in deny_msr() argument
82 u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1); in deny_msr()
145 static noinline uint64_t test_rdmsr(uint32_t msr) in test_rdmsr() argument
152 "=a"(a), "=d"(d) : "c"(msr) : "memory"); in test_rdmsr()
161 static noinline void test_wrmsr(uint32_t msr, uint64_t value) in test_wrmsr() argument
169 "a"(a), "d"(d), "c"(msr) : "memory"); in test_wrmsr()
179 static noinline uint64_t test_em_rdmsr(uint32_t msr) in test_em_rdmsr() argument
[all …]
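
For reference, the inline assembly in test_rdmsr()/test_wrmsr() is the standard RDMSR/WRMSR encoding: ECX carries the MSR index and EDX:EAX carries the 64-bit value. A self-contained version of the same wrappers (these are privileged instructions; they raise #GP outside ring 0):

```c
#include <stdint.h>

/* RDMSR: ECX = MSR index, result returned in EDX:EAX. */
static inline uint64_t do_rdmsr(uint32_t msr)
{
	uint32_t lo, hi;

	__asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr) : "memory");
	return lo | ((uint64_t)hi << 32);
}

/* WRMSR: writes EDX:EAX to the MSR indexed by ECX. */
static inline void do_wrmsr(uint32_t msr, uint64_t value)
{
	__asm__ __volatile__("wrmsr" : :
			     "a"((uint32_t)value), "d"((uint32_t)(value >> 32)),
			     "c"(msr) : "memory");
}
```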
/linux/arch/x86/kernel/cpu/
perfctr-watchdog.c
44 /* converts an msr to an appropriate reservation bit */
45 static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr) in nmi_perfctr_msr_to_bit() argument
51 if (msr >= MSR_F15H_PERF_CTR) in nmi_perfctr_msr_to_bit()
52 return (msr - MSR_F15H_PERF_CTR) >> 1; in nmi_perfctr_msr_to_bit()
53 return msr - MSR_K7_PERFCTR0; in nmi_perfctr_msr_to_bit()
56 return msr - MSR_ARCH_PERFMON_PERFCTR0; in nmi_perfctr_msr_to_bit()
60 return msr - MSR_P6_PERFCTR0; in nmi_perfctr_msr_to_bit()
62 return msr - MSR_KNC_PERFCTR0; in nmi_perfctr_msr_to_bit()
64 return msr - MSR_P4_BPU_PERFCTR0; in nmi_perfctr_msr_to_bit()
69 return msr - MSR_ARCH_PERFMON_PERFCTR0; in nmi_perfctr_msr_to_bit()
[all …]
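
The mapping above normalizes each CPU family's counter MSRs to zero-based bits in a reservation bitmap. The only wrinkle is AMD Family 15h, where control and counter registers interleave, so two MSRs correspond to one counter. An annotated sketch of the AMD branch:

```c
/* Map a perfctr MSR to its reservation-bitmap bit (AMD case).
 * On F15h+ the CTL/CTR registers interleave, so each counter
 * spans two consecutive MSRs and the offset is halved. */
if (msr >= MSR_F15H_PERF_CTR)
	return (msr - MSR_F15H_PERF_CTR) >> 1;	/* two MSRs per counter */
return msr - MSR_K7_PERFCTR0;			/* one MSR per counter  */
```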
umwait.c
6 #include <asm/msr.h>
16 * Cache IA32_UMWAIT_CONTROL MSR. This is a systemwide control. By default,
22 * Cache the original IA32_UMWAIT_CONTROL MSR value which is configured by
28 * Serialize access to umwait_control_cached and IA32_UMWAIT_CONTROL MSR in
40 * The CPU hotplug callback sets the control MSR to the global control
63 * The CPU hotplug callback sets the control MSR to the original control
71 * the original control MSR value in umwait_init(). So there in umwait_cpu_offline()
80 * On resume, restore IA32_UMWAIT_CONTROL MSR on the boot processor which
81 * is the only active CPU at this time. The MSR is set up on the APs via the
105 * When bit 0 in IA32_UMWAIT_CONTROL MSR is 1, C0.2 is disabled.
[all …]
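
Per the comments above, IA32_UMWAIT_CONTROL packs two fields: bit 0 disables C0.2 when set, and bits 31:2 cap the wait time in TSC quanta (the low two bits of the time are always zero). A sketch of composing such a value; the helper below is illustrative, not the kernel's own macro:

```c
#include <stdint.h>

/* IA32_UMWAIT_CONTROL layout (per the comments above):
 *   bit 0      - 1 = C0.2 disabled
 *   bit 1      - reserved
 *   bits 31:2  - maximum wait time, in TSC quanta (low 2 bits forced to 0)
 */
#define UMWAIT_C02_DISABLED	0x1u

static inline uint32_t umwait_ctrl(uint32_t max_time, int disable_c02)
{
	return (max_time & ~0x3u) | (disable_c02 ? UMWAIT_C02_DISABLED : 0);
}
```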
/linux/arch/powerpc/kvm/
book3s_hv_tm.c
19 u64 msr = vcpu->arch.shregs.msr; in emulate_tx_failure() local
23 if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) in emulate_tx_failure()
25 if (msr & MSR_PR) { in emulate_tx_failure()
45 u64 msr = vcpu->arch.shregs.msr; in kvmhv_p9_tm_emulation() local
74 WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && in kvmhv_p9_tm_emulation()
78 vcpu->arch.shregs.msr = newmsr; in kvmhv_p9_tm_emulation()
84 if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) { in kvmhv_p9_tm_emulation()
96 if ((msr & MSR_PR) && !(vcpu->arch.fscr & FSCR_EBB)) { in kvmhv_p9_tm_emulation()
105 WARN_ON_ONCE(!(MSR_TM_SUSPENDED(msr) && in kvmhv_p9_tm_emulation()
111 msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; in kvmhv_p9_tm_emulation()
[all …]
book3s_hv_tm_builtin.c
18 * (MSR[TS] = S and the fake-suspend flag is not set).
23 u64 newmsr, msr, bescr; in kvmhv_p9_tm_emulation_early() local
45 vcpu->arch.shregs.msr = newmsr; in kvmhv_p9_tm_emulation_early()
52 msr = vcpu->arch.shregs.msr; in kvmhv_p9_tm_emulation_early()
53 if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) in kvmhv_p9_tm_emulation_early()
57 ((msr & MSR_PR) && !(mfspr(SPRN_FSCR) & FSCR_EBB))) in kvmhv_p9_tm_emulation_early()
67 msr = (msr & ~MSR_TS_MASK) | MSR_TS_T; in kvmhv_p9_tm_emulation_early()
68 vcpu->arch.shregs.msr = msr; in kvmhv_p9_tm_emulation_early()
77 msr = vcpu->arch.shregs.msr; in kvmhv_p9_tm_emulation_early()
82 newmsr = (newmsr & ~MSR_LE) | (msr & MSR_LE); in kvmhv_p9_tm_emulation_early()
[all …]
/linux/arch/x86/include/asm/
msr-trace.h
3 #define TRACE_SYSTEM msr
6 #define TRACE_INCLUDE_FILE msr-trace
22 TP_PROTO(unsigned msr, u64 val, int failed),
23 TP_ARGS(msr, val, failed),
25 __field( unsigned, msr )
30 __entry->msr = msr;
35 __entry->msr,
41 TP_PROTO(unsigned msr, u64 val, int failed),
42 TP_ARGS(msr, val, failed)
46 TP_PROTO(unsigned msr, u64 val, int failed),
[all …]
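
The fragment is the usual tracepoint boilerplate: one DECLARE_EVENT_CLASS shared by the read and write events, each instantiated with DEFINE_EVENT. Condensed, the pattern looks like this (abbreviated from the file above; the printk format is a sketch):

```c
DECLARE_EVENT_CLASS(msr_trace_class,
	TP_PROTO(unsigned msr, u64 val, int failed),
	TP_ARGS(msr, val, failed),
	TP_STRUCT__entry(
		__field(unsigned, msr)
		__field(u64, val)
		__field(int, failed)
	),
	TP_fast_assign(
		__entry->msr = msr;
		__entry->val = val;
		__entry->failed = failed;
	),
	TP_printk("%x, value %llx%s", __entry->msr, __entry->val,
		  __entry->failed ? " #GP" : "")
);

/* read_msr and write_msr reuse the class with identical prototypes. */
DEFINE_EVENT(msr_trace_class, read_msr,
	TP_PROTO(unsigned msr, u64 val, int failed),
	TP_ARGS(msr, val, failed)
);
```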
/linux/arch/powerpc/kernel/
signal_64.c
130 unsigned long msr = regs->msr; in __unsafe_setup_sigcontext() local
144 /* set MSR_VEC in the MSR value in the frame to indicate that sc->v_reg) in __unsafe_setup_sigcontext()
147 msr |= MSR_VEC; in __unsafe_setup_sigcontext()
160 * Clear the MSR VSX bit to indicate there is no valid state attached in __unsafe_setup_sigcontext()
163 msr &= ~MSR_VSX; in __unsafe_setup_sigcontext()
173 /* set MSR_VSX in the MSR value in the frame to in __unsafe_setup_sigcontext()
176 msr |= MSR_VSX; in __unsafe_setup_sigcontext()
181 unsafe_put_user(msr, &sc->gp_regs[PT_MSR], efault_out); in __unsafe_setup_sigcontext()
210 unsigned long msr) in setup_tm_sigcontexts() argument
229 BUG_ON(!MSR_TM_ACTIVE(msr)); in setup_tm_sigcontexts()
[all …]
signal_32.c
158 /* copy up to but not including MSR */ in __unsafe_restore_general_regs()
161 /* copy from orig_r3 (the word after the MSR) up to the end */ in __unsafe_restore_general_regs()
271 unsigned long msr = regs->msr; in __unsafe_save_user_regs() local
281 /* set MSR_VEC in the saved MSR value to indicate that in __unsafe_save_user_regs()
283 msr |= MSR_VEC; in __unsafe_save_user_regs()
285 /* else assert((regs->msr & MSR_VEC) == 0) */ in __unsafe_save_user_regs()
299 * Clear the MSR VSX bit to indicate there is no valid state attached in __unsafe_save_user_regs()
302 msr &= ~MSR_VSX; in __unsafe_save_user_regs()
307 * the saved MSR value to indicate that frame->mc_vregs in __unsafe_save_user_regs()
312 msr |= MSR_VSX; in __unsafe_save_user_regs()
[all …]
cpu_setup_power.c
16 /* Disable CPU_FTR_HVMODE and return false if MSR:HV is not set */
19 u64 msr; in init_hvmode_206() local
21 msr = mfmsr(); in init_hvmode_206()
22 if (msr & MSR_HV) in init_hvmode_206()
153 u64 msr; in __restore_cpu_power7() local
155 msr = mfmsr(); in __restore_cpu_power7()
156 if (!(msr & MSR_HV)) in __restore_cpu_power7()
185 u64 msr; in __restore_cpu_power8() local
191 msr = mfmsr(); in __restore_cpu_power8()
192 if (!(msr & MSR_HV)) in __restore_cpu_power8()
[all …]
process.c
92 MSR_TM_ACTIVE(tsk->thread.regs->msr) && in check_if_tm_restore_required()
95 tsk->thread.regs->msr); in check_if_tm_restore_required()
153 unsigned long msr; in __giveup_fpu() local
156 msr = tsk->thread.regs->msr; in __giveup_fpu()
157 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1); in __giveup_fpu()
159 msr &= ~MSR_VSX; in __giveup_fpu()
160 regs_set_return_msr(tsk->thread.regs, msr); in __giveup_fpu()
182 * another process could get scheduled after the regs->msr in flush_fp_to_thread()
189 if (tsk->thread.regs->msr & MSR_FP) { in flush_fp_to_thread()
213 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) { in enable_kernel_fp()
[all …]
kvm_emul.S
57 /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
63 /* OR the register's (MSR_EE|MSR_RI) on MSR */
69 /* Put MSR back into magic page */
123 /* Fetch old MSR in r31 */
126 /* Find the changed bits between old and new MSR */
154 /* Put MSR into magic page because we don't call mtmsr */
203 /* Fetch old MSR in r31 */
206 /* Insert new MSR[EE] */
212 * If MSR[EE] is now set, check for a pending interrupt.
213 * We could skip this if MSR[EE] was already on, but that
[all …]
/linux/Documentation/trace/
events-msr.rst
2 MSR Trace Events
5 The x86 kernel supports tracing most MSR (Model Specific Register) accesses.
11 /sys/kernel/tracing/events/msr/
13 Trace MSR reads:
17 - msr: MSR number
22 Trace MSR writes:
26 - msr: MSR number
37 cat /sys/kernel/tracing/trace | decode_msr.py /usr/src/linux/include/asm/msr-index.h
39 to add symbolic MSR names.
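
To make the workflow above concrete, here is a small userspace sketch that enables the read_msr event and streams the trace buffer; it assumes tracefs is mounted at /sys/kernel/tracing and root privileges:

```c
#include <stdio.h>

int main(void)
{
	/* Enable the msr:read_msr tracepoint... */
	FILE *f = fopen("/sys/kernel/tracing/events/msr/read_msr/enable", "w");

	if (!f)
		return 1;
	fputs("1", f);
	fclose(f);

	/* ...then stream events as they arrive. */
	f = fopen("/sys/kernel/tracing/trace_pipe", "r");
	if (!f)
		return 1;
	for (int c; (c = fgetc(f)) != EOF;)
		putchar(c);
	return 0;
}
```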
/linux/arch/microblaze/kernel/
process.c
44 pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n", in show_regs()
45 regs->msr, regs->ear, regs->esr, regs->fsr); in show_regs()
72 local_save_flags(childregs->msr); in copy_thread()
73 ti->cpu_context.msr = childregs->msr & ~MSR_IE; in copy_thread()
83 childregs->msr |= MSR_UMS; in copy_thread()
87 * before enabling VM. This MSR will be restored in switch_to and in copy_thread()
91 * compose the right MSR for RETURN(). It will work for switch_to also in copy_thread()
94 * right now MSR is a copy of parent one */ in copy_thread()
95 childregs->msr &= ~MSR_EIP; in copy_thread()
96 childregs->msr |= MSR_IE; in copy_thread()
[all …]
/linux/arch/x86/kvm/svm/
pmu.c
38 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc_amd() argument
47 switch (msr) { in get_gp_pmc_amd()
55 idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2); in get_gp_pmc_amd()
56 if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL)) in get_gp_pmc_amd()
62 idx = msr - MSR_K7_EVNTSEL0; in get_gp_pmc_amd()
67 idx = msr - MSR_K7_PERFCTR0; in get_gp_pmc_amd()
93 static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr) in amd_msr_idx_to_pmc() argument
98 pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER); in amd_msr_idx_to_pmc()
99 pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL); in amd_msr_idx_to_pmc()
104 static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) in amd_is_valid_msr() argument
[all …]
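
The index math in get_gp_pmc_amd() reflects the AMD core-extension layout, where event-select and counter MSRs alternate (PERF_CTL0, PERF_CTR0, PERF_CTL1, ...). Halving the offset yields the counter index, and the offset's parity distinguishes the two register types. A sketch of that decode:

```c
/* Core-extension PMU MSRs alternate CTL/CTR from MSR_F15H_PERF_CTL0:
 * offset 0 = CTL0, 1 = CTR0, 2 = CTL1, 3 = CTR1, ...
 * so pair index = offset / 2, and the low bit tells CTL from CTR. */
unsigned int off = msr - MSR_F15H_PERF_CTL0;
unsigned int idx = off / 2;		/* which counter pair */
bool is_evntsel  = !(off & 1);		/* even offset = CTL  */
```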
/linux/arch/m68k/bvme6000/
config.c
166 unsigned char msr; in bvme6000_timer_int() local
169 msr = rtc->msr & 0xc0; in bvme6000_timer_int()
170 rtc->msr = msr | 0x20; /* Ack the interrupt */ in bvme6000_timer_int()
191 unsigned char msr = rtc->msr & 0xc0; in bvme6000_sched_init() local
193 rtc->msr = 0; /* Ensure timer registers accessible */ in bvme6000_sched_init()
203 rtc->msr = 0x40; /* Access int.cntrl, etc */ in bvme6000_sched_init()
208 rtc->msr = 0; /* Access timer 1 control */ in bvme6000_sched_init()
211 rtc->msr = msr; in bvme6000_sched_init()
233 unsigned char msr, msb; in bvme6000_read_clk() local
239 msr = rtc->msr & 0xc0; in bvme6000_read_clk()
[all …]
rtc.c
42 unsigned char msr; in rtc_ioctl() local
52 msr = rtc->msr & 0xc0; in rtc_ioctl()
53 rtc->msr = 0x40; in rtc_ioctl()
66 rtc->msr = msr; in rtc_ioctl()
108 msr = rtc->msr & 0xc0; in rtc_ioctl()
109 rtc->msr = 0x40; in rtc_ioctl()
123 rtc->msr = msr; in rtc_ioctl()
/linux/arch/arm64/kvm/hyp/nvhe/
hyp-init.S
100 msr mair_el2, x1
112 msr tpidr_el2, x0
122 msr tpidr_el2, x1
125 msr vttbr_el2, x1
128 msr vtcr_el2, x1
135 msr ttbr0_el2, x2
138 msr tcr_el2, x0
160 msr sctlr_el2, x0
165 msr vbar_el2, x0
211 2: msr SPsel, #1 // We want to use SP_EL{1,2}
[all …]
/linux/tools/power/x86/x86_energy_perf_policy/
x86_energy_perf_policy.c
121 * then we must translate between MSR format and simple ratio
688 int get_msr(int cpu, int offset, unsigned long long *msr) in get_msr() argument
694 sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu); in get_msr()
698 pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr"); in get_msr()
701 retval = pread(fd, msr, sizeof(*msr), offset); in get_msr()
702 if (retval != sizeof(*msr)) { in get_msr()
708 fprintf(stderr, "get_msr(cpu%d, 0x%X, 0x%llX)\n", cpu, offset, *msr); in get_msr()
720 sprintf(pathname, use_android_msr_path ? "/dev/msr%d" : "/dev/cpu/%d/msr", cpu); in put_msr()
724 pathname, use_android_msr_path ? "/dev/msr*" : "/dev/cpu/*/msr"); in put_msr()
790 unsigned long long msr; in read_hwp_cap() local
[all …]
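
get_msr() above relies on the msr driver's character device: each CPU exposes /dev/cpu/<n>/msr, and the MSR index is simply the file offset passed to pread(). A standalone userspace sketch of the same read path (needs the msr module loaded and root privileges):

```c
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Read one MSR on one CPU through /dev/cpu/<cpu>/msr.
 * Returns 0 on success, -1 on any failure. */
static int read_msr(int cpu, uint32_t index, uint64_t *val)
{
	char path[64];
	int fd, ok;

	snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	/* The MSR index is the offset into the device file. */
	ok = pread(fd, val, sizeof(*val), index) == sizeof(*val);
	close(fd);
	return ok ? 0 : -1;
}
```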
/linux/Documentation/virt/kvm/x86/
msr.rst
15 Custom MSR list
18 The current supported Custom MSR list is:
35 guaranteed to update this data at the moment of MSR write.
37 to write more than once to this MSR. Fields have the following meanings:
54 particular MSR is global.
56 Availability of this MSR must be checked via bit 3 in 0x4000001 cpuid
144 Availability of this MSR must be checked via bit 3 in 0x4000001 cpuid
154 This MSR falls outside the reserved KVM range and may be removed in the
157 Availability of this MSR must be checked via bit 0 in 0x4000001 cpuid
166 This MSR falls outside the reserved KVM range and may be removed in the
[all …]
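
Several entries above gate an MSR on a feature bit in the KVM CPUID leaf. A guest-side sketch of such a check; it assumes the KVM features leaf is 0x40000001 (the snippet's "0x4000001" looks truncated) and that the bit is reported in EAX:

```c
#include <cpuid.h>	/* GCC/Clang __get_cpuid() helper */
#include <stdbool.h>

/* Test one KVM paravirtual feature bit (CPUID.0x40000001:EAX). */
static bool kvm_para_has_feature(unsigned int bit)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x40000001, &eax, &ebx, &ecx, &edx))
		return false;
	return eax & (1u << bit);
}
```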
/linux/tools/power/x86/turbostat/
turbostat.c
564 int get_msr(int cpu, off_t offset, unsigned long long *msr);
619 unsigned long long msr = 3; in slm_bclk() local
623 if (get_msr(base_cpu, MSR_FSB_FREQ, &msr)) in slm_bclk()
626 i = msr & 0xf; in slm_bclk()
1012 .has_msr_module_c6_res_ms = 1, /* DMR has Dual-Core-Module and MC6 MSR */
1346 unsigned long long msr[NUM_RAPL_COUNTERS]; member
1364 unsigned long long msr; member
1367 …double *platform_rapl_msr_scale; /* Scale applied to values read by MSR (platform dependent, fille…
1379 .msr = MSR_PKG_ENERGY_STATUS,
1392 .msr = MSR_PKG_ENERGY_STATUS,
[all …]
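
slm_bclk() above decodes the bus clock by using the low nibble of MSR_FSB_FREQ as a table index. A sketch of that decode; the table values here are illustrative and should be checked against turbostat.c:

```c
/* Silvermont: low 4 bits of MSR_FSB_FREQ select the bus clock (MHz).
 * Table contents are illustrative -- consult turbostat.c for the
 * authoritative values. */
static const double slm_freq_mhz[] = { 83.3, 100.0, 133.3, 116.7, 80.0 };

static double slm_bclk_mhz(unsigned long long msr)
{
	unsigned int i = msr & 0xf;

	if (i >= sizeof(slm_freq_mhz) / sizeof(slm_freq_mhz[0]))
		return 100.0;	/* fall back to the common 100 MHz */
	return slm_freq_mhz[i];
}
```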
/linux/arch/arm64/kernel/
hyp-stub.S
51 msr vbar_el2, x1
112 msr tpidr_el2, x0
116 msr cpacr_el1, x0
118 msr vbar_el1, x0
124 msr mdcr_el2, x0
128 msr tcr_el1, x0
130 msr ttbr0_el1, x0
132 msr ttbr1_el1, x0
134 msr mair_el1, x0
139 msr REG_TCR2_EL1, x0
[all …]
/linux/arch/x86/kvm/
pmu.h
29 struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
31 bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
52 * greater than zero. However, KVM only exposes and emulates the MSR in kvm_pmu_has_perf_global_ctrl()
131 /* returns general purpose PMC with the specified MSR. Note that it can be
135 static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr, in get_gp_pmc() argument
138 if (msr >= base && msr < base + pmu->nr_arch_gp_counters) { in get_gp_pmc()
139 u32 index = array_index_nospec(msr - base, in get_gp_pmc()
148 /* returns fixed PMC with the specified MSR */
149 static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr) in get_fixed_pmc() argument
153 if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) { in get_fixed_pmc()
[all …]
/linux/tools/power/cpupower/debug/i386/
centrino-decode.c
10 * or pass the CPU number as argument, or pass the MSR content
28 static int rdmsr(unsigned int cpu, unsigned int msr, in rdmsr() argument
41 sprintf(file, "/dev/cpu/%d/msr", cpu); in rdmsr()
47 if (lseek(fd, msr, SEEK_CUR) == -1) in rdmsr()
63 static void decode (unsigned int msr) in decode() argument
68 multiplier = ((msr >> 8) & 0xFF); in decode()
70 mv = (((msr & 0xFF) * 16) + 700); in decode()
72 printf("0x%x means multiplier %d @ %d mV\n", msr, multiplier, mv); in decode()
85 printf("or you are not root, or the msr driver is not present\n"); in decode_live()
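
decode() above unpacks an Enhanced SpeedStep state word: bits 15:8 are the bus multiplier and bits 7:0 encode the core voltage in 16 mV steps above 700 mV. A worked example:

```c
#include <stdio.h>

int main(void)
{
	unsigned int msr = 0x0f13;			/* example state word */
	unsigned int multiplier = (msr >> 8) & 0xff;	/* 0x0f -> 15         */
	unsigned int mv = ((msr & 0xff) * 16) + 700;	/* 0x13*16+700 = 1004 */

	printf("0x%x means multiplier %u @ %u mV\n", msr, multiplier, mv);
	return 0;
}
```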
