// SPDX-License-Identifier: GPL-2.0-only
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"

/*
 * Map an MTRR MSR index to the u64 that backs it in the vCPU's MTRR state.
 * Returns NULL for MSRs that are not MTRR MSRs emulated by KVM.
 */
static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr)
{
	int index;

	switch (msr) {
	case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
		/* var[] mirrors the interleaved PhysBase/PhysMask MSR layout. */
		index = msr - MTRRphysBase_MSR(0);
		return &vcpu->arch.mtrr_state.var[index];
	case MSR_MTRRfix64K_00000:
		return &vcpu->arch.mtrr_state.fixed_64k;
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
		index = msr - MSR_MTRRfix16K_80000;
		return &vcpu->arch.mtrr_state.fixed_16k[index];
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
		index = msr - MSR_MTRRfix4K_C0000;
		return &vcpu->arch.mtrr_state.fixed_4k[index];
	case MSR_MTRRdefType:
		return &vcpu->arch.mtrr_state.deftype;
	default:
		break;
	}
	return NULL;
}

/* The architecturally defined memory types are UC, WC, WT, WP and WB. */
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (msr == MSR_MTRRdefType) {
		/* Only the E and FE flags and the default type are writable. */
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		/* Each fixed-range MSR packs eight one-byte memory types. */
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	if (WARN_ON_ONCE(!(msr >= MTRRphysBase_MSR(0) &&
			   msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1))))
		return false;

	mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base: bits 11:8 above the type field are reserved. */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else {
		/* MTRR mask: bits 10:0 below the V flag are reserved. */
		mask |= 0x7ff;
	}

	return (data & mask) == 0;
}

int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	u64 *mtrr;

	mtrr = find_mtrr(vcpu, msr);
	if (!mtrr)
		return 1;

	if (!kvm_mtrr_valid(vcpu, msr, data))
		return 1;

	*mtrr = data;
	return 0;
}

int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 *mtrr;

	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		/*
		 * SMRR = 0
		 * WC = 1
		 * FIX = 1
		 * VCNT = KVM_NR_VAR_MTRR
		 */
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
		return 0;
	}

	mtrr = find_mtrr(vcpu, msr);
	if (!mtrr)
		return 1;

	*pdata = *mtrr;
	return 0;
}