/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2014, Neel Natu (neel@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bhyve_snapshot.h"

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>

#include <machine/cpufunc.h>
#include <machine/specialreg.h>
#include <machine/vmm.h>

#include "svm.h"
#include "vmcb.h"
#include "svm_softc.h"
#include "svm_msr.h"

#ifndef MSR_AMDK8_IPM
#define	MSR_AMDK8_IPM	0xc0010055
#endif

enum {
	IDX_MSR_LSTAR,
	IDX_MSR_CSTAR,
	IDX_MSR_STAR,
	IDX_MSR_SF_MASK,
	HOST_MSR_NUM		/* must be the last enumeration */
};

static uint64_t host_msrs[HOST_MSR_NUM];

void
svm_msr_init(void)
{
	/*
	 * It is safe to cache the values of the following MSRs because they
	 * don't change based on curcpu, curproc or curthread.
	 */
	host_msrs[IDX_MSR_LSTAR] = rdmsr(MSR_LSTAR);
	host_msrs[IDX_MSR_CSTAR] = rdmsr(MSR_CSTAR);
	host_msrs[IDX_MSR_STAR] = rdmsr(MSR_STAR);
	host_msrs[IDX_MSR_SF_MASK] = rdmsr(MSR_SF_MASK);
}

void
svm_msr_guest_init(struct svm_softc *sc, int vcpu)
{
	/*
	 * All the MSRs accessible to the guest are either saved/restored by
	 * hardware on every #VMEXIT/VMRUN (e.g., G_PAT) or are saved/restored
	 * by VMSAVE/VMLOAD (e.g., MSR_GSBASE).
	 *
	 * There are no guest MSRs that are saved/restored "by hand" so nothing
	 * more to do here.
	 */
	return;
}

void
svm_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save host MSRs (if any) and restore guest MSRs (if any).
	 */
}
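/*
 * Illustrative sketch, not part of the original file: if some guest MSR ever
 * did need to be swapped "by hand" (i.e. not covered by #VMEXIT/VMRUN or
 * VMSAVE/VMLOAD), the enter/exit pair above and below would bracket VMRUN the
 * same way the host syscall MSRs are handled.  MSR_EXAMPLE, IDX_MSR_EXAMPLE
 * and the guest_msrs[] array are hypothetical names for illustration only;
 * they do not exist in svm_softc.
 */
#if 0
static void
example_msr_guest_enter(struct svm_softc *sc, int vcpu)
{
	/* Load the guest's value of the MSR before entering the guest. */
	wrmsr(MSR_EXAMPLE, sc->guest_msrs[vcpu][IDX_MSR_EXAMPLE]);
}

static void
example_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
	/* Stash the guest's value and restore the host's after #VMEXIT. */
	sc->guest_msrs[vcpu][IDX_MSR_EXAMPLE] = rdmsr(MSR_EXAMPLE);
	wrmsr(MSR_EXAMPLE, host_msrs[IDX_MSR_EXAMPLE]);
}
#endif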
void
svm_msr_guest_exit(struct svm_softc *sc, int vcpu)
{
	/*
	 * Save guest MSRs (if any) and restore host MSRs.
	 */
	wrmsr(MSR_LSTAR, host_msrs[IDX_MSR_LSTAR]);
	wrmsr(MSR_CSTAR, host_msrs[IDX_MSR_CSTAR]);
	wrmsr(MSR_STAR, host_msrs[IDX_MSR_STAR]);
	wrmsr(MSR_SF_MASK, host_msrs[IDX_MSR_SF_MASK]);

	/* MSR_KGSBASE will be restored on the way back to userspace */
}

int
svm_rdmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t *result,
    bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		*result = 0;
		break;
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_rdmtrr(&sc->mtrr[vcpu], num, result) != 0) {
			vm_inject_gp(sc->vm, vcpu);
		}
		break;
	case MSR_SYSCFG:
	case MSR_AMDK8_IPM:
	case MSR_EXTFEATURES:
		*result = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

int
svm_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu)
{
	int error = 0;

	switch (num) {
	case MSR_MCG_CAP:
	case MSR_MCG_STATUS:
		break;		/* ignore writes */
	case MSR_MTRRcap:
	case MSR_MTRRdefType:
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
	case MSR_MTRR64kBase:
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1:
		if (vm_wrmtrr(&sc->mtrr[vcpu], num, val) != 0) {
			vm_inject_gp(sc->vm, vcpu);
		}
		break;
	case MSR_SYSCFG:
		break;		/* Ignore writes */
	case MSR_AMDK8_IPM:
		/*
		 * Ignore writes to the "Interrupt Pending Message" MSR.
		 */
		break;
	case MSR_K8_UCODE_UPDATE:
		/*
		 * Ignore writes to microcode update register.
		 */
		break;
#ifdef BHYVE_SNAPSHOT
	case MSR_TSC:
		error = svm_set_tsc_offset(sc, vcpu, val - rdtsc());
		break;
#endif
	case MSR_EXTFEATURES:
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
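/*
 * Usage sketch, not part of the original file: svm_rdmsr()/svm_wrmsr() are
 * reached from the #VMEXIT path when a guest RDMSR/WRMSR is intercepted.
 * Per the AMD APM, EXITINFO1 distinguishes the direction (0 = RDMSR,
 * 1 = WRMSR), ECX holds the MSR number, and the value travels in EDX:EAX.
 * The guest_reg32()/set_guest_reg32() helpers below are hypothetical
 * stand-ins for the register accessors used by the real exit handler.
 */
#if 0
static int
example_handle_msr_exit(struct svm_softc *sc, int vcpu, uint64_t exitinfo1,
    bool *retu)
{
	uint64_t result, value;
	uint32_t ecx, eax, edx;
	int error;

	ecx = guest_reg32(sc, vcpu, VM_REG_GUEST_RCX);	/* MSR number */
	if (exitinfo1 != 0) {
		/* EXITINFO1 == 1: the guest executed WRMSR. */
		eax = guest_reg32(sc, vcpu, VM_REG_GUEST_RAX);
		edx = guest_reg32(sc, vcpu, VM_REG_GUEST_RDX);
		value = (uint64_t)edx << 32 | eax;
		error = svm_wrmsr(sc, vcpu, ecx, value, retu);
	} else {
		/* EXITINFO1 == 0: the guest executed RDMSR. */
		error = svm_rdmsr(sc, vcpu, ecx, &result, retu);
		if (error == 0) {
			/* Return the result in the guest's EDX:EAX. */
			set_guest_reg32(sc, vcpu, VM_REG_GUEST_RAX,
			    (uint32_t)result);
			set_guest_reg32(sc, vcpu, VM_REG_GUEST_RDX,
			    (uint32_t)(result >> 32));
		}
	}

	/* EINVAL from either handler means the MSR is unknown to vmm. */
	return (error);
}
#endif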