/*-
 * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _SVM_SOFTC_H_
#define _SVM_SOFTC_H_

/*
 * Sizes of the per-VM I/O permission bitmap and MSR permission bitmap.
 * Both are expressed in multiples of PAGE_SIZE and are shared by all
 * vcpus of the virtual machine (see 'struct svm_softc' below).
 */
#define SVM_IO_BITMAP_SIZE	(3 * PAGE_SIZE)
#define SVM_MSR_BITMAP_SIZE	(2 * PAGE_SIZE)

/*
 * ASID (Address Space Identifier) tag for a vcpu.  The generation number
 * presumably lets the backend detect when an ASID has been recycled and
 * needs to be reallocated — 'nasid' is defined elsewhere; TODO confirm
 * against the allocator in the corresponding .c file.
 */
struct asid {
	uint64_t	gen;	/* range is [1, ~0UL] */
	uint32_t	num;	/* range is [1, nasid - 1] */
};

/*
 * XXX separate out 'struct vmcb' from 'svm_vcpu' to avoid wasting space
 * due to VMCB alignment requirements.
 */
43 */ 44 struct svm_vcpu { 45 struct vmcb vmcb; /* hardware saved vcpu context */ 46 struct svm_regctx swctx; /* software saved vcpu context */ 47 uint64_t vmcb_pa; /* VMCB physical address */ 48 int lastcpu; /* host cpu that the vcpu last ran on */ 49 uint32_t dirty; /* state cache bits that must be cleared */ 50 long eptgen; /* pmap->pm_eptgen when the vcpu last ran */ 51 struct asid asid; 52 } __aligned(PAGE_SIZE); 53 54 /* 55 * SVM softc, one per virtual machine. 56 */ 57 struct svm_softc { 58 uint8_t iopm_bitmap[SVM_IO_BITMAP_SIZE]; /* shared by all vcpus */ 59 uint8_t msr_bitmap[SVM_MSR_BITMAP_SIZE]; /* shared by all vcpus */ 60 uint8_t apic_page[VM_MAXCPU][PAGE_SIZE]; 61 struct svm_vcpu vcpu[VM_MAXCPU]; 62 vm_offset_t nptp; /* nested page table */ 63 struct vm *vm; 64 } __aligned(PAGE_SIZE); 65 66 CTASSERT((offsetof(struct svm_softc, nptp) & PAGE_MASK) == 0); 67 68 static __inline struct svm_vcpu * 69 svm_get_vcpu(struct svm_softc *sc, int vcpu) 70 { 71 72 return (&(sc->vcpu[vcpu])); 73 } 74 75 static __inline struct vmcb * 76 svm_get_vmcb(struct svm_softc *sc, int vcpu) 77 { 78 79 return (&(sc->vcpu[vcpu].vmcb)); 80 } 81 82 static __inline struct vmcb_state * 83 svm_get_vmcb_state(struct svm_softc *sc, int vcpu) 84 { 85 86 return (&(sc->vcpu[vcpu].vmcb.state)); 87 } 88 89 static __inline struct vmcb_ctrl * 90 svm_get_vmcb_ctrl(struct svm_softc *sc, int vcpu) 91 { 92 93 return (&(sc->vcpu[vcpu].vmcb.ctrl)); 94 } 95 96 static __inline struct svm_regctx * 97 svm_get_guest_regctx(struct svm_softc *sc, int vcpu) 98 { 99 100 return (&(sc->vcpu[vcpu].swctx)); 101 } 102 103 static __inline void 104 svm_set_dirty(struct svm_softc *sc, int vcpu, uint32_t dirtybits) 105 { 106 struct svm_vcpu *vcpustate; 107 108 vcpustate = svm_get_vcpu(sc, vcpu); 109 110 vcpustate->dirty |= dirtybits; 111 } 112 113 #endif /* _SVM_SOFTC_H_ */ 114