xref: /freebsd/sys/amd64/vmm/vmm_host.c (revision 0b3105a37d7adcadcb720112fed4dc4e8040be99)
/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>

#include <machine/cpufunc.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include "vmm_host.h"

static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
	vmm_host_xcr0;
static struct xsave_limits vmm_xsave_limits;

void
vmm_host_state_init(void)
{
	int regs[4];

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With CR0.TS set unconditionally after a VM exit, any inadvertent
	 * use of the FPU traps (#NM) until the guest FPU state has been
	 * safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	vmm_host_cr4 = rcr4();

	/*
	 * Only permit a guest to use XSAVE if the host is using
	 * XSAVE.  Only permit a guest to use XSAVE features supported
	 * by the host.  This ensures that the FPU state used by the
	 * guest is always a subset of the saved guest FPU state.
	 *
	 * In addition, only permit XSAVE features whose dependency
	 * rules on other features are known, so that xsetbv can be
	 * emulated properly.
	 */
	if (vmm_host_cr4 & CR4_XSAVE) {
		vmm_xsave_limits.xsave_enabled = 1;
		vmm_host_xcr0 = rxcr(0);
		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0 &
		    (XFEATURE_AVX | XFEATURE_MPX | XFEATURE_AVX512);

		/*
		 * CPUID leaf 0xd, sub-leaf 0: %ebx reports the size of the
		 * XSAVE area required by the features currently enabled
		 * in XCR0.
		 */
		cpuid_count(0xd, 0x0, regs);
		vmm_xsave_limits.xsave_max_size = regs[1];
	}
}
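
/*
 * Illustrative sketch, not part of the original file: the xcr0_allowed mask
 * computed above is what an xsetbv emulation path is expected to check a
 * guest-requested XCR0 value against.  The function below is a hypothetical
 * example of such a check; its name and return convention are assumptions.
 */
#if 0
static int
vmm_xcr0_valid_sketch(uint64_t xcrval)
{
	const struct xsave_limits *limits = vmm_get_xsave_limits();

	/* The guest may not use XSAVE at all unless the host does. */
	if (!limits->xsave_enabled)
		return (0);

	/* The x87 state bit must always be set in XCR0. */
	if ((xcrval & XFEATURE_ENABLED_X87) == 0)
		return (0);

	/* Reject any feature bits outside the host-permitted set. */
	if ((xcrval & ~limits->xcr0_allowed) != 0)
		return (0);

	return (1);
}
#endif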

uint64_t
vmm_get_host_pat(void)
{

	return (vmm_host_pat);
}

uint64_t
vmm_get_host_efer(void)
{

	return (vmm_host_efer);
}

uint64_t
vmm_get_host_cr0(void)
{

	return (vmm_host_cr0);
}

uint64_t
vmm_get_host_cr4(void)
{

	return (vmm_host_cr4);
}

uint64_t
vmm_get_host_xcr0(void)
{

	return (vmm_host_xcr0);
}

uint64_t
vmm_get_host_datasel(void)
{

	return (GSEL(GDATA_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_codesel(void)
{

	return (GSEL(GCODE_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_tsssel(void)
{

	return (GSEL(GPROC0_SEL, SEL_KPL));
}

/*
 * The host does not depend on a particular %fs base while in the kernel,
 * so zero is used for the value restored on VM exit.
 */
uint64_t
vmm_get_host_fsbase(void)
{

	return (0);
}

uint64_t
vmm_get_host_idtrbase(void)
{

	return (r_idt.rd_base);
}

const struct xsave_limits *
vmm_get_xsave_limits(void)
{

	return (&vmm_xsave_limits);
}
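
/*
 * Illustrative usage sketch, not part of the original file: a hardware
 * backend is expected to load these host values into the host-state area of
 * its control structure so the processor restores them on every VM exit.
 * The VMCS field names and the vmcs_write() helper used below come from the
 * VMX backend's vmcs.h and are assumptions here.
 */
#if 0
static void
vmx_host_state_sketch(void)
{

	vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
	vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());
	vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
	vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4());

	vmcs_write(VMCS_HOST_CS_SELECTOR, vmm_get_host_codesel());
	vmcs_write(VMCS_HOST_DS_SELECTOR, vmm_get_host_datasel());
	vmcs_write(VMCS_HOST_TR_SELECTOR, vmm_get_host_tsssel());

	vmcs_write(VMCS_HOST_FS_BASE, vmm_get_host_fsbase());
	vmcs_write(VMCS_HOST_IDTR_BASE, vmm_get_host_idtrbase());
}
#endif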