/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/pcpu.h>

#include <machine/cpufunc.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include "vmm_host.h"

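/*
 * Host control-register, MSR, and XSAVE state is captured once, when
 * the vmm module initializes, and served read-only to callers through
 * the accessors below.
 */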
static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4,
	vmm_host_xcr0;
static struct xsave_limits vmm_xsave_limits;

void
vmm_host_state_init(void)
{
	int regs[4];

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
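	/*
	 * With CR0.TS set, the next FPU instruction executed on the host
	 * raises a device-not-available fault (#NM), which gives the
	 * kernel a chance to save the guest FPU state before the host
	 * touches the FPU.
	 */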
	vmm_host_cr0 = rcr0() | CR0_TS;

	/*
	 * On machines without PCID, or with PCID but without INVPCID
	 * support, the kernel flushes global (i.e. kernel) TLB entries
	 * by temporarily clearing the CR4.PGE bit; see invltlb_glob().
	 * If preemption occurs at the wrong time, the cached
	 * vmm_host_cr4 might store the value with CR4.PGE cleared.
	 * Since FreeBSD requires PG_G support on amd64, just set the
	 * bit unconditionally.
	 */
	vmm_host_cr4 = rcr4() | CR4_PGE;

	/*
	 * Only permit a guest to use XSAVE if the host is using
	 * XSAVE.  Only permit a guest to use XSAVE features supported
	 * by the host.  This ensures that the FPU state used by the
	 * guest is always a subset of the saved guest FPU state.
	 *
	 * In addition, only permit known XSAVE features whose
	 * dependency rules on other features are understood, so that
	 * xsetbv can be emulated properly.
	 */
	if (vmm_host_cr4 & CR4_XSAVE) {
		vmm_xsave_limits.xsave_enabled = 1;
		vmm_host_xcr0 = rxcr(0);
		vmm_xsave_limits.xcr0_allowed = vmm_host_xcr0 &
		    (XFEATURE_AVX | XFEATURE_MPX | XFEATURE_AVX512);

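		/*
		 * CPUID leaf 0xD, sub-leaf 0: EBX (regs[1]) reports the
		 * size, in bytes, of the XSAVE area required by the
		 * feature set currently enabled in XCR0.
		 */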
		cpuid_count(0xd, 0x0, regs);
		vmm_xsave_limits.xsave_max_size = regs[1];
	}
}

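/*
 * The accessors below hand the cached values to the hardware-specific
 * backends.  As an illustrative sketch (field names taken from the
 * Intel VMX backend), the VMCS host-state area might be primed with:
 *
 *	vmcs_write(VMCS_HOST_CR0, vmm_get_host_cr0());
 *	vmcs_write(VMCS_HOST_CR4, vmm_get_host_cr4());
 *	vmcs_write(VMCS_HOST_IA32_PAT, vmm_get_host_pat());
 *	vmcs_write(VMCS_HOST_IA32_EFER, vmm_get_host_efer());
 */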
uint64_t
vmm_get_host_pat(void)
{

	return (vmm_host_pat);
}

uint64_t
vmm_get_host_efer(void)
{

	return (vmm_host_efer);
}

uint64_t
vmm_get_host_cr0(void)
{

	return (vmm_host_cr0);
}

uint64_t
vmm_get_host_cr4(void)
{

	return (vmm_host_cr4);
}

uint64_t
vmm_get_host_xcr0(void)
{

	return (vmm_host_xcr0);
}

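/*
 * The selector accessors build 16-bit segment selectors with the
 * GSEL() macro: the GDT index shifted left by 3, OR'ed with the
 * requested privilege level (SEL_KPL, ring 0, for the host kernel).
 */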
uint64_t
vmm_get_host_datasel(void)
{

	return (GSEL(GDATA_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_codesel(void)
{

	return (GSEL(GCODE_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_tsssel(void)
{

	return (GSEL(GPROC0_SEL, SEL_KPL));
}

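/*
 * The amd64 kernel keeps its per-CPU data behind %gs and makes no use
 * of %fs, so the host FS base is simply zero.
 */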
uint64_t
vmm_get_host_fsbase(void)
{

	return (0);
}

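/*
 * r_idt is the kernel's IDT region descriptor; rd_base holds the
 * linear address the host IDTR must point at again after a VM exit.
 */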
uint64_t
vmm_get_host_idtrbase(void)
{

	return (r_idt.rd_base);
}

const struct xsave_limits *
vmm_get_xsave_limits(void)
{

	return (&vmm_xsave_limits);
}