xref: /freebsd/sys/amd64/vmm/vmm_host.c (revision ab0b9f6b3073e6c4d1dfbf07444d7db67a189a96)
/*-
 * Copyright (c) 2012 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/pcpu.h>

#include <machine/cpufunc.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include "vmm_host.h"

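/*
 * Host MSR and control register values captured at initialization and
 * returned by the vmm_get_host_*() accessors below.
 */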
static uint64_t vmm_host_efer, vmm_host_pat, vmm_host_cr0, vmm_host_cr4;

void
vmm_host_state_init(void)
{

	vmm_host_efer = rdmsr(MSR_EFER);
	vmm_host_pat = rdmsr(MSR_PAT);

	/*
	 * We always want CR0.TS to be set when the processor does a VM exit.
	 *
	 * With emulation turned on unconditionally after a VM exit, we are
	 * able to trap inadvertent use of the FPU until the guest FPU state
	 * has been safely squirreled away.
	 */
	vmm_host_cr0 = rcr0() | CR0_TS;

	vmm_host_cr4 = rcr4();
}

uint64_t
vmm_get_host_pat(void)
{

	return (vmm_host_pat);
}

uint64_t
vmm_get_host_efer(void)
{

	return (vmm_host_efer);
}

uint64_t
vmm_get_host_cr0(void)
{

	return (vmm_host_cr0);
}

uint64_t
vmm_get_host_cr4(void)
{

	return (vmm_host_cr4);
}

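/*
 * Host segment selectors: the kernel's standard GDT selectors.
 */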
uint64_t
vmm_get_host_datasel(void)
{

	return (GSEL(GDATA_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_codesel(void)
{

	return (GSEL(GCODE_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_tsssel(void)
{

	return (GSEL(GPROC0_SEL, SEL_KPL));
}

uint64_t
vmm_get_host_fsbase(void)
{

	/* The host kernel does not use %fs, so its base is reported as 0. */
	return (0);
}

uint64_t
vmm_get_host_idtrbase(void)
{

	/* Base address of the host IDT, taken from the global region descriptor. */
	return (r_idt.rd_base);
}