/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2024 Oxide Computer Company
 */

#ifndef _SYS_HMA_H
#define	_SYS_HMA_H

/*
 * Hypervisor Multiplexor API
 *
 * This provides a set of APIs usable by hypervisor implementations, allowing
 * them to coexist and ensuring that they all remain in a consistent state.
 */

#include <sys/fp.h>

#ifdef __cplusplus
extern "C" {
#endif


/*
 * Register a hypervisor with HMA.  On success, a pointer to the opaque
 * registration token will be returned, indicating that proper host setup has
 * occurred for further hypervisor actions.
 */
typedef struct hma_reg hma_reg_t;
extern hma_reg_t *hma_register(const char *);
extern hma_reg_t *hma_register_exclusive(const char *);
extern void hma_unregister(hma_reg_t *);
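/*
 * As an illustrative sketch only (the driver name and error handling below
 * are hypothetical, not part of this interface), a hypervisor module would
 * typically register before touching any VMX/SVM resources and unregister
 * during teardown:
 *
 *	hma_reg_t *reg = hma_register("myhv");
 *	if (reg == NULL)
 *		return (ENXIO);
 *	... perform hypervisor setup and operation ...
 *	hma_unregister(reg);
 */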

/*
 * Allocate or free a VPID for use with VMX.
 *
 * This must not be performed by a hypervisor until it has successfully
 * registered via hma_register().
 */
extern uint16_t hma_vmx_vpid_alloc(void);
extern void hma_vmx_vpid_free(uint16_t);
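/*
 * A minimal usage sketch (the vCPU bookkeeping is hypothetical, and it is
 * assumed here that a return of 0 denotes allocation failure, since VPID 0 is
 * reserved for the host): a VMX hypervisor might allocate one VPID per vCPU
 * after registering and release it when the vCPU is destroyed.
 *
 *	uint16_t vpid = hma_vmx_vpid_alloc();
 *	if (vpid != 0)
 *		vcpu->vc_vpid = vpid;
 *	...
 *	hma_vmx_vpid_free(vcpu->vc_vpid);
 */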

/*
 * On all active CPUs, perform a single-context INVEPT on the given EPTP.
 */
extern void hma_vmx_invept_allcpus(uintptr_t);
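/*
 * For example (the eptp variable below is hypothetical), after modifying an
 * EPT hierarchy a hypervisor could invalidate any cached translations for it
 * across the system:
 *
 *	hma_vmx_invept_allcpus((uintptr_t)eptp);
 */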

struct hma_svm_asid {
	uint64_t hsa_gen;
	uint32_t hsa_asid;
};
typedef struct hma_svm_asid hma_svm_asid_t;

extern void hma_svm_asid_init(hma_svm_asid_t *);
extern uint8_t hma_svm_asid_update(hma_svm_asid_t *, boolean_t, boolean_t);
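/*
 * A sketch of the expected call pattern (the vCPU structure and the meanings
 * assigned to the boolean arguments and return value are assumptions, not
 * defined by this header): an SVM hypervisor initializes the per-vCPU ASID
 * tracking once, then consults hma_svm_asid_update() on each guest entry to
 * learn what, if any, TLB flushing is required.
 *
 *	hma_svm_asid_init(&vcpu->vc_asid);
 *	...
 *	uint8_t flush = hma_svm_asid_update(&vcpu->vc_asid, flush_by_asid,
 *	    npt_enabled);
 */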

/*
 * Disable, enable, or query the GIF on CPUs supporting SVM.
 */
extern void hma_svm_gif_disable(void);
extern void hma_svm_gif_enable(void);
extern boolean_t hma_svm_gif_is_disabled(void);
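/*
 * An illustrative sketch (the critical section itself is hypothetical): the
 * GIF can be cleared to hold off interrupts, NMIs, and SMIs around a
 * sensitive operation on an SVM-capable CPU, and restored afterwards.
 *
 *	hma_svm_gif_disable();
 *	ASSERT(hma_svm_gif_is_disabled());
 *	... perform work that must not be interrupted ...
 *	hma_svm_gif_enable();
 */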

typedef enum hma_cpc_flags {
	/* Guest not using CPCs */
	HCF_DISABLED = 0,

	/* Base (0-3) CPCs usable by guest */
	HCF_EN_BASE = (1 << 0),
	/* Extended (4-5) CPCs usable by guest */
	HCF_EN_EXTD = (1 << 1),
} hma_cpc_flags_t;

#define	HMA_CPC_REGS_MAX	6

typedef struct hma_cpc {
	uint64_t hc_evtsel;
	uint64_t hc_ctr;
} hma_cpc_t;

struct hma_svm_cpc_state {
	hma_cpc_t	hscs_regs[HMA_CPC_REGS_MAX];
	hma_cpc_flags_t	hscs_flags;
};

typedef enum hma_svm_cpc_res {
	/* Base (empty) case */
	HSCR_EMPTY = 0,

	/* Direct guest access to RDPMC instruction allowed */
	HSCR_ACCESS_RDPMC = (1 << 0),
	/* Direct guest access to CPC CTR MSRs allowed */
	HSCR_ACCESS_CTR_MSR = (1 << 1),
} hma_svm_cpc_res_t;

extern hma_svm_cpc_res_t hma_svm_cpc_enter(struct hma_svm_cpc_state *);
extern void hma_svm_cpc_exit(struct hma_svm_cpc_state *);
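/*
 * A sketch of the expected usage (the vCPU structure and how the result is
 * applied to intercept settings are assumptions for illustration): a
 * hypervisor loads the guest's counter state before entering the guest and
 * saves it back out afterwards, adjusting RDPMC/MSR interception based on the
 * access granted by the returned flags.
 *
 *	hma_svm_cpc_res_t res = hma_svm_cpc_enter(&vcpu->vc_cpc);
 *	if (res & HSCR_ACCESS_RDPMC)
 *		... allow the guest to execute RDPMC directly ...
 *	... enter the guest ...
 *	hma_svm_cpc_exit(&vcpu->vc_cpc);
 */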

/*
 * FPU-related management. These functions provide a set of APIs to manage the
 * FPU state and switch between host and guest management of this state.
 */

typedef struct hma_fpu hma_fpu_t;

/*
 * Allocate and free FPU state management structures.
 */
extern hma_fpu_t *hma_fpu_alloc(int);
extern void hma_fpu_free(hma_fpu_t *);

/*
 * Resets the FPU to the standard x86 default state. This should be called
 * after allocation and whenever the guest needs to logically reset the state
 * (when the CPU is reset, etc.). If the system supports xsave, then the xbv
 * state will be set to have the x87 and SSE portions as valid and the rest
 * will be set to their initial states (regardless of whether or not they will
 * be advertised in the host).
 */
extern int hma_fpu_init(hma_fpu_t *);
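/*
 * A minimal sketch of bringing up guest FPU state (assuming here that the
 * argument to hma_fpu_alloc() is a kmem flag such as KM_SLEEP, and that a
 * non-zero return from hma_fpu_init() indicates failure):
 *
 *	hma_fpu_t *fpu = hma_fpu_alloc(KM_SLEEP);
 *	if (fpu == NULL)
 *		return (ENOMEM);
 *	if (hma_fpu_init(fpu) != 0) {
 *		hma_fpu_free(fpu);
 *		return (EINVAL);
 *	}
 */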

/*
 * Save the current host's FPU state and restore the guest's state in the FPU.
 * At this point, CR0.TS will not be set. The caller must not use the FPU in
 * any way before entering the guest.
 *
 * This should be used in normal operation before entering the guest. It
 * should also be used in a thread context operation when the thread is being
 * scheduled again. This interface has an implicit assumption that a given
 * guest state will be mapped to only one specific OS thread at any given
 * time.
 *
 * This must be called with preemption disabled.
 */
extern void hma_fpu_start_guest(hma_fpu_t *);

/*
 * Save the current guest's FPU state and restore the host's state in the FPU.
 * By the time the thread returns to userland, the FPU will be in a usable
 * state; however, the FPU will not be usable while inside the kernel (CR0.TS
 * will be set).
 *
 * This should be used in normal operation after leaving the guest and
 * returning to userland. It should also be used in a thread context operation
 * when the thread is being descheduled. Like the hma_fpu_start_guest()
 * interface, this interface has an implicit assumption that a given guest
 * state will be mapped to only a single OS thread at any given time.
 *
 * This must be called with preemption disabled.
 */
extern void hma_fpu_stop_guest(hma_fpu_t *);
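/*
 * A sketch of the per-entry usage (the guest-entry step itself is
 * hypothetical; kpreempt_disable()/kpreempt_enable() are the standard kernel
 * preemption controls):
 *
 *	kpreempt_disable();
 *	hma_fpu_start_guest(fpu);
 *	... enter and run the guest ...
 *	hma_fpu_stop_guest(fpu);
 *	kpreempt_enable();
 */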

typedef enum {
	HFXR_OK = 0,
	HFXR_NO_SPACE,		/* buffer is not large enough */
	HFXR_BAD_ALIGN,		/* buffer is not properly (64-byte) aligned */
	HFXR_UNSUP_FMT,		/* data using unsupported (compressed) format */
	HFXR_UNSUP_FEAT,	/* data has unsupported features set */
	HFXR_INVALID_DATA,	/* CPU determined xsave data is invalid */
} hma_fpu_xsave_result_t;

/*
 * Get and set the contents of the FPU save area, formatted as XSAVE-style
 * information.  If XSAVE is not supported by the host, the input and output
 * values will be translated to and from the FXSAVE format.  Attempts to set
 * XSAVE values not supported by the host will result in an error.
 *
 * These functions cannot be called while the FPU is in use by the guest. It is
 * up to callers to guarantee this invariant.
 */
extern hma_fpu_xsave_result_t hma_fpu_get_xsave_state(const hma_fpu_t *, void *,
    size_t);
extern hma_fpu_xsave_result_t hma_fpu_set_xsave_state(hma_fpu_t *, void *,
    size_t);
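/*
 * An illustrative sketch (the buffer handling is hypothetical): the guest FPU
 * contents can be copied out into a 64-byte aligned buffer and later restored
 * from it, checking the result for the failure modes enumerated above.
 *
 *	hma_fpu_xsave_result_t res;
 *
 *	res = hma_fpu_get_xsave_state(fpu, buf, buflen);
 *	if (res != HFXR_OK)
 *		... handle HFXR_NO_SPACE, HFXR_BAD_ALIGN, etc ...
 *	...
 *	res = hma_fpu_set_xsave_state(fpu, buf, buflen);
 */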

typedef struct hma_xsave_state_desc {
	uint64_t	hxsd_bit;
	uint32_t	hxsd_size;
	uint32_t	hxsd_off;
} hma_xsave_state_desc_t;

/*
 * Get a description of the data fields supported by the host via the XSAVE
 * APIs for getting/setting guest FPU data.  See the function definition for
 * more detailed parameter usage.
 */
extern uint_t hma_fpu_describe_xsave_state(hma_xsave_state_desc_t *, uint_t,
    size_t *);
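/*
 * A rough sketch only, since the precise parameter semantics are documented
 * with the function definition rather than here (the interpretation below,
 * that the array is filled with per-field descriptors and the size_t output
 * receives a required buffer size, is an assumption):
 *
 *	hma_xsave_state_desc_t descs[16];
 *	size_t req_size;
 *	uint_t nfields;
 *
 *	nfields = hma_fpu_describe_xsave_state(descs, 16, &req_size);
 */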

/*
 * Get and set the contents of the FPU save area, formatted as FXSAVE-style
 * information.  In all cases when these are in use, if XSAVE state is
 * actually used by the host, then this will end up zeroing all of the
 * non-FXSAVE state and resetting the xbv to indicate that the legacy x87 and
 * SSE portions are valid.
 *
 * These functions cannot be called while the FPU is in use by the guest. It
 * is up to callers to guarantee this invariant.
 */
extern void hma_fpu_get_fxsave_state(const hma_fpu_t *, struct fxsave_state *);
extern int hma_fpu_set_fxsave_state(hma_fpu_t *, const struct fxsave_state *);
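/*
 * A short sketch (assuming a non-zero return from the set routine indicates
 * invalid input): the guest FPU contents can be retrieved or loaded in the
 * FXSAVE format regardless of host XSAVE support.
 *
 *	struct fxsave_state fx;
 *
 *	hma_fpu_get_fxsave_state(fpu, &fx);
 *	...
 *	if (hma_fpu_set_fxsave_state(fpu, &fx) != 0)
 *		... reject the malformed state ...
 */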

/* Perform HMA initialization steps during boot-up. */
extern void hma_init(void);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_HMA_H */