xref: /freebsd/sys/arm64/vmm/vmm_hyp.c (revision 59c8e88e72633afbc47a4ace0d2170d00d51f7dc)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Andrew Turner
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/proc.h>

#include <machine/armreg.h>

#include "arm64.h"
#include "hyp.h"

struct hypctx;

uint64_t vmm_hyp_enter(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t,
    uint64_t, uint64_t, uint64_t);
uint64_t vmm_enter_guest(struct hypctx *);

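/*
 * Save the EL0/EL1 system register state of the current context into
 * hypctx. When "guest" is true the guest-only state, i.e. the virtual
 * timer and the GICv3 ICH_* interface registers, is saved as well.
 */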
static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
{
	uint64_t dfr0;

	/* Store the guest-only (timer and GICv3) state */
	if (guest) {
		/* Store the timer registers */
		hypctx->vtimer_cpu.cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
		hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
		    READ_SPECIALREG(cntv_cval_el0);
		hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(cntv_ctl_el0);

		/* Store the GICv3 registers */
		hypctx->vgic_v3_regs.ich_eisr_el2 =
		    READ_SPECIALREG(ich_eisr_el2);
		hypctx->vgic_v3_regs.ich_elrsr_el2 =
		    READ_SPECIALREG(ich_elrsr_el2);
		hypctx->vgic_v3_regs.ich_hcr_el2 =
		    READ_SPECIALREG(ich_hcr_el2);
		hypctx->vgic_v3_regs.ich_misr_el2 =
		    READ_SPECIALREG(ich_misr_el2);
		hypctx->vgic_v3_regs.ich_vmcr_el2 =
		    READ_SPECIALREG(ich_vmcr_el2);
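		/*
		 * Enter the switch at the highest implemented list register
		 * and fall through to case 0 so that only implemented
		 * registers are accessed. For example, with four list
		 * registers (ich_lr_num == 4) execution starts at
		 * STORE_LR(3), which expands to:
		 *
		 *	case 3:
		 *		hypctx->vgic_v3_regs.ich_lr_el2[3] =
		 *		    READ_SPECIALREG(ich_lr3_el2);
		 *
		 * and continues through cases 2, 1 and 0. The same pattern
		 * is used for the APR, debug and PMU register banks below.
		 */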
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	STORE_LR(x)					\
	case x:						\
		hypctx->vgic_v3_regs.ich_lr_el2[x] =	\
		    READ_SPECIALREG(ich_lr ## x ##_el2)
		STORE_LR(15);
		STORE_LR(14);
		STORE_LR(13);
		STORE_LR(12);
		STORE_LR(11);
		STORE_LR(10);
		STORE_LR(9);
		STORE_LR(8);
		STORE_LR(7);
		STORE_LR(6);
		STORE_LR(5);
		STORE_LR(4);
		STORE_LR(3);
		STORE_LR(2);
		STORE_LR(1);
		default:
		STORE_LR(0);
#undef STORE_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	STORE_APR(x)						\
	case x:							\
		hypctx->vgic_v3_regs.ich_ap0r_el2[x] =		\
		    READ_SPECIALREG(ich_ap0r ## x ##_el2);	\
		hypctx->vgic_v3_regs.ich_ap1r_el2[x] =		\
		    READ_SPECIALREG(ich_ap1r ## x ##_el2)
		STORE_APR(3);
		STORE_APR(2);
		STORE_APR(1);
		default:
		STORE_APR(0);
#undef STORE_APR
		}
	}

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_BRP(x)						\
	case x:								\
		hypctx->dbgbcr_el1[x] =					\
		    READ_SPECIALREG(dbgbcr ## x ## _el1);		\
		hypctx->dbgbvr_el1[x] =					\
		    READ_SPECIALREG(dbgbvr ## x ## _el1)
	STORE_DBG_BRP(15);
	STORE_DBG_BRP(14);
	STORE_DBG_BRP(13);
	STORE_DBG_BRP(12);
	STORE_DBG_BRP(11);
	STORE_DBG_BRP(10);
	STORE_DBG_BRP(9);
	STORE_DBG_BRP(8);
	STORE_DBG_BRP(7);
	STORE_DBG_BRP(6);
	STORE_DBG_BRP(5);
	STORE_DBG_BRP(4);
	STORE_DBG_BRP(3);
	STORE_DBG_BRP(2);
	STORE_DBG_BRP(1);
	default:
	STORE_DBG_BRP(0);
#undef STORE_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_WRP(x)						\
	case x:								\
		hypctx->dbgwcr_el1[x] =					\
		    READ_SPECIALREG(dbgwcr ## x ## _el1);		\
		hypctx->dbgwvr_el1[x] =					\
		    READ_SPECIALREG(dbgwvr ## x ## _el1)
	STORE_DBG_WRP(15);
	STORE_DBG_WRP(14);
	STORE_DBG_WRP(13);
	STORE_DBG_WRP(12);
	STORE_DBG_WRP(11);
	STORE_DBG_WRP(10);
	STORE_DBG_WRP(9);
	STORE_DBG_WRP(8);
	STORE_DBG_WRP(7);
	STORE_DBG_WRP(6);
	STORE_DBG_WRP(5);
	STORE_DBG_WRP(4);
	STORE_DBG_WRP(3);
	STORE_DBG_WRP(2);
	STORE_DBG_WRP(1);
	default:
	STORE_DBG_WRP(0);
#undef STORE_DBG_WRP
	}

	/* Store the PMU registers */
	hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
	hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
	hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
	hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
	hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
	hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);
	hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
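	/*
	 * PMCR_EL0.N holds the number of implemented event counters. The
	 * case labels are offset by one, so a switch on N enters at
	 * STORE_PMU(N - 1) and falls through to counter 0, while N == 0
	 * (only the cycle counter implemented) hits the default case.
	 */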
	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	STORE_PMU(x)							\
	case (x + 1):							\
		hypctx->pmevcntr_el0[x] =				\
		    READ_SPECIALREG(pmevcntr ## x ## _el0);		\
		hypctx->pmevtyper_el0[x] =				\
		    READ_SPECIALREG(pmevtyper ## x ## _el0)
	STORE_PMU(30);
	STORE_PMU(29);
	STORE_PMU(28);
	STORE_PMU(27);
	STORE_PMU(26);
	STORE_PMU(25);
	STORE_PMU(24);
	STORE_PMU(23);
	STORE_PMU(22);
	STORE_PMU(21);
	STORE_PMU(20);
	STORE_PMU(19);
	STORE_PMU(18);
	STORE_PMU(17);
	STORE_PMU(16);
	STORE_PMU(15);
	STORE_PMU(14);
	STORE_PMU(13);
	STORE_PMU(12);
	STORE_PMU(11);
	STORE_PMU(10);
	STORE_PMU(9);
	STORE_PMU(8);
	STORE_PMU(7);
	STORE_PMU(6);
	STORE_PMU(5);
	STORE_PMU(4);
	STORE_PMU(3);
	STORE_PMU(2);
	STORE_PMU(1);
	STORE_PMU(0);
	default:		/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef STORE_PMU
	}

	/* Store the special registers to the trapframe */
	hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
	hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
	hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
	if (guest) {
		hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
	}

	/* Store the guest special registers */
	hypctx->elr_el1 = READ_SPECIALREG(elr_el1);
	hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
	hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
	hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
	hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);
	hypctx->vbar_el1 = READ_SPECIALREG(vbar_el1);

	hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
	hypctx->afsr0_el1 = READ_SPECIALREG(afsr0_el1);
	hypctx->afsr1_el1 = READ_SPECIALREG(afsr1_el1);
	hypctx->amair_el1 = READ_SPECIALREG(amair_el1);
	hypctx->contextidr_el1 = READ_SPECIALREG(contextidr_el1);
	hypctx->cpacr_el1 = READ_SPECIALREG(cpacr_el1);
	hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
	hypctx->esr_el1 = READ_SPECIALREG(esr_el1);
	hypctx->far_el1 = READ_SPECIALREG(far_el1);
	hypctx->mair_el1 = READ_SPECIALREG(mair_el1);
	hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
	hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);
	hypctx->par_el1 = READ_SPECIALREG(par_el1);
	hypctx->sctlr_el1 = READ_SPECIALREG(sctlr_el1);
	hypctx->spsr_el1 = READ_SPECIALREG(spsr_el1);
	hypctx->tcr_el1 = READ_SPECIALREG(tcr_el1);
	/* TODO: Support when this is not res0 */
	hypctx->tcr2_el1 = 0;
	hypctx->ttbr0_el1 = READ_SPECIALREG(ttbr0_el1);
	hypctx->ttbr1_el1 = READ_SPECIALREG(ttbr1_el1);

	hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
	hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
	hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
	hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
}

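/*
 * Load the EL0/EL1 system register state from hypctx. When "guest" is
 * true the guest timer, counter offset, and GICv3 ICH_* interface state
 * are loaded as well.
 */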
static void
vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
{
	uint64_t dfr0;

	/* Restore the special registers */
	WRITE_SPECIALREG(elr_el1, hypctx->elr_el1);
	WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
	WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
	WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
	WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);
	WRITE_SPECIALREG(vbar_el1, hypctx->vbar_el1);

	WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
	WRITE_SPECIALREG(afsr0_el1, hypctx->afsr0_el1);
	WRITE_SPECIALREG(afsr1_el1, hypctx->afsr1_el1);
	WRITE_SPECIALREG(amair_el1, hypctx->amair_el1);
	WRITE_SPECIALREG(contextidr_el1, hypctx->contextidr_el1);
	WRITE_SPECIALREG(cpacr_el1, hypctx->cpacr_el1);
	WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
	WRITE_SPECIALREG(esr_el1, hypctx->esr_el1);
	WRITE_SPECIALREG(far_el1, hypctx->far_el1);
	WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
	WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);
	WRITE_SPECIALREG(mair_el1, hypctx->mair_el1);
	WRITE_SPECIALREG(par_el1, hypctx->par_el1);
	WRITE_SPECIALREG(sctlr_el1, hypctx->sctlr_el1);
	WRITE_SPECIALREG(tcr_el1, hypctx->tcr_el1);
	/* TODO: tcr2_el1 */
	WRITE_SPECIALREG(ttbr0_el1, hypctx->ttbr0_el1);
	WRITE_SPECIALREG(ttbr1_el1, hypctx->ttbr1_el1);
	WRITE_SPECIALREG(spsr_el1, hypctx->spsr_el1);

	WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
	WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);
	WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
	WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);

	/* Load the special regs from the trapframe */
	WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
	WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
	WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);

	/* Restore the PMU registers */
	WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
	WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
	WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
	/* Clear all events/interrupts then enable them */
	WRITE_SPECIALREG(pmcntenclr_el0, 0xfffffffful);
	WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
	WRITE_SPECIALREG(pmintenclr_el1, 0xfffffffful);
	WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
	WRITE_SPECIALREG(pmovsclr_el0, 0xfffffffful);
	WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);

	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	LOAD_PMU(x)							\
	case (x + 1):							\
		WRITE_SPECIALREG(pmevcntr ## x ## _el0,			\
		    hypctx->pmevcntr_el0[x]);				\
		WRITE_SPECIALREG(pmevtyper ## x ## _el0,		\
		    hypctx->pmevtyper_el0[x])
	LOAD_PMU(30);
	LOAD_PMU(29);
	LOAD_PMU(28);
	LOAD_PMU(27);
	LOAD_PMU(26);
	LOAD_PMU(25);
	LOAD_PMU(24);
	LOAD_PMU(23);
	LOAD_PMU(22);
	LOAD_PMU(21);
	LOAD_PMU(20);
	LOAD_PMU(19);
	LOAD_PMU(18);
	LOAD_PMU(17);
	LOAD_PMU(16);
	LOAD_PMU(15);
	LOAD_PMU(14);
	LOAD_PMU(13);
	LOAD_PMU(12);
	LOAD_PMU(11);
	LOAD_PMU(10);
	LOAD_PMU(9);
	LOAD_PMU(8);
	LOAD_PMU(7);
	LOAD_PMU(6);
	LOAD_PMU(5);
	LOAD_PMU(4);
	LOAD_PMU(3);
	LOAD_PMU(2);
	LOAD_PMU(1);
	LOAD_PMU(0);
	default:		/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef LOAD_PMU
	}

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_BRP(x)							\
	case x:								\
		WRITE_SPECIALREG(dbgbcr ## x ## _el1,			\
		    hypctx->dbgbcr_el1[x]);				\
		WRITE_SPECIALREG(dbgbvr ## x ## _el1,			\
		    hypctx->dbgbvr_el1[x])
	LOAD_DBG_BRP(15);
	LOAD_DBG_BRP(14);
	LOAD_DBG_BRP(13);
	LOAD_DBG_BRP(12);
	LOAD_DBG_BRP(11);
	LOAD_DBG_BRP(10);
	LOAD_DBG_BRP(9);
	LOAD_DBG_BRP(8);
	LOAD_DBG_BRP(7);
	LOAD_DBG_BRP(6);
	LOAD_DBG_BRP(5);
	LOAD_DBG_BRP(4);
	LOAD_DBG_BRP(3);
	LOAD_DBG_BRP(2);
	LOAD_DBG_BRP(1);
	default:
	LOAD_DBG_BRP(0);
#undef LOAD_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_WRP(x)							\
	case x:								\
		WRITE_SPECIALREG(dbgwcr ## x ## _el1,			\
		    hypctx->dbgwcr_el1[x]);				\
		WRITE_SPECIALREG(dbgwvr ## x ## _el1,			\
		    hypctx->dbgwvr_el1[x])
	LOAD_DBG_WRP(15);
	LOAD_DBG_WRP(14);
	LOAD_DBG_WRP(13);
	LOAD_DBG_WRP(12);
	LOAD_DBG_WRP(11);
	LOAD_DBG_WRP(10);
	LOAD_DBG_WRP(9);
	LOAD_DBG_WRP(8);
	LOAD_DBG_WRP(7);
	LOAD_DBG_WRP(6);
	LOAD_DBG_WRP(5);
	LOAD_DBG_WRP(4);
	LOAD_DBG_WRP(3);
	LOAD_DBG_WRP(2);
	LOAD_DBG_WRP(1);
	default:
	LOAD_DBG_WRP(0);
#undef LOAD_DBG_WRP
	}

	if (guest) {
		/* Load the timer registers */
		WRITE_SPECIALREG(cntkctl_el1, hypctx->vtimer_cpu.cntkctl_el1);
		WRITE_SPECIALREG(cntv_cval_el0,
		    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
		WRITE_SPECIALREG(cntv_ctl_el0,
		    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
		WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
		WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);

		/* Load the GICv3 registers */
		WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
		WRITE_SPECIALREG(ich_vmcr_el2,
		    hypctx->vgic_v3_regs.ich_vmcr_el2);
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	LOAD_LR(x)					\
	case x:						\
		WRITE_SPECIALREG(ich_lr ## x ##_el2,	\
		    hypctx->vgic_v3_regs.ich_lr_el2[x])
		LOAD_LR(15);
		LOAD_LR(14);
		LOAD_LR(13);
		LOAD_LR(12);
		LOAD_LR(11);
		LOAD_LR(10);
		LOAD_LR(9);
		LOAD_LR(8);
		LOAD_LR(7);
		LOAD_LR(6);
		LOAD_LR(5);
		LOAD_LR(4);
		LOAD_LR(3);
		LOAD_LR(2);
		LOAD_LR(1);
		default:
		LOAD_LR(0);
#undef LOAD_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	LOAD_APR(x)						\
	case x:							\
		WRITE_SPECIALREG(ich_ap0r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap0r_el2[x]);		\
		WRITE_SPECIALREG(ich_ap1r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
		LOAD_APR(3);
		LOAD_APR(2);
		LOAD_APR(1);
		default:
		LOAD_APR(0);
#undef LOAD_APR
		}
	}
}

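/*
 * Perform the world switch into a guest vCPU: save the host register
 * state, load the guest state and stage 2 translation table base, run
 * the guest via vmm_enter_guest(), then record the exit information and
 * restore the host state. Returns the exception type that caused the
 * exit, or EXCP_TYPE_REENTER if the exit should be retried.
 */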
static uint64_t
vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
{
	struct hypctx host_hypctx;
	uint64_t cntvoff_el2;
	uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
	uint64_t ret;
	uint64_t s1e1r, hpfar_el2;
	bool hpfar_valid;

	vmm_hyp_reg_store(&host_hypctx, NULL, false);

	/* Save the host special registers */
	cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
	cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
	cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);

	ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
	ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);

	vmm_hyp_reg_restore(hypctx, hyp, true);

	/* Load the common hypervisor registers */
	WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);

	host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
	WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);

	/* Call into the guest */
	ret = vmm_enter_guest(hypctx);

	WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
	isb();

	/* Store the exit info */
	hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
	vmm_hyp_reg_store(hypctx, hyp, true);

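	/*
	 * Decide whether HPFAR_EL2 holds a valid faulting IPA for this
	 * exit; if it does not it is reconstructed below from FAR_EL2
	 * with a stage 1 address translation.
	 */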
	hpfar_valid = true;
	if (ret == EXCP_TYPE_EL1_SYNC) {
		switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_DATA_ABORT_L:
			/*
			 * The hpfar_el2 register is valid for:
			 *  - Translation and Access faults.
			 *  - Translation, Access, and permission faults on
			 *    the translation table walk on the stage 1 tables.
			 *  - A stage 2 Address size fault.
			 *
			 * As we only need it in the first 2 cases we can just
			 * exclude it on permission faults that are not from
			 * the stage 1 table walk.
			 *
			 * TODO: Add a case for Arm erratum 834220.
			 */
			if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
				break;
			switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
			case ISS_DATA_DFSC_PF_L1:
			case ISS_DATA_DFSC_PF_L2:
			case ISS_DATA_DFSC_PF_L3:
				hpfar_valid = false;
				break;
			}
			break;
		}
	}
	if (hpfar_valid) {
		hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
	} else {
		/*
		 * TODO: There is a risk the AT instruction could cause an
		 * exception here. We should handle it and return a failure.
		 */
		s1e1r =
		    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
		if (PAR_SUCCESS(s1e1r)) {
			hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
			hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
			hypctx->exit_info.hpfar_el2 = hpfar_el2;
		} else {
			ret = EXCP_TYPE_REENTER;
		}
	}

	vmm_hyp_reg_restore(&host_hypctx, NULL, false);

	/* Restore the host special registers */
	WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
	WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);

	WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
	WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
	WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);

	return (ret);
}

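/*
 * Read an EL2 register on behalf of the host. Only the registers the
 * host needs (ICH_VTR_EL2 and CNTHCTL_EL2) are handled; any other
 * request returns 0.
 */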
static uint64_t
vmm_hyp_read_reg(uint64_t reg)
{
	switch (reg) {
	case HYP_REG_ICH_VTR:
		return (READ_SPECIALREG(ich_vtr_el2));
	case HYP_REG_CNTHCTL:
		return (READ_SPECIALREG(cnthctl_el2));
	}

	return (0);
}

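/*
 * Invalidate all EL1&0 TLB entries on every CPU in the Inner Shareable
 * domain. The dsb(ishst) orders earlier page table updates before the
 * invalidate and the dsb(ish) waits for it to complete.
 */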
static int
vmm_clean_s2_tlbi(void)
{
	dsb(ishst);
	__asm __volatile("tlbi alle1is");
	dsb(ish);

	return (0);
}

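/*
 * Invalidate the stage 2 TLB entries covering the IPA range [sva, eva)
 * of the guest identified by vttbr. When final_only is set only the
 * last-level (leaf) entries are invalidated.
 */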
static int
vm_s2_tlbi_range(uint64_t vttbr, vm_offset_t sva, vm_size_t eva,
    bool final_only)
{
	uint64_t end, r, start;
	uint64_t host_vttbr;

#define	TLBI_VA_SHIFT			12
#define	TLBI_VA_MASK			((1ul << 44) - 1)
#define	TLBI_VA(addr)			(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
#define	TLBI_VA_L3_INCR			(L3_SIZE >> TLBI_VA_SHIFT)
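	/*
	 * The TLBI address argument carries the address with the 12-bit
	 * page offset removed, so TLBI_VA() shifts and masks the address
	 * and TLBI_VA_L3_INCR advances by one last-level (L3) page per
	 * loop iteration below.
	 */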

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

	/*
	 * The CPU can cache the stage 1 + 2 combination so we need to ensure
	 * the stage 2 is invalidated first, then when this has completed we
	 * invalidate the stage 1 TLB. As we don't know which stage 1 virtual
	 * addresses point at the stage 2 IPA we need to invalidate the entire
	 * stage 1 TLB.
	 */

	start = TLBI_VA(sva);
	end = TLBI_VA(eva);
	for (r = start; r < end; r += TLBI_VA_L3_INCR) {
		/* Invalidate the stage 2 TLB entry */
		if (final_only)
			__asm __volatile("tlbi	ipas2le1is, %0" : : "r"(r));
		else
			__asm __volatile("tlbi	ipas2e1is, %0" : : "r"(r));
	}
	/* Ensure the entry has been invalidated */
	dsb(ish);
	/* Invalidate the stage 1 TLB. */
	__asm __volatile("tlbi vmalle1is");
	dsb(ish);
	isb();

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();

	return (0);
}

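/*
 * Invalidate all stage 1 and stage 2 TLB entries for the guest
 * identified by vttbr on every CPU in the Inner Shareable domain.
 */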
static int
vm_s2_tlbi_all(uint64_t vttbr)
{
	uint64_t host_vttbr;

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

	__asm __volatile("tlbi vmalls12e1is");
	dsb(ish);
	isb();

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();

	return (0);
}

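/*
 * Clean and invalidate the data cache by VA for the range
 * [start, start + len). CTR_EL0.DminLine is the log2 of the smallest
 * D-cache line size in 4-byte words, hence the line size calculation
 * below.
 */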
static int
vmm_dc_civac(uint64_t start, uint64_t len)
{
	size_t line_size, end;
	uint64_t ctr;

	ctr = READ_SPECIALREG(ctr_el0);
	line_size = sizeof(int) << CTR_DLINE_SIZE(ctr);
	end = start + len;
	dsb(ishst);
	/* Clean and Invalidate the D-cache */
	for (; start < end; start += line_size)
		__asm __volatile("dc	civac, %0" :: "r" (start) : "memory");
	dsb(ish);
	return (0);
}

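/*
 * Invalidate the hypervisor's own (EL2) TLB entries, either everything
 * or a virtual address range, reusing the TLBI_VA() helpers defined
 * above.
 */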
static int
vmm_el2_tlbi(uint64_t type, uint64_t start, uint64_t len)
{
	uint64_t end, r;

	dsb(ishst);
	switch (type) {
	default:
	case HYP_EL2_TLBI_ALL:
		__asm __volatile("tlbi	alle2" ::: "memory");
		break;
	case HYP_EL2_TLBI_VA:
		end = TLBI_VA(start + len);
		start = TLBI_VA(start);
		for (r = start; r < end; r += TLBI_VA_L3_INCR) {
			__asm __volatile("tlbi	vae2is, %0" :: "r"(r));
		}
		break;
	}
	dsb(ish);

	return (0);
}

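/*
 * Main EL2 entry point, called from vmm_hyp_exception.S. The handle
 * argument selects the operation and the remaining arguments are
 * operation specific; unknown handles return 0.
 */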
uint64_t
vmm_hyp_enter(uint64_t handle, uint64_t x1, uint64_t x2, uint64_t x3,
    uint64_t x4, uint64_t x5, uint64_t x6, uint64_t x7)
{
	uint64_t ret;

	switch (handle) {
	case HYP_ENTER_GUEST:
		do {
			ret = vmm_hyp_call_guest((struct hyp *)x1,
			    (struct hypctx *)x2);
		} while (ret == EXCP_TYPE_REENTER);
		return (ret);
	case HYP_READ_REGISTER:
		return (vmm_hyp_read_reg(x1));
	case HYP_CLEAN_S2_TLBI:
		return (vmm_clean_s2_tlbi());
	case HYP_DC_CIVAC:
		return (vmm_dc_civac(x1, x2));
	case HYP_EL2_TLBI:
		return (vmm_el2_tlbi(x1, x2, x3));
	case HYP_S2_TLBI_RANGE:
		return (vm_s2_tlbi_range(x1, x2, x3, x4));
	case HYP_S2_TLBI_ALL:
		return (vm_s2_tlbi_all(x1));
	case HYP_CLEANUP:	/* Handled in vmm_hyp_exception.S */
	default:
		break;
	}

	return (0);
}