xref: /freebsd/sys/arm64/vmm/vmm_hyp.c (revision 24e4dcf4ba5e9dedcf89efd358ea3e1fe5867020)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Andrew Turner
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/proc.h>

#include <machine/armreg.h>

#include "arm64.h"
#include "hyp.h"

struct hypctx;

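/*
 * The low-level world switch into the guest, defined outside this file.
 * It runs the guest until an exception returns control to the hypervisor
 * and returns the exception type (EXCP_TYPE_*) describing why it exited.
 */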
uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);

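/*
 * Save the register state of the current context into *hypctx. When
 * 'guest' is true this is the guest state being saved on the way out of
 * the VM, otherwise it is the host state saved around a world switch.
 * 'ecv_poff' is true when FEAT_ECV physical timer offsetting is in use,
 * in which case the physical timer registers are also guest-owned and
 * need to be saved.
 */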
static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	if (guest) {
		/* Store the timer registers */
		hypctx->vtimer_cpu.cntkctl_el1 =
		    READ_SPECIALREG(EL1_REG(CNTKCTL));
		hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CVAL));
		hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CTL));
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so it does not need to handle them.
		 */
		hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CVAL));
		hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CTL));
	}

	if (guest) {
		/* Store the GICv3 registers */
		hypctx->vgic_v3_regs.ich_eisr_el2 =
		    READ_SPECIALREG(ich_eisr_el2);
		hypctx->vgic_v3_regs.ich_elrsr_el2 =
		    READ_SPECIALREG(ich_elrsr_el2);
		hypctx->vgic_v3_regs.ich_hcr_el2 =
		    READ_SPECIALREG(ich_hcr_el2);
		hypctx->vgic_v3_regs.ich_misr_el2 =
		    READ_SPECIALREG(ich_misr_el2);
		hypctx->vgic_v3_regs.ich_vmcr_el2 =
		    READ_SPECIALREG(ich_vmcr_el2);
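		/*
		 * The case labels below are in descending order and
		 * intentionally fall through: entering the switch at
		 * (ich_lr_num - 1) stores every implemented List Register
		 * and skips those that do not exist. The same idiom is used
		 * for the APR, breakpoint, watchpoint and PMU banks below.
		 */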
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	STORE_LR(x)					\
	case x:						\
		hypctx->vgic_v3_regs.ich_lr_el2[x] =	\
		    READ_SPECIALREG(ich_lr ## x ##_el2)
		STORE_LR(15);
		STORE_LR(14);
		STORE_LR(13);
		STORE_LR(12);
		STORE_LR(11);
		STORE_LR(10);
		STORE_LR(9);
		STORE_LR(8);
		STORE_LR(7);
		STORE_LR(6);
		STORE_LR(5);
		STORE_LR(4);
		STORE_LR(3);
		STORE_LR(2);
		STORE_LR(1);
		default:
		STORE_LR(0);
#undef STORE_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	STORE_APR(x)						\
	case x:							\
		hypctx->vgic_v3_regs.ich_ap0r_el2[x] =		\
		    READ_SPECIALREG(ich_ap0r ## x ##_el2);	\
		hypctx->vgic_v3_regs.ich_ap1r_el2[x] =		\
		    READ_SPECIALREG(ich_ap1r ## x ##_el2)
		STORE_APR(3);
		STORE_APR(2);
		STORE_APR(1);
		default:
		STORE_APR(0);
#undef STORE_APR
		}
	}

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_BRP(x)						\
	case x:								\
		hypctx->dbgbcr_el1[x] =					\
		    READ_SPECIALREG(dbgbcr ## x ## _el1);		\
		hypctx->dbgbvr_el1[x] =					\
		    READ_SPECIALREG(dbgbvr ## x ## _el1)
	STORE_DBG_BRP(15);
	STORE_DBG_BRP(14);
	STORE_DBG_BRP(13);
	STORE_DBG_BRP(12);
	STORE_DBG_BRP(11);
	STORE_DBG_BRP(10);
	STORE_DBG_BRP(9);
	STORE_DBG_BRP(8);
	STORE_DBG_BRP(7);
	STORE_DBG_BRP(6);
	STORE_DBG_BRP(5);
	STORE_DBG_BRP(4);
	STORE_DBG_BRP(3);
	STORE_DBG_BRP(2);
	STORE_DBG_BRP(1);
	default:
	STORE_DBG_BRP(0);
#undef STORE_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_WRP(x)						\
	case x:								\
		hypctx->dbgwcr_el1[x] =					\
		    READ_SPECIALREG(dbgwcr ## x ## _el1);		\
		hypctx->dbgwvr_el1[x] =					\
		    READ_SPECIALREG(dbgwvr ## x ## _el1)
	STORE_DBG_WRP(15);
	STORE_DBG_WRP(14);
	STORE_DBG_WRP(13);
	STORE_DBG_WRP(12);
	STORE_DBG_WRP(11);
	STORE_DBG_WRP(10);
	STORE_DBG_WRP(9);
	STORE_DBG_WRP(8);
	STORE_DBG_WRP(7);
	STORE_DBG_WRP(6);
	STORE_DBG_WRP(5);
	STORE_DBG_WRP(4);
	STORE_DBG_WRP(3);
	STORE_DBG_WRP(2);
	STORE_DBG_WRP(1);
	default:
	STORE_DBG_WRP(0);
#undef STORE_DBG_WRP
	}

	/* Store the PMU registers */
	hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
	hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
	hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
	hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
	hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
	hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);
	hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
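	/*
	 * PMCR_EL0.N is the number of implemented event counters; the cases
	 * below fall through from N so that counters N-1 down to 0 are
	 * saved. N may be zero when only the cycle counter is present.
	 */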
	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	STORE_PMU(x)							\
	case (x + 1):							\
		hypctx->pmevcntr_el0[x] =				\
		    READ_SPECIALREG(pmevcntr ## x ## _el0);		\
		hypctx->pmevtyper_el0[x] =				\
		    READ_SPECIALREG(pmevtyper ## x ## _el0)
	STORE_PMU(30);
	STORE_PMU(29);
	STORE_PMU(28);
	STORE_PMU(27);
	STORE_PMU(26);
	STORE_PMU(25);
	STORE_PMU(24);
	STORE_PMU(23);
	STORE_PMU(22);
	STORE_PMU(21);
	STORE_PMU(20);
	STORE_PMU(19);
	STORE_PMU(18);
	STORE_PMU(17);
	STORE_PMU(16);
	STORE_PMU(15);
	STORE_PMU(14);
	STORE_PMU(13);
	STORE_PMU(12);
	STORE_PMU(11);
	STORE_PMU(10);
	STORE_PMU(9);
	STORE_PMU(8);
	STORE_PMU(7);
	STORE_PMU(6);
	STORE_PMU(5);
	STORE_PMU(4);
	STORE_PMU(3);
	STORE_PMU(2);
	STORE_PMU(1);
	STORE_PMU(0);
	default:		/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef STORE_PMU
	}

	/* Store the special registers to the trapframe */
	hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
	hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
	hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
	if (guest) {
		hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
		hypctx->par_el1 = READ_SPECIALREG(par_el1);
	}

	/* Store the guest special registers */
	hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
	hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
	hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
	hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);

	hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
	hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
	hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
	hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
		hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));

		hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
		hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
		hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
		hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
		hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
		hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
		hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
		hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
		hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
		hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
		hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
		/* TODO: Support when this is not res0 */
		hypctx->tcr2_el1 = 0;
		hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
		hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
	}

	hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
	hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
	hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
	hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
}

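/*
 * Restore the register state in *hypctx onto the CPU. This is the inverse
 * of vmm_hyp_reg_store(): 'guest' selects whether guest-only state (timer,
 * GICv3 interface and exit-related registers) is loaded, and 'ecv_poff'
 * indicates that FEAT_ECV physical timer offsetting is in use.
 */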
static void
vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	/* Restore the special registers */
	WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);

	if (guest) {
		if ((hyp->feats & HYP_FEAT_HCX) != 0)
			WRITE_SPECIALREG(HCRX_EL2_REG, hypctx->hcrx_el2);
	}
	isb();

	WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
	WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
	WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
	WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);

	WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
	WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
	WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
	WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
		WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);

		WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
		WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
		WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
		WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
		WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
		WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
		WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
		WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);
		WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
		WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
		WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
		/* TODO: tcr2_el1 */
		WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
		WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
	}

	if (guest) {
		WRITE_SPECIALREG(par_el1, hypctx->par_el1);
	}

	WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
	WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
	WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);

	/* Load the special regs from the trapframe */
	WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
	WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
	WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);

	/* Restore the PMU registers */
	WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
	WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
	WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
	/* Clear all events/interrupts then enable them */
	WRITE_SPECIALREG(pmcntenclr_el0, 0xfffffffful);
	WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
	WRITE_SPECIALREG(pmintenclr_el1, 0xfffffffful);
	WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
	WRITE_SPECIALREG(pmovsclr_el0, 0xfffffffful);
	WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);

	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	LOAD_PMU(x)							\
	case (x + 1):							\
		WRITE_SPECIALREG(pmevcntr ## x ## _el0,			\
		    hypctx->pmevcntr_el0[x]);				\
		WRITE_SPECIALREG(pmevtyper ## x ## _el0,		\
		    hypctx->pmevtyper_el0[x])
	LOAD_PMU(30);
	LOAD_PMU(29);
	LOAD_PMU(28);
	LOAD_PMU(27);
	LOAD_PMU(26);
	LOAD_PMU(25);
	LOAD_PMU(24);
	LOAD_PMU(23);
	LOAD_PMU(22);
	LOAD_PMU(21);
	LOAD_PMU(20);
	LOAD_PMU(19);
	LOAD_PMU(18);
	LOAD_PMU(17);
	LOAD_PMU(16);
	LOAD_PMU(15);
	LOAD_PMU(14);
	LOAD_PMU(13);
	LOAD_PMU(12);
	LOAD_PMU(11);
	LOAD_PMU(10);
	LOAD_PMU(9);
	LOAD_PMU(8);
	LOAD_PMU(7);
	LOAD_PMU(6);
	LOAD_PMU(5);
	LOAD_PMU(4);
	LOAD_PMU(3);
	LOAD_PMU(2);
	LOAD_PMU(1);
	LOAD_PMU(0);
	default:		/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef LOAD_PMU
	}

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_BRP(x)							\
	case x:								\
		WRITE_SPECIALREG(dbgbcr ## x ## _el1,			\
		    hypctx->dbgbcr_el1[x]);				\
		WRITE_SPECIALREG(dbgbvr ## x ## _el1,			\
		    hypctx->dbgbvr_el1[x])
	LOAD_DBG_BRP(15);
	LOAD_DBG_BRP(14);
	LOAD_DBG_BRP(13);
	LOAD_DBG_BRP(12);
	LOAD_DBG_BRP(11);
	LOAD_DBG_BRP(10);
	LOAD_DBG_BRP(9);
	LOAD_DBG_BRP(8);
	LOAD_DBG_BRP(7);
	LOAD_DBG_BRP(6);
	LOAD_DBG_BRP(5);
	LOAD_DBG_BRP(4);
	LOAD_DBG_BRP(3);
	LOAD_DBG_BRP(2);
	LOAD_DBG_BRP(1);
	default:
	LOAD_DBG_BRP(0);
#undef LOAD_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_WRP(x)							\
	case x:								\
		WRITE_SPECIALREG(dbgwcr ## x ## _el1,			\
		    hypctx->dbgwcr_el1[x]);				\
		WRITE_SPECIALREG(dbgwvr ## x ## _el1,			\
		    hypctx->dbgwvr_el1[x])
	LOAD_DBG_WRP(15);
	LOAD_DBG_WRP(14);
	LOAD_DBG_WRP(13);
	LOAD_DBG_WRP(12);
	LOAD_DBG_WRP(11);
	LOAD_DBG_WRP(10);
	LOAD_DBG_WRP(9);
	LOAD_DBG_WRP(8);
	LOAD_DBG_WRP(7);
	LOAD_DBG_WRP(6);
	LOAD_DBG_WRP(5);
	LOAD_DBG_WRP(4);
	LOAD_DBG_WRP(3);
	LOAD_DBG_WRP(2);
	LOAD_DBG_WRP(1);
	default:
	LOAD_DBG_WRP(0);
#undef LOAD_DBG_WRP
	}

	if (guest) {
		/* Load the timer registers */
		WRITE_SPECIALREG(EL1_REG(CNTKCTL),
		    hypctx->vtimer_cpu.cntkctl_el1);
		WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
		    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
		    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
		WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
		WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);

		if (ecv_poff) {
			/*
			 * Load the same offset as the virtual timer
			 * to keep in sync.
			 */
			WRITE_SPECIALREG(CNTPOFF_EL2_REG,
			    hyp->vtimer.cntvoff_el2);
			isb();
		}
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so it does not need to handle them.
		 */
		WRITE_SPECIALREG(EL0_REG(CNTP_CVAL),
		    hypctx->vtimer_cpu.phys_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTP_CTL),
		    hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0);
	}

	if (guest) {
		/* Load the GICv3 registers */
		WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
		WRITE_SPECIALREG(ich_vmcr_el2,
		    hypctx->vgic_v3_regs.ich_vmcr_el2);
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	LOAD_LR(x)					\
	case x:						\
		WRITE_SPECIALREG(ich_lr ## x ##_el2,	\
		    hypctx->vgic_v3_regs.ich_lr_el2[x])
		LOAD_LR(15);
		LOAD_LR(14);
		LOAD_LR(13);
		LOAD_LR(12);
		LOAD_LR(11);
		LOAD_LR(10);
		LOAD_LR(9);
		LOAD_LR(8);
		LOAD_LR(7);
		LOAD_LR(6);
		LOAD_LR(5);
		LOAD_LR(4);
		LOAD_LR(3);
		LOAD_LR(2);
		LOAD_LR(1);
		default:
		LOAD_LR(0);
#undef LOAD_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	LOAD_APR(x)						\
	case x:							\
		WRITE_SPECIALREG(ich_ap0r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap0r_el2[x]);		\
		WRITE_SPECIALREG(ich_ap1r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
		LOAD_APR(3);
		LOAD_APR(2);
		LOAD_APR(1);
		default:
		LOAD_APR(0);
#undef LOAD_APR
		}
	}
}

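/*
 * Perform a single world switch into the guest: save the host state,
 * install the guest register context, stage 2 translation (VTTBR_EL2) and
 * debug configuration (MDCR_EL2), run the guest, then record the exit
 * information and restore the host. Returns the exception type that caused
 * the exit, or EXCP_TYPE_REENTER when the guest should be re-entered.
 */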
static uint64_t
vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
{
	struct hypctx host_hypctx;
	uint64_t cntvoff_el2;
	uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
#ifndef VMM_VHE
	uint64_t hcrx_el2;
#endif
	uint64_t ret;
	uint64_t s1e1r, hpfar_el2;
	bool ecv_poff, hpfar_valid;

	ecv_poff = (hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0;
	vmm_hyp_reg_store(&host_hypctx, NULL, false, ecv_poff);
#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
#endif

	/* Save the host special registers */
	cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
	cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
	cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);

	ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
	ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);

	vmm_hyp_reg_restore(hypctx, hyp, true, ecv_poff);

	/* Load the common hypervisor registers */
	WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);

	host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
	WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);

	/* Call into the guest */
	ret = VMM_HYP_FUNC(do_call_guest)(hypctx);

	WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
	isb();

	/* Store the exit info */
	hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
	vmm_hyp_reg_store(hypctx, hyp, true, ecv_poff);

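	/*
	 * HPFAR_EL2 only holds the faulting IPA for some abort types. When
	 * it cannot be relied on it is recreated by translating FAR_EL2
	 * with an AT S1E1R instruction; if that also fails the guest is
	 * re-entered so the fault is taken again.
	 */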
	hpfar_valid = true;
	if (ret == EXCP_TYPE_EL1_SYNC) {
		switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_DATA_ABORT_L:
			/*
			 * The hpfar_el2 register is valid for:
			 *  - Translation and Access faults.
			 *  - Translation, Access, and permission faults on
			 *    the translation table walk on the stage 1 tables.
			 *  - A stage 2 Address size fault.
			 *
			 * As we only need it in the first 2 cases we can just
			 * exclude it on permission faults that are not from
			 * the stage 1 table walk.
			 *
			 * TODO: Add a case for Arm erratum 834220.
			 */
			if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
				break;
			switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
			case ISS_DATA_DFSC_PF_L1:
			case ISS_DATA_DFSC_PF_L2:
			case ISS_DATA_DFSC_PF_L3:
				hpfar_valid = false;
				break;
			}
			break;
		}
	}
	if (hpfar_valid) {
		hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
	} else {
		/*
		 * TODO: There is a risk the AT instruction could cause an
		 * exception here. We should handle it & return a failure.
		 */
		s1e1r =
		    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
		if (PAR_SUCCESS(s1e1r)) {
			hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
			hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
			hypctx->exit_info.hpfar_el2 = hpfar_el2;
		} else {
			ret = EXCP_TYPE_REENTER;
		}
	}

	vmm_hyp_reg_restore(&host_hypctx, NULL, false, ecv_poff);

#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hcrx_el2);
#endif

	/* Restore the host special registers */
	WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
	WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);

	WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
	WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
	WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);

	return (ret);
}

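/*
 * Enter the guest and keep running it until an exit that the host needs
 * to handle. Exits flagged EXCP_TYPE_REENTER (e.g. when the faulting IPA
 * could not be recovered) are retried without returning to the host.
 */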
VMM_STATIC uint64_t
VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
{
	uint64_t ret;

	do {
		ret = vmm_hyp_call_guest(hyp, hypctx);
	} while (ret == EXCP_TYPE_REENTER);

	return (ret);
}

VMM_STATIC uint64_t
VMM_HYP_FUNC(read_reg)(uint64_t reg)
{
	switch (reg) {
	case HYP_REG_ICH_VTR:
		return (READ_SPECIALREG(ich_vtr_el2));
	}

	return (0);
}

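/*
 * Invalidate all stage 1 and stage 2 TLB entries for the EL1&0 regime,
 * for all VMIDs, across the Inner Shareable domain. The dsb(ishst) makes
 * any earlier page table updates visible before the invalidate.
 */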
VMM_STATIC void
VMM_HYP_FUNC(clean_s2_tlbi)(void)
{
	dsb(ishst);
	__asm __volatile("tlbi alle1is");
	dsb(ish);
}

VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
    bool final_only)
{
	uint64_t end, r, start;
	uint64_t host_vttbr;
#ifdef VMM_VHE
	uint64_t host_hcr;
#endif

#ifdef VMM_VHE
	dsb(ishst);
#endif

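/*
 * The address argument to the TLBI by-IPA instructions is the IPA shifted
 * right by the 4KiB page shift and truncated to a 44-bit field. The
 * invalidation loop below steps through the range one last-level (L3)
 * page at a time.
 */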
#define	TLBI_VA_SHIFT			12
#define	TLBI_VA_MASK			((1ul << 44) - 1)
#define	TLBI_VA(addr)			(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
#define	TLBI_VA_L3_INCR			(L3_SIZE >> TLBI_VA_SHIFT)

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

#ifdef VMM_VHE
	/* Clear HCR_EL2.TGE so the invalidation applies to the guest regime */
	host_hcr = READ_SPECIALREG(hcr_el2);
	WRITE_SPECIALREG(hcr_el2, host_hcr & ~HCR_TGE);
	isb();
#endif

	/*
	 * The CPU can cache the stage 1 + 2 combination so we need to ensure
	 * the stage 2 is invalidated first, then when this has completed we
	 * invalidate the stage 1 TLB. As we don't know which stage 1 virtual
	 * addresses point at the stage 2 IPA we need to invalidate the entire
	 * stage 1 TLB.
	 */

	start = TLBI_VA(sva);
	end = TLBI_VA(eva);
	for (r = start; r < end; r += TLBI_VA_L3_INCR) {
		/* Invalidate the stage 2 TLB entry */
		if (final_only)
			__asm __volatile("tlbi	ipas2le1is, %0" : : "r"(r));
		else
			__asm __volatile("tlbi	ipas2e1is, %0" : : "r"(r));
	}
	/* Ensure the entry has been invalidated */
	dsb(ish);
	/* Invalidate the stage 1 TLB. */
	__asm __volatile("tlbi vmalle1is");
	dsb(ish);
	isb();

#ifdef VMM_VHE
	WRITE_SPECIALREG(hcr_el2, host_hcr);
	isb();
#endif

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}

VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
{
	uint64_t host_vttbr;

#ifdef VMM_VHE
	dsb(ishst);
#endif

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

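	/*
	 * Invalidate all stage 1 and stage 2 entries for the current VMID,
	 * which is why VTTBR_EL2 was temporarily switched to the guest's.
	 */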
	__asm __volatile("tlbi vmalls12e1is");
	dsb(ish);
	isb();

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}
757