xref: /freebsd/sys/arm64/vmm/vmm_hyp.c (revision 7fdf597e96a02165cfe22ff357b857d5fa15ed8a)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2021 Andrew Turner
5  *
6  * This work was supported by Innovate UK project 105694, "Digital Security
7  * by Design (DSbD) Technology Platform Prototype".
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 #include <sys/cdefs.h>
32 #include <sys/types.h>
33 #include <sys/proc.h>
34 
35 #include <machine/armreg.h>
36 
37 #include "arm64.h"
38 #include "hyp.h"
39 
40 struct hypctx;
41 
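/*
 * Low-level world switch into the guest vCPU described by hypctx. It is
 * implemented outside this file, in the hypervisor switch code, and returns
 * the EXCP_TYPE_* value describing why the guest exited.
 */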
42 uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);
43 
44 static void
45 vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
46 {
47 	uint64_t dfr0;
48 
49 	/* Store the guest-only state (timer and GICv3 registers) */
50 	if (guest) {
51 		/* Store the timer registers */
52 		hypctx->vtimer_cpu.cntkctl_el1 =
53 		    READ_SPECIALREG(EL1_REG(CNTKCTL));
54 		hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
55 		    READ_SPECIALREG(EL0_REG(CNTV_CVAL));
56 		hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
57 		    READ_SPECIALREG(EL0_REG(CNTV_CTL));
58 
59 		/* Store the GICv3 registers */
60 		hypctx->vgic_v3_regs.ich_eisr_el2 =
61 		    READ_SPECIALREG(ich_eisr_el2);
62 		hypctx->vgic_v3_regs.ich_elrsr_el2 =
63 		    READ_SPECIALREG(ich_elrsr_el2);
64 		hypctx->vgic_v3_regs.ich_hcr_el2 =
65 		    READ_SPECIALREG(ich_hcr_el2);
66 		hypctx->vgic_v3_regs.ich_misr_el2 =
67 		    READ_SPECIALREG(ich_misr_el2);
68 		hypctx->vgic_v3_regs.ich_vmcr_el2 =
69 		    READ_SPECIALREG(ich_vmcr_el2);
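		/*
		 * Store the implemented list registers. The switch enters at
		 * the case for the highest implemented ICH_LR<n>_EL2 and
		 * falls through to 0, so only the registers the hardware
		 * provides are read.
		 */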
70 		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
71 #define	STORE_LR(x)					\
72 	case x:						\
73 		hypctx->vgic_v3_regs.ich_lr_el2[x] =	\
74 		    READ_SPECIALREG(ich_lr ## x ##_el2)
75 		STORE_LR(15);
76 		STORE_LR(14);
77 		STORE_LR(13);
78 		STORE_LR(12);
79 		STORE_LR(11);
80 		STORE_LR(10);
81 		STORE_LR(9);
82 		STORE_LR(8);
83 		STORE_LR(7);
84 		STORE_LR(6);
85 		STORE_LR(5);
86 		STORE_LR(4);
87 		STORE_LR(3);
88 		STORE_LR(2);
89 		STORE_LR(1);
90 		default:
91 		STORE_LR(0);
92 #undef STORE_LR
93 		}
94 
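		/*
		 * The active-priority registers use the same descending
		 * fall-through pattern, bounded by ich_apr_num.
		 */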
95 		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
96 #define	STORE_APR(x)						\
97 	case x:							\
98 		hypctx->vgic_v3_regs.ich_ap0r_el2[x] =		\
99 		    READ_SPECIALREG(ich_ap0r ## x ##_el2);	\
100 		hypctx->vgic_v3_regs.ich_ap1r_el2[x] =		\
101 		    READ_SPECIALREG(ich_ap1r ## x ##_el2)
102 		STORE_APR(3);
103 		STORE_APR(2);
104 		STORE_APR(1);
105 		default:
106 		STORE_APR(0);
107 #undef STORE_APR
108 		}
109 	}
110 
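	/*
	 * The number of hardware breakpoints and watchpoints is discovered
	 * from ID_AA64DFR0_EL1; only the implemented DBGBCRn_EL1/DBGBVRn_EL1
	 * and DBGWCRn_EL1/DBGWVRn_EL1 registers are saved, again by falling
	 * through from the highest case.
	 */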
111 	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
112 	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
113 #define	STORE_DBG_BRP(x)						\
114 	case x:								\
115 		hypctx->dbgbcr_el1[x] =					\
116 		    READ_SPECIALREG(dbgbcr ## x ## _el1);		\
117 		hypctx->dbgbvr_el1[x] =					\
118 		    READ_SPECIALREG(dbgbvr ## x ## _el1)
119 	STORE_DBG_BRP(15);
120 	STORE_DBG_BRP(14);
121 	STORE_DBG_BRP(13);
122 	STORE_DBG_BRP(12);
123 	STORE_DBG_BRP(11);
124 	STORE_DBG_BRP(10);
125 	STORE_DBG_BRP(9);
126 	STORE_DBG_BRP(8);
127 	STORE_DBG_BRP(7);
128 	STORE_DBG_BRP(6);
129 	STORE_DBG_BRP(5);
130 	STORE_DBG_BRP(4);
131 	STORE_DBG_BRP(3);
132 	STORE_DBG_BRP(2);
133 	STORE_DBG_BRP(1);
134 	default:
135 	STORE_DBG_BRP(0);
136 #undef STORE_DBG_BRP
137 	}
138 
139 	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
140 #define	STORE_DBG_WRP(x)						\
141 	case x:								\
142 		hypctx->dbgwcr_el1[x] =					\
143 		    READ_SPECIALREG(dbgwcr ## x ## _el1);		\
144 		hypctx->dbgwvr_el1[x] =					\
145 		    READ_SPECIALREG(dbgwvr ## x ## _el1)
146 	STORE_DBG_WRP(15);
147 	STORE_DBG_WRP(14);
148 	STORE_DBG_WRP(13);
149 	STORE_DBG_WRP(12);
150 	STORE_DBG_WRP(11);
151 	STORE_DBG_WRP(10);
152 	STORE_DBG_WRP(9);
153 	STORE_DBG_WRP(8);
154 	STORE_DBG_WRP(7);
155 	STORE_DBG_WRP(6);
156 	STORE_DBG_WRP(5);
157 	STORE_DBG_WRP(4);
158 	STORE_DBG_WRP(3);
159 	STORE_DBG_WRP(2);
160 	STORE_DBG_WRP(1);
161 	default:
162 	STORE_DBG_WRP(0);
163 #undef STORE_DBG_WRP
164 	}
165 
166 	/* Store the PMU registers */
167 	hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
168 	hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
169 	hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
170 	hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
171 	hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
172 	hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);
173 	hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
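	/*
	 * PMCR_EL0.N is the number of implemented event counters. The
	 * "case (x + 1)" labels make the switch enter at the case for the
	 * highest implemented counter index (N - 1) and fall through; when
	 * N == 0 only the cycle counter exists and the default case skips
	 * them all.
	 */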
174 	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
175 #define	STORE_PMU(x)							\
176 	case (x + 1):							\
177 		hypctx->pmevcntr_el0[x] =				\
178 		    READ_SPECIALREG(pmevcntr ## x ## _el0);		\
179 		hypctx->pmevtyper_el0[x] =				\
180 		    READ_SPECIALREG(pmevtyper ## x ## _el0)
181 	STORE_PMU(30);
182 	STORE_PMU(29);
183 	STORE_PMU(28);
184 	STORE_PMU(27);
185 	STORE_PMU(26);
186 	STORE_PMU(25);
187 	STORE_PMU(24);
188 	STORE_PMU(23);
189 	STORE_PMU(22);
190 	STORE_PMU(21);
191 	STORE_PMU(20);
192 	STORE_PMU(19);
193 	STORE_PMU(18);
194 	STORE_PMU(17);
195 	STORE_PMU(16);
196 	STORE_PMU(15);
197 	STORE_PMU(14);
198 	STORE_PMU(13);
199 	STORE_PMU(12);
200 	STORE_PMU(11);
201 	STORE_PMU(10);
202 	STORE_PMU(9);
203 	STORE_PMU(8);
204 	STORE_PMU(7);
205 	STORE_PMU(6);
206 	STORE_PMU(5);
207 	STORE_PMU(4);
208 	STORE_PMU(3);
209 	STORE_PMU(2);
210 	STORE_PMU(1);
211 	STORE_PMU(0);
212 	default:		/* N == 0 when only PMCCNTR_EL0 is available */
213 		break;
214 #undef STORE_PMU
215 	}
216 
217 	/* Store the special registers to the trapframe */
218 	hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
219 	hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
220 	hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
221 	if (guest) {
222 		hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
223 		hypctx->par_el1 = READ_SPECIALREG(par_el1);
224 	}
225 
226 	/* Store the guest special registers */
227 	hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
228 	hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
229 	hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
230 	hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);
231 
232 	hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
233 	hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
234 	hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
235 	hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);
236 
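	/*
	 * The EL1 state below is always saved on non-VHE, where the host's
	 * EL1 registers are switched with the guest's, but only for the
	 * guest on VHE, where the host runs at EL2 and its EL1 state is not
	 * disturbed. EL1_REG() is expected to select the _EL12 alias of each
	 * register on the VHE build.
	 */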
237 	if (guest_or_nonvhe(guest)) {
238 		hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
239 		hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));
240 
241 		hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
242 		hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
243 		hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
244 		hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
245 		hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
246 		hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
247 		hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
248 		hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
249 		hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
250 		hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
251 		hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
252 		/* TODO: Support when this is not res0 */
253 		hypctx->tcr2_el1 = 0;
254 		hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
255 		hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
256 	}
257 
258 	hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
259 	hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
260 	hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
261 	hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
262 }
263 
264 static void
265 vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
266 {
267 	uint64_t dfr0;
268 
269 	/* Restore the special registers */
270 	WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);
271 	isb();
272 
273 	WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
274 	WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
275 	WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
276 	WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);
277 
278 	WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
279 	WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
280 	WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
281 	WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);
282 
283 	if (guest_or_nonvhe(guest)) {
284 		WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
285 		WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);
286 
287 		WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
288 		WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
289 		WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
290 		WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
291 		WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
292 		WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
293 		WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
294 		WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);
296 		WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
297 		WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
298 		WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
299 		/* TODO: tcr2_el1 */
300 		WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
301 		WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
302 	}
303 
304 	if (guest) {
305 		WRITE_SPECIALREG(par_el1, hypctx->par_el1);
306 	}
307 
308 	WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
309 	WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
310 	WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);
311 
312 	/* Load the special regs from the trapframe */
313 	WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
314 	WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
315 	WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);
316 
317 	/* Restore the PMU registers */
318 	WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
319 	WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
320 	WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
321 	/* Clear all counter-enable, interrupt-enable, and overflow bits, then restore the saved values */
322 	WRITE_SPECIALREG(pmcntenclr_el0, 0xfffffffful);
323 	WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
324 	WRITE_SPECIALREG(pmintenclr_el1, 0xfffffffful);
325 	WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
326 	WRITE_SPECIALREG(pmovsclr_el0, 0xfffffffful);
327 	WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);
328 
329 	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
330 #define	LOAD_PMU(x)							\
331 	case (x + 1):							\
332 		WRITE_SPECIALREG(pmevcntr ## x ## _el0,			\
333 		    hypctx->pmevcntr_el0[x]);				\
334 		WRITE_SPECIALREG(pmevtyper ## x ## _el0,		\
335 		    hypctx->pmevtyper_el0[x])
336 	LOAD_PMU(30);
337 	LOAD_PMU(29);
338 	LOAD_PMU(28);
339 	LOAD_PMU(27);
340 	LOAD_PMU(26);
341 	LOAD_PMU(25);
342 	LOAD_PMU(24);
343 	LOAD_PMU(23);
344 	LOAD_PMU(22);
345 	LOAD_PMU(21);
346 	LOAD_PMU(20);
347 	LOAD_PMU(19);
348 	LOAD_PMU(18);
349 	LOAD_PMU(17);
350 	LOAD_PMU(16);
351 	LOAD_PMU(15);
352 	LOAD_PMU(14);
353 	LOAD_PMU(13);
354 	LOAD_PMU(12);
355 	LOAD_PMU(11);
356 	LOAD_PMU(10);
357 	LOAD_PMU(9);
358 	LOAD_PMU(8);
359 	LOAD_PMU(7);
360 	LOAD_PMU(6);
361 	LOAD_PMU(5);
362 	LOAD_PMU(4);
363 	LOAD_PMU(3);
364 	LOAD_PMU(2);
365 	LOAD_PMU(1);
366 	LOAD_PMU(0);
367 	default:		/* N == 0 when only PMCCNTR_EL0 is available */
368 		break;
369 #undef LOAD_PMU
370 	}
371 
372 	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
373 	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
374 #define	LOAD_DBG_BRP(x)							\
375 	case x:								\
376 		WRITE_SPECIALREG(dbgbcr ## x ## _el1,			\
377 		    hypctx->dbgbcr_el1[x]);				\
378 		WRITE_SPECIALREG(dbgbvr ## x ## _el1,			\
379 		    hypctx->dbgbvr_el1[x])
380 	LOAD_DBG_BRP(15);
381 	LOAD_DBG_BRP(14);
382 	LOAD_DBG_BRP(13);
383 	LOAD_DBG_BRP(12);
384 	LOAD_DBG_BRP(11);
385 	LOAD_DBG_BRP(10);
386 	LOAD_DBG_BRP(9);
387 	LOAD_DBG_BRP(8);
388 	LOAD_DBG_BRP(7);
389 	LOAD_DBG_BRP(6);
390 	LOAD_DBG_BRP(5);
391 	LOAD_DBG_BRP(4);
392 	LOAD_DBG_BRP(3);
393 	LOAD_DBG_BRP(2);
394 	LOAD_DBG_BRP(1);
395 	default:
396 	LOAD_DBG_BRP(0);
397 #undef LOAD_DBG_BRP
398 	}
399 
400 	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
401 #define	LOAD_DBG_WRP(x)							\
402 	case x:								\
403 		WRITE_SPECIALREG(dbgwcr ## x ## _el1,			\
404 		    hypctx->dbgwcr_el1[x]);				\
405 		WRITE_SPECIALREG(dbgwvr ## x ## _el1,			\
406 		    hypctx->dbgwvr_el1[x])
407 	LOAD_DBG_WRP(15);
408 	LOAD_DBG_WRP(14);
409 	LOAD_DBG_WRP(13);
410 	LOAD_DBG_WRP(12);
411 	LOAD_DBG_WRP(11);
412 	LOAD_DBG_WRP(10);
413 	LOAD_DBG_WRP(9);
414 	LOAD_DBG_WRP(8);
415 	LOAD_DBG_WRP(7);
416 	LOAD_DBG_WRP(6);
417 	LOAD_DBG_WRP(5);
418 	LOAD_DBG_WRP(4);
419 	LOAD_DBG_WRP(3);
420 	LOAD_DBG_WRP(2);
421 	LOAD_DBG_WRP(1);
422 	default:
423 	LOAD_DBG_WRP(0);
424 #undef LOAD_DBG_WRP
425 	}
426 
427 	if (guest) {
428 		/* Load the timer registers */
429 		WRITE_SPECIALREG(EL1_REG(CNTKCTL),
430 		    hypctx->vtimer_cpu.cntkctl_el1);
431 		WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
432 		    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
433 		WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
434 		    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
435 		WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
436 		WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);
437 
438 		/* Load the GICv3 registers */
439 		WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
440 		WRITE_SPECIALREG(ich_vmcr_el2,
441 		    hypctx->vgic_v3_regs.ich_vmcr_el2);
442 		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
443 #define	LOAD_LR(x)					\
444 	case x:						\
445 		WRITE_SPECIALREG(ich_lr ## x ##_el2,	\
446 		    hypctx->vgic_v3_regs.ich_lr_el2[x])
447 		LOAD_LR(15);
448 		LOAD_LR(14);
449 		LOAD_LR(13);
450 		LOAD_LR(12);
451 		LOAD_LR(11);
452 		LOAD_LR(10);
453 		LOAD_LR(9);
454 		LOAD_LR(8);
455 		LOAD_LR(7);
456 		LOAD_LR(6);
457 		LOAD_LR(5);
458 		LOAD_LR(4);
459 		LOAD_LR(3);
460 		LOAD_LR(2);
461 		LOAD_LR(1);
462 		default:
463 		LOAD_LR(0);
464 #undef LOAD_LR
465 		}
466 
467 		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
468 #define	LOAD_APR(x)						\
469 	case x:							\
470 		WRITE_SPECIALREG(ich_ap0r ## x ##_el2,		\
471 		    hypctx->vgic_v3_regs.ich_ap0r_el2[x]);		\
472 		WRITE_SPECIALREG(ich_ap1r ## x ##_el2,		\
473 		    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
474 		LOAD_APR(3);
475 		LOAD_APR(2);
476 		LOAD_APR(1);
477 		default:
478 		LOAD_APR(0);
479 #undef LOAD_APR
480 		}
481 	}
482 }
483 
484 static uint64_t
485 vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
486 {
487 	struct hypctx host_hypctx;
488 	uint64_t cntvoff_el2;
489 	uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
490 	uint64_t ret;
491 	uint64_t s1e1r, hpfar_el2;
492 	bool hpfar_valid;
493 
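	/*
	 * Save the host's register state into the on-stack context so it can
	 * be restored after the guest exits.
	 */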
494 	vmm_hyp_reg_store(&host_hypctx, NULL, false);
495 
496 	/* Save the host special registers */
497 	cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
498 	cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
499 	cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);
500 
501 	ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
502 	ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);
503 
504 	vmm_hyp_reg_restore(hypctx, hyp, true);
505 
506 	/* Load the common hypervisor registers */
507 	WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);
508 
509 	host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
510 	WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);
511 
512 	/* Call into the guest */
513 	ret = VMM_HYP_FUNC(do_call_guest)(hypctx);
514 
515 	WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
516 	isb();
517 
518 	/* Store the exit info */
519 	hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
520 	vmm_hyp_reg_store(hypctx, hyp, true);
521 
522 	hpfar_valid = true;
523 	if (ret == EXCP_TYPE_EL1_SYNC) {
524 		switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
525 		case EXCP_INSN_ABORT_L:
526 		case EXCP_DATA_ABORT_L:
527 			/*
528 			 * The hpfar_el2 register is valid for:
529 			 *  - Translation and Access faults.
530 			 *  - Translation, Access, and permission faults on
531 			 *    the translation table walk on the stage 1 tables.
532 			 *  - A stage 2 Address size fault.
533 			 *
534 			 * As we only need it in the first 2 cases we can just
535 			 * exclude it on permission faults that are not from
536 			 * the stage 1 table walk.
537 			 *
538 			 * TODO: Add a case for Arm erratum 834220.
539 			 */
540 			if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
541 				break;
542 			switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
543 			case ISS_DATA_DFSC_PF_L1:
544 			case ISS_DATA_DFSC_PF_L2:
545 			case ISS_DATA_DFSC_PF_L3:
546 				hpfar_valid = false;
547 				break;
548 			}
549 			break;
550 		}
551 	}
552 	if (hpfar_valid) {
553 		hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
554 	} else {
555 		/*
556 		 * TODO: There is a risk that the AT instruction could cause an
557 		 * exception here. We should handle it and return a failure.
558 		 */
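		/*
		 * Reconstruct the faulting IPA by performing a stage 1
		 * translation of the faulting VA (AT S1E1R) and reading the
		 * result from PAR_EL1; if the translation fails the guest is
		 * simply re-entered to retry the access.
		 */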
559 		s1e1r =
560 		    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
561 		if (PAR_SUCCESS(s1e1r)) {
562 			hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
563 			hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
564 			hypctx->exit_info.hpfar_el2 = hpfar_el2;
565 		} else {
566 			ret = EXCP_TYPE_REENTER;
567 		}
568 	}
569 
570 	vmm_hyp_reg_restore(&host_hypctx, NULL, false);
571 
572 	/* Restore the host special registers */
573 	WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
574 	WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);
575 
576 	WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
577 	WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
578 	WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);
579 
580 	return (ret);
581 }
582 
583 VMM_STATIC uint64_t
584 VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
585 {
586 	uint64_t ret;
587 
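	/*
	 * EXCP_TYPE_REENTER indicates the exit should not be reported to the
	 * host (for example when the faulting IPA could not be reconstructed
	 * in vmm_hyp_call_guest), so the guest is simply re-entered.
	 */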
588 	do {
589 		ret = vmm_hyp_call_guest(hyp, hypctx);
590 	} while (ret == EXCP_TYPE_REENTER);
591 
592 	return (ret);
593 }
594 
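/*
 * Read selected EL2 registers on behalf of the host kernel; on non-VHE the
 * kernel runs at EL1 and cannot access these directly, so this is reached
 * through a hypervisor call.
 */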
595 VMM_STATIC uint64_t
596 VMM_HYP_FUNC(read_reg)(uint64_t reg)
597 {
598 	switch (reg) {
599 	case HYP_REG_ICH_VTR:
600 		return (READ_SPECIALREG(ich_vtr_el2));
601 	case HYP_REG_CNTHCTL:
602 		return (READ_SPECIALREG(cnthctl_el2));
603 	}
604 
605 	return (0);
606 }
607 
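/*
 * Invalidate all EL1&0 TLB entries, for all VMIDs, across the Inner Shareable
 * domain, after making prior page-table updates visible with dsb(ishst).
 */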
608 VMM_STATIC void
609 VMM_HYP_FUNC(clean_s2_tlbi)(void)
610 {
611 	dsb(ishst);
612 	__asm __volatile("tlbi alle1is");
613 	dsb(ish);
614 }
615 
616 VMM_STATIC void
617 VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
618     bool final_only)
619 {
620 	uint64_t end, r, start;
621 	uint64_t host_vttbr;
622 #ifdef VMM_VHE
623 	uint64_t host_tcr;
624 #endif
625 
626 #ifdef VMM_VHE
627 	dsb(ishst);
628 #endif
629 
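	/*
	 * The TLBI IPAS2*E1IS instructions below take the IPA shifted right
	 * by the page shift in the low bits of their register operand; the
	 * range is walked one stage 2 L3 page at a time.
	 */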
630 #define	TLBI_VA_SHIFT			12
631 #define	TLBI_VA_MASK			((1ul << 44) - 1)
632 #define	TLBI_VA(addr)			(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
633 #define	TLBI_VA_L3_INCR			(L3_SIZE >> TLBI_VA_SHIFT)
634 
635 	/* Switch to the guest vttbr */
636 	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
637 	host_vttbr = READ_SPECIALREG(vttbr_el2);
638 	WRITE_SPECIALREG(vttbr_el2, vttbr);
639 	isb();
640 
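	/*
	 * Under VHE the host runs with HCR_EL2.TGE set, in which case EL1
	 * TLB maintenance issued from EL2 applies to the EL2&0 regime; TGE
	 * (a bit in hcr_el2) has to be clear for the invalidations below to
	 * target the guest's EL1&0 regime.
	 */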
641 #ifdef VMM_VHE
642 	host_tcr = READ_SPECIALREG(tcr_el2);
643 	WRITE_SPECIALREG(tcr_el2, host_tcr & ~HCR_TGE);
644 	isb();
645 #endif
646 
647 	/*
648 	 * The CPU can cache the combined stage 1 + 2 translation, so the
649 	 * stage 2 entries need to be invalidated first; once that has
650 	 * completed we invalidate the stage 1 TLB. As we don't know which
651 	 * stage 1 virtual addresses map to the stage 2 IPAs we need to
652 	 * invalidate the entire stage 1 TLB.
653 	 */
654 
655 	start = TLBI_VA(sva);
656 	end = TLBI_VA(eva);
657 	for (r = start; r < end; r += TLBI_VA_L3_INCR) {
658 		/* Invalidate the stage 2 TLB entry */
659 		if (final_only)
660 			__asm __volatile("tlbi	ipas2le1is, %0" : : "r"(r));
661 		else
662 			__asm __volatile("tlbi	ipas2e1is, %0" : : "r"(r));
663 	}
664 	/* Ensure the entry has been invalidated */
665 	dsb(ish);
666 	/* Invalidate the stage 1 TLB. */
667 	__asm __volatile("tlbi vmalle1is");
668 	dsb(ish);
669 	isb();
670 
671 #ifdef VMM_VHE
672 	WRITE_SPECIALREG(tcr_el2, host_tcr);
673 	isb();
674 #endif
675 
676 	/* Switch back to the host vttbr */
677 	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
678 	isb();
679 }
680 
681 VMM_STATIC void
682 VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
683 {
684 	uint64_t host_vttbr;
685 
686 #ifdef VMM_VHE
687 	dsb(ishst);
688 #endif
689 
690 	/* Switch to the guest vttbr */
691 	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
692 	host_vttbr = READ_SPECIALREG(vttbr_el2);
693 	WRITE_SPECIALREG(vttbr_el2, vttbr);
694 	isb();
695 
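	/*
	 * Invalidate all stage 1 and stage 2 entries for the guest's VMID
	 * (taken from the vttbr loaded above), Inner Shareable.
	 */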
696 	__asm __volatile("tlbi vmalls12e1is");
697 	dsb(ish);
698 	isb();
699 
700 	/* Switch back to the host vttbr */
701 	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
702 	isb();
703 }
704