/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Andrew Turner
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/proc.h>

#include <machine/armreg.h>

#include "arm64.h"
#include "hyp.h"

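/*
 * This code is built for both the VHE and non-VHE configurations; the
 * VMM_HYP_FUNC() and VMM_STATIC macros give each variant its own symbol
 * names and linkage.
 */
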
struct hypctx;

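/*
 * The low-level world switch, implemented in assembly. It enters the guest
 * context described by hypctx and returns the exception type that caused
 * the exit back to the hypervisor.
 */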
uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);

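/*
 * Save the register state of the current context into hypctx. When 'guest'
 * is true the full guest state, including the timer and GICv3 registers, is
 * saved; otherwise the equivalent host state is saved so that it can be
 * restored after running the guest.
 */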
static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
{
	uint64_t dfr0;

	/* Store the guest timer and GICv3 state */
	if (guest) {
		/* Store the timer registers */
		hypctx->vtimer_cpu.cntkctl_el1 =
		    READ_SPECIALREG(EL1_REG(CNTKCTL));
		hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CVAL));
		hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CTL));

		/* Store the GICv3 registers */
		hypctx->vgic_v3_regs.ich_eisr_el2 =
		    READ_SPECIALREG(ich_eisr_el2);
		hypctx->vgic_v3_regs.ich_elrsr_el2 =
		    READ_SPECIALREG(ich_elrsr_el2);
		hypctx->vgic_v3_regs.ich_hcr_el2 =
		    READ_SPECIALREG(ich_hcr_el2);
		hypctx->vgic_v3_regs.ich_misr_el2 =
		    READ_SPECIALREG(ich_misr_el2);
		hypctx->vgic_v3_regs.ich_vmcr_el2 =
		    READ_SPECIALREG(ich_vmcr_el2);
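		/*
		 * The cases below intentionally fall through so that list
		 * registers 0 through ich_lr_num - 1 are all saved.
		 */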
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	STORE_LR(x)						\
		case x:						\
			hypctx->vgic_v3_regs.ich_lr_el2[x] =	\
			    READ_SPECIALREG(ich_lr ## x ##_el2)
		STORE_LR(15);
		STORE_LR(14);
		STORE_LR(13);
		STORE_LR(12);
		STORE_LR(11);
		STORE_LR(10);
		STORE_LR(9);
		STORE_LR(8);
		STORE_LR(7);
		STORE_LR(6);
		STORE_LR(5);
		STORE_LR(4);
		STORE_LR(3);
		STORE_LR(2);
		STORE_LR(1);
		default:
			STORE_LR(0);
#undef STORE_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	STORE_APR(x)						\
		case x:						\
			hypctx->vgic_v3_regs.ich_ap0r_el2[x] =	\
			    READ_SPECIALREG(ich_ap0r ## x ##_el2); \
			hypctx->vgic_v3_regs.ich_ap1r_el2[x] =	\
			    READ_SPECIALREG(ich_ap1r ## x ##_el2)
		STORE_APR(3);
		STORE_APR(2);
		STORE_APR(1);
		default:
			STORE_APR(0);
#undef STORE_APR
		}
	}

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_BRP(x)					\
	case x:							\
		hypctx->dbgbcr_el1[x] =				\
		    READ_SPECIALREG(dbgbcr ## x ## _el1);	\
		hypctx->dbgbvr_el1[x] =				\
		    READ_SPECIALREG(dbgbvr ## x ## _el1)
	STORE_DBG_BRP(15);
	STORE_DBG_BRP(14);
	STORE_DBG_BRP(13);
	STORE_DBG_BRP(12);
	STORE_DBG_BRP(11);
	STORE_DBG_BRP(10);
	STORE_DBG_BRP(9);
	STORE_DBG_BRP(8);
	STORE_DBG_BRP(7);
	STORE_DBG_BRP(6);
	STORE_DBG_BRP(5);
	STORE_DBG_BRP(4);
	STORE_DBG_BRP(3);
	STORE_DBG_BRP(2);
	STORE_DBG_BRP(1);
	default:
		STORE_DBG_BRP(0);
#undef STORE_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_WRP(x)					\
	case x:							\
		hypctx->dbgwcr_el1[x] =				\
		    READ_SPECIALREG(dbgwcr ## x ## _el1);	\
		hypctx->dbgwvr_el1[x] =				\
		    READ_SPECIALREG(dbgwvr ## x ## _el1)
	STORE_DBG_WRP(15);
	STORE_DBG_WRP(14);
	STORE_DBG_WRP(13);
	STORE_DBG_WRP(12);
	STORE_DBG_WRP(11);
	STORE_DBG_WRP(10);
	STORE_DBG_WRP(9);
	STORE_DBG_WRP(8);
	STORE_DBG_WRP(7);
	STORE_DBG_WRP(6);
	STORE_DBG_WRP(5);
	STORE_DBG_WRP(4);
	STORE_DBG_WRP(3);
	STORE_DBG_WRP(2);
	STORE_DBG_WRP(1);
	default:
		STORE_DBG_WRP(0);
#undef STORE_DBG_WRP
	}

	/* Store the PMU registers */
	hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
	hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
	hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
	hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
	hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
	hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);
	hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
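	/*
	 * Only PMCR_EL0.N event counters are implemented; the cases below
	 * fall through from counter N - 1 down to 0 so just those counters
	 * are saved.
	 */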
	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	STORE_PMU(x)						\
	case (x + 1):						\
		hypctx->pmevcntr_el0[x] =			\
		    READ_SPECIALREG(pmevcntr ## x ## _el0);	\
		hypctx->pmevtyper_el0[x] =			\
		    READ_SPECIALREG(pmevtyper ## x ## _el0)
	STORE_PMU(30);
	STORE_PMU(29);
	STORE_PMU(28);
	STORE_PMU(27);
	STORE_PMU(26);
	STORE_PMU(25);
	STORE_PMU(24);
	STORE_PMU(23);
	STORE_PMU(22);
	STORE_PMU(21);
	STORE_PMU(20);
	STORE_PMU(19);
	STORE_PMU(18);
	STORE_PMU(17);
	STORE_PMU(16);
	STORE_PMU(15);
	STORE_PMU(14);
	STORE_PMU(13);
	STORE_PMU(12);
	STORE_PMU(11);
	STORE_PMU(10);
	STORE_PMU(9);
	STORE_PMU(8);
	STORE_PMU(7);
	STORE_PMU(6);
	STORE_PMU(5);
	STORE_PMU(4);
	STORE_PMU(3);
	STORE_PMU(2);
	STORE_PMU(1);
	STORE_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef STORE_PMU
	}

	/* Store the special registers to the trapframe */
	hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
	hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
	hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
	if (guest) {
		hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
		hypctx->par_el1 = READ_SPECIALREG(par_el1);
	}

	/* Store the guest special registers */
	hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
	hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
	hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
	hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);

	hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
	hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
	hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
	hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);

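	/*
	 * EL1 state only needs to be saved for the guest, or for the host
	 * when not using VHE; a VHE host keeps the corresponding state in
	 * the EL2 registers instead.
	 */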
	if (guest_or_nonvhe(guest)) {
		hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
		hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));

		hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
		hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
		hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
		hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
		hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
		hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
		hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
		hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
		hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
		hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
		hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
		/* TODO: Support when this is not res0 */
		hypctx->tcr2_el1 = 0;
		hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
		hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
	}

	hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
	hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
	hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
	hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);

#ifndef VMM_VHE
	/* hcrx_el2 depends on FEAT_HCX */
	uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
	if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
		hypctx->hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
	}
#endif
}

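/*
 * Load the register state in hypctx back into the hardware. The 'guest'
 * flag mirrors vmm_hyp_reg_store(): it selects whether guest-only state
 * (timer, GICv3) is restored and which register aliases are used.
 */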
static void
vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
{
	uint64_t dfr0;

	/* Restore the special registers */
	WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);

	if (guest_or_nonvhe(guest)) {
		uint64_t mmfr1 = READ_SPECIALREG(id_aa64mmfr1_el1);
		if (ID_AA64MMFR1_HCX_VAL(mmfr1) >> ID_AA64MMFR1_HCX_SHIFT) {
			WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hypctx->hcrx_el2);
		}
	}
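	/*
	 * Synchronise the HCR_EL2 (and HCRX_EL2) update before the register
	 * accesses below, as it can change how they are routed.
	 */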
	isb();

	WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
	WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
	WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
	WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);

	WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
	WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
	WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
	WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
		WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);

		WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
		WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
		WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
		WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
		WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
		WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
		WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
		WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);
		WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
		WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
		WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
		/* TODO: tcr2_el1 */
		WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
		WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
	}

	if (guest) {
		WRITE_SPECIALREG(par_el1, hypctx->par_el1);
	}

	WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
	WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
	WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);

	/* Load the special regs from the trapframe */
	WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
	WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
	WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);

	/* Restore the PMU registers */
	WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
	WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
	WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
	/* Clear all events/interrupts then enable them */
	WRITE_SPECIALREG(pmcntenclr_el0, 0xfffffffful);
	WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
	WRITE_SPECIALREG(pmintenclr_el1, 0xfffffffful);
	WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
	WRITE_SPECIALREG(pmovsclr_el0, 0xfffffffful);
	WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);

	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	LOAD_PMU(x)						\
	case (x + 1):						\
		WRITE_SPECIALREG(pmevcntr ## x ## _el0,		\
		    hypctx->pmevcntr_el0[x]);			\
		WRITE_SPECIALREG(pmevtyper ## x ## _el0,	\
		    hypctx->pmevtyper_el0[x])
	LOAD_PMU(30);
	LOAD_PMU(29);
	LOAD_PMU(28);
	LOAD_PMU(27);
	LOAD_PMU(26);
	LOAD_PMU(25);
	LOAD_PMU(24);
	LOAD_PMU(23);
	LOAD_PMU(22);
	LOAD_PMU(21);
	LOAD_PMU(20);
	LOAD_PMU(19);
	LOAD_PMU(18);
	LOAD_PMU(17);
	LOAD_PMU(16);
	LOAD_PMU(15);
	LOAD_PMU(14);
	LOAD_PMU(13);
	LOAD_PMU(12);
	LOAD_PMU(11);
	LOAD_PMU(10);
	LOAD_PMU(9);
	LOAD_PMU(8);
	LOAD_PMU(7);
	LOAD_PMU(6);
	LOAD_PMU(5);
	LOAD_PMU(4);
	LOAD_PMU(3);
	LOAD_PMU(2);
	LOAD_PMU(1);
	LOAD_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef LOAD_PMU
	}

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_BRP(x)						\
	case x:							\
		WRITE_SPECIALREG(dbgbcr ## x ## _el1,		\
		    hypctx->dbgbcr_el1[x]);			\
		WRITE_SPECIALREG(dbgbvr ## x ## _el1,		\
		    hypctx->dbgbvr_el1[x])
	LOAD_DBG_BRP(15);
	LOAD_DBG_BRP(14);
	LOAD_DBG_BRP(13);
	LOAD_DBG_BRP(12);
	LOAD_DBG_BRP(11);
	LOAD_DBG_BRP(10);
	LOAD_DBG_BRP(9);
	LOAD_DBG_BRP(8);
	LOAD_DBG_BRP(7);
	LOAD_DBG_BRP(6);
	LOAD_DBG_BRP(5);
	LOAD_DBG_BRP(4);
	LOAD_DBG_BRP(3);
	LOAD_DBG_BRP(2);
	LOAD_DBG_BRP(1);
	default:
		LOAD_DBG_BRP(0);
#undef LOAD_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_WRP(x)						\
	case x:							\
		WRITE_SPECIALREG(dbgwcr ## x ## _el1,		\
		    hypctx->dbgwcr_el1[x]);			\
		WRITE_SPECIALREG(dbgwvr ## x ## _el1,		\
		    hypctx->dbgwvr_el1[x])
	LOAD_DBG_WRP(15);
	LOAD_DBG_WRP(14);
	LOAD_DBG_WRP(13);
	LOAD_DBG_WRP(12);
	LOAD_DBG_WRP(11);
	LOAD_DBG_WRP(10);
	LOAD_DBG_WRP(9);
	LOAD_DBG_WRP(8);
	LOAD_DBG_WRP(7);
	LOAD_DBG_WRP(6);
	LOAD_DBG_WRP(5);
	LOAD_DBG_WRP(4);
	LOAD_DBG_WRP(3);
	LOAD_DBG_WRP(2);
	LOAD_DBG_WRP(1);
	default:
		LOAD_DBG_WRP(0);
#undef LOAD_DBG_WRP
	}

	if (guest) {
		/* Load the timer registers */
		WRITE_SPECIALREG(EL1_REG(CNTKCTL),
		    hypctx->vtimer_cpu.cntkctl_el1);
		WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
		    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
		    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
		WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
		WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);

		/* Load the GICv3 registers */
		WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
		WRITE_SPECIALREG(ich_vmcr_el2,
		    hypctx->vgic_v3_regs.ich_vmcr_el2);
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	LOAD_LR(x)						\
		case x:						\
			WRITE_SPECIALREG(ich_lr ## x ##_el2,	\
			    hypctx->vgic_v3_regs.ich_lr_el2[x])
		LOAD_LR(15);
		LOAD_LR(14);
		LOAD_LR(13);
		LOAD_LR(12);
		LOAD_LR(11);
		LOAD_LR(10);
		LOAD_LR(9);
		LOAD_LR(8);
		LOAD_LR(7);
		LOAD_LR(6);
		LOAD_LR(5);
		LOAD_LR(4);
		LOAD_LR(3);
		LOAD_LR(2);
		LOAD_LR(1);
		default:
			LOAD_LR(0);
#undef LOAD_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	LOAD_APR(x)						\
		case x:						\
			WRITE_SPECIALREG(ich_ap0r ## x ##_el2,	\
			    hypctx->vgic_v3_regs.ich_ap0r_el2[x]); \
			WRITE_SPECIALREG(ich_ap1r ## x ##_el2,	\
			    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
		LOAD_APR(3);
		LOAD_APR(2);
		LOAD_APR(1);
		default:
			LOAD_APR(0);
#undef LOAD_APR
		}
	}
}

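/*
 * Perform a single world switch: save the host state, load the guest state,
 * run the guest via do_call_guest(), then save the guest state, record the
 * exit information and restore the host state. Returns the exit type, which
 * may be EXCP_TYPE_REENTER if the guest should be re-entered immediately.
 */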
static uint64_t
vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
{
	struct hypctx host_hypctx;
	uint64_t cntvoff_el2;
	uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
	uint64_t ret;
	uint64_t s1e1r, hpfar_el2;
	bool hpfar_valid;

	vmm_hyp_reg_store(&host_hypctx, NULL, false);

	/* Save the host special registers */
	cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
	cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
	cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);

	ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
	ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);

	vmm_hyp_reg_restore(hypctx, hyp, true);

	/* Load the common hypervisor registers */
	WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);

	host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
	WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);

	/* Call into the guest */
	ret = VMM_HYP_FUNC(do_call_guest)(hypctx);

	WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
	isb();

	/* Store the exit info */
	hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
	vmm_hyp_reg_store(hypctx, hyp, true);

	hpfar_valid = true;
	if (ret == EXCP_TYPE_EL1_SYNC) {
		switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_DATA_ABORT_L:
			/*
			 * The hpfar_el2 register is valid for:
			 *  - Translation and Access faults.
			 *  - Translation, Access, and permission faults on
			 *    the translation table walk on the stage 1 tables.
			 *  - A stage 2 Address size fault.
			 *
			 * Since we only need it in the first two cases we can
			 * skip reading it for permission faults that did not
			 * occur on the stage 1 table walk.
			 *
			 * TODO: Add a case for Arm erratum 834220.
			 */
			if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
				break;
			switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
			case ISS_DATA_DFSC_PF_L1:
			case ISS_DATA_DFSC_PF_L2:
			case ISS_DATA_DFSC_PF_L3:
				hpfar_valid = false;
				break;
			}
			break;
		}
	}
	if (hpfar_valid) {
		hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
	} else {
		/*
		 * TODO: There is a risk the AT instruction could cause an
		 * exception here. We should handle it & return a failure.
		 */
		s1e1r =
		    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
		if (PAR_SUCCESS(s1e1r)) {
			hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
			hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
			hypctx->exit_info.hpfar_el2 = hpfar_el2;
		} else {
			ret = EXCP_TYPE_REENTER;
		}
	}

	vmm_hyp_reg_restore(&host_hypctx, NULL, false);

	/* Restore the host special registers */
	WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
	WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);

	WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
	WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
	WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);

	return (ret);
}

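/*
 * Enter the guest and keep running it until an exit occurs that the host
 * kernel has to handle; exits that were dealt with here (EXCP_TYPE_REENTER)
 * loop straight back into the guest.
 */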
VMM_STATIC uint64_t
VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
{
	uint64_t ret;

	do {
		ret = vmm_hyp_call_guest(hyp, hypctx);
	} while (ret == EXCP_TYPE_REENTER);

	return (ret);
}

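/*
 * Read a hypervisor (EL2) register on behalf of the kernel. Unknown
 * register identifiers read as zero.
 */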
VMM_STATIC uint64_t
VMM_HYP_FUNC(read_reg)(uint64_t reg)
{
	switch (reg) {
	case HYP_REG_ICH_VTR:
		return (READ_SPECIALREG(ich_vtr_el2));
	case HYP_REG_CNTHCTL:
		return (READ_SPECIALREG(cnthctl_el2));
	}

	return (0);
}

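/*
 * Invalidate all stage 1 and stage 2 TLB entries for the EL1&0 translation
 * regime, for all VMIDs, across the Inner Shareable domain.
 */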
VMM_STATIC void
VMM_HYP_FUNC(clean_s2_tlbi)(void)
{
	dsb(ishst);
	__asm __volatile("tlbi alle1is");
	dsb(ish);
}

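/*
 * Invalidate the stage 2 TLB entries covering the IPA range [sva, eva) of
 * the guest identified by vttbr. If final_only is set only the last level
 * of the translation is invalidated, otherwise all levels are.
 */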
VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
    bool final_only)
{
	uint64_t end, r, start;
	uint64_t host_vttbr;
#ifdef VMM_VHE
	uint64_t host_hcr;
#endif

#ifdef VMM_VHE
	dsb(ishst);
#endif

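/*
 * Convert an address into the operand format expected by the tlbi
 * instructions below: the address shifted right by the page shift,
 * truncated to 44 bits.
 */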
#define	TLBI_VA_SHIFT	12
#define	TLBI_VA_MASK	((1ul << 44) - 1)
#define	TLBI_VA(addr)	(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
#define	TLBI_VA_L3_INCR	(L3_SIZE >> TLBI_VA_SHIFT)

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

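	/*
	 * With VHE the host runs with HCR_EL2.TGE set, which redirects EL1
	 * TLB maintenance to the EL2&0 regime; clear TGE while performing
	 * the invalidation so it applies to the guest's EL1&0 regime.
	 */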
#ifdef VMM_VHE
	host_hcr = READ_SPECIALREG(hcr_el2);
	WRITE_SPECIALREG(hcr_el2, host_hcr & ~HCR_TGE);
	isb();
#endif

	/*
	 * The CPU can cache the stage 1 + 2 combination so we need to ensure
	 * the stage 2 is invalidated first, then when this has completed we
	 * invalidate the stage 1 TLB. As we don't know which stage 1 virtual
	 * addresses point at the stage 2 IPA we need to invalidate the entire
	 * stage 1 TLB.
	 */

	start = TLBI_VA(sva);
	end = TLBI_VA(eva);
	for (r = start; r < end; r += TLBI_VA_L3_INCR) {
		/* Invalidate the stage 2 TLB entry */
		if (final_only)
			__asm __volatile("tlbi ipas2le1is, %0" : : "r"(r));
		else
			__asm __volatile("tlbi ipas2e1is, %0" : : "r"(r));
	}
	/* Ensure the entry has been invalidated */
	dsb(ish);
	/* Invalidate the stage 1 TLB. */
	__asm __volatile("tlbi vmalle1is");
	dsb(ish);
	isb();

#ifdef VMM_VHE
	WRITE_SPECIALREG(hcr_el2, host_hcr);
	isb();
#endif

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}

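/*
 * Invalidate all stage 1 and stage 2 TLB entries for the guest identified
 * by vttbr.
 */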
VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
{
	uint64_t host_vttbr;

#ifdef VMM_VHE
	dsb(ishst);
#endif

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

	__asm __volatile("tlbi vmalls12e1is");
	dsb(ish);
	isb();

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}