/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Andrew Turner
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/proc.h>

#include "arm64.h"
#include "hyp.h"

struct hypctx;

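/*
 * Low-level entry point that switches to the guest context and runs it
 * until it exits, returning the exception type of the exit.
 */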
uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);

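/*
 * Save the current register state into the given context. This covers the
 * EL0/EL1 system registers, the debug and PMU registers, and the EL2
 * controls that are swapped on a world switch. When "guest" is true the
 * guest-only state (timer, GICv3 interface, PAR_EL1 and the exit ESR) is
 * also stored.
 */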
static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	if (guest) {
		/* Store the timer registers */
		hypctx->vtimer_cpu.cntkctl_el1 =
		    READ_SPECIALREG(EL1_REG(CNTKCTL));
		hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CVAL));
		hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTV_CTL));
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so doesn't need to handle them.
		 */
		hypctx->vtimer_cpu.phys_timer.cntx_cval_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CVAL));
		hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0 =
		    READ_SPECIALREG(EL0_REG(CNTP_CTL));
	}

	if (guest) {
		/* Store the GICv3 registers */
		hypctx->vgic_v3_regs.ich_eisr_el2 =
		    READ_SPECIALREG(ich_eisr_el2);
		hypctx->vgic_v3_regs.ich_elrsr_el2 =
		    READ_SPECIALREG(ich_elrsr_el2);
		hypctx->vgic_v3_regs.ich_hcr_el2 =
		    READ_SPECIALREG(ich_hcr_el2);
		hypctx->vgic_v3_regs.ich_misr_el2 =
		    READ_SPECIALREG(ich_misr_el2);
		hypctx->vgic_v3_regs.ich_vmcr_el2 =
		    READ_SPECIALREG(ich_vmcr_el2);
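		/*
		 * ich_lr_num is the number of implemented List Registers;
		 * the switch starts at the highest implemented LR and falls
		 * through the lower cases, with LR0 handled by default.
		 */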
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	STORE_LR(x)						\
	case x:							\
		hypctx->vgic_v3_regs.ich_lr_el2[x] =		\
		    READ_SPECIALREG(ich_lr ## x ##_el2)
		STORE_LR(15);
		STORE_LR(14);
		STORE_LR(13);
		STORE_LR(12);
		STORE_LR(11);
		STORE_LR(10);
		STORE_LR(9);
		STORE_LR(8);
		STORE_LR(7);
		STORE_LR(6);
		STORE_LR(5);
		STORE_LR(4);
		STORE_LR(3);
		STORE_LR(2);
		STORE_LR(1);
		default:
		STORE_LR(0);
#undef STORE_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	STORE_APR(x)						\
	case x:							\
		hypctx->vgic_v3_regs.ich_ap0r_el2[x] =		\
		    READ_SPECIALREG(ich_ap0r ## x ##_el2);	\
		hypctx->vgic_v3_regs.ich_ap1r_el2[x] =		\
		    READ_SPECIALREG(ich_ap1r ## x ##_el2)
		STORE_APR(3);
		STORE_APR(2);
		STORE_APR(1);
		default:
		STORE_APR(0);
#undef STORE_APR
		}
	}

	hypctx->dbgclaimset_el1 = READ_SPECIALREG(dbgclaimset_el1);

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_BRP(x)					\
	case x:							\
		hypctx->dbgbcr_el1[x] =				\
		    READ_SPECIALREG(dbgbcr ## x ## _el1);	\
		hypctx->dbgbvr_el1[x] =				\
		    READ_SPECIALREG(dbgbvr ## x ## _el1)
	STORE_DBG_BRP(15);
	STORE_DBG_BRP(14);
	STORE_DBG_BRP(13);
	STORE_DBG_BRP(12);
	STORE_DBG_BRP(11);
	STORE_DBG_BRP(10);
	STORE_DBG_BRP(9);
	STORE_DBG_BRP(8);
	STORE_DBG_BRP(7);
	STORE_DBG_BRP(6);
	STORE_DBG_BRP(5);
	STORE_DBG_BRP(4);
	STORE_DBG_BRP(3);
	STORE_DBG_BRP(2);
	STORE_DBG_BRP(1);
	default:
	STORE_DBG_BRP(0);
#undef STORE_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	STORE_DBG_WRP(x)					\
	case x:							\
		hypctx->dbgwcr_el1[x] =				\
		    READ_SPECIALREG(dbgwcr ## x ## _el1);	\
		hypctx->dbgwvr_el1[x] =				\
		    READ_SPECIALREG(dbgwvr ## x ## _el1)
	STORE_DBG_WRP(15);
	STORE_DBG_WRP(14);
	STORE_DBG_WRP(13);
	STORE_DBG_WRP(12);
	STORE_DBG_WRP(11);
	STORE_DBG_WRP(10);
	STORE_DBG_WRP(9);
	STORE_DBG_WRP(8);
	STORE_DBG_WRP(7);
	STORE_DBG_WRP(6);
	STORE_DBG_WRP(5);
	STORE_DBG_WRP(4);
	STORE_DBG_WRP(3);
	STORE_DBG_WRP(2);
	STORE_DBG_WRP(1);
	default:
	STORE_DBG_WRP(0);
#undef STORE_DBG_WRP
	}

	/* Store the PMU registers */
	hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
	hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
	hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
	hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
	hypctx->pmselr_el0 = READ_SPECIALREG(pmselr_el0);
	hypctx->pmxevcntr_el0 = READ_SPECIALREG(pmxevcntr_el0);
	hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
	hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
	hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);

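	/*
	 * PMCR_EL0.N is the number of implemented event counters. Start at
	 * the highest implemented counter and fall through the lower ones;
	 * when N is zero only PMCCNTR_EL0 exists and nothing is stored here.
	 */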
	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	STORE_PMU(x)						\
	case (x + 1):						\
		hypctx->pmevcntr_el0[x] =			\
		    READ_SPECIALREG(pmevcntr ## x ## _el0);	\
		hypctx->pmevtyper_el0[x] =			\
		    READ_SPECIALREG(pmevtyper ## x ## _el0)
	STORE_PMU(30);
	STORE_PMU(29);
	STORE_PMU(28);
	STORE_PMU(27);
	STORE_PMU(26);
	STORE_PMU(25);
	STORE_PMU(24);
	STORE_PMU(23);
	STORE_PMU(22);
	STORE_PMU(21);
	STORE_PMU(20);
	STORE_PMU(19);
	STORE_PMU(18);
	STORE_PMU(17);
	STORE_PMU(16);
	STORE_PMU(15);
	STORE_PMU(14);
	STORE_PMU(13);
	STORE_PMU(12);
	STORE_PMU(11);
	STORE_PMU(10);
	STORE_PMU(9);
	STORE_PMU(8);
	STORE_PMU(7);
	STORE_PMU(6);
	STORE_PMU(5);
	STORE_PMU(4);
	STORE_PMU(3);
	STORE_PMU(2);
	STORE_PMU(1);
	STORE_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef STORE_PMU
	}

	/* Store the special registers to the trapframe */
	hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
	hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
	hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
	if (guest) {
		hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
		hypctx->par_el1 = READ_SPECIALREG(par_el1);
	}

	/* Store the guest special registers */
	hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
	hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
	hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
	hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);

	hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
	hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
	hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
	hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
		hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));

		hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
		hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
		hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
		hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
		hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
		hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
		hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
		hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
		hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
		hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
		hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
		/* TODO: Support when this is not res0 */
		hypctx->tcr2_el1 = 0;
		hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
		hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
	}

	hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
	hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
	hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
	hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
}

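/*
 * Restore register state from the given context. The mirror image of
 * vmm_hyp_reg_store: HCR_EL2 (and HCRX_EL2 when the feature is present) is
 * written first and synchronised with an isb before the rest of the state
 * is loaded.
 */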
static void
vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest,
    bool ecv_poff)
{
	uint64_t dfr0;

	/* Restore the special registers */
	WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);

	if (guest) {
		if ((hyp->feats & HYP_FEAT_HCX) != 0)
			WRITE_SPECIALREG(HCRX_EL2_REG, hypctx->hcrx_el2);
	}
	isb();

	WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
	WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
	WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
	WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);

	WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
	WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
	WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
	WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);

	if (guest_or_nonvhe(guest)) {
		WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
		WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);

		WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
		WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
		WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
		WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
		WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
		WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
		WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
		WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);
		WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
		WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
		WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
		/* TODO: tcr2_el1 */
		WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
		WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
	}

	if (guest) {
		WRITE_SPECIALREG(par_el1, hypctx->par_el1);
	}

	WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
	WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
	WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);

	/* Load the special regs from the trapframe */
	WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
	WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
	WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);

	/* Restore the PMU registers */
	WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
	WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
	WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
	WRITE_SPECIALREG(pmuserenr_el0, hypctx->pmuserenr_el0);
	WRITE_SPECIALREG(pmselr_el0, hypctx->pmselr_el0);
	WRITE_SPECIALREG(pmxevcntr_el0, hypctx->pmxevcntr_el0);
	/* Clear all events/interrupts then enable them */
	WRITE_SPECIALREG(pmcntenclr_el0, ~0ul);
	WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
	WRITE_SPECIALREG(pmintenclr_el1, ~0ul);
	WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
	WRITE_SPECIALREG(pmovsclr_el0, ~0ul);
	WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);

	switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define	LOAD_PMU(x)						\
	case (x + 1):						\
		WRITE_SPECIALREG(pmevcntr ## x ## _el0,		\
		    hypctx->pmevcntr_el0[x]);			\
		WRITE_SPECIALREG(pmevtyper ## x ## _el0,	\
		    hypctx->pmevtyper_el0[x])
	LOAD_PMU(30);
	LOAD_PMU(29);
	LOAD_PMU(28);
	LOAD_PMU(27);
	LOAD_PMU(26);
	LOAD_PMU(25);
	LOAD_PMU(24);
	LOAD_PMU(23);
	LOAD_PMU(22);
	LOAD_PMU(21);
	LOAD_PMU(20);
	LOAD_PMU(19);
	LOAD_PMU(18);
	LOAD_PMU(17);
	LOAD_PMU(16);
	LOAD_PMU(15);
	LOAD_PMU(14);
	LOAD_PMU(13);
	LOAD_PMU(12);
	LOAD_PMU(11);
	LOAD_PMU(10);
	LOAD_PMU(9);
	LOAD_PMU(8);
	LOAD_PMU(7);
	LOAD_PMU(6);
	LOAD_PMU(5);
	LOAD_PMU(4);
	LOAD_PMU(3);
	LOAD_PMU(2);
	LOAD_PMU(1);
	LOAD_PMU(0);
	default:	/* N == 0 when only PMCCNTR_EL0 is available */
		break;
#undef LOAD_PMU
	}

	WRITE_SPECIALREG(dbgclaimclr_el1, ~0ul);
	WRITE_SPECIALREG(dbgclaimset_el1, hypctx->dbgclaimset_el1);

	dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
	switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_BRP(x)						\
	case x:							\
		WRITE_SPECIALREG(dbgbcr ## x ## _el1,		\
		    hypctx->dbgbcr_el1[x]);			\
		WRITE_SPECIALREG(dbgbvr ## x ## _el1,		\
		    hypctx->dbgbvr_el1[x])
	LOAD_DBG_BRP(15);
	LOAD_DBG_BRP(14);
	LOAD_DBG_BRP(13);
	LOAD_DBG_BRP(12);
	LOAD_DBG_BRP(11);
	LOAD_DBG_BRP(10);
	LOAD_DBG_BRP(9);
	LOAD_DBG_BRP(8);
	LOAD_DBG_BRP(7);
	LOAD_DBG_BRP(6);
	LOAD_DBG_BRP(5);
	LOAD_DBG_BRP(4);
	LOAD_DBG_BRP(3);
	LOAD_DBG_BRP(2);
	LOAD_DBG_BRP(1);
	default:
	LOAD_DBG_BRP(0);
#undef LOAD_DBG_BRP
	}

	switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define	LOAD_DBG_WRP(x)						\
	case x:							\
		WRITE_SPECIALREG(dbgwcr ## x ## _el1,		\
		    hypctx->dbgwcr_el1[x]);			\
		WRITE_SPECIALREG(dbgwvr ## x ## _el1,		\
		    hypctx->dbgwvr_el1[x])
	LOAD_DBG_WRP(15);
	LOAD_DBG_WRP(14);
	LOAD_DBG_WRP(13);
	LOAD_DBG_WRP(12);
	LOAD_DBG_WRP(11);
	LOAD_DBG_WRP(10);
	LOAD_DBG_WRP(9);
	LOAD_DBG_WRP(8);
	LOAD_DBG_WRP(7);
	LOAD_DBG_WRP(6);
	LOAD_DBG_WRP(5);
	LOAD_DBG_WRP(4);
	LOAD_DBG_WRP(3);
	LOAD_DBG_WRP(2);
	LOAD_DBG_WRP(1);
	default:
	LOAD_DBG_WRP(0);
#undef LOAD_DBG_WRP
	}

	if (guest) {
		/* Load the timer registers */
		WRITE_SPECIALREG(EL1_REG(CNTKCTL),
		    hypctx->vtimer_cpu.cntkctl_el1);
		WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
		    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
		    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
		WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
		WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);

		if (ecv_poff) {
			/*
			 * Load the same offset as the virtual timer
			 * to keep in sync.
			 */
			WRITE_SPECIALREG(CNTPOFF_EL2_REG,
			    hyp->vtimer.cntvoff_el2);
			isb();
		}
	}
	if (guest_or_nonvhe(guest) && ecv_poff) {
		/*
		 * If we have ECV then the guest could modify these registers.
		 * If VHE is enabled then the kernel will see a different view
		 * of the registers, so doesn't need to handle them.
		 */
		WRITE_SPECIALREG(EL0_REG(CNTP_CVAL),
		    hypctx->vtimer_cpu.phys_timer.cntx_cval_el0);
		WRITE_SPECIALREG(EL0_REG(CNTP_CTL),
		    hypctx->vtimer_cpu.phys_timer.cntx_ctl_el0);
	}

	if (guest) {
		/* Load the GICv3 registers */
		WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
		WRITE_SPECIALREG(ich_vmcr_el2,
		    hypctx->vgic_v3_regs.ich_vmcr_el2);
		switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define	LOAD_LR(x)						\
	case x:							\
		WRITE_SPECIALREG(ich_lr ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_lr_el2[x])
		LOAD_LR(15);
		LOAD_LR(14);
		LOAD_LR(13);
		LOAD_LR(12);
		LOAD_LR(11);
		LOAD_LR(10);
		LOAD_LR(9);
		LOAD_LR(8);
		LOAD_LR(7);
		LOAD_LR(6);
		LOAD_LR(5);
		LOAD_LR(4);
		LOAD_LR(3);
		LOAD_LR(2);
		LOAD_LR(1);
		default:
		LOAD_LR(0);
#undef LOAD_LR
		}

		switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define	LOAD_APR(x)						\
	case x:							\
		WRITE_SPECIALREG(ich_ap0r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap0r_el2[x]);	\
		WRITE_SPECIALREG(ich_ap1r ## x ##_el2,		\
		    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
		LOAD_APR(3);
		LOAD_APR(2);
		LOAD_APR(1);
		default:
		LOAD_APR(0);
#undef LOAD_APR
		}
	}
}

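/*
 * Perform a single world switch: save the host register state, load the
 * guest state and stage 2 translation table, run the guest until it exits,
 * then record the exit information and restore the host. Returns the
 * exception type from the low-level entry code, or EXCP_TYPE_REENTER when
 * the exit was handled here and the guest should be entered again.
 */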
static uint64_t
vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
{
	struct hypctx host_hypctx;
	uint64_t cntvoff_el2;
	uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
#ifndef VMM_VHE
	uint64_t hcrx_el2;
#endif
	uint64_t ret;
	uint64_t s1e1r, hpfar_el2;
	bool ecv_poff, hpfar_valid;

	ecv_poff = (hyp->vtimer.cnthctl_el2 & CNTHCTL_ECV_EN) != 0;
	vmm_hyp_reg_store(&host_hypctx, NULL, false, ecv_poff);
#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		hcrx_el2 = READ_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2));
#endif

	/* Save the host special registers */
	cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
	cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
	cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);

	ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
	ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);

	vmm_hyp_reg_restore(hypctx, hyp, true, ecv_poff);

	/* Load the common hypervisor registers */
	WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);

	host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
	WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);

	/* Call into the guest */
	ret = VMM_HYP_FUNC(do_call_guest)(hypctx);

	WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
	isb();

	/* Store the exit info */
	hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
	vmm_hyp_reg_store(hypctx, hyp, true, ecv_poff);

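	/*
	 * Work out whether HPFAR_EL2 holds a valid faulting IPA for this
	 * exit. If it doesn't, translate FAR_EL2 with an AT instruction to
	 * recover the IPA, or re-enter the guest if the translation fails.
	 */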
	hpfar_valid = true;
	if (ret == EXCP_TYPE_EL1_SYNC) {
		switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
		case EXCP_INSN_ABORT_L:
		case EXCP_DATA_ABORT_L:
			/*
			 * The hpfar_el2 register is valid for:
			 *  - Translation and Access faults.
			 *  - Translation, Access, and permission faults on
			 *    the translation table walk on the stage 1 tables.
			 *  - A stage 2 Address size fault.
			 *
			 * As we only need it in the first 2 cases we can just
			 * exclude it on permission faults that are not from
			 * the stage 1 table walk.
			 *
			 * TODO: Add a case for Arm erratum 834220.
			 */
			if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
				break;
			switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
			case ISS_DATA_DFSC_PF_L1:
			case ISS_DATA_DFSC_PF_L2:
			case ISS_DATA_DFSC_PF_L3:
				hpfar_valid = false;
				break;
			}
			break;
		}
	}
	if (hpfar_valid) {
		hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
	} else {
		/*
		 * TODO: There is a risk the at instruction could cause an
		 * exception here. We should handle it & return a failure.
		 */
		s1e1r =
		    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
		if (PAR_SUCCESS(s1e1r)) {
			hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
			hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
			hypctx->exit_info.hpfar_el2 = hpfar_el2;
		} else {
			ret = EXCP_TYPE_REENTER;
		}
	}

	vmm_hyp_reg_restore(&host_hypctx, NULL, false, ecv_poff);

#ifndef VMM_VHE
	if ((hyp->feats & HYP_FEAT_HCX) != 0)
		WRITE_SPECIALREG(MRS_REG_ALT_NAME(HCRX_EL2), hcrx_el2);
#endif

	/* Restore the host special registers */
	WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
	WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);

	WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
	WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
	WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);

	return (ret);
}

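/*
 * Run the guest, re-entering it directly while the exit was handled
 * entirely within vmm_hyp_call_guest (EXCP_TYPE_REENTER).
 */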
VMM_STATIC uint64_t
VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
{
	uint64_t ret;

	do {
		ret = vmm_hyp_call_guest(hyp, hypctx);
	} while (ret == EXCP_TYPE_REENTER);

	return (ret);
}

VMM_STATIC uint64_t
VMM_HYP_FUNC(read_reg)(uint64_t reg)
{
	switch (reg) {
	case HYP_REG_ICH_VTR:
		return (READ_SPECIALREG(ich_vtr_el2));
	}

	return (0);
}

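/*
 * Invalidate all stage 1 and stage 2 TLB entries for the EL1&0 translation
 * regime, for all VMIDs, across the Inner Shareable domain.
 */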
VMM_STATIC void
VMM_HYP_FUNC(clean_s2_tlbi)(void)
{
	dsb(ishst);
	__asm __volatile("tlbi alle1is");
	dsb(ish);
}

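/*
 * Invalidate the stage 2 TLB entries covering the IPA range [sva, eva) for
 * the VM identified by vttbr, then invalidate the whole stage 1 TLB as it
 * may cache combined stage 1 + 2 translations. With final_only set only the
 * last level ("leaf") stage 2 entries are invalidated.
 */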
VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
    bool final_only)
{
	uint64_t end, r, start;
	uint64_t host_vttbr;
#ifdef VMM_VHE
	uint64_t host_hcr;
#endif

#ifdef VMM_VHE
	dsb(ishst);
#endif

#define	TLBI_VA_SHIFT	12
#define	TLBI_VA_MASK	((1ul << 44) - 1)
#define	TLBI_VA(addr)	(((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
#define	TLBI_VA_L3_INCR	(L3_SIZE >> TLBI_VA_SHIFT)

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

#ifdef VMM_VHE
	host_hcr = READ_SPECIALREG(hcr_el2);
	WRITE_SPECIALREG(hcr_el2, host_hcr & ~HCR_TGE);
	isb();
#endif

	/*
	 * The CPU can cache the stage 1 + 2 combination so we need to ensure
	 * the stage 2 is invalidated first, then when this has completed we
	 * invalidate the stage 1 TLB. As we don't know which stage 1 virtual
	 * addresses point at the stage 2 IPA we need to invalidate the entire
	 * stage 1 TLB.
	 */

	start = TLBI_VA(sva);
	end = TLBI_VA(eva);
	for (r = start; r < end; r += TLBI_VA_L3_INCR) {
		/* Invalidate the stage 2 TLB entry */
		if (final_only)
			__asm __volatile("tlbi ipas2le1is, %0" : : "r"(r));
		else
			__asm __volatile("tlbi ipas2e1is, %0" : : "r"(r));
	}
	/* Ensure the entry has been invalidated */
	dsb(ish);
	/* Invalidate the stage 1 TLB. */
	__asm __volatile("tlbi vmalle1is");
	dsb(ish);
	isb();

#ifdef VMM_VHE
	WRITE_SPECIALREG(hcr_el2, host_hcr);
	isb();
#endif

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}

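/*
 * Invalidate all stage 1 and stage 2 TLB entries for the VM identified by
 * vttbr.
 */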
VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
{
	uint64_t host_vttbr;

#ifdef VMM_VHE
	dsb(ishst);
#endif

	/* Switch to the guest vttbr */
	/* TODO: Handle Cortex-A57/A72 erratum 131936 */
	host_vttbr = READ_SPECIALREG(vttbr_el2);
	WRITE_SPECIALREG(vttbr_el2, vttbr);
	isb();

	__asm __volatile("tlbi vmalls12e1is");
	dsb(ish);
	isb();

	/* Switch back to the host vttbr */
	WRITE_SPECIALREG(vttbr_el2, host_vttbr);
	isb();
}