/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Andrew Turner
 *
 * This work was supported by Innovate UK project 105694, "Digital Security
 * by Design (DSbD) Technology Platform Prototype".
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/proc.h>

#include <machine/armreg.h>

#include "arm64.h"
#include "hyp.h"

struct hypctx;

uint64_t VMM_HYP_FUNC(do_call_guest)(struct hypctx *);

static void
vmm_hyp_reg_store(struct hypctx *hypctx, struct hyp *hyp, bool guest)
{
        uint64_t dfr0;

        /* Store the guest-only registers */
        if (guest) {
                /* Store the timer registers */
                hypctx->vtimer_cpu.cntkctl_el1 =
                    READ_SPECIALREG(EL1_REG(CNTKCTL));
                hypctx->vtimer_cpu.virt_timer.cntx_cval_el0 =
                    READ_SPECIALREG(EL0_REG(CNTV_CVAL));
                hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0 =
                    READ_SPECIALREG(EL0_REG(CNTV_CTL));

                /* Store the GICv3 registers */
                hypctx->vgic_v3_regs.ich_eisr_el2 =
                    READ_SPECIALREG(ich_eisr_el2);
                hypctx->vgic_v3_regs.ich_elrsr_el2 =
                    READ_SPECIALREG(ich_elrsr_el2);
                hypctx->vgic_v3_regs.ich_hcr_el2 =
                    READ_SPECIALREG(ich_hcr_el2);
                hypctx->vgic_v3_regs.ich_misr_el2 =
                    READ_SPECIALREG(ich_misr_el2);
                hypctx->vgic_v3_regs.ich_vmcr_el2 =
                    READ_SPECIALREG(ich_vmcr_el2);
                switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define STORE_LR(x)                                             \
        case x:                                                 \
                hypctx->vgic_v3_regs.ich_lr_el2[x] =            \
                    READ_SPECIALREG(ich_lr ## x ##_el2)
                STORE_LR(15);
                STORE_LR(14);
                STORE_LR(13);
                STORE_LR(12);
                STORE_LR(11);
                STORE_LR(10);
                STORE_LR(9);
                STORE_LR(8);
                STORE_LR(7);
                STORE_LR(6);
                STORE_LR(5);
                STORE_LR(4);
                STORE_LR(3);
                STORE_LR(2);
                STORE_LR(1);
        default:
                STORE_LR(0);
#undef STORE_LR
                }

                switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define STORE_APR(x)                                            \
        case x:                                                 \
                hypctx->vgic_v3_regs.ich_ap0r_el2[x] =          \
                    READ_SPECIALREG(ich_ap0r ## x ##_el2);      \
                hypctx->vgic_v3_regs.ich_ap1r_el2[x] =          \
                    READ_SPECIALREG(ich_ap1r ## x ##_el2)
                STORE_APR(3);
                STORE_APR(2);
                STORE_APR(1);
        default:
                STORE_APR(0);
#undef STORE_APR
                }
        }

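        /*
         * Each switch in this function relies on case fall-through:
         * entering at the case for the highest implemented register index
         * stores every register from there down to 0. For example, with
         * four list registers (ich_lr_num - 1 == 3) the switch above
         * enters at case 3 and falls through cases 2, 1, and 0.
         *
         * The breakpoint and watchpoint counts used below are read from
         * the ID_AA64DFR0_EL1 feature register.
         */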
        dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
        switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define STORE_DBG_BRP(x)                                        \
        case x:                                                 \
                hypctx->dbgbcr_el1[x] =                         \
                    READ_SPECIALREG(dbgbcr ## x ## _el1);       \
                hypctx->dbgbvr_el1[x] =                         \
                    READ_SPECIALREG(dbgbvr ## x ## _el1)
        STORE_DBG_BRP(15);
        STORE_DBG_BRP(14);
        STORE_DBG_BRP(13);
        STORE_DBG_BRP(12);
        STORE_DBG_BRP(11);
        STORE_DBG_BRP(10);
        STORE_DBG_BRP(9);
        STORE_DBG_BRP(8);
        STORE_DBG_BRP(7);
        STORE_DBG_BRP(6);
        STORE_DBG_BRP(5);
        STORE_DBG_BRP(4);
        STORE_DBG_BRP(3);
        STORE_DBG_BRP(2);
        STORE_DBG_BRP(1);
        default:
                STORE_DBG_BRP(0);
#undef STORE_DBG_BRP
        }

        switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define STORE_DBG_WRP(x)                                        \
        case x:                                                 \
                hypctx->dbgwcr_el1[x] =                         \
                    READ_SPECIALREG(dbgwcr ## x ## _el1);       \
                hypctx->dbgwvr_el1[x] =                         \
                    READ_SPECIALREG(dbgwvr ## x ## _el1)
        STORE_DBG_WRP(15);
        STORE_DBG_WRP(14);
        STORE_DBG_WRP(13);
        STORE_DBG_WRP(12);
        STORE_DBG_WRP(11);
        STORE_DBG_WRP(10);
        STORE_DBG_WRP(9);
        STORE_DBG_WRP(8);
        STORE_DBG_WRP(7);
        STORE_DBG_WRP(6);
        STORE_DBG_WRP(5);
        STORE_DBG_WRP(4);
        STORE_DBG_WRP(3);
        STORE_DBG_WRP(2);
        STORE_DBG_WRP(1);
        default:
                STORE_DBG_WRP(0);
#undef STORE_DBG_WRP
        }

        /* Store the PMU registers */
        hypctx->pmcr_el0 = READ_SPECIALREG(pmcr_el0);
        hypctx->pmccntr_el0 = READ_SPECIALREG(pmccntr_el0);
        hypctx->pmccfiltr_el0 = READ_SPECIALREG(pmccfiltr_el0);
        hypctx->pmcntenset_el0 = READ_SPECIALREG(pmcntenset_el0);
        hypctx->pmintenset_el1 = READ_SPECIALREG(pmintenset_el1);
        hypctx->pmovsset_el0 = READ_SPECIALREG(pmovsset_el0);
        hypctx->pmuserenr_el0 = READ_SPECIALREG(pmuserenr_el0);
        switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define STORE_PMU(x)                                            \
        case (x + 1):                                           \
                hypctx->pmevcntr_el0[x] =                       \
                    READ_SPECIALREG(pmevcntr ## x ## _el0);     \
                hypctx->pmevtyper_el0[x] =                      \
                    READ_SPECIALREG(pmevtyper ## x ## _el0)
        STORE_PMU(30);
        STORE_PMU(29);
        STORE_PMU(28);
        STORE_PMU(27);
        STORE_PMU(26);
        STORE_PMU(25);
        STORE_PMU(24);
        STORE_PMU(23);
        STORE_PMU(22);
        STORE_PMU(21);
        STORE_PMU(20);
        STORE_PMU(19);
        STORE_PMU(18);
        STORE_PMU(17);
        STORE_PMU(16);
        STORE_PMU(15);
        STORE_PMU(14);
        STORE_PMU(13);
        STORE_PMU(12);
        STORE_PMU(11);
        STORE_PMU(10);
        STORE_PMU(9);
        STORE_PMU(8);
        STORE_PMU(7);
        STORE_PMU(6);
        STORE_PMU(5);
        STORE_PMU(4);
        STORE_PMU(3);
        STORE_PMU(2);
        STORE_PMU(1);
        STORE_PMU(0);
        default:        /* N == 0 when only PMCCNTR_EL0 is available */
                break;
#undef STORE_PMU
        }

        /* Store the special registers to the trapframe */
        hypctx->tf.tf_sp = READ_SPECIALREG(sp_el1);
        hypctx->tf.tf_elr = READ_SPECIALREG(elr_el2);
        hypctx->tf.tf_spsr = READ_SPECIALREG(spsr_el2);
        if (guest) {
                hypctx->tf.tf_esr = READ_SPECIALREG(esr_el2);
        }

        /* Store the guest special registers */
        hypctx->sp_el0 = READ_SPECIALREG(sp_el0);
        hypctx->tpidr_el0 = READ_SPECIALREG(tpidr_el0);
        hypctx->tpidrro_el0 = READ_SPECIALREG(tpidrro_el0);
        hypctx->tpidr_el1 = READ_SPECIALREG(tpidr_el1);

        hypctx->actlr_el1 = READ_SPECIALREG(actlr_el1);
        hypctx->csselr_el1 = READ_SPECIALREG(csselr_el1);
        hypctx->mdccint_el1 = READ_SPECIALREG(mdccint_el1);
        hypctx->mdscr_el1 = READ_SPECIALREG(mdscr_el1);
        hypctx->par_el1 = READ_SPECIALREG(par_el1);

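        /*
         * EL1 state is only context switched here for the guest, or when
         * running without VHE. On a VHE host the EL1_REG() macro is
         * expected to expand to the _EL12 register aliases, as plain _EL1
         * accesses from EL2 are redirected to the EL2 registers while
         * HCR_EL2.E2H is set.
         */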
        if (guest_or_nonvhe(guest)) {
                hypctx->elr_el1 = READ_SPECIALREG(EL1_REG(ELR));
                hypctx->vbar_el1 = READ_SPECIALREG(EL1_REG(VBAR));

                hypctx->afsr0_el1 = READ_SPECIALREG(EL1_REG(AFSR0));
                hypctx->afsr1_el1 = READ_SPECIALREG(EL1_REG(AFSR1));
                hypctx->amair_el1 = READ_SPECIALREG(EL1_REG(AMAIR));
                hypctx->contextidr_el1 = READ_SPECIALREG(EL1_REG(CONTEXTIDR));
                hypctx->cpacr_el1 = READ_SPECIALREG(EL1_REG(CPACR));
                hypctx->esr_el1 = READ_SPECIALREG(EL1_REG(ESR));
                hypctx->far_el1 = READ_SPECIALREG(EL1_REG(FAR));
                hypctx->mair_el1 = READ_SPECIALREG(EL1_REG(MAIR));
                hypctx->sctlr_el1 = READ_SPECIALREG(EL1_REG(SCTLR));
                hypctx->spsr_el1 = READ_SPECIALREG(EL1_REG(SPSR));
                hypctx->tcr_el1 = READ_SPECIALREG(EL1_REG(TCR));
                /* TODO: Support when this is not res0 */
                hypctx->tcr2_el1 = 0;
                hypctx->ttbr0_el1 = READ_SPECIALREG(EL1_REG(TTBR0));
                hypctx->ttbr1_el1 = READ_SPECIALREG(EL1_REG(TTBR1));
        }

        hypctx->cptr_el2 = READ_SPECIALREG(cptr_el2);
        hypctx->hcr_el2 = READ_SPECIALREG(hcr_el2);
        hypctx->vpidr_el2 = READ_SPECIALREG(vpidr_el2);
        hypctx->vmpidr_el2 = READ_SPECIALREG(vmpidr_el2);
}

static void
vmm_hyp_reg_restore(struct hypctx *hypctx, struct hyp *hyp, bool guest)
{
        uint64_t dfr0;

        /* Restore the special registers */
        WRITE_SPECIALREG(hcr_el2, hypctx->hcr_el2);
        isb();

        WRITE_SPECIALREG(sp_el0, hypctx->sp_el0);
        WRITE_SPECIALREG(tpidr_el0, hypctx->tpidr_el0);
        WRITE_SPECIALREG(tpidrro_el0, hypctx->tpidrro_el0);
        WRITE_SPECIALREG(tpidr_el1, hypctx->tpidr_el1);

        WRITE_SPECIALREG(actlr_el1, hypctx->actlr_el1);
        WRITE_SPECIALREG(csselr_el1, hypctx->csselr_el1);
        WRITE_SPECIALREG(mdccint_el1, hypctx->mdccint_el1);
        WRITE_SPECIALREG(mdscr_el1, hypctx->mdscr_el1);
        WRITE_SPECIALREG(par_el1, hypctx->par_el1);

        if (guest_or_nonvhe(guest)) {
                WRITE_SPECIALREG(EL1_REG(ELR), hypctx->elr_el1);
                WRITE_SPECIALREG(EL1_REG(VBAR), hypctx->vbar_el1);

                WRITE_SPECIALREG(EL1_REG(AFSR0), hypctx->afsr0_el1);
                WRITE_SPECIALREG(EL1_REG(AFSR1), hypctx->afsr1_el1);
                WRITE_SPECIALREG(EL1_REG(AMAIR), hypctx->amair_el1);
                WRITE_SPECIALREG(EL1_REG(CONTEXTIDR), hypctx->contextidr_el1);
                WRITE_SPECIALREG(EL1_REG(CPACR), hypctx->cpacr_el1);
                WRITE_SPECIALREG(EL1_REG(ESR), hypctx->esr_el1);
                WRITE_SPECIALREG(EL1_REG(FAR), hypctx->far_el1);
                WRITE_SPECIALREG(EL1_REG(MAIR), hypctx->mair_el1);
                WRITE_SPECIALREG(EL1_REG(SCTLR), hypctx->sctlr_el1);
                WRITE_SPECIALREG(EL1_REG(SPSR), hypctx->spsr_el1);
                WRITE_SPECIALREG(EL1_REG(TCR), hypctx->tcr_el1);
                /* TODO: tcr2_el1 */
                WRITE_SPECIALREG(EL1_REG(TTBR0), hypctx->ttbr0_el1);
                WRITE_SPECIALREG(EL1_REG(TTBR1), hypctx->ttbr1_el1);
        }

        WRITE_SPECIALREG(cptr_el2, hypctx->cptr_el2);
        WRITE_SPECIALREG(vpidr_el2, hypctx->vpidr_el2);
        WRITE_SPECIALREG(vmpidr_el2, hypctx->vmpidr_el2);

        /* Load the special regs from the trapframe */
        WRITE_SPECIALREG(sp_el1, hypctx->tf.tf_sp);
        WRITE_SPECIALREG(elr_el2, hypctx->tf.tf_elr);
        WRITE_SPECIALREG(spsr_el2, hypctx->tf.tf_spsr);

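        /*
         * The PMU enable, interrupt, and overflow state is exposed as
         * set/clear register pairs where writing a 1 sets or clears a bit
         * and writing a 0 is ignored. Each field is therefore restored by
         * clearing all bits through the *clr register before writing the
         * saved value to the *set register.
         */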
        /* Restore the PMU registers */
        WRITE_SPECIALREG(pmcr_el0, hypctx->pmcr_el0);
        WRITE_SPECIALREG(pmccntr_el0, hypctx->pmccntr_el0);
        WRITE_SPECIALREG(pmccfiltr_el0, hypctx->pmccfiltr_el0);
        /* Clear all events/interrupts then enable them */
        WRITE_SPECIALREG(pmcntenclr_el0, 0xfffffffful);
        WRITE_SPECIALREG(pmcntenset_el0, hypctx->pmcntenset_el0);
        WRITE_SPECIALREG(pmintenclr_el1, 0xfffffffful);
        WRITE_SPECIALREG(pmintenset_el1, hypctx->pmintenset_el1);
        WRITE_SPECIALREG(pmovsclr_el0, 0xfffffffful);
        WRITE_SPECIALREG(pmovsset_el0, hypctx->pmovsset_el0);

        switch ((hypctx->pmcr_el0 & PMCR_N_MASK) >> PMCR_N_SHIFT) {
#define LOAD_PMU(x)                                             \
        case (x + 1):                                           \
                WRITE_SPECIALREG(pmevcntr ## x ## _el0,         \
                    hypctx->pmevcntr_el0[x]);                   \
                WRITE_SPECIALREG(pmevtyper ## x ## _el0,        \
                    hypctx->pmevtyper_el0[x])
        LOAD_PMU(30);
        LOAD_PMU(29);
        LOAD_PMU(28);
        LOAD_PMU(27);
        LOAD_PMU(26);
        LOAD_PMU(25);
        LOAD_PMU(24);
        LOAD_PMU(23);
        LOAD_PMU(22);
        LOAD_PMU(21);
        LOAD_PMU(20);
        LOAD_PMU(19);
        LOAD_PMU(18);
        LOAD_PMU(17);
        LOAD_PMU(16);
        LOAD_PMU(15);
        LOAD_PMU(14);
        LOAD_PMU(13);
        LOAD_PMU(12);
        LOAD_PMU(11);
        LOAD_PMU(10);
        LOAD_PMU(9);
        LOAD_PMU(8);
        LOAD_PMU(7);
        LOAD_PMU(6);
        LOAD_PMU(5);
        LOAD_PMU(4);
        LOAD_PMU(3);
        LOAD_PMU(2);
        LOAD_PMU(1);
        LOAD_PMU(0);
        default:        /* N == 0 when only PMCCNTR_EL0 is available */
                break;
#undef LOAD_PMU
        }

        dfr0 = READ_SPECIALREG(id_aa64dfr0_el1);
        switch (ID_AA64DFR0_BRPs_VAL(dfr0) - 1) {
#define LOAD_DBG_BRP(x)                                         \
        case x:                                                 \
                WRITE_SPECIALREG(dbgbcr ## x ## _el1,           \
                    hypctx->dbgbcr_el1[x]);                     \
                WRITE_SPECIALREG(dbgbvr ## x ## _el1,           \
                    hypctx->dbgbvr_el1[x])
        LOAD_DBG_BRP(15);
        LOAD_DBG_BRP(14);
        LOAD_DBG_BRP(13);
        LOAD_DBG_BRP(12);
        LOAD_DBG_BRP(11);
        LOAD_DBG_BRP(10);
        LOAD_DBG_BRP(9);
        LOAD_DBG_BRP(8);
        LOAD_DBG_BRP(7);
        LOAD_DBG_BRP(6);
        LOAD_DBG_BRP(5);
        LOAD_DBG_BRP(4);
        LOAD_DBG_BRP(3);
        LOAD_DBG_BRP(2);
        LOAD_DBG_BRP(1);
        default:
                LOAD_DBG_BRP(0);
#undef LOAD_DBG_BRP
        }

        switch (ID_AA64DFR0_WRPs_VAL(dfr0) - 1) {
#define LOAD_DBG_WRP(x)                                         \
        case x:                                                 \
                WRITE_SPECIALREG(dbgwcr ## x ## _el1,           \
                    hypctx->dbgwcr_el1[x]);                     \
                WRITE_SPECIALREG(dbgwvr ## x ## _el1,           \
                    hypctx->dbgwvr_el1[x])
        LOAD_DBG_WRP(15);
        LOAD_DBG_WRP(14);
        LOAD_DBG_WRP(13);
        LOAD_DBG_WRP(12);
        LOAD_DBG_WRP(11);
        LOAD_DBG_WRP(10);
        LOAD_DBG_WRP(9);
        LOAD_DBG_WRP(8);
        LOAD_DBG_WRP(7);
        LOAD_DBG_WRP(6);
        LOAD_DBG_WRP(5);
        LOAD_DBG_WRP(4);
        LOAD_DBG_WRP(3);
        LOAD_DBG_WRP(2);
        LOAD_DBG_WRP(1);
        default:
                LOAD_DBG_WRP(0);
#undef LOAD_DBG_WRP
        }

        if (guest) {
                /* Load the timer registers */
                WRITE_SPECIALREG(EL1_REG(CNTKCTL),
                    hypctx->vtimer_cpu.cntkctl_el1);
                WRITE_SPECIALREG(EL0_REG(CNTV_CVAL),
                    hypctx->vtimer_cpu.virt_timer.cntx_cval_el0);
                WRITE_SPECIALREG(EL0_REG(CNTV_CTL),
                    hypctx->vtimer_cpu.virt_timer.cntx_ctl_el0);
                WRITE_SPECIALREG(cnthctl_el2, hyp->vtimer.cnthctl_el2);
                WRITE_SPECIALREG(cntvoff_el2, hyp->vtimer.cntvoff_el2);

                /* Load the GICv3 registers */
                WRITE_SPECIALREG(ich_hcr_el2, hypctx->vgic_v3_regs.ich_hcr_el2);
                WRITE_SPECIALREG(ich_vmcr_el2,
                    hypctx->vgic_v3_regs.ich_vmcr_el2);
                switch (hypctx->vgic_v3_regs.ich_lr_num - 1) {
#define LOAD_LR(x)                                              \
        case x:                                                 \
                WRITE_SPECIALREG(ich_lr ## x ##_el2,            \
                    hypctx->vgic_v3_regs.ich_lr_el2[x])
                LOAD_LR(15);
                LOAD_LR(14);
                LOAD_LR(13);
                LOAD_LR(12);
                LOAD_LR(11);
                LOAD_LR(10);
                LOAD_LR(9);
                LOAD_LR(8);
                LOAD_LR(7);
                LOAD_LR(6);
                LOAD_LR(5);
                LOAD_LR(4);
                LOAD_LR(3);
                LOAD_LR(2);
                LOAD_LR(1);
        default:
                LOAD_LR(0);
#undef LOAD_LR
                }

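                /*
                 * Restore the active priority registers. At most four
                 * ICH_AP0R<n>/ICH_AP1R<n> pairs exist; ich_apr_num is
                 * expected to follow the number of implemented priority
                 * bits reported by ICH_VTR_EL2.
                 */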
                switch (hypctx->vgic_v3_regs.ich_apr_num - 1) {
#define LOAD_APR(x)                                             \
        case x:                                                 \
                WRITE_SPECIALREG(ich_ap0r ## x ##_el2,          \
                    hypctx->vgic_v3_regs.ich_ap0r_el2[x]);      \
                WRITE_SPECIALREG(ich_ap1r ## x ##_el2,          \
                    hypctx->vgic_v3_regs.ich_ap1r_el2[x])
                LOAD_APR(3);
                LOAD_APR(2);
                LOAD_APR(1);
        default:
                LOAD_APR(0);
#undef LOAD_APR
                }
        }
}

static uint64_t
vmm_hyp_call_guest(struct hyp *hyp, struct hypctx *hypctx)
{
        struct hypctx host_hypctx;
        uint64_t cntvoff_el2;
        uint64_t ich_hcr_el2, ich_vmcr_el2, cnthctl_el2, cntkctl_el1;
        uint64_t ret;
        uint64_t s1e1r, hpfar_el2;
        bool hpfar_valid;

        vmm_hyp_reg_store(&host_hypctx, NULL, false);

        /* Save the host special registers */
        cnthctl_el2 = READ_SPECIALREG(cnthctl_el2);
        cntkctl_el1 = READ_SPECIALREG(cntkctl_el1);
        cntvoff_el2 = READ_SPECIALREG(cntvoff_el2);

        ich_hcr_el2 = READ_SPECIALREG(ich_hcr_el2);
        ich_vmcr_el2 = READ_SPECIALREG(ich_vmcr_el2);

        vmm_hyp_reg_restore(hypctx, hyp, true);

        /* Load the common hypervisor registers */
        WRITE_SPECIALREG(vttbr_el2, hyp->vttbr_el2);

        host_hypctx.mdcr_el2 = READ_SPECIALREG(mdcr_el2);
        WRITE_SPECIALREG(mdcr_el2, hypctx->mdcr_el2);

        /* Call into the guest */
        ret = VMM_HYP_FUNC(do_call_guest)(hypctx);

        WRITE_SPECIALREG(mdcr_el2, host_hypctx.mdcr_el2);
        isb();

        /* Store the exit info */
        hypctx->exit_info.far_el2 = READ_SPECIALREG(far_el2);
        vmm_hyp_reg_store(hypctx, hyp, true);

        hpfar_valid = true;
        if (ret == EXCP_TYPE_EL1_SYNC) {
                switch (ESR_ELx_EXCEPTION(hypctx->tf.tf_esr)) {
                case EXCP_INSN_ABORT_L:
                case EXCP_DATA_ABORT_L:
                        /*
                         * The hpfar_el2 register is valid for:
                         * - Translation and Access faults.
                         * - Translation, Access, and permission faults on
                         *   the translation table walk on the stage 1 tables.
                         * - A stage 2 Address size fault.
                         *
                         * As we only need it in the first 2 cases we can just
                         * exclude it on permission faults that are not from
                         * the stage 1 table walk.
                         *
                         * TODO: Add a case for Arm erratum 834220.
                         */
                        if ((hypctx->tf.tf_esr & ISS_DATA_S1PTW) != 0)
                                break;
                        switch (hypctx->tf.tf_esr & ISS_DATA_DFSC_MASK) {
                        case ISS_DATA_DFSC_PF_L1:
                        case ISS_DATA_DFSC_PF_L2:
                        case ISS_DATA_DFSC_PF_L3:
                                hpfar_valid = false;
                                break;
                        }
                        break;
                }
        }

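        /*
         * HPFAR_EL2 holds the faulting IPA. When it may be stale the
         * address is recomputed below by translating the captured FAR_EL2
         * value with an "at s1e1r" walk of the guest's stage 1 tables; if
         * the translation fails the guest is re-entered to retry the
         * access.
         */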
        if (hpfar_valid) {
                hypctx->exit_info.hpfar_el2 = READ_SPECIALREG(hpfar_el2);
        } else {
                /*
                 * TODO: There is a risk the AT instruction could cause an
                 * exception here. We should handle it and return a failure.
                 */
                s1e1r =
                    arm64_address_translate_s1e1r(hypctx->exit_info.far_el2);
                if (PAR_SUCCESS(s1e1r)) {
                        hpfar_el2 = (s1e1r & PAR_PA_MASK) >> PAR_PA_SHIFT;
                        hpfar_el2 <<= HPFAR_EL2_FIPA_SHIFT;
                        hypctx->exit_info.hpfar_el2 = hpfar_el2;
                } else {
                        ret = EXCP_TYPE_REENTER;
                }
        }

        vmm_hyp_reg_restore(&host_hypctx, NULL, false);

        /* Restore the host special registers */
        WRITE_SPECIALREG(ich_hcr_el2, ich_hcr_el2);
        WRITE_SPECIALREG(ich_vmcr_el2, ich_vmcr_el2);

        WRITE_SPECIALREG(cnthctl_el2, cnthctl_el2);
        WRITE_SPECIALREG(cntkctl_el1, cntkctl_el1);
        WRITE_SPECIALREG(cntvoff_el2, cntvoff_el2);

        return (ret);
}

VMM_STATIC uint64_t
VMM_HYP_FUNC(enter_guest)(struct hyp *hyp, struct hypctx *hypctx)
{
        uint64_t ret;

        do {
                ret = vmm_hyp_call_guest(hyp, hypctx);
        } while (ret == EXCP_TYPE_REENTER);

        return (ret);
}

VMM_STATIC uint64_t
VMM_HYP_FUNC(read_reg)(uint64_t reg)
{
        switch (reg) {
        case HYP_REG_ICH_VTR:
                return (READ_SPECIALREG(ich_vtr_el2));
        case HYP_REG_CNTHCTL:
                return (READ_SPECIALREG(cnthctl_el2));
        }

        return (0);
}

VMM_STATIC void
VMM_HYP_FUNC(clean_s2_tlbi)(void)
{
        dsb(ishst);
        __asm __volatile("tlbi alle1is");
        dsb(ish);
}

VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_range)(uint64_t vttbr, vm_offset_t sva, vm_offset_t eva,
    bool final_only)
{
        uint64_t end, r, start;
        uint64_t host_vttbr;

#define TLBI_VA_SHIFT           12
#define TLBI_VA_MASK            ((1ul << 44) - 1)
#define TLBI_VA(addr)           (((addr) >> TLBI_VA_SHIFT) & TLBI_VA_MASK)
#define TLBI_VA_L3_INCR         (L3_SIZE >> TLBI_VA_SHIFT)

        /* Switch to the guest vttbr */
        /* TODO: Handle Cortex-A57/A72 erratum 131936 */
        host_vttbr = READ_SPECIALREG(vttbr_el2);
        WRITE_SPECIALREG(vttbr_el2, vttbr);
        isb();

        /*
         * The CPU can cache the stage 1 + 2 combination so we need to ensure
         * the stage 2 is invalidated first, then when this has completed we
         * invalidate the stage 1 TLB. As we don't know which stage 1 virtual
         * addresses point at the stage 2 IPA we need to invalidate the entire
         * stage 1 TLB.
         */

        start = TLBI_VA(sva);
        end = TLBI_VA(eva);
        for (r = start; r < end; r += TLBI_VA_L3_INCR) {
                /* Invalidate the stage 2 TLB entry */
                if (final_only)
                        __asm __volatile("tlbi ipas2le1is, %0" : : "r"(r));
                else
                        __asm __volatile("tlbi ipas2e1is, %0" : : "r"(r));
        }
        /* Ensure the entry has been invalidated */
        dsb(ish);
        /* Invalidate the stage 1 TLB. */
        __asm __volatile("tlbi vmalle1is");
        dsb(ish);
        isb();

        /* Switch back to the host vttbr */
        WRITE_SPECIALREG(vttbr_el2, host_vttbr);
        isb();
}

VMM_STATIC void
VMM_HYP_FUNC(s2_tlbi_all)(uint64_t vttbr)
{
        uint64_t host_vttbr;

        /* Switch to the guest vttbr */
        /* TODO: Handle Cortex-A57/A72 erratum 131936 */
        host_vttbr = READ_SPECIALREG(vttbr_el2);
        WRITE_SPECIALREG(vttbr_el2, vttbr);
        isb();

        __asm __volatile("tlbi vmalls12e1is");
        dsb(ish);
        isb();

        /* Switch back to the host vttbr */
        WRITE_SPECIALREG(vttbr_el2, host_vttbr);
        isb();
}