/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}
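
/*
 * For reference, the CSSELR value used here (and validated by
 * is_valid_cache() further down): bit 0 selects instruction (1) vs
 * data/unified (0), and bits [3:1] hold the cache level minus one.
 * So csselr == 0 reads the CCSIDR of the L1 data/unified cache and
 * csselr == 1 that of the L1 instruction cache.
 */
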
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	kvm_set_way_flush(vcpu);
	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	if (!p->is_aarch32) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
	} else {
		if (!p->is_32bit)
			vcpu_cp15_64_high(vcpu, r->reg) = upper_32_bits(p->regval);
		vcpu_cp15_64_low(vcpu, r->reg) = lower_32_bits(p->regval);
	}

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	vgic_v3_dispatch_sgi(vcpu, p->regval);

	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u32 val;
		asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
		p->regval = val;
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
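
/*
 * Concrete illustration of the scheme above: the first guest write to,
 * say, DBGBVR0_EL1 traps into trap_bvr() below, which stores the value
 * in the vcpu debug state and sets KVM_ARM64_DEBUG_DIRTY. On the next
 * entry the world-switch code sees the dirty bit, disables the traps
 * and loads the guest's debug registers, so further accesses run at
 * full speed; on exit the guest state is saved back, the host state
 * restored and the dirty bit cleared.
 */
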
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_sys_reg(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}
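
/*
 * For example, with DBGBVR0_EL1 currently holding 0xffffffc000123456,
 * an AArch32 write of 0x89abcdef goes through reg_to_dbg() and leaves
 * 0xffffffc089abcdef (upper half preserved), while an AArch32 read via
 * dbg_to_reg() returns only 0x00123456.
 */
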
static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;

	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
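
/*
 * Worked example of the mapping above, assuming the usual 8-bit
 * affinity fields (MPIDR_LEVEL_SHIFT(n) == 8 * n): vcpu_id 21 (0x15)
 * gives Aff0 = 0x5 and Aff1 = 0x1, so the guest reads
 * MPIDR_EL1 = (1UL << 31) | 0x105 = 0x80000105. vcpu_ids 0-15 map
 * straight onto Aff0, which keeps each level-0 group addressable by a
 * single GICv3 SGI target list.
 */
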
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to
	 * UNKNOWN, except for PMCR.E which resets to zero.
	 */
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	vcpu_sys_reg(vcpu, PMCR_EL0) = val;
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		vcpu_sys_reg(vcpu, PMCR_EL0) = val;
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write)
		vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (!(p->Op2 & 1))
		asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
	else
		asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));

	p->regval = pmceid;

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	/* DBGBVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),	\
	  trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },		\
	/* DBGBCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),	\
	  trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },		\
	/* DBGWVRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),	\
	  trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr },		\
	/* DBGWCRn_EL1 */						\
	{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),	\
	  trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 *
 * Debug handling: We do trap most, if not all, debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture, nor
 * the OSLock protocol. This should be revisited if we ever encounter a
 * more demanding guest...
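 *
 * (The Op0/Op1/CRn/CRm/Op2 ordering above is enforced at init time:
 * check_sysreg_table(), called from kvm_sys_reg_table_init(), BUG()s
 * on any out-of-order entry, and walk_sys_regs() relies on sorted
 * tables to merge the generic and target-specific views.)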
549 */ 550 static const struct sys_reg_desc sys_reg_descs[] = { 551 /* DC ISW */ 552 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010), 553 access_dcsw }, 554 /* DC CSW */ 555 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010), 556 access_dcsw }, 557 /* DC CISW */ 558 { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010), 559 access_dcsw }, 560 561 DBG_BCR_BVR_WCR_WVR_EL1(0), 562 DBG_BCR_BVR_WCR_WVR_EL1(1), 563 /* MDCCINT_EL1 */ 564 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), 565 trap_debug_regs, reset_val, MDCCINT_EL1, 0 }, 566 /* MDSCR_EL1 */ 567 { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), 568 trap_debug_regs, reset_val, MDSCR_EL1, 0 }, 569 DBG_BCR_BVR_WCR_WVR_EL1(2), 570 DBG_BCR_BVR_WCR_WVR_EL1(3), 571 DBG_BCR_BVR_WCR_WVR_EL1(4), 572 DBG_BCR_BVR_WCR_WVR_EL1(5), 573 DBG_BCR_BVR_WCR_WVR_EL1(6), 574 DBG_BCR_BVR_WCR_WVR_EL1(7), 575 DBG_BCR_BVR_WCR_WVR_EL1(8), 576 DBG_BCR_BVR_WCR_WVR_EL1(9), 577 DBG_BCR_BVR_WCR_WVR_EL1(10), 578 DBG_BCR_BVR_WCR_WVR_EL1(11), 579 DBG_BCR_BVR_WCR_WVR_EL1(12), 580 DBG_BCR_BVR_WCR_WVR_EL1(13), 581 DBG_BCR_BVR_WCR_WVR_EL1(14), 582 DBG_BCR_BVR_WCR_WVR_EL1(15), 583 584 /* MDRAR_EL1 */ 585 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), 586 trap_raz_wi }, 587 /* OSLAR_EL1 */ 588 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100), 589 trap_raz_wi }, 590 /* OSLSR_EL1 */ 591 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100), 592 trap_oslsr_el1 }, 593 /* OSDLR_EL1 */ 594 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100), 595 trap_raz_wi }, 596 /* DBGPRCR_EL1 */ 597 { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100), 598 trap_raz_wi }, 599 /* DBGCLAIMSET_EL1 */ 600 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110), 601 trap_raz_wi }, 602 /* DBGCLAIMCLR_EL1 */ 603 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110), 604 trap_raz_wi }, 605 /* DBGAUTHSTATUS_EL1 */ 606 { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110), 607 trap_dbgauthstatus_el1 }, 608 609 /* MDCCSR_EL1 */ 610 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000), 611 trap_raz_wi }, 612 /* DBGDTR_EL0 */ 613 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000), 614 trap_raz_wi }, 615 /* DBGDTR[TR]X_EL0 */ 616 { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000), 617 trap_raz_wi }, 618 619 /* DBGVCR32_EL2 */ 620 { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000), 621 NULL, reset_val, DBGVCR32_EL2, 0 }, 622 623 /* MPIDR_EL1 */ 624 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101), 625 NULL, reset_mpidr, MPIDR_EL1 }, 626 /* SCTLR_EL1 */ 627 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), 628 access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, 629 /* CPACR_EL1 */ 630 { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), 631 NULL, reset_val, CPACR_EL1, 0 }, 632 /* TTBR0_EL1 */ 633 { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000), 634 access_vm_reg, reset_unknown, TTBR0_EL1 }, 635 /* TTBR1_EL1 */ 636 { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001), 637 access_vm_reg, reset_unknown, TTBR1_EL1 }, 638 /* TCR_EL1 */ 639 { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010), 640 access_vm_reg, reset_val, TCR_EL1, 0 }, 641 642 /* AFSR0_EL1 */ 643 { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000), 644 access_vm_reg, reset_unknown, AFSR0_EL1 }, 645 /* AFSR1_EL1 */ 646 { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001), 647 
access_vm_reg, reset_unknown, AFSR1_EL1 }, 648 /* ESR_EL1 */ 649 { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000), 650 access_vm_reg, reset_unknown, ESR_EL1 }, 651 /* FAR_EL1 */ 652 { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), 653 access_vm_reg, reset_unknown, FAR_EL1 }, 654 /* PAR_EL1 */ 655 { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), 656 NULL, reset_unknown, PAR_EL1 }, 657 658 /* PMINTENSET_EL1 */ 659 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), 660 trap_raz_wi }, 661 /* PMINTENCLR_EL1 */ 662 { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010), 663 trap_raz_wi }, 664 665 /* MAIR_EL1 */ 666 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000), 667 access_vm_reg, reset_unknown, MAIR_EL1 }, 668 /* AMAIR_EL1 */ 669 { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000), 670 access_vm_reg, reset_amair_el1, AMAIR_EL1 }, 671 672 /* VBAR_EL1 */ 673 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), 674 NULL, reset_val, VBAR_EL1, 0 }, 675 676 /* ICC_SGI1R_EL1 */ 677 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1011), Op2(0b101), 678 access_gic_sgi }, 679 /* ICC_SRE_EL1 */ 680 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101), 681 trap_raz_wi }, 682 683 /* CONTEXTIDR_EL1 */ 684 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), 685 access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, 686 /* TPIDR_EL1 */ 687 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100), 688 NULL, reset_unknown, TPIDR_EL1 }, 689 690 /* CNTKCTL_EL1 */ 691 { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000), 692 NULL, reset_val, CNTKCTL_EL1, 0}, 693 694 /* CSSELR_EL1 */ 695 { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000), 696 NULL, reset_unknown, CSSELR_EL1 }, 697 698 /* PMCR_EL0 */ 699 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000), 700 access_pmcr, reset_pmcr, }, 701 /* PMCNTENSET_EL0 */ 702 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001), 703 trap_raz_wi }, 704 /* PMCNTENCLR_EL0 */ 705 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010), 706 trap_raz_wi }, 707 /* PMOVSCLR_EL0 */ 708 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011), 709 trap_raz_wi }, 710 /* PMSWINC_EL0 */ 711 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100), 712 trap_raz_wi }, 713 /* PMSELR_EL0 */ 714 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101), 715 access_pmselr, reset_unknown, PMSELR_EL0 }, 716 /* PMCEID0_EL0 */ 717 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110), 718 access_pmceid }, 719 /* PMCEID1_EL0 */ 720 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111), 721 access_pmceid }, 722 /* PMCCNTR_EL0 */ 723 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000), 724 trap_raz_wi }, 725 /* PMXEVTYPER_EL0 */ 726 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001), 727 trap_raz_wi }, 728 /* PMXEVCNTR_EL0 */ 729 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010), 730 trap_raz_wi }, 731 /* PMUSERENR_EL0 */ 732 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000), 733 trap_raz_wi }, 734 /* PMOVSSET_EL0 */ 735 { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011), 736 trap_raz_wi }, 737 738 /* TPIDR_EL0 */ 739 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010), 740 NULL, reset_unknown, TPIDR_EL0 }, 741 /* TPIDRRO_EL0 */ 742 { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011), 743 NULL, reset_unknown, TPIDRRO_EL0 }, 744 745 /* DACR32_EL2 */ 
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_system_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_system_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */

static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug registers, on the principle that they don't really make sense
 * to a guest. Revisit this one day, should this principle change.
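 *
 * (The DBG_BCR_BVR_WCR_WVR() and DBGBXVR() entries below reuse
 * trap_bvr/trap_bcr/trap_wvr/trap_wcr/trap_xvr, so the AArch32 view of
 * a breakpoint or watchpoint always reads back the same state as the
 * corresponding AArch64 DBG*_EL1 register above.)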
840 */ 841 static const struct sys_reg_desc cp14_regs[] = { 842 /* DBGIDR */ 843 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr }, 844 /* DBGDTRRXext */ 845 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi }, 846 847 DBG_BCR_BVR_WCR_WVR(0), 848 /* DBGDSCRint */ 849 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi }, 850 DBG_BCR_BVR_WCR_WVR(1), 851 /* DBGDCCINT */ 852 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 }, 853 /* DBGDSCRext */ 854 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 }, 855 DBG_BCR_BVR_WCR_WVR(2), 856 /* DBGDTR[RT]Xint */ 857 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi }, 858 /* DBGDTR[RT]Xext */ 859 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi }, 860 DBG_BCR_BVR_WCR_WVR(3), 861 DBG_BCR_BVR_WCR_WVR(4), 862 DBG_BCR_BVR_WCR_WVR(5), 863 /* DBGWFAR */ 864 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi }, 865 /* DBGOSECCR */ 866 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi }, 867 DBG_BCR_BVR_WCR_WVR(6), 868 /* DBGVCR */ 869 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 }, 870 DBG_BCR_BVR_WCR_WVR(7), 871 DBG_BCR_BVR_WCR_WVR(8), 872 DBG_BCR_BVR_WCR_WVR(9), 873 DBG_BCR_BVR_WCR_WVR(10), 874 DBG_BCR_BVR_WCR_WVR(11), 875 DBG_BCR_BVR_WCR_WVR(12), 876 DBG_BCR_BVR_WCR_WVR(13), 877 DBG_BCR_BVR_WCR_WVR(14), 878 DBG_BCR_BVR_WCR_WVR(15), 879 880 /* DBGDRAR (32bit) */ 881 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi }, 882 883 DBGBXVR(0), 884 /* DBGOSLAR */ 885 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi }, 886 DBGBXVR(1), 887 /* DBGOSLSR */ 888 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 }, 889 DBGBXVR(2), 890 DBGBXVR(3), 891 /* DBGOSDLR */ 892 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi }, 893 DBGBXVR(4), 894 /* DBGPRCR */ 895 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi }, 896 DBGBXVR(5), 897 DBGBXVR(6), 898 DBGBXVR(7), 899 DBGBXVR(8), 900 DBGBXVR(9), 901 DBGBXVR(10), 902 DBGBXVR(11), 903 DBGBXVR(12), 904 DBGBXVR(13), 905 DBGBXVR(14), 906 DBGBXVR(15), 907 908 /* DBGDSAR (32bit) */ 909 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi }, 910 911 /* DBGDEVID2 */ 912 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi }, 913 /* DBGDEVID1 */ 914 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi }, 915 /* DBGDEVID */ 916 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi }, 917 /* DBGCLAIMSET */ 918 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi }, 919 /* DBGCLAIMCLR */ 920 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi }, 921 /* DBGAUTHSTATUS */ 922 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 }, 923 }; 924 925 /* Trapped cp14 64bit registers */ 926 static const struct sys_reg_desc cp14_64_regs[] = { 927 /* DBGDRAR (64bit) */ 928 { Op1( 0), CRm( 1), .access = trap_raz_wi }, 929 930 /* DBGDSAR (64bit) */ 931 { Op1( 0), CRm( 2), .access = trap_raz_wi }, 932 }; 933 934 /* 935 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding, 936 * depending on the way they are accessed (as a 32bit or a 64bit 937 * register). 
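 * For example, a 32-bit MRC/MCR to TTBR0 matches the CRn(2)/CRm(0)/Op2(0)
 * entry in cp15_regs, while a 64-bit MRRC/MCRR matches the CRm(2) entry
 * in cp15_64_regs; both end up in access_vm_reg() against c2_TTBR0.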
938 */ 939 static const struct sys_reg_desc cp15_regs[] = { 940 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, 941 942 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, 943 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 944 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, 945 { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, 946 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR }, 947 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR }, 948 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR }, 949 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR }, 950 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR }, 951 { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR }, 952 { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR }, 953 954 /* 955 * DC{C,I,CI}SW operations: 956 */ 957 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw }, 958 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw }, 959 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw }, 960 961 /* PMU */ 962 { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr }, 963 { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi }, 964 { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi }, 965 { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi }, 966 { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr }, 967 { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid }, 968 { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid }, 969 { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi }, 970 { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi }, 971 { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi }, 972 { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi }, 973 { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi }, 974 { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi }, 975 976 { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR }, 977 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, 978 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, 979 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, 980 981 /* ICC_SRE */ 982 { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi }, 983 984 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 985 }; 986 987 static const struct sys_reg_desc cp15_64_regs[] = { 988 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, 989 { Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, 990 { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 }, 991 }; 992 993 /* Target specific emulation tables */ 994 static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS]; 995 996 void kvm_register_target_sys_reg_table(unsigned int target, 997 struct kvm_sys_reg_target_table *table) 998 { 999 target_tables[target] = table; 1000 } 1001 1002 /* Get specific register table for this target. 
*/ 1003 static const struct sys_reg_desc *get_target_table(unsigned target, 1004 bool mode_is_64, 1005 size_t *num) 1006 { 1007 struct kvm_sys_reg_target_table *table; 1008 1009 table = target_tables[target]; 1010 if (mode_is_64) { 1011 *num = table->table64.num; 1012 return table->table64.table; 1013 } else { 1014 *num = table->table32.num; 1015 return table->table32.table; 1016 } 1017 } 1018 1019 static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params, 1020 const struct sys_reg_desc table[], 1021 unsigned int num) 1022 { 1023 unsigned int i; 1024 1025 for (i = 0; i < num; i++) { 1026 const struct sys_reg_desc *r = &table[i]; 1027 1028 if (params->Op0 != r->Op0) 1029 continue; 1030 if (params->Op1 != r->Op1) 1031 continue; 1032 if (params->CRn != r->CRn) 1033 continue; 1034 if (params->CRm != r->CRm) 1035 continue; 1036 if (params->Op2 != r->Op2) 1037 continue; 1038 1039 return r; 1040 } 1041 return NULL; 1042 } 1043 1044 int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) 1045 { 1046 kvm_inject_undefined(vcpu); 1047 return 1; 1048 } 1049 1050 /* 1051 * emulate_cp -- tries to match a sys_reg access in a handling table, and 1052 * call the corresponding trap handler. 1053 * 1054 * @params: pointer to the descriptor of the access 1055 * @table: array of trap descriptors 1056 * @num: size of the trap descriptor array 1057 * 1058 * Return 0 if the access has been handled, and -1 if not. 1059 */ 1060 static int emulate_cp(struct kvm_vcpu *vcpu, 1061 struct sys_reg_params *params, 1062 const struct sys_reg_desc *table, 1063 size_t num) 1064 { 1065 const struct sys_reg_desc *r; 1066 1067 if (!table) 1068 return -1; /* Not handled */ 1069 1070 r = find_reg(params, table, num); 1071 1072 if (r) { 1073 /* 1074 * Not having an accessor means that we have 1075 * configured a trap that we don't know how to 1076 * handle. This certainly qualifies as a gross bug 1077 * that should be fixed right away. 
1078 */ 1079 BUG_ON(!r->access); 1080 1081 if (likely(r->access(vcpu, params, r))) { 1082 /* Skip instruction, since it was emulated */ 1083 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 1084 /* Handled */ 1085 return 0; 1086 } 1087 } 1088 1089 /* Not handled */ 1090 return -1; 1091 } 1092 1093 static void unhandled_cp_access(struct kvm_vcpu *vcpu, 1094 struct sys_reg_params *params) 1095 { 1096 u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); 1097 int cp; 1098 1099 switch(hsr_ec) { 1100 case ESR_ELx_EC_CP15_32: 1101 case ESR_ELx_EC_CP15_64: 1102 cp = 15; 1103 break; 1104 case ESR_ELx_EC_CP14_MR: 1105 case ESR_ELx_EC_CP14_64: 1106 cp = 14; 1107 break; 1108 default: 1109 WARN_ON((cp = -1)); 1110 } 1111 1112 kvm_err("Unsupported guest CP%d access at: %08lx\n", 1113 cp, *vcpu_pc(vcpu)); 1114 print_sys_reg_instr(params); 1115 kvm_inject_undefined(vcpu); 1116 } 1117 1118 /** 1119 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access 1120 * @vcpu: The VCPU pointer 1121 * @run: The kvm_run struct 1122 */ 1123 static int kvm_handle_cp_64(struct kvm_vcpu *vcpu, 1124 const struct sys_reg_desc *global, 1125 size_t nr_global, 1126 const struct sys_reg_desc *target_specific, 1127 size_t nr_specific) 1128 { 1129 struct sys_reg_params params; 1130 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1131 int Rt = (hsr >> 5) & 0xf; 1132 int Rt2 = (hsr >> 10) & 0xf; 1133 1134 params.is_aarch32 = true; 1135 params.is_32bit = false; 1136 params.CRm = (hsr >> 1) & 0xf; 1137 params.is_write = ((hsr & 1) == 0); 1138 1139 params.Op0 = 0; 1140 params.Op1 = (hsr >> 16) & 0xf; 1141 params.Op2 = 0; 1142 params.CRn = 0; 1143 1144 /* 1145 * Make a 64-bit value out of Rt and Rt2. As we use the same trap 1146 * backends between AArch32 and AArch64, we get away with it. 1147 */ 1148 if (params.is_write) { 1149 params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff; 1150 params.regval |= vcpu_get_reg(vcpu, Rt2) << 32; 1151 } 1152 1153 if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific)) 1154 goto out; 1155 if (!emulate_cp(vcpu, ¶ms, global, nr_global)) 1156 goto out; 1157 1158 unhandled_cp_access(vcpu, ¶ms); 1159 1160 out: 1161 /* Split up the value between registers for the read side */ 1162 if (!params.is_write) { 1163 vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval)); 1164 vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval)); 1165 } 1166 1167 return 1; 1168 } 1169 1170 /** 1171 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access 1172 * @vcpu: The VCPU pointer 1173 * @run: The kvm_run struct 1174 */ 1175 static int kvm_handle_cp_32(struct kvm_vcpu *vcpu, 1176 const struct sys_reg_desc *global, 1177 size_t nr_global, 1178 const struct sys_reg_desc *target_specific, 1179 size_t nr_specific) 1180 { 1181 struct sys_reg_params params; 1182 u32 hsr = kvm_vcpu_get_hsr(vcpu); 1183 int Rt = (hsr >> 5) & 0xf; 1184 1185 params.is_aarch32 = true; 1186 params.is_32bit = true; 1187 params.CRm = (hsr >> 1) & 0xf; 1188 params.regval = vcpu_get_reg(vcpu, Rt); 1189 params.is_write = ((hsr & 1) == 0); 1190 params.CRn = (hsr >> 10) & 0xf; 1191 params.Op0 = 0; 1192 params.Op1 = (hsr >> 14) & 0x7; 1193 params.Op2 = (hsr >> 17) & 0x7; 1194 1195 if (!emulate_cp(vcpu, ¶ms, target_specific, nr_specific) || 1196 !emulate_cp(vcpu, ¶ms, global, nr_global)) { 1197 if (!params.is_write) 1198 vcpu_set_reg(vcpu, Rt, params.regval); 1199 return 1; 1200 } 1201 1202 unhandled_cp_access(vcpu, ¶ms); 1203 return 1; 1204 } 1205 1206 int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 1207 { 1208 const struct 
sys_reg_desc *target_specific; 1209 size_t num; 1210 1211 target_specific = get_target_table(vcpu->arch.target, false, &num); 1212 return kvm_handle_cp_64(vcpu, 1213 cp15_64_regs, ARRAY_SIZE(cp15_64_regs), 1214 target_specific, num); 1215 } 1216 1217 int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 1218 { 1219 const struct sys_reg_desc *target_specific; 1220 size_t num; 1221 1222 target_specific = get_target_table(vcpu->arch.target, false, &num); 1223 return kvm_handle_cp_32(vcpu, 1224 cp15_regs, ARRAY_SIZE(cp15_regs), 1225 target_specific, num); 1226 } 1227 1228 int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run) 1229 { 1230 return kvm_handle_cp_64(vcpu, 1231 cp14_64_regs, ARRAY_SIZE(cp14_64_regs), 1232 NULL, 0); 1233 } 1234 1235 int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run) 1236 { 1237 return kvm_handle_cp_32(vcpu, 1238 cp14_regs, ARRAY_SIZE(cp14_regs), 1239 NULL, 0); 1240 } 1241 1242 static int emulate_sys_reg(struct kvm_vcpu *vcpu, 1243 struct sys_reg_params *params) 1244 { 1245 size_t num; 1246 const struct sys_reg_desc *table, *r; 1247 1248 table = get_target_table(vcpu->arch.target, true, &num); 1249 1250 /* Search target-specific then generic table. */ 1251 r = find_reg(params, table, num); 1252 if (!r) 1253 r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 1254 1255 if (likely(r)) { 1256 /* 1257 * Not having an accessor means that we have 1258 * configured a trap that we don't know how to 1259 * handle. This certainly qualifies as a gross bug 1260 * that should be fixed right away. 1261 */ 1262 BUG_ON(!r->access); 1263 1264 if (likely(r->access(vcpu, params, r))) { 1265 /* Skip instruction, since it was emulated */ 1266 kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); 1267 return 1; 1268 } 1269 /* If access function fails, it should complain. 
*/ 1270 } else { 1271 kvm_err("Unsupported guest sys_reg access at: %lx\n", 1272 *vcpu_pc(vcpu)); 1273 print_sys_reg_instr(params); 1274 } 1275 kvm_inject_undefined(vcpu); 1276 return 1; 1277 } 1278 1279 static void reset_sys_reg_descs(struct kvm_vcpu *vcpu, 1280 const struct sys_reg_desc *table, size_t num) 1281 { 1282 unsigned long i; 1283 1284 for (i = 0; i < num; i++) 1285 if (table[i].reset) 1286 table[i].reset(vcpu, &table[i]); 1287 } 1288 1289 /** 1290 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access 1291 * @vcpu: The VCPU pointer 1292 * @run: The kvm_run struct 1293 */ 1294 int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run) 1295 { 1296 struct sys_reg_params params; 1297 unsigned long esr = kvm_vcpu_get_hsr(vcpu); 1298 int Rt = (esr >> 5) & 0x1f; 1299 int ret; 1300 1301 trace_kvm_handle_sys_reg(esr); 1302 1303 params.is_aarch32 = false; 1304 params.is_32bit = false; 1305 params.Op0 = (esr >> 20) & 3; 1306 params.Op1 = (esr >> 14) & 0x7; 1307 params.CRn = (esr >> 10) & 0xf; 1308 params.CRm = (esr >> 1) & 0xf; 1309 params.Op2 = (esr >> 17) & 0x7; 1310 params.regval = vcpu_get_reg(vcpu, Rt); 1311 params.is_write = !(esr & 1); 1312 1313 ret = emulate_sys_reg(vcpu, ¶ms); 1314 1315 if (!params.is_write) 1316 vcpu_set_reg(vcpu, Rt, params.regval); 1317 return ret; 1318 } 1319 1320 /****************************************************************************** 1321 * Userspace API 1322 *****************************************************************************/ 1323 1324 static bool index_to_params(u64 id, struct sys_reg_params *params) 1325 { 1326 switch (id & KVM_REG_SIZE_MASK) { 1327 case KVM_REG_SIZE_U64: 1328 /* Any unused index bits means it's not valid. */ 1329 if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK 1330 | KVM_REG_ARM_COPROC_MASK 1331 | KVM_REG_ARM64_SYSREG_OP0_MASK 1332 | KVM_REG_ARM64_SYSREG_OP1_MASK 1333 | KVM_REG_ARM64_SYSREG_CRN_MASK 1334 | KVM_REG_ARM64_SYSREG_CRM_MASK 1335 | KVM_REG_ARM64_SYSREG_OP2_MASK)) 1336 return false; 1337 params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK) 1338 >> KVM_REG_ARM64_SYSREG_OP0_SHIFT); 1339 params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK) 1340 >> KVM_REG_ARM64_SYSREG_OP1_SHIFT); 1341 params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK) 1342 >> KVM_REG_ARM64_SYSREG_CRN_SHIFT); 1343 params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK) 1344 >> KVM_REG_ARM64_SYSREG_CRM_SHIFT); 1345 params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK) 1346 >> KVM_REG_ARM64_SYSREG_OP2_SHIFT); 1347 return true; 1348 default: 1349 return false; 1350 } 1351 } 1352 1353 /* Decode an index value, and find the sys_reg_desc entry. */ 1354 static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu, 1355 u64 id) 1356 { 1357 size_t num; 1358 const struct sys_reg_desc *table, *r; 1359 struct sys_reg_params params; 1360 1361 /* We only do sys_reg for now. */ 1362 if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG) 1363 return NULL; 1364 1365 if (!index_to_params(id, ¶ms)) 1366 return NULL; 1367 1368 table = get_target_table(vcpu->arch.target, true, &num); 1369 r = find_reg(¶ms, table, num); 1370 if (!r) 1371 r = find_reg(¶ms, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 1372 1373 /* Not saved in the sys_reg array? */ 1374 if (r && !r->reg) 1375 r = NULL; 1376 1377 return r; 1378 } 1379 1380 /* 1381 * These are the invariant sys_reg registers: we let the guest see the 1382 * host versions of these, so they're part of the guest state. 
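 * For example, MIDR_EL1 and CTR_EL0 below are sampled from the host
 * once at init time (see kvm_sys_reg_table_init()), and
 * set_invariant_sys_reg() rejects any attempt to store a different
 * value.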
1383 * 1384 * A future CPU may provide a mechanism to present different values to 1385 * the guest, or a future kvm may trap them. 1386 */ 1387 1388 #define FUNCTION_INVARIANT(reg) \ 1389 static void get_##reg(struct kvm_vcpu *v, \ 1390 const struct sys_reg_desc *r) \ 1391 { \ 1392 u64 val; \ 1393 \ 1394 asm volatile("mrs %0, " __stringify(reg) "\n" \ 1395 : "=r" (val)); \ 1396 ((struct sys_reg_desc *)r)->val = val; \ 1397 } 1398 1399 FUNCTION_INVARIANT(midr_el1) 1400 FUNCTION_INVARIANT(ctr_el0) 1401 FUNCTION_INVARIANT(revidr_el1) 1402 FUNCTION_INVARIANT(id_pfr0_el1) 1403 FUNCTION_INVARIANT(id_pfr1_el1) 1404 FUNCTION_INVARIANT(id_dfr0_el1) 1405 FUNCTION_INVARIANT(id_afr0_el1) 1406 FUNCTION_INVARIANT(id_mmfr0_el1) 1407 FUNCTION_INVARIANT(id_mmfr1_el1) 1408 FUNCTION_INVARIANT(id_mmfr2_el1) 1409 FUNCTION_INVARIANT(id_mmfr3_el1) 1410 FUNCTION_INVARIANT(id_isar0_el1) 1411 FUNCTION_INVARIANT(id_isar1_el1) 1412 FUNCTION_INVARIANT(id_isar2_el1) 1413 FUNCTION_INVARIANT(id_isar3_el1) 1414 FUNCTION_INVARIANT(id_isar4_el1) 1415 FUNCTION_INVARIANT(id_isar5_el1) 1416 FUNCTION_INVARIANT(clidr_el1) 1417 FUNCTION_INVARIANT(aidr_el1) 1418 1419 /* ->val is filled in by kvm_sys_reg_table_init() */ 1420 static struct sys_reg_desc invariant_sys_regs[] = { 1421 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000), 1422 NULL, get_midr_el1 }, 1423 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110), 1424 NULL, get_revidr_el1 }, 1425 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000), 1426 NULL, get_id_pfr0_el1 }, 1427 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001), 1428 NULL, get_id_pfr1_el1 }, 1429 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010), 1430 NULL, get_id_dfr0_el1 }, 1431 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011), 1432 NULL, get_id_afr0_el1 }, 1433 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100), 1434 NULL, get_id_mmfr0_el1 }, 1435 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101), 1436 NULL, get_id_mmfr1_el1 }, 1437 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110), 1438 NULL, get_id_mmfr2_el1 }, 1439 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111), 1440 NULL, get_id_mmfr3_el1 }, 1441 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000), 1442 NULL, get_id_isar0_el1 }, 1443 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001), 1444 NULL, get_id_isar1_el1 }, 1445 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010), 1446 NULL, get_id_isar2_el1 }, 1447 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011), 1448 NULL, get_id_isar3_el1 }, 1449 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100), 1450 NULL, get_id_isar4_el1 }, 1451 { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101), 1452 NULL, get_id_isar5_el1 }, 1453 { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001), 1454 NULL, get_clidr_el1 }, 1455 { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111), 1456 NULL, get_aidr_el1 }, 1457 { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001), 1458 NULL, get_ctr_el0 }, 1459 }; 1460 1461 static int reg_from_user(u64 *val, const void __user *uaddr, u64 id) 1462 { 1463 if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) 1464 return -EFAULT; 1465 return 0; 1466 } 1467 1468 static int reg_to_user(void __user *uaddr, const u64 *val, u64 id) 1469 { 1470 if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) 1471 return -EFAULT; 1472 return 0; 1473 } 1474 1475 static int get_invariant_sys_reg(u64 id, void 
__user *uaddr) 1476 { 1477 struct sys_reg_params params; 1478 const struct sys_reg_desc *r; 1479 1480 if (!index_to_params(id, ¶ms)) 1481 return -ENOENT; 1482 1483 r = find_reg(¶ms, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); 1484 if (!r) 1485 return -ENOENT; 1486 1487 return reg_to_user(uaddr, &r->val, id); 1488 } 1489 1490 static int set_invariant_sys_reg(u64 id, void __user *uaddr) 1491 { 1492 struct sys_reg_params params; 1493 const struct sys_reg_desc *r; 1494 int err; 1495 u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ 1496 1497 if (!index_to_params(id, ¶ms)) 1498 return -ENOENT; 1499 r = find_reg(¶ms, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)); 1500 if (!r) 1501 return -ENOENT; 1502 1503 err = reg_from_user(&val, uaddr, id); 1504 if (err) 1505 return err; 1506 1507 /* This is what we mean by invariant: you can't change it. */ 1508 if (r->val != val) 1509 return -EINVAL; 1510 1511 return 0; 1512 } 1513 1514 static bool is_valid_cache(u32 val) 1515 { 1516 u32 level, ctype; 1517 1518 if (val >= CSSELR_MAX) 1519 return false; 1520 1521 /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ 1522 level = (val >> 1); 1523 ctype = (cache_levels >> (level * 3)) & 7; 1524 1525 switch (ctype) { 1526 case 0: /* No cache */ 1527 return false; 1528 case 1: /* Instruction cache only */ 1529 return (val & 1); 1530 case 2: /* Data cache only */ 1531 case 4: /* Unified cache */ 1532 return !(val & 1); 1533 case 3: /* Separate instruction and data caches */ 1534 return true; 1535 default: /* Reserved: we can't know instruction or data. */ 1536 return false; 1537 } 1538 } 1539 1540 static int demux_c15_get(u64 id, void __user *uaddr) 1541 { 1542 u32 val; 1543 u32 __user *uval = uaddr; 1544 1545 /* Fail if we have unknown bits set. */ 1546 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 1547 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 1548 return -ENOENT; 1549 1550 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 1551 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 1552 if (KVM_REG_SIZE(id) != 4) 1553 return -ENOENT; 1554 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 1555 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 1556 if (!is_valid_cache(val)) 1557 return -ENOENT; 1558 1559 return put_user(get_ccsidr(val), uval); 1560 default: 1561 return -ENOENT; 1562 } 1563 } 1564 1565 static int demux_c15_set(u64 id, void __user *uaddr) 1566 { 1567 u32 val, newval; 1568 u32 __user *uval = uaddr; 1569 1570 /* Fail if we have unknown bits set. */ 1571 if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK 1572 | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) 1573 return -ENOENT; 1574 1575 switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { 1576 case KVM_REG_ARM_DEMUX_ID_CCSIDR: 1577 if (KVM_REG_SIZE(id) != 4) 1578 return -ENOENT; 1579 val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) 1580 >> KVM_REG_ARM_DEMUX_VAL_SHIFT; 1581 if (!is_valid_cache(val)) 1582 return -ENOENT; 1583 1584 if (get_user(newval, uval)) 1585 return -EFAULT; 1586 1587 /* This is also invariant: you can't change it. 
*/ 1588 if (newval != get_ccsidr(val)) 1589 return -EINVAL; 1590 return 0; 1591 default: 1592 return -ENOENT; 1593 } 1594 } 1595 1596 int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 1597 { 1598 const struct sys_reg_desc *r; 1599 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 1600 1601 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 1602 return demux_c15_get(reg->id, uaddr); 1603 1604 if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) 1605 return -ENOENT; 1606 1607 r = index_to_sys_reg_desc(vcpu, reg->id); 1608 if (!r) 1609 return get_invariant_sys_reg(reg->id, uaddr); 1610 1611 if (r->get_user) 1612 return (r->get_user)(vcpu, r, reg, uaddr); 1613 1614 return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id); 1615 } 1616 1617 int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) 1618 { 1619 const struct sys_reg_desc *r; 1620 void __user *uaddr = (void __user *)(unsigned long)reg->addr; 1621 1622 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) 1623 return demux_c15_set(reg->id, uaddr); 1624 1625 if (KVM_REG_SIZE(reg->id) != sizeof(__u64)) 1626 return -ENOENT; 1627 1628 r = index_to_sys_reg_desc(vcpu, reg->id); 1629 if (!r) 1630 return set_invariant_sys_reg(reg->id, uaddr); 1631 1632 if (r->set_user) 1633 return (r->set_user)(vcpu, r, reg, uaddr); 1634 1635 return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id); 1636 } 1637 1638 static unsigned int num_demux_regs(void) 1639 { 1640 unsigned int i, count = 0; 1641 1642 for (i = 0; i < CSSELR_MAX; i++) 1643 if (is_valid_cache(i)) 1644 count++; 1645 1646 return count; 1647 } 1648 1649 static int write_demux_regids(u64 __user *uindices) 1650 { 1651 u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; 1652 unsigned int i; 1653 1654 val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; 1655 for (i = 0; i < CSSELR_MAX; i++) { 1656 if (!is_valid_cache(i)) 1657 continue; 1658 if (put_user(val | i, uindices)) 1659 return -EFAULT; 1660 uindices++; 1661 } 1662 return 0; 1663 } 1664 1665 static u64 sys_reg_to_index(const struct sys_reg_desc *reg) 1666 { 1667 return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | 1668 KVM_REG_ARM64_SYSREG | 1669 (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) | 1670 (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) | 1671 (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) | 1672 (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) | 1673 (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT)); 1674 } 1675 1676 static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind) 1677 { 1678 if (!*uind) 1679 return true; 1680 1681 if (put_user(sys_reg_to_index(reg), *uind)) 1682 return false; 1683 1684 (*uind)++; 1685 return true; 1686 } 1687 1688 /* Assumed ordered tables, see kvm_sys_reg_table_init. */ 1689 static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind) 1690 { 1691 const struct sys_reg_desc *i1, *i2, *end1, *end2; 1692 unsigned int total = 0; 1693 size_t num; 1694 1695 /* We check for duplicates here, to allow arch-specific overrides. */ 1696 i1 = get_target_table(vcpu->arch.target, true, &num); 1697 end1 = i1 + num; 1698 i2 = sys_reg_descs; 1699 end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs); 1700 1701 BUG_ON(i1 == end1 || i2 == end2); 1702 1703 /* Walk carefully, as both tables may refer to the same register. */ 1704 while (i1 || i2) { 1705 int cmp = cmp_sys_reg(i1, i2); 1706 /* target-specific overrides generic entry. */ 1707 if (cmp <= 0) { 1708 /* Ignore registers we trap but don't save. 
*/ 1709 if (i1->reg) { 1710 if (!copy_reg_to_user(i1, &uind)) 1711 return -EFAULT; 1712 total++; 1713 } 1714 } else { 1715 /* Ignore registers we trap but don't save. */ 1716 if (i2->reg) { 1717 if (!copy_reg_to_user(i2, &uind)) 1718 return -EFAULT; 1719 total++; 1720 } 1721 } 1722 1723 if (cmp <= 0 && ++i1 == end1) 1724 i1 = NULL; 1725 if (cmp >= 0 && ++i2 == end2) 1726 i2 = NULL; 1727 } 1728 return total; 1729 } 1730 1731 unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu) 1732 { 1733 return ARRAY_SIZE(invariant_sys_regs) 1734 + num_demux_regs() 1735 + walk_sys_regs(vcpu, (u64 __user *)NULL); 1736 } 1737 1738 int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) 1739 { 1740 unsigned int i; 1741 int err; 1742 1743 /* Then give them all the invariant registers' indices. */ 1744 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) { 1745 if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices)) 1746 return -EFAULT; 1747 uindices++; 1748 } 1749 1750 err = walk_sys_regs(vcpu, uindices); 1751 if (err < 0) 1752 return err; 1753 uindices += err; 1754 1755 return write_demux_regids(uindices); 1756 } 1757 1758 static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n) 1759 { 1760 unsigned int i; 1761 1762 for (i = 1; i < n; i++) { 1763 if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) { 1764 kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1); 1765 return 1; 1766 } 1767 } 1768 1769 return 0; 1770 } 1771 1772 void kvm_sys_reg_table_init(void) 1773 { 1774 unsigned int i; 1775 struct sys_reg_desc clidr; 1776 1777 /* Make sure tables are unique and in order. */ 1778 BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs))); 1779 BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs))); 1780 BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs))); 1781 BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs))); 1782 BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs))); 1783 BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs))); 1784 1785 /* We abuse the reset function to overwrite the table itself. */ 1786 for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) 1787 invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]); 1788 1789 /* 1790 * CLIDR format is awkward, so clean it up. See ARM B4.1.20: 1791 * 1792 * If software reads the Cache Type fields from Ctype1 1793 * upwards, once it has seen a value of 0b000, no caches 1794 * exist at further-out levels of the hierarchy. So, for 1795 * example, if Ctype3 is the first Cache Type field with a 1796 * value of 0b000, the values of Ctype4 to Ctype7 must be 1797 * ignored. 1798 */ 1799 get_clidr_el1(NULL, &clidr); /* Ugly... */ 1800 cache_levels = clidr.val; 1801 for (i = 0; i < 7; i++) 1802 if (((cache_levels >> (i*3)) & 7) == 0) 1803 break; 1804 /* Clear all higher bits. */ 1805 cache_levels &= (1 << (i*3))-1; 1806 } 1807 1808 /** 1809 * kvm_reset_sys_regs - sets system registers to reset value 1810 * @vcpu: The VCPU pointer 1811 * 1812 * This function finds the right table above and sets the registers on the 1813 * virtual CPU struct to their architecturally defined reset values. 1814 */ 1815 void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) 1816 { 1817 size_t num; 1818 const struct sys_reg_desc *table; 1819 1820 /* Catch someone adding a register without putting in reset entry. 
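 * (The 0x42 memset below poisons every slot; any register whose reset
 * hook fails to overwrite it still reads back 0x4242424242424242 and
 * trips the panic at the end of this function.)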
*/ 1821 memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs)); 1822 1823 /* Generic chip reset first (so target could override). */ 1824 reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs)); 1825 1826 table = get_target_table(vcpu->arch.target, true, &num); 1827 reset_sys_reg_descs(vcpu, table, num); 1828 1829 for (num = 1; num < NR_SYS_REGS; num++) 1830 if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242) 1831 panic("Didn't reset vcpu_sys_reg(%zi)", num); 1832 } 1833