/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/guest.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/nospec.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <kvm/arm_psci.h>
#include <asm/cputype.h>
#include <linux/uaccess.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_host.h>
#include <asm/sigcontext.h>

#include "trace.h"

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	VCPU_STAT(hvc_exit_stat),
	VCPU_STAT(wfe_exit_stat),
	VCPU_STAT(wfi_exit_stat),
	VCPU_STAT(mmio_exit_user),
	VCPU_STAT(mmio_exit_kernel),
	VCPU_STAT(exits),
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static bool core_reg_offset_is_vreg(u64 off)
{
	return off >= KVM_REG_ARM_CORE_REG(fp_regs.vregs) &&
		off < KVM_REG_ARM_CORE_REG(fp_regs.fpsr);
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

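/*
 * Illustration (not relied on by the code): assuming the usual UAPI
 * definition KVM_REG_ARM_CORE_REG(name) ==
 * offsetof(struct kvm_regs, name) / sizeof(__u32), the low bits of a core
 * register ID are simply the field's offset within struct kvm_regs counted
 * in 32-bit words. A well-formed ID for the PC would roughly be:
 *
 *	KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *		KVM_REG_ARM_CORE_REG(regs.pc)
 *
 * core_reg_offset_from_id() strips everything but that offset, and
 * validate_core_offset() below checks that the offset and the size encoded
 * in the ID actually describe one whole field.
 */
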
static int validate_core_offset(const struct kvm_vcpu *vcpu,
				const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (KVM_REG_SIZE(reg->id) != size ||
	    !IS_ALIGNED(off, size / sizeof(__u32)))
		return -EINVAL;

	/*
	 * The KVM_REG_ARM64_SVE regs must be used instead of
	 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
	 * SVE-enabled vcpus:
	 */
	if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(off))
		return -EINVAL;

	return 0;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*
	 * Because the kvm_regs structure is a mix of 32, 64 and
	 * 128bit fields, we index it as if it was a 32bit
	 * array. Hence below, nr_regs is the number of entries, and
	 * off the index in the "array".
	 */
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	u32 off;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	__u32 __user *uaddr = (__u32 __user *)(unsigned long)reg->addr;
	struct kvm_regs *regs = vcpu_gp_regs(vcpu);
	int nr_regs = sizeof(*regs) / sizeof(__u32);
	__uint128_t tmp;
	void *valp = &tmp;
	u64 off;
	int err = 0;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= nr_regs ||
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(vcpu, reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(valp, uaddr, KVM_REG_SIZE(reg->id))) {
		err = -EFAULT;
		goto out;
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	memcpy((u32 *)regs + off, valp, KVM_REG_SIZE(reg->id));
out:
	return err;
}

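/*
 * The KVM_REG_ARM64_SVE_VLS pseudo-register is a bitmap of supported vector
 * lengths, one bit per vector quadword count (vq), packed into
 * KVM_ARM64_SVE_VLS_WORDS 64-bit words. vq_word()/vq_mask() locate the bit
 * for a given vq: for example, vq == SVE_VQ_MIN lands in bit 0 of word 0,
 * vq == SVE_VQ_MIN + 64 in bit 0 of word 1, and so on.
 */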
#define vq_word(vq) (((vq) - SVE_VQ_MIN) / 64)
#define vq_mask(vq) ((u64)1 << ((vq) - SVE_VQ_MIN) % 64)

static bool vq_present(
	const u64 (*const vqs)[KVM_ARM64_SVE_VLS_WORDS],
	unsigned int vq)
{
	return (*vqs)[vq_word(vq)] & vq_mask(vq);
}

static int get_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (WARN_ON(!sve_vl_valid(vcpu->arch.sve_max_vl)))
		return -EINVAL;

	memset(vqs, 0, sizeof(vqs));

	max_vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (sve_vq_available(vq))
			vqs[vq_word(vq)] |= vq_mask(vq);

	if (copy_to_user((void __user *)reg->addr, vqs, sizeof(vqs)))
		return -EFAULT;

	return 0;
}

static int set_sve_vls(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	unsigned int max_vq, vq;
	u64 vqs[KVM_ARM64_SVE_VLS_WORDS];

	if (!vcpu_has_sve(vcpu))
		return -ENOENT;

	if (kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;		/* too late! */

	if (WARN_ON(vcpu->arch.sve_state))
		return -EINVAL;

	if (copy_from_user(vqs, (const void __user *)reg->addr, sizeof(vqs)))
		return -EFAULT;

	max_vq = 0;
	for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; ++vq)
		if (vq_present(&vqs, vq))
			max_vq = vq;

	if (max_vq > sve_vq_from_vl(kvm_sve_max_vl))
		return -EINVAL;

	/*
	 * Vector lengths supported by the host can't currently be
	 * hidden from the guest individually: instead we can only set a
	 * maximum via ZCR_EL2.LEN. So, make sure the available vector
	 * lengths match the set requested exactly up to the requested
	 * maximum:
	 */
	for (vq = SVE_VQ_MIN; vq <= max_vq; ++vq)
		if (vq_present(&vqs, vq) != sve_vq_available(vq))
			return -EINVAL;

	/* Can't run with no vector lengths at all: */
	if (max_vq < SVE_VQ_MIN)
		return -EINVAL;

	/* vcpu->arch.sve_state will be alloc'd by kvm_vcpu_finalize_sve() */
	vcpu->arch.sve_max_vl = sve_vl_from_vq(max_vq);

	return 0;
}

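/*
 * Note on ordering: userspace is expected to write KVM_REG_ARM64_SVE_VLS
 * (if it wants something other than the defaults) before finalizing SVE
 * with KVM_ARM_VCPU_FINALIZE(KVM_ARM_VCPU_SVE); set_sve_vls() above refuses
 * changes once the vcpu is finalized, and the per-register accessors below
 * refuse access until it is.
 */
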
#define SVE_REG_SLICE_SHIFT	0
#define SVE_REG_SLICE_BITS	5
#define SVE_REG_ID_SHIFT	(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS)
#define SVE_REG_ID_BITS		5

#define SVE_REG_SLICE_MASK					\
	GENMASK(SVE_REG_SLICE_SHIFT + SVE_REG_SLICE_BITS - 1,	\
		SVE_REG_SLICE_SHIFT)
#define SVE_REG_ID_MASK							\
	GENMASK(SVE_REG_ID_SHIFT + SVE_REG_ID_BITS - 1, SVE_REG_ID_SHIFT)

#define SVE_NUM_SLICES (1 << SVE_REG_SLICE_BITS)

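/*
 * With the values above, the low bits of an SVE register ID therefore decode
 * as: bits [4:0] = slice index, bits [9:5] = register number; the remaining
 * bits come from the generic KVM_REG_* encoding. Only slice 0 is accepted
 * for now (see vcpu_sve_slices() below).
 */
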
#define KVM_SVE_ZREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_ZREG(0, 0))
#define KVM_SVE_PREG_SIZE KVM_REG_SIZE(KVM_REG_ARM64_SVE_PREG(0, 0))

/*
 * Number of register slices required to cover each whole SVE register.
 * NOTE: Only the first slice ever exists, for now.
 * If you are tempted to modify this, you must also rework sve_reg_to_region()
 * to match:
 */
#define vcpu_sve_slices(vcpu) 1

/* Bounds of a single SVE register slice within vcpu->arch.sve_state */
struct sve_state_reg_region {
	unsigned int koffset;	/* offset into sve_state in kernel memory */
	unsigned int klen;	/* length in kernel memory */
	unsigned int upad;	/* extra trailing padding in user memory */
};

/*
 * Validate SVE register ID and get sanitised bounds for user/kernel SVE
 * register copy
 */
static int sve_reg_to_region(struct sve_state_reg_region *region,
			     struct kvm_vcpu *vcpu,
			     const struct kvm_one_reg *reg)
{
	/* reg ID ranges for Z- registers */
	const u64 zreg_id_min = KVM_REG_ARM64_SVE_ZREG(0, 0);
	const u64 zreg_id_max = KVM_REG_ARM64_SVE_ZREG(SVE_NUM_ZREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* reg ID ranges for P- registers and FFR (which are contiguous) */
	const u64 preg_id_min = KVM_REG_ARM64_SVE_PREG(0, 0);
	const u64 preg_id_max = KVM_REG_ARM64_SVE_FFR(SVE_NUM_SLICES - 1);

	unsigned int vq;
	unsigned int reg_num;

	unsigned int reqoffset, reqlen;	/* User-requested offset and length */
	unsigned int maxlen;		/* Maximum permitted length */

	size_t sve_state_size;

	const u64 last_preg_id = KVM_REG_ARM64_SVE_PREG(SVE_NUM_PREGS - 1,
						       SVE_NUM_SLICES - 1);

	/* Verify that the P-regs and FFR really do have contiguous IDs: */
	BUILD_BUG_ON(KVM_REG_ARM64_SVE_FFR(0) != last_preg_id + 1);

	/* Verify that we match the UAPI header: */
	BUILD_BUG_ON(SVE_NUM_SLICES != KVM_ARM64_SVE_MAX_SLICES);

	reg_num = (reg->id & SVE_REG_ID_MASK) >> SVE_REG_ID_SHIFT;

	if (reg->id >= zreg_id_min && reg->id <= zreg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_ZREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_ZREG_SIZE;
		maxlen = SVE_SIG_ZREG_SIZE(vq);
	} else if (reg->id >= preg_id_min && reg->id <= preg_id_max) {
		if (!vcpu_has_sve(vcpu) || (reg->id & SVE_REG_SLICE_MASK) > 0)
			return -ENOENT;

		vq = sve_vq_from_vl(vcpu->arch.sve_max_vl);

		reqoffset = SVE_SIG_PREG_OFFSET(vq, reg_num) -
				SVE_SIG_REGS_OFFSET;
		reqlen = KVM_SVE_PREG_SIZE;
		maxlen = SVE_SIG_PREG_SIZE(vq);
	} else {
		return -EINVAL;
	}

	sve_state_size = vcpu_sve_state_size(vcpu);
	if (WARN_ON(!sve_state_size))
		return -EINVAL;

	region->koffset = array_index_nospec(reqoffset, sve_state_size);
	region->klen = min(maxlen, reqlen);
	region->upad = reqlen - region->klen;

	return 0;
}

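/*
 * Note: the user-visible size of each SVE register (KVM_SVE_ZREG_SIZE /
 * KVM_SVE_PREG_SIZE) is fixed by the register ID encoding, while the
 * kernel-side storage depends on this vcpu's maximum vector length. The
 * region computed above therefore covers only the live bytes (klen) and
 * treats the remainder (upad) as padding: zeroed on read, ignored on write.
 */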
static int get_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	char __user *uptr = (char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return get_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_to_user(uptr, vcpu->arch.sve_state + region.koffset,
			 region.klen) ||
	    clear_user(uptr + region.klen, region.upad))
		return -EFAULT;

	return 0;
}

static int set_sve_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	int ret;
	struct sve_state_reg_region region;
	const char __user *uptr = (const char __user *)reg->addr;

	/* Handle the KVM_REG_ARM64_SVE_VLS pseudo-reg as a special case: */
	if (reg->id == KVM_REG_ARM64_SVE_VLS)
		return set_sve_vls(vcpu, reg);

	/* Try to interpret reg ID as an architectural SVE register... */
	ret = sve_reg_to_region(&region, vcpu, reg);
	if (ret)
		return ret;

	if (!kvm_arm_vcpu_sve_finalized(vcpu))
		return -EPERM;

	if (copy_from_user(vcpu->arch.sve_state + region.koffset, uptr,
			   region.klen))
		return -EFAULT;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static int copy_core_reg_indices(const struct kvm_vcpu *vcpu,
				 u64 __user *uindices)
{
	unsigned int i;
	int n = 0;
	const u64 core_reg = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE;

	for (i = 0; i < sizeof(struct kvm_regs) / sizeof(__u32); i++) {
		/*
		 * The KVM_REG_ARM64_SVE regs must be used instead of
		 * KVM_REG_ARM_CORE for accessing the FPSIMD V-registers on
		 * SVE-enabled vcpus:
		 */
		if (vcpu_has_sve(vcpu) && core_reg_offset_is_vreg(i))
			continue;

		if (uindices) {
			if (put_user(core_reg | i, uindices))
				return -EFAULT;
			uindices++;
		}

		n++;
	}

	return n;
}

static unsigned long num_core_regs(const struct kvm_vcpu *vcpu)
{
	return copy_core_reg_indices(vcpu, NULL);
}

/**
 * ARM64 versions of the TIMER registers, always available on arm64
 */

#define NUM_TIMER_REGS 3

static bool is_timer_reg(u64 index)
{
	switch (index) {
	case KVM_REG_ARM_TIMER_CTL:
	case KVM_REG_ARM_TIMER_CNT:
	case KVM_REG_ARM_TIMER_CVAL:
		return true;
	}
	return false;
}

static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
		return -EFAULT;
	uindices++;
	if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
		return -EFAULT;

	return 0;
}

static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int ret;

	ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
	if (ret != 0)
		return -EFAULT;

	return kvm_arm_timer_set_reg(vcpu, reg->id, val);
}

static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	val = kvm_arm_timer_get_reg(vcpu, reg->id);
	return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}

static unsigned long num_sve_regs(const struct kvm_vcpu *vcpu)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	return slices * (SVE_NUM_PREGS + SVE_NUM_ZREGS + 1 /* FFR */)
		+ 1; /* KVM_REG_ARM64_SVE_VLS */
}

static int copy_sve_reg_indices(const struct kvm_vcpu *vcpu,
				u64 __user *uindices)
{
	const unsigned int slices = vcpu_sve_slices(vcpu);
	u64 reg;
	unsigned int i, n;
	int num_regs = 0;

	if (!vcpu_has_sve(vcpu))
		return 0;

	/* Policed by KVM_GET_REG_LIST: */
	WARN_ON(!kvm_arm_vcpu_sve_finalized(vcpu));

	/*
	 * Enumerate this first, so that userspace can save/restore in
	 * the order reported by KVM_GET_REG_LIST:
	 */
	reg = KVM_REG_ARM64_SVE_VLS;
	if (put_user(reg, uindices++))
		return -EFAULT;
	++num_regs;

	for (i = 0; i < slices; i++) {
		for (n = 0; n < SVE_NUM_ZREGS; n++) {
			reg = KVM_REG_ARM64_SVE_ZREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		for (n = 0; n < SVE_NUM_PREGS; n++) {
			reg = KVM_REG_ARM64_SVE_PREG(n, i);
			if (put_user(reg, uindices++))
				return -EFAULT;
			num_regs++;
		}

		reg = KVM_REG_ARM64_SVE_FFR(i);
		if (put_user(reg, uindices++))
			return -EFAULT;
		num_regs++;
	}

	return num_regs;
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long res = 0;

	res += num_core_regs(vcpu);
	res += num_sve_regs(vcpu);
	res += kvm_arm_num_sys_reg_descs(vcpu);
	res += kvm_arm_get_fw_num_regs(vcpu);
	res += NUM_TIMER_REGS;

	return res;
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append system regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int ret;

	ret = copy_core_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = copy_sve_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += ret;

	ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += kvm_arm_get_fw_num_regs(vcpu);

	ret = copy_timer_indices(vcpu, uindices);
	if (ret < 0)
		return ret;
	uindices += NUM_TIMER_REGS;

	return kvm_arm_copy_sys_reg_indices(vcpu, uindices);
}

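/*
 * Hypothetical userspace usage, for illustration only: a VMM accesses a
 * single register by filling struct kvm_one_reg and issuing the vcpu ioctl,
 * e.g. to read the PC:
 *
 *	__u64 pc;
 *	struct kvm_one_reg r = {
 *		.id   = KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM_CORE |
 *			KVM_REG_ARM_CORE_REG(regs.pc),
 *		.addr = (__u64)(unsigned long)&pc,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &r);
 *
 * kvm_arm_get_reg()/kvm_arm_set_reg() below dispatch such IDs to the right
 * backend based on the coproc field of the ID.
 */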
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return get_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_get_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return get_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return get_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM64 >> 32)
		return -EINVAL;

	switch (reg->id & KVM_REG_ARM_COPROC_MASK) {
	case KVM_REG_ARM_CORE:	return set_core_reg(vcpu, reg);
	case KVM_REG_ARM_FW:	return kvm_arm_set_fw_reg(vcpu, reg);
	case KVM_REG_ARM64_SVE:	return set_sve_reg(vcpu, reg);
	}

	if (is_timer_reg(reg->id))
		return set_timer_reg(vcpu, reg);

	return kvm_arm_sys_reg_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

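/*
 * KVM_GET_VCPU_EVENTS/KVM_SET_VCPU_EVENTS: on arm64 these currently describe
 * only SError state. The syndrome (serror_esr) is meaningful only when the
 * host has the RAS extension; without it, userspace may only mark an SError
 * as pending, and supplying an ESR is rejected below.
 */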
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	events->exception.serror_pending = !!(vcpu->arch.hcr_el2 & HCR_VSE);
	events->exception.serror_has_esr = cpus_have_const_cap(ARM64_HAS_RAS_EXTN);

	if (events->exception.serror_pending && events->exception.serror_has_esr)
		events->exception.serror_esr = vcpu_get_vsesr(vcpu);

	return 0;
}

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events)
{
	bool serror_pending = events->exception.serror_pending;
	bool has_esr = events->exception.serror_has_esr;

	if (serror_pending && has_esr) {
		if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
			return -EINVAL;

		if (!((events->exception.serror_esr) & ~ESR_ELx_ISS_MASK))
			kvm_set_sei_esr(vcpu, events->exception.serror_esr);
		else
			return -EINVAL;
	} else if (serror_pending) {
		kvm_inject_vabt(vcpu);
	}

	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	switch (implementor) {
	case ARM_CPU_IMP_ARM:
		switch (part_number) {
		case ARM_CPU_PART_AEM_V8:
			return KVM_ARM_TARGET_AEM_V8;
		case ARM_CPU_PART_FOUNDATION:
			return KVM_ARM_TARGET_FOUNDATION_V8;
		case ARM_CPU_PART_CORTEX_A53:
			return KVM_ARM_TARGET_CORTEX_A53;
		case ARM_CPU_PART_CORTEX_A57:
			return KVM_ARM_TARGET_CORTEX_A57;
		}
		break;
	case ARM_CPU_IMP_APM:
		switch (part_number) {
		case APM_CPU_PART_POTENZA:
			return KVM_ARM_TARGET_XGENE_POTENZA;
		}
		break;
	}

	/* Return a default generic target */
	return KVM_ARM_TARGET_GENERIC_V8;
}

int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
{
	int target = kvm_target_cpu();

	if (target < 0)
		return -ENODEV;

	memset(init, 0, sizeof(*init));

	/*
	 * For now, we don't return any features.
	 * In future, we might use features to return target
	 * specific features available for the preferred
	 * target type.
	 */
	init->target = (__u32)target;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
			    KVM_GUESTDBG_USE_SW_BP | \
			    KVM_GUESTDBG_USE_HW | \
			    KVM_GUESTDBG_SINGLESTEP)

/**
 * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
 * @vcpu:	the vCPU pointer
 * @dbg:	the ioctl data buffer
 *
 * This sets up and enables the VM for guest debugging. Userspace
 * passes in a control flag to enable different debug types and
 * potentially other architecture specific information in the rest of
 * the structure.
 */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int ret = 0;

	trace_kvm_set_guest_debug(vcpu, dbg->control);

	if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) {
		ret = -EINVAL;
		goto out;
	}

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;

		/* Hardware assisted Break and Watch points */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
			vcpu->arch.external_debug_state = dbg->arch;
		}

	} else {
		/* If not enabled clear all flags */
		vcpu->guest_debug = 0;
	}

out:
	return ret;
}

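/*
 * These arch-specific attribute accessors back the KVM_SET_DEVICE_ATTR,
 * KVM_GET_DEVICE_ATTR and KVM_HAS_DEVICE_ATTR vcpu ioctls: the attribute
 * group selects a backend (PMUv3 or the arch timer here), and unknown
 * groups report -ENXIO.
 */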
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_set_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_get_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_ARM_VCPU_PMU_V3_CTRL:
		ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
		break;
	case KVM_ARM_VCPU_TIMER_CTRL:
		ret = kvm_arm_timer_has_attr(vcpu, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}