// SPDX-License-Identifier: GPL-2.0-only
/*
 * VGIC: KVM DEVICE API
 *
 * Copyright (C) 2015 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
#include <linux/kvm_host.h>
#include <kvm/arm_vgic.h>
#include <linux/uaccess.h>
#include <asm/kvm_mmu.h>
#include <asm/cputype.h>
#include "vgic.h"

/* common helpers */

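/*
 * Sanity-check a candidate MMIO range for a VGIC region: the region must
 * not have been configured already, @addr and @size must both be aligned
 * to @alignment, the range must not wrap around, and it must fit within
 * the guest physical address space.
 */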
int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
		       phys_addr_t addr, phys_addr_t alignment,
		       phys_addr_t size)
{
	if (!IS_VGIC_ADDR_UNDEF(ioaddr))
		return -EEXIST;

	if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
		return -EINVAL;

	if (addr + size < addr)
		return -EINVAL;

	if (addr & ~kvm_phys_mask(&kvm->arch.mmu) ||
	    (addr + size) > kvm_phys_size(&kvm->arch.mmu))
		return -E2BIG;

	return 0;
}

static int vgic_check_type(struct kvm *kvm, int type_needed)
{
	if (kvm->arch.vgic.vgic_model != type_needed)
		return -ENODEV;
	else
		return 0;
}

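/*
 * Handles the legacy KVM_ARM_SET_DEVICE_ADDR VM ioctl, which only knows
 * about the GICv2 distributor and CPU interface regions; the modern,
 * device-attribute based path is kvm_vgic_addr() below.
 */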
int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr)
{
	struct vgic_dist *vgic = &kvm->arch.vgic;
	int r;

	mutex_lock(&kvm->arch.config_lock);
	switch (FIELD_GET(KVM_ARM_DEVICE_TYPE_MASK, dev_addr->id)) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_dist_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_DIST_SIZE);
		if (!r)
			vgic->vgic_dist_base = dev_addr->addr;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		if (!r)
			r = vgic_check_iorange(kvm, vgic->vgic_cpu_base, dev_addr->addr,
					       SZ_4K, KVM_VGIC_V2_CPU_SIZE);
		if (!r)
			vgic->vgic_cpu_base = dev_addr->addr;
		break;
	default:
		r = -ENODEV;
	}

	mutex_unlock(&kvm->arch.config_lock);

	return r;
}

/**
 * kvm_vgic_addr - set or get vgic VM base addresses
 * @kvm: pointer to the vm struct
 * @attr: pointer to the attribute being retrieved/updated
 * @write: if true set the address in the VM address space, if false read the
 *         address
 *
 * Set or get the vgic base addresses for the distributor and the virtual CPU
 * interface in the VM physical address space. These addresses are properties
 * of the emulated core/SoC and therefore user space initially knows this
 * information.
 * Check them for sanity (alignment, double assignment). We can't check for
 * overlapping regions in case of a virtual GICv3 here, since we don't know
 * the number of VCPUs yet, so we defer this check to map_resources().
 */
static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool write)
{
	u64 __user *uaddr = (u64 __user *)attr->addr;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t *addr_ptr, alignment, size;
	u64 undef_value = VGIC_ADDR_UNDEF;
	u64 addr;
	int r;

	/* Reading a redistributor region addr implies getting the index */
	if (write || attr->attr == KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION)
		if (get_user(addr, uaddr))
			return -EFAULT;

	/*
	 * Since we can't hold config_lock while registering the redistributor
	 * iodevs, take the slots_lock immediately.
	 */
	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_DIST_SIZE;
		break;
	case KVM_VGIC_V2_ADDR_TYPE_CPU:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
		addr_ptr = &vgic->vgic_cpu_base;
		alignment = SZ_4K;
		size = KVM_VGIC_V2_CPU_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		addr_ptr = &vgic->vgic_dist_base;
		alignment = SZ_64K;
		size = KVM_VGIC_V3_DIST_SIZE;
		break;
	case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
		struct vgic_redist_region *rdreg;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;
		if (write) {
			r = vgic_v3_set_redist_base(kvm, 0, addr, 0);
			goto out;
		}
		rdreg = list_first_entry_or_null(&vgic->rd_regions,
						 struct vgic_redist_region, list);
		if (!rdreg)
			addr_ptr = &undef_value;
		else
			addr_ptr = &rdreg->base;
		break;
	}
	case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
	{
		struct vgic_redist_region *rdreg;
		u8 index;

		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
		if (r)
			break;

		index = addr & KVM_VGIC_V3_RDIST_INDEX_MASK;

		if (write) {
			gpa_t base = addr & KVM_VGIC_V3_RDIST_BASE_MASK;
			u32 count = FIELD_GET(KVM_VGIC_V3_RDIST_COUNT_MASK, addr);
			u8 flags = FIELD_GET(KVM_VGIC_V3_RDIST_FLAGS_MASK, addr);

			if (!count || flags)
				r = -EINVAL;
			else
				r = vgic_v3_set_redist_base(kvm, index,
							    base, count);
			goto out;
		}

		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		if (!rdreg) {
			r = -ENOENT;
			goto out;
		}

		addr = index;
		addr |= rdreg->base;
		addr |= (u64)rdreg->count << KVM_VGIC_V3_RDIST_COUNT_SHIFT;
		goto out;
	}
	default:
		r = -ENODEV;
	}

	if (r)
		goto out;

	mutex_lock(&kvm->arch.config_lock);
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)
			*addr_ptr = addr;
	} else {
		addr = *addr_ptr;
	}
	mutex_unlock(&kvm->arch.config_lock);

out:
	mutex_unlock(&kvm->slots_lock);

	if (!r && !write)
		r = put_user(addr, uaddr);

	return r;
}

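/*
 * Illustrative userspace sketch (not kernel code) of driving the address
 * setup above through the KVM device attribute API; the vgic file
 * descriptor and the base address are hypothetical and error handling is
 * omitted:
 *
 *	__u64 dist_base = 0x08000000;		// example GPA, 64K aligned
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
 *		.attr  = KVM_VGIC_V3_ADDR_TYPE_DIST,
 *		.addr  = (__u64)(unsigned long)&dist_base,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);	// write == true above
 *	ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);	// reads the address back
 */
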
static int vgic_set_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		r = kvm_vgic_addr(dev->kvm, attr, true);
		return (r == -ENODEV) ? -ENXIO : r;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;
		u32 val;
		int ret = 0;

		if (get_user(val, uaddr))
			return -EFAULT;

		/*
		 * We require:
		 * - at least 32 SPIs on top of the 16 SGIs and 16 PPIs
		 * - at most 1024 interrupts
		 * - a multiple of 32 interrupts
		 */
		if (val < (VGIC_NR_PRIVATE_IRQS + 32) ||
		    val > VGIC_MAX_RESERVED ||
		    (val & 31))
			return -EINVAL;

		mutex_lock(&dev->kvm->arch.config_lock);

		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_spis)
			ret = -EBUSY;
		else
			dev->kvm->arch.vgic.nr_spis =
				val - VGIC_NR_PRIVATE_IRQS;

		mutex_unlock(&dev->kvm->arch.config_lock);

		return ret;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL: {
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_init(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			return r;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			/*
			 * OK, this one isn't common at all, but we
			 * want to handle all control group attributes
			 * in a single place.
			 */
			if (vgic_check_type(dev->kvm, KVM_DEV_TYPE_ARM_VGIC_V3))
				return -ENXIO;
			mutex_lock(&dev->kvm->lock);

			if (!lock_all_vcpus(dev->kvm)) {
				mutex_unlock(&dev->kvm->lock);
				return -EBUSY;
			}

			mutex_lock(&dev->kvm->arch.config_lock);
			r = vgic_v3_save_pending_tables(dev->kvm);
			mutex_unlock(&dev->kvm->arch.config_lock);
			unlock_all_vcpus(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			return r;
		}
		break;
	}
	}

	return -ENXIO;
}

static int vgic_get_common_attr(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	int r = -ENXIO;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		r = kvm_vgic_addr(dev->kvm, attr, false);
		return (r == -ENODEV) ? -ENXIO : r;
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS: {
		u32 __user *uaddr = (u32 __user *)(long)attr->addr;

		r = put_user(dev->kvm->arch.vgic.nr_spis +
			     VGIC_NR_PRIVATE_IRQS, uaddr);
		break;
	}
	}

	return r;
}

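/*
 * Worked example for KVM_DEV_ARM_VGIC_GRP_NR_IRQS, handled by the two
 * helpers above (illustrative only; vgic_fd is hypothetical): the value is
 * the total number of interrupt IDs, must be a multiple of 32, at least
 * 64, and may only be set once. The first 32 IDs are the private
 * SGIs/PPIs, so writing 160 configures 160 - 32 = 128 SPIs, and a read
 * returns nr_spis + 32 again.
 *
 *	__u32 nr_irqs = 160;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
 *		.addr  = (__u64)(unsigned long)&nr_irqs,
 *	};
 *
 *	ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
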
static int vgic_create(struct kvm_device *dev, u32 type)
{
	return kvm_vgic_create(dev->kvm, type);
}

static void vgic_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

int kvm_register_vgic_device(unsigned long type)
{
	int ret = -ENODEV;

	switch (type) {
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v2_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V2);
		break;
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		ret = kvm_register_device_ops(&kvm_arm_vgic_v3_ops,
					      KVM_DEV_TYPE_ARM_VGIC_V3);

		if (ret)
			break;
		ret = kvm_vgic_register_its_device();
		break;
	}

	return ret;
}

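/*
 * For the GICv2 register access groups (KVM_DEV_ARM_VGIC_GRP_DIST_REGS and
 * KVM_DEV_ARM_VGIC_GRP_CPU_REGS), attr->attr encodes both the target vCPU
 * (KVM_DEV_ARM_VGIC_CPUID_MASK) and the register offset
 * (KVM_DEV_ARM_VGIC_OFFSET_MASK); vgic_v2_parse_attr() splits the two.
 */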
int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);

	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
	reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
	if (!reg_attr->vcpu)
		return -EINVAL;

	return 0;
}

/**
 * vgic_v2_attr_regs_access - allows user space to access VGIC v2 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v2_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	int ret;
	u32 val;

	ret = vgic_v2_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	if (is_write)
		if (get_user(val, uaddr))
			return -EFAULT;

	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	ret = vgic_init(dev->kvm);
	if (ret)
		goto out;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v2_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	if (!ret && !is_write)
		ret = put_user(val, uaddr);

	return ret;
}

static int vgic_v2_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return vgic_v2_attr_regs_access(dev, attr, true);
	default:
		return vgic_set_common_attr(dev, attr);
	}
}

static int vgic_v2_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return vgic_v2_attr_regs_access(dev, attr, false);
	default:
		return vgic_get_common_attr(dev, attr);
	}
}

static int vgic_v2_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V2_ADDR_TYPE_DIST:
		case KVM_VGIC_V2_ADDR_TYPE_CPU:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
		return vgic_v2_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v2_ops = {
	.name = "kvm-arm-vgic-v2",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v2_set_attr,
	.get_attr = vgic_v2_get_attr,
	.has_attr = vgic_v2_has_attr,
};

int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
		       struct vgic_reg_attr *reg_attr)
{
	unsigned long vgic_mpidr, mpidr_reg;

	/*
	 * For KVM_DEV_ARM_VGIC_GRP_DIST_REGS group,
	 * attr might not hold MPIDR. Hence assume vcpu0.
	 */
	if (attr->group != KVM_DEV_ARM_VGIC_GRP_DIST_REGS) {
		vgic_mpidr = (attr->attr & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) >>
			      KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT;

		mpidr_reg = VGIC_TO_MPIDR(vgic_mpidr);
		reg_attr->vcpu = kvm_mpidr_to_vcpu(dev->kvm, mpidr_reg);
	} else {
		reg_attr->vcpu = kvm_get_vcpu(dev->kvm, 0);
	}

	if (!reg_attr->vcpu)
		return -EINVAL;

	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;

	return 0;
}

/*
 * vgic_v3_attr_regs_access - allows user space to access VGIC v3 state
 *
 * @dev:      kvm device handle
 * @attr:     kvm device attribute
 * @is_write: true if userspace is writing a register
 */
static int vgic_v3_attr_regs_access(struct kvm_device *dev,
				    struct kvm_device_attr *attr,
				    bool is_write)
{
	struct vgic_reg_attr reg_attr;
	gpa_t addr;
	struct kvm_vcpu *vcpu;
	bool uaccess;
	u32 val;
	int ret;

	ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	vcpu = reg_attr.vcpu;
	addr = reg_attr.addr;

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		/* Sysregs uaccess is performed by the sysreg handling code */
		uaccess = false;
		break;
	default:
		uaccess = true;
	}

	if (uaccess && is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		if (get_user(val, uaddr))
			return -EFAULT;
	}

	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		mutex_unlock(&dev->kvm->lock);
		return -EBUSY;
	}

	mutex_lock(&dev->kvm->arch.config_lock);

	if (unlikely(!vgic_initialized(dev->kvm))) {
		ret = -EBUSY;
		goto out;
	}

	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
		ret = vgic_v3_dist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
		ret = vgic_v3_redist_uaccess(vcpu, is_write, addr, &val);
		break;
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		ret = vgic_v3_cpu_sysregs_uaccess(vcpu, attr, is_write);
		break;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		unsigned int info, intid;

		info = (attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
			KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT;
		if (info == VGIC_LEVEL_INFO_LINE_LEVEL) {
			intid = attr->attr &
				KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK;
			ret = vgic_v3_line_level_info_uaccess(vcpu, is_write,
							      intid, &val);
		} else {
			ret = -EINVAL;
		}
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

out:
	mutex_unlock(&dev->kvm->arch.config_lock);
	unlock_all_vcpus(dev->kvm);
	mutex_unlock(&dev->kvm->lock);

	if (!ret && uaccess && !is_write) {
		u32 __user *uaddr = (u32 __user *)(unsigned long)attr->addr;
		ret = put_user(val, uaddr);
	}

	return ret;
}

static int vgic_v3_set_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
		return vgic_v3_attr_regs_access(dev, attr, true);
	default:
		return vgic_set_common_attr(dev, attr);
	}
}

static int vgic_v3_get_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO:
		return vgic_v3_attr_regs_access(dev, attr, false);
	default:
		return vgic_get_common_attr(dev, attr);
	}
}

static int vgic_v3_has_attr(struct kvm_device *dev,
			    struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_ARM_VGIC_GRP_ADDR:
		switch (attr->attr) {
		case KVM_VGIC_V3_ADDR_TYPE_DIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST:
		case KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION:
			return 0;
		}
		break;
	case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:
	case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS:
		return vgic_v3_has_attr_regs(dev, attr);
	case KVM_DEV_ARM_VGIC_GRP_NR_IRQS:
		return 0;
	case KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO: {
		if (((attr->attr & KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK) >>
		      KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) ==
		      VGIC_LEVEL_INFO_LINE_LEVEL)
			return 0;
		break;
	}
	case KVM_DEV_ARM_VGIC_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_ARM_VGIC_CTRL_INIT:
			return 0;
		case KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES:
			return 0;
		}
	}
	return -ENXIO;
}

struct kvm_device_ops kvm_arm_vgic_v3_ops = {
	.name = "kvm-arm-vgic-v3",
	.create = vgic_create,
	.destroy = vgic_destroy,
	.set_attr = vgic_v3_set_attr,
	.get_attr = vgic_v3_get_attr,
	.has_attr = vgic_v3_has_attr,
};
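
/*
 * Illustrative (non-normative) userspace sequence tying the pieces in this
 * file together for a GICv3; vm_fd is hypothetical and error handling is
 * omitted:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
 *
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);	// cd.fd is the vgic device fd
 *	// ... create the vCPUs, then set KVM_DEV_ARM_VGIC_GRP_ADDR and
 *	// KVM_DEV_ARM_VGIC_GRP_NR_IRQS as sketched above ...
 *	ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &(struct kvm_device_attr){
 *		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
 *		.attr  = KVM_DEV_ARM_VGIC_CTRL_INIT,
 *	});
 */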