// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

struct host_mmu host_mmu;

static struct hyp_pool host_s2_pool;

static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
#define current_vm (*this_cpu_ptr(&__current_vm))

static void guest_lock_component(struct pkvm_hyp_vm *vm)
{
	hyp_spin_lock(&vm->lock);
	current_vm = vm;
}

static void guest_unlock_component(struct pkvm_hyp_vm *vm)
{
	current_vm = NULL;
	hyp_spin_unlock(&vm->lock);
}

static void host_lock_component(void)
{
	hyp_spin_lock(&host_mmu.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_mmu.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}

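/*
 * Iterate over the struct hyp_page entries (vmemmap) covering the physical
 * range [__st, __st + __sz), one page at a time. Typical use, as in
 * __host_update_page_state() below:
 *
 *	for_each_hyp_page(page, addr, size)
 *		set_host_state(page, state);
 */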
#define for_each_hyp_page(__p, __st, __sz)				\
	for (struct hyp_page *__p = hyp_phys_to_page(__st),		\
	     *__e = __p + ((__sz) >> PAGE_SHIFT);			\
	     __p < __e; __p++)

static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
	 * so there should be no need to free any of the tail pages to make the
	 * allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static void host_s2_free_unlinked_table(void *addr, s8 level)
{
	kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.free_unlinked_table = host_s2_free_unlinked_table,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					      id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_mmu.lock);
	mmu->arch = &host_mmu.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
	mmu->pgt = &host_mmu.pgt;
	atomic64_set(&mmu->vmid.id, 0);

	return 0;
}

static void *guest_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));

	WARN_ON(size != (PAGE_SIZE << get_order(size)));
	hyp_split_page(hyp_virt_to_page(addr));

	return addr;
}

static void guest_s2_free_pages_exact(void *addr, unsigned long size)
{
	u8 order = get_order(size);
	unsigned int i;

	for (i = 0; i < (1 << order); i++)
		hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
}

static void *guest_s2_zalloc_page(void *mc)
{
	struct hyp_page *p;
	void *addr;

	addr = hyp_alloc_pages(&current_vm->pool, 0);
	if (addr)
		return addr;

	addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
	if (!addr)
		return addr;

	memset(addr, 0, PAGE_SIZE);
	p = hyp_virt_to_page(addr);
	p->refcount = 1;
	p->order = 0;

	return addr;
}

static void guest_s2_get_page(void *addr)
{
	hyp_get_page(&current_vm->pool, addr);
}

static void guest_s2_put_page(void *addr)
{
	hyp_put_page(&current_vm->pool, addr);
}

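/*
 * Apply @func to the pages backing [va, va + size). Each chunk is accessed
 * through a temporary EL2 mapping: a PMD-sized fixblock when the current
 * address is PMD-aligned and at least PMD_SIZE bytes remain, the single-page
 * fixmap otherwise.
 */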
static void __apply_guest_page(void *va, size_t size,
			       void (*func)(void *addr, size_t size))
{
	size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE);
	va = PTR_ALIGN_DOWN(va, PAGE_SIZE);
	size = PAGE_ALIGN(size);

	while (size) {
		size_t map_size = PAGE_SIZE;
		void *map;

		if (IS_ALIGNED((unsigned long)va, PMD_SIZE) && size >= PMD_SIZE)
			map = hyp_fixblock_map(__hyp_pa(va), &map_size);
		else
			map = hyp_fixmap_map(__hyp_pa(va));

		func(map, map_size);

		if (map_size == PMD_SIZE)
			hyp_fixblock_unmap();
		else
			hyp_fixmap_unmap();

		size -= map_size;
		va += map_size;
	}
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__apply_guest_page(va, size, __clean_dcache_guest_page);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__apply_guest_page(va, size, __invalidate_icache_guest_page);
}

int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
{
	struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
	unsigned long nr_pages;
	int ret;

	nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
	ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
	if (ret)
		return ret;

	hyp_spin_lock_init(&vm->lock);
	vm->mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = guest_s2_zalloc_pages_exact,
		.free_pages_exact = guest_s2_free_pages_exact,
		.zalloc_page = guest_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = guest_s2_get_page,
		.put_page = guest_s2_put_page,
		.dcache_clean_inval_poc = clean_dcache_guest_page,
		.icache_inval_pou = invalidate_icache_guest_page,
	};

	guest_lock_component(vm);
	ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);
	guest_unlock_component(vm);
	if (ret)
		return ret;

	vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);

	return 0;
}

void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
{
	struct hyp_page *page;
	void *addr;

	/* Dump all pgtable pages in the hyp_pool */
	guest_lock_component(vm);
	kvm_pgtable_stage2_destroy(&vm->pgt);
	vm->kvm.arch.mmu.pgd_phys = 0ULL;
	guest_unlock_component(vm);

	/* Drain the hyp_pool into the memcache */
	addr = hyp_alloc_pages(&vm->pool, 0);
	while (addr) {
		page = hyp_virt_to_page(addr);
		page->refcount = 0;
		page->order = 0;
		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
		addr = hyp_alloc_pages(&vm->pool, 0);
	}
}

int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = mmu->vtcr;
	params->hcr_el2 |= HCR_VM;

	/*
	 * The CMO below not only cleans the updated params to the
	 * PoC, but also provides the DSB that ensures ongoing
	 * page-table walks that have started before we trapped to EL2
	 * have completed.
	 */
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg_hcr(params->hcr_el2);
	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_mmu.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

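/*
 * Find the memblock region covering @addr, if any. @range is always filled
 * in: with the region boundaries on success, or, when @addr is not backed by
 * memory, with the gap between the two neighbouring regions (so the caller
 * still gets a range known to contain no memory). For example, with regions
 * [A, B) and [C, D) and B <= addr < C, the function returns NULL and sets
 * *range to { B, C }.
 */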
static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return reg;
		}
	}

	return NULL;
}

bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return !!find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static int check_range_allowed_memory(u64 start, u64 end)
{
	struct memblock_region *reg;
	struct kvm_mem_range range;

	/*
	 * Callers can't check the state of a range that overlaps memory and
	 * MMIO regions, so ensure [start, end[ is in the same kvm_mem_range.
	 */
	reg = find_mem_range(start, &range);
	if (!is_in_mem_range(end - 1, &range))
		return -EINVAL;

	if (!reg || reg->flags & MEMBLOCK_NOMAP)
		return -EPERM;

	return 0;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
				      prot, &host_s2_pool, 0);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)					\
	({								\
		int __ret;						\
		hyp_assert_lock_held(&host_mmu.lock);			\
		__ret = fn(__VA_ARGS__);				\
		if (__ret == -ENOMEM) {					\
			__ret = host_stage2_unmap_dev_all();		\
			if (!__ret)					\
				__ret = fn(__VA_ARGS__);		\
		}							\
		__ret;							\
	})

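/*
 * Example use (see host_stage2_idmap_locked() and
 * host_stage2_set_owner_locked() below):
 *
 *	ret = host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
 *
 * If the first attempt fails with -ENOMEM, host_stage2_unmap_dev_all() is
 * called to recycle the MMIO page-table pages back into the pool and the
 * call is retried once.
 */
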
static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

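/*
 * Narrow *range (typically the memblock-derived range around @addr) down to
 * the largest block-sized, block-aligned chunk that contains @addr and still
 * fits within *range, starting from the level at which the stage-2 walk found
 * a hole. E.g. with 4KiB pages, a fault in an unmapped 2MiB-aligned chunk
 * lying entirely inside the range can be served with a single 2MiB block,
 * otherwise the range falls back to a single page. Returns -EAGAIN if @addr
 * is already mapped (e.g. a race with another CPU), and -EPERM if the invalid
 * PTE is annotated with another owner.
 */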
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	u64 granule;
	s8 level;
	int ret;

	hyp_assert_lock_held(&host_mmu.lock);
	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte) {
		WARN_ON(addr_is_memory(addr) &&
			get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
		return -EPERM;
	}

	for (; level <= KVM_PGTABLE_LAST_LEVEL; level++) {
		if (!kvm_level_supports_block_mapping(level))
			continue;
		granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		if (!range_included(&cur, range))
			continue;
		*range = cur;
		return 0;
	}

	WARN_ON(1);

	return -EINVAL;
}

int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, addr, size)
		set_host_state(page, state);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	int ret;

	if (!range_is_memory(addr, addr + size))
		return -EPERM;

	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
			      addr, size, &host_s2_pool, owner_id);
	if (ret)
		return ret;

	/* Don't forget to update the vmemmap tracking for the host */
	if (owner_id == PKVM_ID_HOST)
		__host_update_page_state(addr, size, PKVM_PAGE_OWNED);
	else
		__host_update_page_state(addr, size, PKVM_NOPAGE);

	return 0;
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, hence avoiding to lose the state because of side-effects in
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

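/*
 * Handle a host stage-2 access to @addr by lazily installing an identity
 * mapping at the largest granule host_stage2_adjust_range() allows, using
 * PKVM_HOST_MEM_PROT for memory and PKVM_HOST_MMIO_PROT for anything else.
 */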
static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = !!find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}

void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	if (!__get_fault_info(esr, &fault)) {
		/*
		 * We've presumably raced with a page-table change which caused
		 * AT to fail, try again.
		 */
		return;
	}

	/*
	 * Yikes, we couldn't resolve the fault IPA. This should reinject an
	 * abort into the host when we figure out how to do that.
	 */
	BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
	addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;

	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}

struct check_walk_data {
	enum pkvm_page_state	desired;
	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte, u64 addr);
};

static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
				      enum kvm_pgtable_walk_flags visit)
{
	struct check_walk_data *d = ctx->arg;

	return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb	= __check_page_state_visitor,
		.arg	= data,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	int ret;

	ret = check_range_allowed_memory(addr, addr + size);
	if (ret)
		return ret;

	hyp_assert_lock_held(&host_mmu.lock);

	for_each_hyp_page(page, addr, size) {
		if (get_host_state(page) != state)
			return -EPERM;
	}

	return 0;
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
		int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);

		if (ret)
			return ret;
	}

	__host_update_page_state(addr, size, state);

	return 0;
}

static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, phys, size)
		set_hyp_state(page, state);
}

static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, phys, size) {
		if (get_hyp_state(page) != state)
			return -EPERM;
	}

	return 0;
}

static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
					  u64 size, enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired	= state,
		.get_page_state	= guest_get_page_state,
	};

	hyp_assert_lock_held(&vm->lock);
	return check_page_state_range(&vm->pgt, addr, size, &d);
}

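/*
 * Host <-> hyp ownership transitions implemented below (host state / hyp
 * state, PKVM_PAGE_ prefixes dropped):
 *
 *   __pkvm_host_share_hyp:   OWNED/NOPAGE -> SHARED_OWNED/SHARED_BORROWED
 *   __pkvm_host_unshare_hyp: SHARED_OWNED/SHARED_BORROWED -> OWNED/NOPAGE
 *                            (refused with -EBUSY while the page is pinned)
 *   __pkvm_host_donate_hyp:  OWNED/NOPAGE -> NOPAGE/OWNED
 *   __pkvm_hyp_donate_host:  NOPAGE/OWNED -> OWNED/NOPAGE
 */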
int __pkvm_host_share_hyp(u64 pfn)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_hyp(u64 pfn)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 virt = (u64)__hyp_va(phys);
	u64 size = PAGE_SIZE;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	if (ret)
		goto unlock;
	if (hyp_page_count((void *)virt)) {
		ret = -EBUSY;
		goto unlock;
	}

	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	void *virt = __hyp_va(phys);
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	u64 virt = (u64)__hyp_va(phys);
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int hyp_pin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);
	u64 phys = __hyp_pa(start);
	u64 size = end - start;
	struct hyp_page *p;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (ret)
		goto unlock;

	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	if (ret)
		goto unlock;

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		p = hyp_virt_to_page(cur);
		hyp_page_ref_inc(p);
		if (p->refcount == 1)
			WARN_ON(pkvm_create_mappings_locked((void *)cur,
							    (void *)cur + PAGE_SIZE,
							    PAGE_HYP));
	}

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

void hyp_unpin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);
	struct hyp_page *p;

	host_lock_component();
	hyp_lock_component();

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		p = hyp_virt_to_page(cur);
		if (p->refcount == 1)
			WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
		hyp_page_ref_dec(p);
	}

	hyp_unlock_component();
	host_unlock_component();
}

int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	int ret;

	host_lock_component();
	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (!ret)
		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	int ret;

	host_lock_component();
	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (!ret)
		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
	host_unlock_component();

	return ret;
}

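/*
 * Only single pages and "second to last level" blocks can change state in
 * one go. E.g. with a 4KiB granule, where KVM_PGTABLE_LAST_LEVEL is 3, the
 * block size is the level-2 granule, i.e. 2MiB: @nr_pages must then be
 * either 1 or 512, and for the latter both @phys and @ipa must be
 * 2MiB-aligned.
 */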
static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
{
	size_t block_size;

	if (nr_pages == 1) {
		*size = PAGE_SIZE;
		return 0;
	}

	/* We solely support second to last level huge mapping */
	block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);

	if (nr_pages != block_size >> PAGE_SHIFT)
		return -EINVAL;

	if (!IS_ALIGNED(phys | ipa, block_size))
		return -EINVAL;

	*size = block_size;
	return 0;
}

int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
			    enum kvm_pgtable_prot prot)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 ipa = hyp_pfn_to_phys(gfn);
	u64 size;
	int ret;

	if (prot & ~KVM_PGTABLE_PROT_RWX)
		return -EINVAL;

	ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
	if (ret)
		return ret;

	ret = check_range_allowed_memory(phys, phys + size);
	if (ret)
		return ret;

	host_lock_component();
	guest_lock_component(vm);

	ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	for_each_hyp_page(page, phys, size) {
		switch (get_host_state(page)) {
		case PKVM_PAGE_OWNED:
			continue;
		case PKVM_PAGE_SHARED_OWNED:
			if (page->host_share_guest_count == U32_MAX) {
				ret = -EBUSY;
				goto unlock;
			}

			/* Only host to np-guest multi-sharing is tolerated */
			if (page->host_share_guest_count)
				continue;

			fallthrough;
		default:
			ret = -EPERM;
			goto unlock;
		}
	}

	for_each_hyp_page(page, phys, size) {
		set_host_state(page, PKVM_PAGE_SHARED_OWNED);
		page->host_share_guest_count++;
	}

	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys,
				       pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
				       &vcpu->vcpu.arch.pkvm_memcache, 0));

unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}

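/*
 * Check that @ipa is mapped in @vm as exactly one @size leaf in the
 * PKVM_PAGE_SHARED_BORROWED state, and that the backing physical pages are
 * still tracked as PKVM_PAGE_SHARED_OWNED by the host with a non-zero
 * host_share_guest_count. On success the backing physical address is
 * returned through @__phys.
 */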
static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size)
{
	enum pkvm_page_state state;
	kvm_pte_t pte;
	u64 phys;
	s8 level;
	int ret;

	ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
	if (ret)
		return ret;
	if (!kvm_pte_valid(pte))
		return -ENOENT;
	if (kvm_granule_size(level) != size)
		return -E2BIG;

	state = guest_get_page_state(pte, ipa);
	if (state != PKVM_PAGE_SHARED_BORROWED)
		return -EPERM;

	phys = kvm_pte_to_phys(pte);
	ret = check_range_allowed_memory(phys, phys + size);
	if (WARN_ON(ret))
		return ret;

	for_each_hyp_page(page, phys, size) {
		if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
			return -EPERM;
		if (WARN_ON(!page->host_share_guest_count))
			return -EINVAL;
	}

	*__phys = phys;

	return 0;
}

int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
	u64 ipa = hyp_pfn_to_phys(gfn);
	u64 size, phys;
	int ret;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	host_lock_component();
	guest_lock_component(vm);

	ret = __check_host_shared_guest(vm, &phys, ipa, size);
	if (ret)
		goto unlock;

	ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size);
	if (ret)
		goto unlock;

	for_each_hyp_page(page, phys, size) {
		/* __check_host_shared_guest() protects against underflow */
		page->host_share_guest_count--;
		if (!page->host_share_guest_count)
			set_host_state(page, PKVM_PAGE_OWNED);
	}

unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}

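/*
 * Debug-only sanity check (CONFIG_NVHE_EL2_DEBUG): warn if [ipa, ipa + size)
 * is in any state other than "shared by the host with @vm" or simply not
 * mapped at all (-ENOENT is tolerated).
 */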
static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa, u64 size)
{
	u64 phys;
	int ret;

	if (!IS_ENABLED(CONFIG_NVHE_EL2_DEBUG))
		return;

	host_lock_component();
	guest_lock_component(vm);

	ret = __check_host_shared_guest(vm, &phys, ipa, size);

	guest_unlock_component(vm);
	host_unlock_component();

	WARN_ON(ret && ret != -ENOENT);
}

int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	if (prot & ~KVM_PGTABLE_PROT_RWX)
		return -EINVAL;

	assert_host_shared_guest(vm, ipa, PAGE_SIZE);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
	u64 size, ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	assert_host_shared_guest(vm, ipa, size);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, size);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm)
{
	u64 size, ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	assert_host_shared_guest(vm, ipa, size);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, size, mkold);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 ipa = hyp_pfn_to_phys(gfn);

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	assert_host_shared_guest(vm, ipa, PAGE_SIZE);
	guest_lock_component(vm);
	kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
	guest_unlock_component(vm);

	return 0;
}

#ifdef CONFIG_NVHE_EL2_DEBUG
struct pkvm_expected_state {
	enum pkvm_page_state host;
	enum pkvm_page_state hyp;
	enum pkvm_page_state guest[2]; /* [ gfn, gfn + 1 ] */
};

static struct pkvm_expected_state selftest_state;
static struct hyp_page *selftest_page;

static struct pkvm_hyp_vm selftest_vm = {
	.kvm = {
		.arch = {
			.mmu	= {
				.arch = &selftest_vm.kvm.arch,
				.pgt = &selftest_vm.pgt,
			},
		},
	},
};

static struct pkvm_hyp_vcpu selftest_vcpu = {
	.vcpu = {
		.arch = {
			.hw_mmu = &selftest_vm.kvm.arch.mmu,
		},
		.kvm = &selftest_vm.kvm,
	},
};

static void init_selftest_vm(void *virt)
{
	struct hyp_page *p = hyp_virt_to_page(virt);
	int i;

	selftest_vm.kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
	WARN_ON(kvm_guest_prepare_stage2(&selftest_vm, virt));

	for (i = 0; i < pkvm_selftest_pages(); i++) {
		if (p[i].refcount)
			continue;
		p[i].refcount = 1;
		hyp_put_page(&selftest_vm.pool, hyp_page_to_virt(&p[i]));
	}
}

static u64 selftest_ipa(void)
{
	return BIT(selftest_vm.pgt.ia_bits - 1);
}

static void assert_page_state(void)
{
	void *virt = hyp_page_to_virt(selftest_page);
	u64 size = PAGE_SIZE << selftest_page->order;
	struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
	u64 phys = hyp_virt_to_phys(virt);
	u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE };
	struct pkvm_hyp_vm *vm;

	vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);

	host_lock_component();
	WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
	host_unlock_component();

	hyp_lock_component();
	WARN_ON(__hyp_check_page_state_range(phys, size, selftest_state.hyp));
	hyp_unlock_component();

	guest_lock_component(&selftest_vm);
	WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0]));
	WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1]));
	guest_unlock_component(&selftest_vm);
}

#define assert_transition_res(res, fn, ...)				\
	do {								\
		WARN_ON(fn(__VA_ARGS__) != res);			\
		assert_page_state();					\
	} while (0)

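/*
 * Walk a single donated page through the ownership state machine, checking
 * after every step (via assert_page_state()) that the host, hyp and guest
 * views match selftest_state, and that every forbidden transition is
 * rejected with the expected error code.
 */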
void pkvm_ownership_selftest(void *base)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_RWX;
	void *virt = hyp_alloc_pages(&host_s2_pool, 0);
	struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
	struct pkvm_hyp_vm *vm = &selftest_vm;
	u64 phys, size, pfn, gfn;

	WARN_ON(!virt);
	selftest_page = hyp_virt_to_page(virt);
	selftest_page->refcount = 0;
	init_selftest_vm(base);

	size = PAGE_SIZE << selftest_page->order;
	phys = hyp_virt_to_phys(virt);
	pfn = hyp_phys_to_pfn(phys);
	gfn = hyp_phys_to_pfn(selftest_ipa());

	selftest_state.host = PKVM_NOPAGE;
	selftest_state.hyp = PKVM_PAGE_OWNED;
	selftest_state.guest[0] = selftest_state.guest[1] = PKVM_NOPAGE;
	assert_page_state();
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.hyp = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);

	assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
	assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
	hyp_unpin_shared_mem(virt, virt + size);
	WARN_ON(hyp_page_count(virt) != 1);
	assert_transition_res(-EBUSY, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);

	hyp_unpin_shared_mem(virt, virt + size);
	assert_page_state();
	WARN_ON(hyp_page_count(virt));

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_unshare_hyp, pfn);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.guest[0] = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);

	selftest_state.guest[1] = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot);
	WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2);

	selftest_state.guest[0] = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_unshare_guest, gfn, 1, vm);

	selftest_state.guest[1] = PKVM_NOPAGE;
	selftest_state.host = PKVM_PAGE_OWNED;
	assert_transition_res(0, __pkvm_host_unshare_guest, gfn + 1, 1, vm);

	selftest_state.host = PKVM_NOPAGE;
	selftest_state.hyp = PKVM_PAGE_OWNED;
	assert_transition_res(0, __pkvm_host_donate_hyp, pfn, 1);

	selftest_page->refcount = 1;
	hyp_put_page(&host_s2_pool, virt);
}
#endif