// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>

#include <hyp/fault.h>

#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>

#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)

struct host_mmu host_mmu;

static struct hyp_pool host_s2_pool;

static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
#define current_vm (*this_cpu_ptr(&__current_vm))

static void guest_lock_component(struct pkvm_hyp_vm *vm)
{
	hyp_spin_lock(&vm->lock);
	current_vm = vm;
}

static void guest_unlock_component(struct pkvm_hyp_vm *vm)
{
	current_vm = NULL;
	hyp_spin_unlock(&vm->lock);
}

static void host_lock_component(void)
{
	hyp_spin_lock(&host_mmu.lock);
}

static void host_unlock_component(void)
{
	hyp_spin_unlock(&host_mmu.lock);
}

static void hyp_lock_component(void)
{
	hyp_spin_lock(&pkvm_pgd_lock);
}

static void hyp_unlock_component(void)
{
	hyp_spin_unlock(&pkvm_pgd_lock);
}

#define for_each_hyp_page(__p, __st, __sz)				\
	for (struct hyp_page *__p = hyp_phys_to_page(__st),		\
	     *__e = __p + ((__sz) >> PAGE_SHIFT);			\
	     __p < __e; __p++)

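/*
 * Allocation callbacks for the host stage-2 page-table. These are all backed
 * by the dedicated host_s2_pool set up in prepare_s2_pool().
 */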
static void *host_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));

	hyp_split_page(hyp_virt_to_page(addr));

	/*
	 * The size of concatenated PGDs is always a power of two of PAGE_SIZE,
	 * so there should be no need to free any of the tail pages to make the
	 * allocation exact.
	 */
	WARN_ON(size != (PAGE_SIZE << get_order(size)));

	return addr;
}

static void *host_s2_zalloc_page(void *pool)
{
	return hyp_alloc_pages(pool, 0);
}

static void host_s2_get_page(void *addr)
{
	hyp_get_page(&host_s2_pool, addr);
}

static void host_s2_put_page(void *addr)
{
	hyp_put_page(&host_s2_pool, addr);
}

static void host_s2_free_unlinked_table(void *addr, s8 level)
{
	kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
}

static int prepare_s2_pool(void *pgt_pool_base)
{
	unsigned long nr_pages, pfn;
	int ret;

	pfn = hyp_virt_to_pfn(pgt_pool_base);
	nr_pages = host_s2_pgtable_pages();
	ret = hyp_pool_init(&host_s2_pool, pfn, nr_pages, 0);
	if (ret)
		return ret;

	host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = host_s2_zalloc_pages_exact,
		.zalloc_page = host_s2_zalloc_page,
		.free_unlinked_table = host_s2_free_unlinked_table,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = host_s2_get_page,
		.put_page = host_s2_put_page,
	};

	return 0;
}

static void prepare_host_vtcr(void)
{
	u32 parange, phys_shift;

	/* The host stage 2 is id-mapped, so use parange for T0SZ */
	parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);

	host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
					      id_aa64mmfr1_el1_sys_val, phys_shift);
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);

int kvm_host_prepare_stage2(void *pgt_pool_base)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	int ret;

	prepare_host_vtcr();
	hyp_spin_lock_init(&host_mmu.lock);
	mmu->arch = &host_mmu.arch;

	ret = prepare_s2_pool(pgt_pool_base);
	if (ret)
		return ret;

	ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
					&host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
					host_stage2_force_pte_cb);
	if (ret)
		return ret;

	mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
	mmu->pgt = &host_mmu.pgt;
	atomic64_set(&mmu->vmid.id, 0);

	return 0;
}

static void *guest_s2_zalloc_pages_exact(size_t size)
{
	void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));

	WARN_ON(size != (PAGE_SIZE << get_order(size)));
	hyp_split_page(hyp_virt_to_page(addr));

	return addr;
}

static void guest_s2_free_pages_exact(void *addr, unsigned long size)
{
	u8 order = get_order(size);
	unsigned int i;

	for (i = 0; i < (1 << order); i++)
		hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
}

static void *guest_s2_zalloc_page(void *mc)
{
	struct hyp_page *p;
	void *addr;

	addr = hyp_alloc_pages(&current_vm->pool, 0);
	if (addr)
		return addr;

	addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
	if (!addr)
		return addr;

	memset(addr, 0, PAGE_SIZE);
	p = hyp_virt_to_page(addr);
	p->refcount = 1;
	p->order = 0;

	return addr;
}

static void guest_s2_get_page(void *addr)
{
	hyp_get_page(&current_vm->pool, addr);
}

static void guest_s2_put_page(void *addr)
{
	hyp_put_page(&current_vm->pool, addr);
}

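/*
 * Apply @func to the guest pages covering [va, va + size[. The range is
 * page-aligned and then walked through the hyp fixmap, switching to the
 * larger fixblock mapping whenever a PMD-aligned, PMD-sized chunk remains.
 */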
static void __apply_guest_page(void *va, size_t size,
			       void (*func)(void *addr, size_t size))
{
	size += va - PTR_ALIGN_DOWN(va, PAGE_SIZE);
	va = PTR_ALIGN_DOWN(va, PAGE_SIZE);
	size = PAGE_ALIGN(size);

	while (size) {
		size_t map_size = PAGE_SIZE;
		void *map;

		if (IS_ALIGNED((unsigned long)va, PMD_SIZE) && size >= PMD_SIZE)
			map = hyp_fixblock_map(__hyp_pa(va), &map_size);
		else
			map = hyp_fixmap_map(__hyp_pa(va));

		func(map, map_size);

		if (map_size == PMD_SIZE)
			hyp_fixblock_unmap();
		else
			hyp_fixmap_unmap();

		size -= map_size;
		va += map_size;
	}
}

static void clean_dcache_guest_page(void *va, size_t size)
{
	__apply_guest_page(va, size, __clean_dcache_guest_page);
}

static void invalidate_icache_guest_page(void *va, size_t size)
{
	__apply_guest_page(va, size, __invalidate_icache_guest_page);
}

int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
{
	struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
	unsigned long nr_pages;
	int ret;

	nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
	ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
	if (ret)
		return ret;

	hyp_spin_lock_init(&vm->lock);
	vm->mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact = guest_s2_zalloc_pages_exact,
		.free_pages_exact = guest_s2_free_pages_exact,
		.zalloc_page = guest_s2_zalloc_page,
		.phys_to_virt = hyp_phys_to_virt,
		.virt_to_phys = hyp_virt_to_phys,
		.page_count = hyp_page_count,
		.get_page = guest_s2_get_page,
		.put_page = guest_s2_put_page,
		.dcache_clean_inval_poc = clean_dcache_guest_page,
		.icache_inval_pou = invalidate_icache_guest_page,
	};

	guest_lock_component(vm);
	ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0, NULL);
	guest_unlock_component(vm);
	if (ret)
		return ret;

	vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);

	return 0;
}

void reclaim_pgtable_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
{
	struct hyp_page *page;
	void *addr;

	/* Dump all pgtable pages in the hyp_pool */
	guest_lock_component(vm);
	kvm_pgtable_stage2_destroy(&vm->pgt);
	vm->kvm.arch.mmu.pgd_phys = 0ULL;
	guest_unlock_component(vm);

	/* Drain the hyp_pool into the memcache */
	addr = hyp_alloc_pages(&vm->pool, 0);
	while (addr) {
		page = hyp_virt_to_page(addr);
		page->refcount = 0;
		page->order = 0;
		push_hyp_memcache(mc, addr, hyp_virt_to_phys);
		WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
		addr = hyp_alloc_pages(&vm->pool, 0);
	}
}

int __pkvm_prot_finalize(void)
{
	struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);

	if (params->hcr_el2 & HCR_VM)
		return -EPERM;

	params->vttbr = kvm_get_vttbr(mmu);
	params->vtcr = mmu->vtcr;
	params->hcr_el2 |= HCR_VM;

	/*
	 * The CMO below not only cleans the updated params to the
	 * PoC, but also provides the DSB that ensures ongoing
	 * page-table walks that have started before we trapped to EL2
	 * have completed.
	 */
	kvm_flush_dcache_to_poc(params, sizeof(*params));

	write_sysreg_hcr(params->hcr_el2);
	__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);

	/*
	 * Make sure to have an ISB before the TLB maintenance below but only
	 * when __load_stage2() doesn't include one already.
	 */
	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));

	/* Invalidate stale HCR bits that may be cached in TLBs */
	__tlbi(vmalls12e1);
	dsb(nsh);
	isb();

	return 0;
}

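/*
 * Called back by host_stage2_try() when the host_s2_pool runs dry: tear down
 * all non-memory mappings so their page-table pages can be recycled. They
 * will be re-created lazily when the host next accesses them.
 */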
static int host_stage2_unmap_dev_all(void)
{
	struct kvm_pgtable *pgt = &host_mmu.pgt;
	struct memblock_region *reg;
	u64 addr = 0;
	int i, ret;

	/* Unmap all non-memory regions to recycle the pages */
	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
		reg = &hyp_memory[i];
		ret = kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
		if (ret)
			return ret;
	}
	return kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);
}

struct kvm_mem_range {
	u64 start;
	u64 end;
};

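/*
 * Binary search the sorted hyp_memory array for @addr. On a hit, @range is
 * set to the matching memblock region and that region is returned. On a miss,
 * @range is set to the gap between the neighbouring regions and NULL is
 * returned.
 */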
static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
	int cur, left = 0, right = hyp_memblock_nr;
	struct memblock_region *reg;
	phys_addr_t end;

	range->start = 0;
	range->end = ULONG_MAX;

	/* The list of memblock regions is sorted, binary search it */
	while (left < right) {
		cur = (left + right) >> 1;
		reg = &hyp_memory[cur];
		end = reg->base + reg->size;
		if (addr < reg->base) {
			right = cur;
			range->end = reg->base;
		} else if (addr >= end) {
			left = cur + 1;
			range->start = end;
		} else {
			range->start = reg->base;
			range->end = end;
			return reg;
		}
	}

	return NULL;
}

bool addr_is_memory(phys_addr_t phys)
{
	struct kvm_mem_range range;

	return !!find_mem_range(phys, &range);
}

static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
{
	return range->start <= addr && addr < range->end;
}

static int check_range_allowed_memory(u64 start, u64 end)
{
	struct memblock_region *reg;
	struct kvm_mem_range range;

	/*
	 * Callers can't check the state of a range that overlaps memory and
	 * MMIO regions, so ensure [start, end[ is in the same kvm_mem_range.
	 */
	reg = find_mem_range(start, &range);
	if (!is_in_mem_range(end - 1, &range))
		return -EINVAL;

	if (!reg || reg->flags & MEMBLOCK_NOMAP)
		return -EPERM;

	return 0;
}

static bool range_is_memory(u64 start, u64 end)
{
	struct kvm_mem_range r;

	if (!find_mem_range(start, &r))
		return false;

	return is_in_mem_range(end - 1, &r);
}

static inline int __host_stage2_idmap(u64 start, u64 end,
				      enum kvm_pgtable_prot prot)
{
	return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
				      prot, &host_s2_pool, 0);
}

/*
 * The pool has been provided with enough pages to cover all of memory with
 * page granularity, but it is difficult to know how much of the MMIO range
 * we will need to cover upfront, so we may need to 'recycle' the pages if we
 * run out.
 */
#define host_stage2_try(fn, ...)					\
	({								\
		int __ret;						\
		hyp_assert_lock_held(&host_mmu.lock);			\
		__ret = fn(__VA_ARGS__);				\
		if (__ret == -ENOMEM) {					\
			__ret = host_stage2_unmap_dev_all();		\
			if (!__ret)					\
				__ret = fn(__VA_ARGS__);		\
		}							\
		__ret;							\
	})

static inline bool range_included(struct kvm_mem_range *child,
				  struct kvm_mem_range *parent)
{
	return parent->start <= child->start && child->end <= parent->end;
}

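/*
 * Narrow @range down to a block-mappable range containing @addr. Returns
 * -EAGAIN if a valid mapping already exists at @addr (i.e. the fault has
 * already been handled), or -EPERM if the page is owned by another entity.
 */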
static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
	struct kvm_mem_range cur;
	kvm_pte_t pte;
	s8 level;
	int ret;

	hyp_assert_lock_held(&host_mmu.lock);
	ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
	if (ret)
		return ret;

	if (kvm_pte_valid(pte))
		return -EAGAIN;

	if (pte) {
		WARN_ON(addr_is_memory(addr) &&
			get_host_state(hyp_phys_to_page(addr)) != PKVM_NOPAGE);
		return -EPERM;
	}

	do {
		u64 granule = kvm_granule_size(level);
		cur.start = ALIGN_DOWN(addr, granule);
		cur.end = cur.start + granule;
		level++;
	} while ((level <= KVM_PGTABLE_LAST_LEVEL) &&
			!(kvm_level_supports_block_mapping(level) &&
			  range_included(&cur, range)));

	*range = cur;

	return 0;
}

int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
			     enum kvm_pgtable_prot prot)
{
	return host_stage2_try(__host_stage2_idmap, addr, addr + size, prot);
}

static void __host_update_page_state(phys_addr_t addr, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, addr, size)
		set_host_state(page, state);
}

int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
	int ret;

	if (!range_is_memory(addr, addr + size))
		return -EPERM;

	ret = host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
			      addr, size, &host_s2_pool, owner_id);
	if (ret)
		return ret;

	/* Don't forget to update the vmemmap tracking for the host */
	if (owner_id == PKVM_ID_HOST)
		__host_update_page_state(addr, size, PKVM_PAGE_OWNED);
	else
		__host_update_page_state(addr, size, PKVM_NOPAGE);

	return 0;
}

static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot)
{
	/*
	 * Block mappings must be used with care in the host stage-2 as a
	 * kvm_pgtable_stage2_map() operation targeting a page in the range of
	 * an existing block will delete the block under the assumption that
	 * mappings in the rest of the block range can always be rebuilt lazily.
	 * That assumption is correct for the host stage-2 with RWX mappings
	 * targeting memory or RW mappings targeting MMIO ranges (see
	 * host_stage2_idmap() below which implements some of the host memory
	 * abort logic). However, this is not safe for any other mappings where
	 * the host stage-2 page-table is in fact the only place where this
	 * state is stored. In all those cases, it is safer to use page-level
	 * mappings, hence avoiding losing the state because of side-effects in
	 * kvm_pgtable_stage2_map().
	 */
	if (range_is_memory(addr, end))
		return prot != PKVM_HOST_MEM_PROT;
	else
		return prot != PKVM_HOST_MMIO_PROT;
}

static int host_stage2_idmap(u64 addr)
{
	struct kvm_mem_range range;
	bool is_memory = !!find_mem_range(addr, &range);
	enum kvm_pgtable_prot prot;
	int ret;

	prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;

	host_lock_component();
	ret = host_stage2_adjust_range(addr, &range);
	if (ret)
		goto unlock;

	ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
	host_unlock_component();

	return ret;
}

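/*
 * Handle a host stage-2 fault by lazily (re)creating the identity mapping
 * for the faulting address, at block granularity whenever possible.
 */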
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu_fault_info fault;
	u64 esr, addr;
	int ret = 0;

	esr = read_sysreg_el2(SYS_ESR);
	if (!__get_fault_info(esr, &fault)) {
		/*
		 * We've presumably raced with a page-table change which caused
		 * AT to fail, try again.
		 */
		return;
	}

	/*
	 * Yikes, we couldn't resolve the fault IPA. This should reinject an
	 * abort into the host when we figure out how to do that.
	 */
	BUG_ON(!(fault.hpfar_el2 & HPFAR_EL2_NS));
	addr = FIELD_GET(HPFAR_EL2_FIPA, fault.hpfar_el2) << 12;

	ret = host_stage2_idmap(addr);
	BUG_ON(ret && ret != -EAGAIN);
}

struct check_walk_data {
	enum pkvm_page_state desired;
	enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
};

static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
				      enum kvm_pgtable_walk_flags visit)
{
	struct check_walk_data *d = ctx->arg;

	return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
}

static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
				  struct check_walk_data *data)
{
	struct kvm_pgtable_walker walker = {
		.cb = __check_page_state_visitor,
		.arg = data,
		.flags = KVM_PGTABLE_WALK_LEAF,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

static int __host_check_page_state_range(u64 addr, u64 size,
					 enum pkvm_page_state state)
{
	int ret;

	ret = check_range_allowed_memory(addr, addr + size);
	if (ret)
		return ret;

	hyp_assert_lock_held(&host_mmu.lock);

	for_each_hyp_page(page, addr, size) {
		if (get_host_state(page) != state)
			return -EPERM;
	}

	return 0;
}

static int __host_set_page_state_range(u64 addr, u64 size,
				       enum pkvm_page_state state)
{
	if (get_host_state(hyp_phys_to_page(addr)) == PKVM_NOPAGE) {
		int ret = host_stage2_idmap_locked(addr, size, PKVM_HOST_MEM_PROT);

		if (ret)
			return ret;
	}

	__host_update_page_state(addr, size, state);

	return 0;
}

static void __hyp_set_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, phys, size)
		set_hyp_state(page, state);
}

static int __hyp_check_page_state_range(phys_addr_t phys, u64 size, enum pkvm_page_state state)
{
	for_each_hyp_page(page, phys, size) {
		if (get_hyp_state(page) != state)
			return -EPERM;
	}

	return 0;
}

static enum pkvm_page_state guest_get_page_state(kvm_pte_t pte, u64 addr)
{
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;

	return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
}

static int __guest_check_page_state_range(struct pkvm_hyp_vm *vm, u64 addr,
					  u64 size, enum pkvm_page_state state)
{
	struct check_walk_data d = {
		.desired = state,
		.get_page_state = guest_get_page_state,
	};

	hyp_assert_lock_held(&vm->lock);
	return check_page_state_range(&vm->pgt, addr, size, &d);
}

int __pkvm_host_share_hyp(u64 pfn)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_hyp(u64 pfn)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 virt = (u64)__hyp_va(phys);
	u64 size = PAGE_SIZE;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	if (ret)
		goto unlock;
	if (hyp_page_count((void *)virt)) {
		ret = -EBUSY;
		goto unlock;
	}

	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
	WARN_ON(__host_set_page_state_range(phys, size, PKVM_PAGE_OWNED));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	void *virt = __hyp_va(phys);
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __hyp_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
	WARN_ON(pkvm_create_mappings_locked(virt, virt + size, PAGE_HYP));
	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HYP));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	u64 virt = (u64)__hyp_va(phys);
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (ret)
		goto unlock;
	ret = __host_check_page_state_range(phys, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	__hyp_set_page_state_range(phys, size, PKVM_NOPAGE);
	WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, virt, size) != size);
	WARN_ON(host_stage2_set_owner_locked(phys, size, PKVM_ID_HOST));

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

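/*
 * Pin pages that were previously shared with the hypervisor by taking a
 * reference on each of them. While pinned, the host cannot unshare the pages:
 * __pkvm_host_unshare_hyp() returns -EBUSY.
 */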
int hyp_pin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);
	u64 phys = __hyp_pa(start);
	u64 size = end - start;
	struct hyp_page *p;
	int ret;

	host_lock_component();
	hyp_lock_component();

	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (ret)
		goto unlock;

	ret = __hyp_check_page_state_range(phys, size, PKVM_PAGE_SHARED_BORROWED);
	if (ret)
		goto unlock;

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		p = hyp_virt_to_page(cur);
		hyp_page_ref_inc(p);
		if (p->refcount == 1)
			WARN_ON(pkvm_create_mappings_locked((void *)cur,
							    (void *)cur + PAGE_SIZE,
							    PAGE_HYP));
	}

unlock:
	hyp_unlock_component();
	host_unlock_component();

	return ret;
}

void hyp_unpin_shared_mem(void *from, void *to)
{
	u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
	u64 end = PAGE_ALIGN((u64)to);
	struct hyp_page *p;

	host_lock_component();
	hyp_lock_component();

	for (cur = start; cur < end; cur += PAGE_SIZE) {
		p = hyp_virt_to_page(cur);
		if (p->refcount == 1)
			WARN_ON(kvm_pgtable_hyp_unmap(&pkvm_pgtable, cur, PAGE_SIZE) != PAGE_SIZE);
		hyp_page_ref_dec(p);
	}

	hyp_unlock_component();
	host_unlock_component();
}

int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	int ret;

	host_lock_component();
	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_OWNED);
	if (!ret)
		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	host_unlock_component();

	return ret;
}

int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
{
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 size = PAGE_SIZE * nr_pages;
	int ret;

	host_lock_component();
	ret = __host_check_page_state_range(phys, size, PKVM_PAGE_SHARED_OWNED);
	if (!ret)
		ret = __host_set_page_state_range(phys, size, PKVM_PAGE_OWNED);
	host_unlock_component();

	return ret;
}

static int __guest_check_transition_size(u64 phys, u64 ipa, u64 nr_pages, u64 *size)
{
	size_t block_size;

	if (nr_pages == 1) {
		*size = PAGE_SIZE;
		return 0;
	}

	/* We only support second-to-last level huge mappings */
	block_size = kvm_granule_size(KVM_PGTABLE_LAST_LEVEL - 1);

	if (nr_pages != block_size >> PAGE_SHIFT)
		return -EINVAL;

	if (!IS_ALIGNED(phys | ipa, block_size))
		return -EINVAL;

	*size = block_size;
	return 0;
}

int __pkvm_host_share_guest(u64 pfn, u64 gfn, u64 nr_pages, struct pkvm_hyp_vcpu *vcpu,
			    enum kvm_pgtable_prot prot)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 phys = hyp_pfn_to_phys(pfn);
	u64 ipa = hyp_pfn_to_phys(gfn);
	u64 size;
	int ret;

	if (prot & ~KVM_PGTABLE_PROT_RWX)
		return -EINVAL;

	ret = __guest_check_transition_size(phys, ipa, nr_pages, &size);
	if (ret)
		return ret;

	ret = check_range_allowed_memory(phys, phys + size);
	if (ret)
		return ret;

	host_lock_component();
	guest_lock_component(vm);

	ret = __guest_check_page_state_range(vm, ipa, size, PKVM_NOPAGE);
	if (ret)
		goto unlock;

	for_each_hyp_page(page, phys, size) {
		switch (get_host_state(page)) {
		case PKVM_PAGE_OWNED:
			continue;
		case PKVM_PAGE_SHARED_OWNED:
			if (page->host_share_guest_count == U32_MAX) {
				ret = -EBUSY;
				goto unlock;
			}

			/* Only host to np-guest multi-sharing is tolerated */
			if (page->host_share_guest_count)
				continue;

			fallthrough;
		default:
			ret = -EPERM;
			goto unlock;
		}
	}

	for_each_hyp_page(page, phys, size) {
		set_host_state(page, PKVM_PAGE_SHARED_OWNED);
		page->host_share_guest_count++;
	}

	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, size, phys,
				       pkvm_mkstate(prot, PKVM_PAGE_SHARED_BORROWED),
				       &vcpu->vcpu.arch.pkvm_memcache, 0));

unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}

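/*
 * Check that @ipa is mapped in @vm's stage-2 with a mapping of exactly @size
 * in the PKVM_PAGE_SHARED_BORROWED state, and that the host still tracks the
 * backing pages as shared with a non-zero host_share_guest_count. On success,
 * the physical address is returned in @__phys.
 */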
static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ipa, u64 size)
{
	enum pkvm_page_state state;
	kvm_pte_t pte;
	u64 phys;
	s8 level;
	int ret;

	ret = kvm_pgtable_get_leaf(&vm->pgt, ipa, &pte, &level);
	if (ret)
		return ret;
	if (!kvm_pte_valid(pte))
		return -ENOENT;
	if (kvm_granule_size(level) != size)
		return -E2BIG;

	state = guest_get_page_state(pte, ipa);
	if (state != PKVM_PAGE_SHARED_BORROWED)
		return -EPERM;

	phys = kvm_pte_to_phys(pte);
	ret = check_range_allowed_memory(phys, phys + size);
	if (WARN_ON(ret))
		return ret;

	for_each_hyp_page(page, phys, size) {
		if (get_host_state(page) != PKVM_PAGE_SHARED_OWNED)
			return -EPERM;
		if (WARN_ON(!page->host_share_guest_count))
			return -EINVAL;
	}

	*__phys = phys;

	return 0;
}

int __pkvm_host_unshare_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
	u64 ipa = hyp_pfn_to_phys(gfn);
	u64 size, phys;
	int ret;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	host_lock_component();
	guest_lock_component(vm);

	ret = __check_host_shared_guest(vm, &phys, ipa, size);
	if (ret)
		goto unlock;

	ret = kvm_pgtable_stage2_unmap(&vm->pgt, ipa, size);
	if (ret)
		goto unlock;

	for_each_hyp_page(page, phys, size) {
		/* __check_host_shared_guest() protects against underflow */
		page->host_share_guest_count--;
		if (!page->host_share_guest_count)
			set_host_state(page, PKVM_PAGE_OWNED);
	}

unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}

static void assert_host_shared_guest(struct pkvm_hyp_vm *vm, u64 ipa, u64 size)
{
	u64 phys;
	int ret;

	if (!IS_ENABLED(CONFIG_NVHE_EL2_DEBUG))
		return;

	host_lock_component();
	guest_lock_component(vm);

	ret = __check_host_shared_guest(vm, &phys, ipa, size);

	guest_unlock_component(vm);
	host_unlock_component();

	WARN_ON(ret && ret != -ENOENT);
}

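/*
 * The stage-2 permission/age helpers below only operate on non-protected VMs,
 * and expect the targeted pages to have been shared with the guest by the
 * host; the latter is asserted under CONFIG_NVHE_EL2_DEBUG via
 * assert_host_shared_guest().
 */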
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	if (prot & ~KVM_PGTABLE_PROT_RWX)
		return -EINVAL;

	assert_host_shared_guest(vm, ipa, PAGE_SIZE);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_wrprotect_guest(u64 gfn, u64 nr_pages, struct pkvm_hyp_vm *vm)
{
	u64 size, ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	assert_host_shared_guest(vm, ipa, size);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_wrprotect(&vm->pgt, ipa, size);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_test_clear_young_guest(u64 gfn, u64 nr_pages, bool mkold, struct pkvm_hyp_vm *vm)
{
	u64 size, ipa = hyp_pfn_to_phys(gfn);
	int ret;

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	ret = __guest_check_transition_size(0, ipa, nr_pages, &size);
	if (ret)
		return ret;

	assert_host_shared_guest(vm, ipa, size);
	guest_lock_component(vm);
	ret = kvm_pgtable_stage2_test_clear_young(&vm->pgt, ipa, size, mkold);
	guest_unlock_component(vm);

	return ret;
}

int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 ipa = hyp_pfn_to_phys(gfn);

	if (pkvm_hyp_vm_is_protected(vm))
		return -EPERM;

	assert_host_shared_guest(vm, ipa, PAGE_SIZE);
	guest_lock_component(vm);
	kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
	guest_unlock_component(vm);

	return 0;
}

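/*
 * Ownership transition selftest: exercise the share/unshare/donate paths
 * between the host, the hypervisor, FF-A and a dummy guest on a single page,
 * checking the tracked page states after each transition.
 */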
#ifdef CONFIG_NVHE_EL2_DEBUG
struct pkvm_expected_state {
	enum pkvm_page_state host;
	enum pkvm_page_state hyp;
	enum pkvm_page_state guest[2]; /* [ gfn, gfn + 1 ] */
};

static struct pkvm_expected_state selftest_state;
static struct hyp_page *selftest_page;

static struct pkvm_hyp_vm selftest_vm = {
	.kvm = {
		.arch = {
			.mmu = {
				.arch = &selftest_vm.kvm.arch,
				.pgt = &selftest_vm.pgt,
			},
		},
	},
};

static struct pkvm_hyp_vcpu selftest_vcpu = {
	.vcpu = {
		.arch = {
			.hw_mmu = &selftest_vm.kvm.arch.mmu,
		},
		.kvm = &selftest_vm.kvm,
	},
};

static void init_selftest_vm(void *virt)
{
	struct hyp_page *p = hyp_virt_to_page(virt);
	int i;

	selftest_vm.kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
	WARN_ON(kvm_guest_prepare_stage2(&selftest_vm, virt));

	for (i = 0; i < pkvm_selftest_pages(); i++) {
		if (p[i].refcount)
			continue;
		p[i].refcount = 1;
		hyp_put_page(&selftest_vm.pool, hyp_page_to_virt(&p[i]));
	}
}

static u64 selftest_ipa(void)
{
	return BIT(selftest_vm.pgt.ia_bits - 1);
}

static void assert_page_state(void)
{
	void *virt = hyp_page_to_virt(selftest_page);
	u64 size = PAGE_SIZE << selftest_page->order;
	struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
	u64 phys = hyp_virt_to_phys(virt);
	u64 ipa[2] = { selftest_ipa(), selftest_ipa() + PAGE_SIZE };
	struct pkvm_hyp_vm *vm;

	vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);

	host_lock_component();
	WARN_ON(__host_check_page_state_range(phys, size, selftest_state.host));
	host_unlock_component();

	hyp_lock_component();
	WARN_ON(__hyp_check_page_state_range(phys, size, selftest_state.hyp));
	hyp_unlock_component();

	guest_lock_component(&selftest_vm);
	WARN_ON(__guest_check_page_state_range(vm, ipa[0], size, selftest_state.guest[0]));
	WARN_ON(__guest_check_page_state_range(vm, ipa[1], size, selftest_state.guest[1]));
	guest_unlock_component(&selftest_vm);
}

#define assert_transition_res(res, fn, ...)				\
	do {								\
		WARN_ON(fn(__VA_ARGS__) != res);			\
		assert_page_state();					\
	} while (0)

void pkvm_ownership_selftest(void *base)
{
	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_RWX;
	void *virt = hyp_alloc_pages(&host_s2_pool, 0);
	struct pkvm_hyp_vcpu *vcpu = &selftest_vcpu;
	struct pkvm_hyp_vm *vm = &selftest_vm;
	u64 phys, size, pfn, gfn;

	WARN_ON(!virt);
	selftest_page = hyp_virt_to_page(virt);
	selftest_page->refcount = 0;
	init_selftest_vm(base);

	size = PAGE_SIZE << selftest_page->order;
	phys = hyp_virt_to_phys(virt);
	pfn = hyp_phys_to_pfn(phys);
	gfn = hyp_phys_to_pfn(selftest_ipa());

	selftest_state.host = PKVM_NOPAGE;
	selftest_state.hyp = PKVM_PAGE_OWNED;
	selftest_state.guest[0] = selftest_state.guest[1] = PKVM_NOPAGE;
	assert_page_state();
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.hyp = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);

	assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
	assert_transition_res(0, hyp_pin_shared_mem, virt, virt + size);
	hyp_unpin_shared_mem(virt, virt + size);
	WARN_ON(hyp_page_count(virt) != 1);
	assert_transition_res(-EBUSY, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);

	hyp_unpin_shared_mem(virt, virt + size);
	assert_page_state();
	WARN_ON(hyp_page_count(virt));

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_unshare_hyp, pfn);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-ENOENT, __pkvm_host_unshare_guest, gfn, 1, vm);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);

	selftest_state.host = PKVM_PAGE_OWNED;
	selftest_state.hyp = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_unshare_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_unshare_ffa, pfn, 1);

	selftest_state.host = PKVM_PAGE_SHARED_OWNED;
	selftest_state.guest[0] = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-EPERM, __pkvm_host_share_guest, pfn, gfn, 1, vcpu, prot);
	assert_transition_res(-EPERM, __pkvm_host_share_ffa, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_donate_hyp, pfn, 1);
	assert_transition_res(-EPERM, __pkvm_host_share_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_host_unshare_hyp, pfn);
	assert_transition_res(-EPERM, __pkvm_hyp_donate_host, pfn, 1);
	assert_transition_res(-EPERM, hyp_pin_shared_mem, virt, virt + size);

	selftest_state.guest[1] = PKVM_PAGE_SHARED_BORROWED;
	assert_transition_res(0, __pkvm_host_share_guest, pfn, gfn + 1, 1, vcpu, prot);
	WARN_ON(hyp_virt_to_page(virt)->host_share_guest_count != 2);

	selftest_state.guest[0] = PKVM_NOPAGE;
	assert_transition_res(0, __pkvm_host_unshare_guest, gfn, 1, vm);

	selftest_state.guest[1] = PKVM_NOPAGE;
	selftest_state.host = PKVM_PAGE_OWNED;
	assert_transition_res(0, __pkvm_host_unshare_guest, gfn + 1, 1, vm);

	selftest_state.host = PKVM_NOPAGE;
	selftest_state.hyp = PKVM_PAGE_OWNED;
	assert_transition_res(0, __pkvm_host_donate_hyp, pfn, 1);

	selftest_page->refcount = 1;
	hyp_put_page(&host_s2_pool, virt);
}
#endif