/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#ifndef __KVM_NVHE_MEM_PROTECT__
#define __KVM_NVHE_MEM_PROTECT__
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/virt.h>
#include <nvhe/memory.h>
#include <nvhe/pkvm.h>
#include <nvhe/spinlock.h>

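/*
 * Hypervisor-side view of the host's stage-2 MMU: the kvm_arch instance
 * backing it, its page tables and memory-management ops, and the
 * spinlock serializing updates to them.
 */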
struct host_mmu {
	struct kvm_arch arch;
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	hyp_spinlock_t lock;
};
extern struct host_mmu host_mmu;

/*
 * Identifiers for the components that can own or share pages; their
 * order corresponds to the page-table locking order.
 */
enum pkvm_component_id {
	PKVM_ID_HOST,
	PKVM_ID_HYP,
	PKVM_ID_FFA,
};

extern unsigned long hyp_nr_cpus;

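/*
 * Hypercall implementations. __pkvm_prot_finalize() brings up the host
 * stage-2; the remaining calls transition page ownership and sharing
 * state between the host, the hypervisor, FF-A and protected guests.
 */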
int __pkvm_prot_finalize(void);
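/*
 * A minimal sketch of the intended share/unshare pairing; the real
 * callers are the hyp-main hypercall handlers, and pfn is assumed to
 * refer to a host-owned page:
 *
 *	if (!__pkvm_host_share_hyp(pfn)) {
 *		... access the page at EL2 ...
 *		WARN_ON(__pkvm_host_unshare_hyp(pfn));
 *	}
 */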
int __pkvm_host_share_hyp(u64 pfn);
int __pkvm_host_unshare_hyp(u64 pfn);
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages);
int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages);
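/*
 * Guest stage-2 transitions: map and unmap host pages at (gfn, pfn) in
 * a protected guest's stage-2, and adjust the permission or access
 * state of existing mappings, on behalf of the given vCPU or VM.
 */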
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu,
			    enum kvm_pgtable_prot prot);
int __pkvm_host_unshare_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_pgtable_prot prot);
int __pkvm_host_wrprotect_guest(u64 gfn, struct pkvm_hyp_vm *hyp_vm);
int __pkvm_host_test_clear_young_guest(u64 gfn, bool mkold, struct pkvm_hyp_vm *vm);
int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu);

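/*
 * Host stage-2 setup and maintenance. The _locked variants expect the
 * caller to already hold host_mmu.lock.
 */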
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id);
int kvm_host_prepare_stage2(void *pgt_pool_base);
int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd);
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);

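/*
 * Pinning prevents pages shared with the hypervisor from being
 * unshared while they are in use. reclaim_guest_pages() returns a dying
 * guest's pages to the host, and refill_memcache() tops up a hyp
 * memcache with pages donated through the host's memcache.
 */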
int hyp_pin_shared_mem(void *from, void *to);
void hyp_unpin_shared_mem(void *from, void *to);
void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc);
int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
		    struct kvm_hyp_memcache *host_mc);

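/*
 * Load the host's stage-2 context on this CPU. Once protected mode is
 * initialized, the host runs under its own stage-2 tables; until then
 * it has no stage-2 of its own, so VTTBR_EL2 is simply zeroed (VMID 0).
 */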
static __always_inline void __load_host_stage2(void)
{
	if (static_branch_likely(&kvm_protected_mode_initialized))
		__load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
	else
		write_sysreg(0, vttbr_el2);
}
#endif /* __KVM_NVHE_MEM_PROTECT__ */