/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2021 Google LLC
 * Author: Fuad Tabba <tabba@google.com>
 */

#ifndef __ARM64_KVM_NVHE_PKVM_H__
#define __ARM64_KVM_NVHE_PKVM_H__

#include <asm/kvm_pkvm.h>

#include <nvhe/gfp.h>
#include <nvhe/spinlock.h>

/*
 * Holds the relevant data for maintaining the vcpu state completely at hyp.
 */
struct pkvm_hyp_vcpu {
	struct kvm_vcpu vcpu;

	/* Backpointer to the host's (untrusted) vCPU instance. */
	struct kvm_vcpu *host_vcpu;
};

/*
 * Holds the relevant data for running a protected vm.
 */
struct pkvm_hyp_vm {
	struct kvm kvm;

	/* Backpointer to the host's (untrusted) KVM instance. */
	struct kvm *host_kvm;

	/* The guest's stage-2 page-table managed by the hypervisor. */
	struct kvm_pgtable pgt;
	struct kvm_pgtable_mm_ops mm_ops;
	struct hyp_pool pool;
	hyp_spinlock_t lock;

	/*
	 * The number of vcpus initialized and ready to run.
	 * Modifying this is protected by 'vm_table_lock'.
	 */
	unsigned int nr_vcpus;

	/* Array of the hyp vCPU structures for this VM. */
	struct pkvm_hyp_vcpu *vcpus[];
};

static inline struct pkvm_hyp_vm *
pkvm_hyp_vcpu_to_hyp_vm(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return container_of(hyp_vcpu->vcpu.kvm, struct pkvm_hyp_vm, kvm);
}

static inline bool pkvm_hyp_vcpu_is_protected(struct pkvm_hyp_vcpu *hyp_vcpu)
{
	return vcpu_is_protected(&hyp_vcpu->vcpu);
}

void pkvm_hyp_vm_table_init(void *tbl);

int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
		   unsigned long pgd_hva);
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
		     unsigned long vcpu_hva);
int __pkvm_teardown_vm(pkvm_handle_t handle);

struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
					 unsigned int vcpu_idx);
void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu);

#endif /* __ARM64_KVM_NVHE_PKVM_H__ */
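
/*
 * Illustrative sketch (not part of the upstream header): how a caller at
 * hyp might pair pkvm_load_hyp_vcpu() with pkvm_put_hyp_vcpu() around work
 * on a hyp vCPU. Only the two load/put declarations above come from this
 * header; the function name, the <linux/errno.h> include, the -ENOENT
 * error choice, and the assumption that a failed lookup returns NULL are
 * hypothetical and kept compiled-out below.
 */
#if 0
#include <linux/errno.h>

static int example_with_loaded_vcpu(pkvm_handle_t handle,
				    unsigned int vcpu_idx)
{
	struct pkvm_hyp_vcpu *hyp_vcpu;

	/* Look up the hyp vCPU for this VM handle and vCPU index. */
	hyp_vcpu = pkvm_load_hyp_vcpu(handle, vcpu_idx);
	if (!hyp_vcpu)
		return -ENOENT;

	/* ... operate on hyp_vcpu->vcpu / hyp_vcpu->host_vcpu here ... */

	/* Release the hyp vCPU once done with it. */
	pkvm_put_hyp_vcpu(hyp_vcpu);

	return 0;
}
#endif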