/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HYP_MEMORY_H
#define __KVM_HYP_MEMORY_H

#include <asm/kvm_mmu.h>
#include <asm/page.h>

#include <linux/types.h>

/*
 * Bits 0-1 are used to encode the memory ownership state of each page from the
 * point of view of a pKVM "component" (host, hyp, guest, ... see enum
 * pkvm_component_id):
 *   00: The page is owned and exclusively accessible by the component;
 *   01: The page is owned and accessible by the component, but is also
 *       accessible by another component;
 *   10: The page is accessible but not owned by the component;
 * The storage of this state depends on the component: either in the
 * hyp_vmemmap for the host and hyp states or in PTE software bits for guests.
 */
enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0ULL,
	PKVM_PAGE_SHARED_OWNED		= BIT(0),
	PKVM_PAGE_SHARED_BORROWED	= BIT(1),

	/*
	 * 'Meta-states' are not stored directly in PTE SW bits for guest
	 * states, but inferred from the context (e.g. invalid PTE entries).
	 * For the host and hyp, meta-states are stored directly in the
	 * struct hyp_page.
	 */
	PKVM_NOPAGE			= BIT(0) | BIT(1),
};
#define PKVM_PAGE_STATE_MASK		(BIT(0) | BIT(1))

#define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	prot &= ~PKVM_PAGE_STATE_PROT_MASK;
	prot |= FIELD_PREP(PKVM_PAGE_STATE_PROT_MASK, state);
	return prot;
}

static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
	return FIELD_GET(PKVM_PAGE_STATE_PROT_MASK, prot);
}
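
/*
 * Illustrative sketch (not part of the original header): pkvm_mkstate() and
 * pkvm_getstate() round-trip a page state through the two PTE software bits
 * named by PKVM_PAGE_STATE_PROT_MASK. The helper name below is hypothetical.
 */
static inline bool pkvm_state_roundtrip_example(enum kvm_pgtable_prot prot)
{
	/* Encode "owned but shared" into SW0/SW1, then read it back. */
	prot = pkvm_mkstate(prot, PKVM_PAGE_SHARED_OWNED);
	return pkvm_getstate(prot) == PKVM_PAGE_SHARED_OWNED;
}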

struct hyp_page {
	u16 refcount;
	u8 order;

	/* Host state. Guarded by the host stage-2 lock. */
	unsigned __host_state : 4;

	/*
	 * Complement of the hyp state. Guarded by the hyp stage-1 lock. We use
	 * the complement so that the initial 0 in __hyp_state_comp (due to the
	 * entire vmemmap starting off zeroed) encodes PKVM_NOPAGE.
	 */
	unsigned __hyp_state_comp : 4;

	u32 host_share_guest_count;
};
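
/*
 * Layout check (illustrative note, not in the original header): 16 bits of
 * refcount + 8 bits of order + 4 bits of __host_state + 4 bits of
 * __hyp_state_comp + 32 bits of host_share_guest_count pack into exactly
 * 64 bits, which is what the BUILD_BUG_ON() in hyp_phys_to_page() below
 * enforces.
 */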

extern u64 __hyp_vmemmap;
#define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap)

#define __hyp_va(phys)	((void *)((phys_addr_t)(phys) - hyp_physvirt_offset))

static inline void *hyp_phys_to_virt(phys_addr_t phys)
{
	return __hyp_va(phys);
}

static inline phys_addr_t hyp_virt_to_phys(void *addr)
{
	return __hyp_pa(addr);
}

#define hyp_phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define hyp_pfn_to_phys(pfn)	((phys_addr_t)((pfn) << PAGE_SHIFT))

static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
{
	BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64));
	return &hyp_vmemmap[hyp_phys_to_pfn(phys)];
}

#define hyp_virt_to_page(virt)	hyp_phys_to_page(__hyp_pa(virt))
#define hyp_virt_to_pfn(virt)	hyp_phys_to_pfn(__hyp_pa(virt))

#define hyp_page_to_pfn(page)	((struct hyp_page *)(page) - hyp_vmemmap)
#define hyp_page_to_phys(page)  hyp_pfn_to_phys((hyp_page_to_pfn(page)))
#define hyp_page_to_virt(page)	__hyp_va(hyp_page_to_phys(page))
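
/*
 * Illustrative sketch (not part of the original header): the conversion
 * macros above compose, so a physical address can be round-tripped through
 * its vmemmap entry back to its page-aligned value. The helper name below
 * is hypothetical.
 */
static inline bool hyp_phys_roundtrip_example(phys_addr_t phys)
{
	struct hyp_page *p = hyp_phys_to_page(phys);

	/* page -> pfn -> phys recovers the page-aligned physical address. */
	return hyp_page_to_phys(p) == (phys & PAGE_MASK);
}
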
static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
{
	return p->__host_state;
}

static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
{
	p->__host_state = state;
}

static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
{
	return p->__hyp_state_comp ^ PKVM_PAGE_STATE_MASK;
}

static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
{
	p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_MASK;
}
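
/*
 * Worked example (illustrative, not in the original header): because the hyp
 * state is stored complemented, a freshly zeroed vmemmap entry decodes as
 * "no page":
 *
 *   get_hyp_state(): 0 ^ PKVM_PAGE_STATE_MASK == 0b11 == PKVM_NOPAGE
 *
 * and storing PKVM_NOPAGE writes 0b11 ^ 0b11 == 0 back, matching the zeroed
 * initial state described in struct hyp_page above.
 */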

/*
 * Refcounting for 'struct hyp_page'.
 * hyp_pool::lock must be held if atomic access to the refcount is required.
 */
static inline int hyp_page_count(void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	return p->refcount;
}

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	BUG_ON(p->refcount == USHRT_MAX);
	p->refcount++;
}

static inline void hyp_page_ref_dec(struct hyp_page *p)
{
	BUG_ON(!p->refcount);
	p->refcount--;
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	hyp_page_ref_dec(p);
	return (p->refcount == 0);
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	BUG_ON(p->refcount);
	p->refcount = 1;
}
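
/*
 * Illustrative usage sketch (not part of the original header): a typical
 * put path over the helpers above. The function name is hypothetical, and
 * the caller is assumed to hold the relevant hyp_pool::lock.
 */
static inline bool hyp_page_put_example(struct hyp_page *p)
{
	if (hyp_page_ref_dec_and_test(p)) {
		/* Last reference dropped: the caller would return the page to its pool. */
		return true;
	}
	return false;
}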
#endif /* __KVM_HYP_MEMORY_H */