xref: /linux/arch/arm64/kvm/hyp/include/nvhe/memory.h (revision 5bae7bc6360a7297e0be2c37017fe863b965646d)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #ifndef __KVM_HYP_MEMORY_H
3 #define __KVM_HYP_MEMORY_H
4 
5 #include <asm/kvm_mmu.h>
6 #include <asm/page.h>
7 
8 #include <linux/types.h>
9 
10 /*
11  * Bits 0-1 are used to encode the memory ownership state of each page from the
12  * point of view of a pKVM "component" (host, hyp, guest, ... see enum
13  * pkvm_component_id):
14  *   00: The page is owned and exclusively accessible by the component;
15  *   01: The page is owned and accessible by the component, but is also
16  *       accessible by another component;
17  *   10: The page is accessible but not owned by the component;
18  * The storage of this state depends on the component: either in the
19  * hyp_vmemmap for the host and hyp states or in PTE software bits for guests.
20  */
21 enum pkvm_page_state {
22 	PKVM_PAGE_OWNED			= 0ULL,
23 	PKVM_PAGE_SHARED_OWNED		= BIT(0),
24 	PKVM_PAGE_SHARED_BORROWED	= BIT(1),
25 
26 	/*
27 	 * 'Meta-states' are not stored directly in PTE SW bits for guest
28 	 * states, but inferred from the context (e.g. invalid PTE entries).
29 	 * For the host and hyp, meta-states are stored directly in the
30 	 * struct hyp_page.
31 	 */
32 	PKVM_NOPAGE			= BIT(0) | BIT(1),
33 
34 	/*
35 	 * 'Meta-states' which aren't encoded directly in the PTE's SW bits (or
36 	 * the hyp_vmemmap entry for the host)
37 	 */
38 	PKVM_POISON			= BIT(2),
39 };
40 #define PKVM_PAGE_STATE_VMEMMAP_MASK	(BIT(0) | BIT(1))
41 
42 #define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
43 static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
44 						 enum pkvm_page_state state)
45 {
46 	prot &= ~PKVM_PAGE_STATE_PROT_MASK;
47 	prot |= FIELD_PREP(PKVM_PAGE_STATE_PROT_MASK, state);
48 	return prot;
49 }
50 
51 static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
52 {
53 	return FIELD_GET(PKVM_PAGE_STATE_PROT_MASK, prot);
54 }
55 
56 struct hyp_page {
57 	u16 refcount;
58 	u8 order;
59 
60 	/* Host state. Guarded by the host stage-2 lock. */
61 	unsigned __host_state : 4;
62 
63 	/*
64 	 * Complement of the hyp state. Guarded by the hyp stage-1 lock. We use
65 	 * the complement so that the initial 0 in __hyp_state_comp (due to the
66 	 * entire vmemmap starting off zeroed) encodes PKVM_NOPAGE.
67 	 */
68 	unsigned __hyp_state_comp : 4;
69 
70 	u32 host_share_guest_count;
71 };
72 
73 extern u64 __hyp_vmemmap;
74 #define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap)
75 
76 #define __hyp_va(phys)	((void *)((phys_addr_t)(phys) - hyp_physvirt_offset))
77 
78 static inline void *hyp_phys_to_virt(phys_addr_t phys)
79 {
80 	return __hyp_va(phys);
81 }
82 
83 static inline phys_addr_t hyp_virt_to_phys(void *addr)
84 {
85 	return __hyp_pa(addr);
86 }
87 
88 #define hyp_phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
89 #define hyp_pfn_to_phys(pfn)	((phys_addr_t)((pfn) << PAGE_SHIFT))
90 
91 static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
92 {
93 	BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64));
94 	return &hyp_vmemmap[hyp_phys_to_pfn(phys)];
95 }
96 
97 #define hyp_virt_to_page(virt)	hyp_phys_to_page(__hyp_pa(virt))
98 #define hyp_virt_to_pfn(virt)	hyp_phys_to_pfn(__hyp_pa(virt))
99 
100 #define hyp_page_to_pfn(page)	((struct hyp_page *)(page) - hyp_vmemmap)
101 #define hyp_page_to_phys(page)  hyp_pfn_to_phys((hyp_page_to_pfn(page)))
102 #define hyp_page_to_virt(page)	__hyp_va(hyp_page_to_phys(page))
103 #define hyp_page_to_pool(page)	(((struct hyp_page *)page)->pool)
104 
105 static inline enum pkvm_page_state get_host_state(struct hyp_page *p)
106 {
107 	return p->__host_state;
108 }
109 
110 static inline void set_host_state(struct hyp_page *p, enum pkvm_page_state state)
111 {
112 	p->__host_state = state;
113 }
114 
115 static inline enum pkvm_page_state get_hyp_state(struct hyp_page *p)
116 {
117 	return p->__hyp_state_comp ^ PKVM_PAGE_STATE_VMEMMAP_MASK;
118 }
119 
120 static inline void set_hyp_state(struct hyp_page *p, enum pkvm_page_state state)
121 {
122 	p->__hyp_state_comp = state ^ PKVM_PAGE_STATE_VMEMMAP_MASK;
123 }
124 
125 /*
126  * Refcounting for 'struct hyp_page'.
127  * hyp_pool::lock must be held if atomic access to the refcount is required.
128  */
129 static inline int hyp_page_count(void *addr)
130 {
131 	struct hyp_page *p = hyp_virt_to_page(addr);
132 
133 	return p->refcount;
134 }
135 
136 static inline void hyp_page_ref_inc(struct hyp_page *p)
137 {
138 	BUG_ON(p->refcount == USHRT_MAX);
139 	p->refcount++;
140 }
141 
142 static inline void hyp_page_ref_dec(struct hyp_page *p)
143 {
144 	BUG_ON(!p->refcount);
145 	p->refcount--;
146 }
147 
148 static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
149 {
150 	hyp_page_ref_dec(p);
151 	return (p->refcount == 0);
152 }
153 
154 static inline void hyp_set_page_refcounted(struct hyp_page *p)
155 {
156 	BUG_ON(p->refcount);
157 	p->refcount = 1;
158 }
159 #endif /* __KVM_HYP_MEMORY_H */
160