/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HYP_MEMORY_H
#define __KVM_HYP_MEMORY_H

#include <asm/kvm_mmu.h>
#include <asm/page.h>

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/limits.h>
#include <linux/types.h>

/*
 * Bits 0-1 are reserved to track the memory ownership state of each page:
 *   00: The page is owned exclusively by the page-table owner.
 *   01: The page is owned by the page-table owner, but is shared
 *       with another entity.
 *   10: The page is shared with, but not owned by the page-table owner.
 *   11: Reserved for future use (lending).
 */
enum pkvm_page_state {
	PKVM_PAGE_OWNED			= 0ULL,
	PKVM_PAGE_SHARED_OWNED		= BIT(0),
	PKVM_PAGE_SHARED_BORROWED	= BIT(1),
	__PKVM_PAGE_RESERVED		= BIT(0) | BIT(1),

	/* Meta-states which aren't encoded directly in the PTE's SW bits */
	PKVM_NOPAGE			= BIT(2),
};
#define PKVM_PAGE_META_STATES_MASK	(~__PKVM_PAGE_RESERVED)

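/*
 * Illustrative sketch (editor's addition; 'pkvm_demo_is_meta_state' is a
 * hypothetical helper, not upstream API): a meta-state is any state that
 * sets bits outside the two PTE SW bits, e.g. PKVM_NOPAGE.
 */
static inline bool pkvm_demo_is_meta_state(enum pkvm_page_state state)
{
	return !!(state & PKVM_PAGE_META_STATES_MASK);
}
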
#define PKVM_PAGE_STATE_PROT_MASK	(KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
static inline enum kvm_pgtable_prot pkvm_mkstate(enum kvm_pgtable_prot prot,
						 enum pkvm_page_state state)
{
	prot &= ~PKVM_PAGE_STATE_PROT_MASK;
	prot |= FIELD_PREP(PKVM_PAGE_STATE_PROT_MASK, state);
	return prot;
}

static inline enum pkvm_page_state pkvm_getstate(enum kvm_pgtable_prot prot)
{
	return FIELD_GET(PKVM_PAGE_STATE_PROT_MASK, prot);
}

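/*
 * Usage sketch (editor's addition; 'pkvm_demo_state_roundtrips' is a
 * hypothetical name): pkvm_mkstate() rewrites only the SW0/SW1 bits, so
 * a state stored with it reads back unchanged via pkvm_getstate() while
 * the remaining prot bits are preserved.
 */
static inline bool pkvm_demo_state_roundtrips(enum kvm_pgtable_prot prot)
{
	prot = pkvm_mkstate(prot, PKVM_PAGE_SHARED_OWNED);
	return pkvm_getstate(prot) == PKVM_PAGE_SHARED_OWNED;
}
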
struct hyp_page {
	u16 refcount;
	u8 order;

	/* Host (non-meta) state. Guarded by the host stage-2 lock. */
	enum pkvm_page_state host_state : 8;

	u32 host_share_guest_count;
};

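/*
 * Layout note (editor's addition): the fields above pack into exactly
 * eight bytes (2 + 1 + 1 + 4), which hyp_phys_to_page() below asserts
 * with BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64)); the
 * hypervisor vmemmap therefore costs one u64 per page it describes.
 */
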
extern u64 __hyp_vmemmap;
#define hyp_vmemmap ((struct hyp_page *)__hyp_vmemmap)

#define __hyp_va(phys)	((void *)((phys_addr_t)(phys) - hyp_physvirt_offset))

static inline void *hyp_phys_to_virt(phys_addr_t phys)
{
	return __hyp_va(phys);
}

static inline phys_addr_t hyp_virt_to_phys(void *addr)
{
	return __hyp_pa(addr);
}

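/*
 * Sketch (editor's addition; 'hyp_demo_va_pa_roundtrip' is hypothetical):
 * hyp VAs differ from PAs by the constant hyp_physvirt_offset, so the
 * two conversions above are exact inverses of each other.
 */
static inline bool hyp_demo_va_pa_roundtrip(phys_addr_t phys)
{
	return hyp_virt_to_phys(hyp_phys_to_virt(phys)) == phys;
}
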
#define hyp_phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define hyp_pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)

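/*
 * Sketch (editor's addition; 'hyp_demo_page_align' is hypothetical):
 * a phys -> pfn -> phys round trip drops the sub-page offset, i.e. it
 * rounds the address down to its page boundary.
 */
static inline phys_addr_t hyp_demo_page_align(phys_addr_t phys)
{
	return hyp_pfn_to_phys(hyp_phys_to_pfn(phys));	/* == phys & PAGE_MASK */
}
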
static inline struct hyp_page *hyp_phys_to_page(phys_addr_t phys)
{
	BUILD_BUG_ON(sizeof(struct hyp_page) != sizeof(u64));
	return &hyp_vmemmap[hyp_phys_to_pfn(phys)];
}

#define hyp_virt_to_page(virt)	hyp_phys_to_page(__hyp_pa(virt))
#define hyp_virt_to_pfn(virt)	hyp_phys_to_pfn(__hyp_pa(virt))

#define hyp_page_to_pfn(page)	((struct hyp_page *)(page) - hyp_vmemmap)
#define hyp_page_to_phys(page)	hyp_pfn_to_phys(hyp_page_to_pfn(page))
#define hyp_page_to_virt(page)	__hyp_va(hyp_page_to_phys(page))

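/*
 * Sketch (editor's addition; 'hyp_demo_page_roundtrip' is hypothetical):
 * the vmemmap is a flat array holding one struct hyp_page per pfn, so
 * page <-> phys conversion is plain pointer arithmetic and a round trip
 * lands back on the page-aligned address.
 */
static inline bool hyp_demo_page_roundtrip(phys_addr_t phys)
{
	struct hyp_page *p = hyp_phys_to_page(phys);

	return hyp_page_to_phys(p) == (phys & PAGE_MASK);
}
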
/*
 * Refcounting for 'struct hyp_page'.
 * hyp_pool::lock must be held if atomic access to the refcount is required.
 */
static inline int hyp_page_count(void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	return p->refcount;
}

static inline void hyp_page_ref_inc(struct hyp_page *p)
{
	BUG_ON(p->refcount == USHRT_MAX);
	p->refcount++;
}

static inline void hyp_page_ref_dec(struct hyp_page *p)
{
	BUG_ON(!p->refcount);
	p->refcount--;
}

static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
{
	hyp_page_ref_dec(p);
	return (p->refcount == 0);
}

static inline void hyp_set_page_refcounted(struct hyp_page *p)
{
	BUG_ON(p->refcount);
	p->refcount = 1;
}
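
/*
 * Usage sketch (editor's addition; 'hyp_demo_get_put' is hypothetical):
 * a freshly allocated page starts at refcount 1 via
 * hyp_set_page_refcounted(); transient users pair hyp_page_ref_inc()
 * with hyp_page_ref_dec_and_test(), observing the locking rule in the
 * refcounting comment above.
 */
static inline bool hyp_demo_get_put(struct hyp_page *p)
{
	hyp_page_ref_inc(p);
	/* ... use the page while the reference is held ... */
	return hyp_page_ref_dec_and_test(p);	/* true iff last reference */
}
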
#endif /* __KVM_HYP_MEMORY_H */