xref: /linux/arch/arm64/kvm/hyp/nvhe/page_alloc.c (revision 1c07425e902cd3137961c3d45b4271bf8a9b8eb9)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Google LLC
 * Author: Quentin Perret <qperret@google.com>
 */

#include <asm/kvm_hyp.h>
#include <nvhe/gfp.h>

u64 __hyp_vmemmap;

/*
 * Index the hyp_vmemmap to find a potential buddy page, but make no assumption
 * about its current state.
 *
 * Example buddy-tree for a 4-page physically contiguous pool:
 *
 *                 o : Page 3
 *                /
 *               o-o : Page 2
 *              /
 *             /   o : Page 1
 *            /   /
 *           o---o-o : Page 0
 *    Order  2   1 0
 *
 * Examples of requests on this pool:
 *   __find_buddy_nocheck(pool, page 0, order 0) => page 1
 *   __find_buddy_nocheck(pool, page 0, order 1) => page 2
 *   __find_buddy_nocheck(pool, page 1, order 0) => page 0
 *   __find_buddy_nocheck(pool, page 2, order 0) => page 3
 */
static struct hyp_page *__find_buddy_nocheck(struct hyp_pool *pool,
					     struct hyp_page *p,
					     unsigned short order)
{
	phys_addr_t addr = hyp_page_to_phys(p);

	addr ^= (PAGE_SIZE << order);

	/*
	 * Don't return a page outside the pool range -- it belongs to
	 * something else and may not be mapped in hyp_vmemmap.
	 */
	if (addr < pool->range_start || addr >= pool->range_end)
		return NULL;

	return hyp_phys_to_page(addr);
}
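
/*
 * Worked example of the XOR indexing above (illustrative numbers, not from
 * the original file): assume PAGE_SIZE == 0x1000 and the 4-page pool starts
 * at physical address 0x80000000, so pages 0..3 live at 0x80000000,
 * 0x80001000, 0x80002000 and 0x80003000.
 *
 *   page 0, order 0: 0x80000000 ^ (0x1000 << 0) == 0x80001000 => page 1
 *   page 0, order 1: 0x80000000 ^ (0x1000 << 1) == 0x80002000 => page 2
 *   page 2, order 0: 0x80002000 ^ (0x1000 << 0) == 0x80003000 => page 3
 *
 * Flipping bit (PAGE_SHIFT + order) of the physical address is all it takes
 * to hop between a block and its buddy at that order.
 */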

/* Find a buddy page currently available for allocation */
static struct hyp_page *__find_buddy_avail(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned short order)
{
	struct hyp_page *buddy = __find_buddy_nocheck(pool, p, order);

	if (!buddy || buddy->order != order || buddy->refcount)
		return NULL;

	return buddy;
}

/*
 * Pages that are available for allocation are tracked in free-lists, so we use
 * the pages themselves to store the list nodes to avoid wasting space. As the
 * allocator always returns zeroed pages (which are zeroed on the hyp_put_page()
 * path to optimize allocation speed), we also need to clean up the list node in
 * each page when we take it out of the list.
 */
static inline void page_remove_from_list(struct hyp_page *p)
{
	struct list_head *node = hyp_page_to_virt(p);

	__list_del_entry(node);
	memset(node, 0, sizeof(*node));
}

static inline void page_add_to_list(struct hyp_page *p, struct list_head *head)
{
	struct list_head *node = hyp_page_to_virt(p);

	INIT_LIST_HEAD(node);
	list_add_tail(node, head);
}
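
/*
 * Layout sketch for the trick above (hypothetical address, assuming 64-bit
 * pointers): a free page's memory is otherwise unused, so its first
 * sizeof(struct list_head) bytes double as the free-list node.
 *
 *   free page at hyp VA 0xffff800000001000:
 *     [+0x00, +0x10)      struct list_head { next, prev }  <- list node
 *     [+0x10, PAGE_SIZE)  zeroes
 *
 * page_remove_from_list() re-zeroes just those 16 bytes, which preserves
 * the "allocations are always zeroed" invariant without a full memset()
 * on the allocation path.
 */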

static inline struct hyp_page *node_to_page(struct list_head *node)
{
	return hyp_virt_to_page(node);
}

static void __hyp_attach_page(struct hyp_pool *pool,
			      struct hyp_page *p)
{
	phys_addr_t phys = hyp_page_to_phys(p);
	unsigned short order = p->order;
	struct hyp_page *buddy;

	memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);

	/* Skip coalescing for 'external' pages being freed into the pool. */
	if (phys < pool->range_start || phys >= pool->range_end)
		goto insert;

	/*
	 * Only the first struct hyp_page of a high-order page (otherwise known
	 * as the 'head') should have p->order set. The non-head pages should
	 * have p->order = HYP_NO_ORDER. Here @p may no longer be the head
	 * after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
	 */
	p->order = HYP_NO_ORDER;
	for (; (order + 1) < pool->max_order; order++) {
		buddy = __find_buddy_avail(pool, p, order);
		if (!buddy)
			break;

		/* Take the buddy out of its list, and coalesce with @p */
		page_remove_from_list(buddy);
		buddy->order = HYP_NO_ORDER;
		p = min(p, buddy);
	}

insert:
	/* Mark the new head, and insert it */
	p->order = order;
	page_add_to_list(p, &pool->free_area[order]);
}
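
/*
 * Coalescing walk-through (hypothetical pool state, using the 4-page pool
 * from the diagram above): suppose page 0 sits in free_area[0], pages 2+3
 * form an order-1 block headed at page 2 in free_area[1], and page 1 is
 * now being freed.
 *
 *   order 0: buddy of page 1 is page 0 (free, order 0) -> merge, p = page 0
 *   order 1: buddy of page 0 is page 2 (free, order 1) -> merge, p = page 0
 *   order 2: (order + 1) == max_order                  -> stop
 *
 * Page 0 ends up in free_area[2] as the head of a single order-2 block
 * covering the whole pool.
 */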

static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
					   struct hyp_page *p,
					   unsigned short order)
{
	struct hyp_page *buddy;

	page_remove_from_list(p);
	while (p->order > order) {
		/*
		 * The buddy of order n - 1 currently has HYP_NO_ORDER as it
		 * is covered by a higher-level page (whose head is @p). Use
		 * __find_buddy_nocheck() to find it and inject it in the
		 * free_area[n - 1], effectively splitting @p in half.
		 */
		p->order--;
		buddy = __find_buddy_nocheck(pool, p, p->order);
		buddy->order = p->order;
		page_add_to_list(buddy, &pool->free_area[buddy->order]);
	}

	return p;
}
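
/*
 * Splitting walk-through (hypothetical pool state): extracting an order-0
 * page when the only free block is the order-2 block headed at page 0.
 *
 *   p->order == 2 > 0: p->order = 1, buddy = page 2 -> free_area[1]
 *   p->order == 1 > 0: p->order = 0, buddy = page 1 -> free_area[0]
 *
 * Page 0 is returned at order 0, while pages 2 and 1 now head the order-1
 * and order-0 remainders of the split block.
 */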

static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
	if (hyp_page_ref_dec_and_test(p))
		__hyp_attach_page(pool, p);
}

/*
 * Changes to the buddy tree and page refcounts must be done with the hyp_pool
 * lock held. If a refcount change requires an update to the buddy tree (e.g.
 * hyp_put_page()), both operations must be done within the same critical
 * section to guarantee transient states (e.g. a page with null refcount but
 * not yet attached to a free list) can't be observed by well-behaved readers.
 */
void hyp_put_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	__hyp_put_page(pool, p);
	hyp_spin_unlock(&pool->lock);
}

void hyp_get_page(struct hyp_pool *pool, void *addr)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	hyp_page_ref_inc(p);
	hyp_spin_unlock(&pool->lock);
}
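
/*
 * Sketch of the rule above (hyp_put_many_example() is hypothetical, not part
 * of the original file): a helper dropping several references at once keeps
 * every decrement, and the free-list insertion triggered by the last one,
 * inside a single critical section, so no reader can observe a page with a
 * zero refcount that is not yet attached to a free list.
 */
static void hyp_put_many_example(struct hyp_pool *pool, void *addr,
				 unsigned int nr_refs)
{
	struct hyp_page *p = hyp_virt_to_page(addr);

	hyp_spin_lock(&pool->lock);
	while (nr_refs--)
		__hyp_put_page(pool, p);	/* attaches on the last ref */
	hyp_spin_unlock(&pool->lock);
}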

void hyp_split_page(struct hyp_page *p)
{
	unsigned short order = p->order;
	unsigned int i;

	p->order = 0;
	for (i = 1; i < (1 << order); i++) {
		struct hyp_page *tail = p + i;

		tail->order = 0;
		hyp_set_page_refcounted(tail);
	}
}
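
/*
 * Usage sketch (hyp_split_example() is hypothetical): after splitting, the
 * head and every tail page carry their own refcount at order 0, so each of
 * the 1 << order pages can be released independently with hyp_put_page().
 * hyp_alloc_pages() is declared in <nvhe/gfp.h>, so using it here before
 * its definition below is fine.
 */
static void hyp_split_example(struct hyp_pool *pool)
{
	void *va = hyp_alloc_pages(pool, 2);	/* 4 contiguous pages */

	if (!va)
		return;

	hyp_split_page(hyp_virt_to_page(va));

	/* Free just the first of the four now-independent pages. */
	hyp_put_page(pool, va);
}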

void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
{
	unsigned short i = order;
	struct hyp_page *p;

	hyp_spin_lock(&pool->lock);

	/* Look for a high-enough-order page */
	while (i < pool->max_order && list_empty(&pool->free_area[i]))
		i++;
	if (i >= pool->max_order) {
		hyp_spin_unlock(&pool->lock);
		return NULL;
	}

	/* Extract it from the tree at the right order */
	p = node_to_page(pool->free_area[i].next);
	p = __hyp_extract_page(pool, p, order);

	hyp_set_page_refcounted(p);
	hyp_spin_unlock(&pool->lock);

	return hyp_page_to_virt(p);
}
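
/*
 * Usage sketch (hyp_alloc_example() is hypothetical): allocate one zeroed
 * order-0 page, take an extra reference, then drop both. The page returns
 * to the pool only when the last reference is put.
 */
static void hyp_alloc_example(struct hyp_pool *pool)
{
	void *va = hyp_alloc_pages(pool, 0);	/* refcount == 1, zeroed */

	if (!va)
		return;				/* pool exhausted */

	hyp_get_page(pool, va);			/* refcount 1 -> 2 */
	hyp_put_page(pool, va);			/* refcount 2 -> 1 */
	hyp_put_page(pool, va);			/* refcount 1 -> 0: freed */
}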

int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
		  unsigned int reserved_pages)
{
	phys_addr_t phys = hyp_pfn_to_phys(pfn);
	struct hyp_page *p;
	int i;

	hyp_spin_lock_init(&pool->lock);
	pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
	for (i = 0; i < pool->max_order; i++)
		INIT_LIST_HEAD(&pool->free_area[i]);
	pool->range_start = phys;
	pool->range_end = phys + (nr_pages << PAGE_SHIFT);

	/* Init the vmemmap portion */
	p = hyp_phys_to_page(phys);
	for (i = 0; i < nr_pages; i++)
		hyp_set_page_refcounted(&p[i]);

	/* Attach the unused pages to the buddy tree */
	for (i = reserved_pages; i < nr_pages; i++)
		__hyp_put_page(pool, &p[i]);

	return 0;
}
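
/*
 * Initialization sketch (hyp_pool_init_example() and the numbers are
 * hypothetical): carve a 16-page pool out of memory starting at @base_pfn,
 * keeping the first two pages reserved. Pages [0, 2) stay refcounted for
 * the owner's use; pages [2, 16) are attached to the free-lists.
 */
static int hyp_pool_init_example(struct hyp_pool *pool, u64 base_pfn)
{
	return hyp_pool_init(pool, base_pfn, 16, 2);
}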