1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/errno.h>
3 #include <linux/gfp.h>
4 #include <linux/kernel.h>
5 #include <linux/mm.h>
6 #include <linux/memremap.h>
7 #include <linux/slab.h>
8
9 #include <asm/page.h>
10
11 #include <xen/balloon.h>
12 #include <xen/page.h>
13 #include <xen/xen.h>
14
/* Protects page_list and list_count below. */
static DEFINE_MUTEX(list_lock);
/*
 * Singly-linked LIFO cache of unpopulated pages, chained through each
 * page's zone_device_data field; list_count is the number of entries.
 */
static struct page *page_list;
static unsigned int list_count;

/* Address space the scratch regions are allocated from (see unpopulated_init()). */
static struct resource *target_resource;

/* Pages to subtract from the memory count when setting balloon target. */
unsigned long xen_unpopulated_pages __initdata;
23
24 /*
25 * If arch is not happy with system "iomem_resource" being used for
26 * the region allocation it can provide it's own view by creating specific
27 * Xen resource with unused regions of guest physical address space provided
28 * by the hypervisor.
29 */
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
	/* Default: carve scratch regions straight out of the system I/O memory space. */
	*res = &iomem_resource;

	return 0;
}
36
/*
 * fill_list() - grow the cache of unpopulated pages by at least @nr_pages.
 *
 * Allocates a section-aligned chunk of unused guest physical address space
 * from @target_resource, remaps it as MEMORY_DEVICE_GENERIC device memory
 * and pushes every resulting struct page onto @page_list.
 *
 * Called with @list_lock held (see xen_alloc_unpopulated_pages()).
 * Returns 0 on success, negative errno on failure; on failure all
 * partially acquired resources are released via the goto chain below.
 */
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res, *tmp_res = NULL;
	void *vaddr;
	/* Memory hotplug works in whole sections, so round the request up. */
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	struct range mhp_range;
	int ret;

	res = kzalloc_obj(*res);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/* Constrain the allocation to the range the arch can actually hot-plug. */
	mhp_range = mhp_get_pluggable_range(true);

	ret = allocate_resource(target_resource, res,
				alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	/*
	 * Reserve the region previously allocated from Xen resource to avoid
	 * re-using it by someone else.
	 */
	if (target_resource != &iomem_resource) {
		tmp_res = kzalloc_obj(*tmp_res);
		if (!tmp_res) {
			ret = -ENOMEM;
			goto err_insert;
		}

		/* Mirror the Xen-resource region into iomem_resource. */
		tmp_res->name = res->name;
		tmp_res->start = res->start;
		tmp_res->end = res->end;
		tmp_res->flags = res->flags;

		ret = request_resource(&iomem_resource, tmp_res);
		if (ret < 0) {
			pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
			kfree(tmp_res);
			goto err_insert;
		}
	}

	pgmap = kzalloc_obj(*pgmap);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	/* Owner ties the pagemap's lifetime to the resource we allocated. */
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (xen_pv_domain()) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	/* Push every new page onto the LIFO cache (list_lock is held). */
	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	if (tmp_res) {
		release_resource(tmp_res);
		kfree(tmp_res);
	}
err_insert:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}
154
/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	/*
	 * Fallback to default behavior if we do not have any suitable resource
	 * to allocate required region from and as the result we won't be able to
	 * construct pages.
	 */
	if (!target_resource)
		return xen_alloc_ballooned_pages(nr_pages, pages);

	mutex_lock(&list_lock);
	/* Top up the cache first so the pop loop below cannot run dry. */
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		/* Pop one page off the LIFO cache. */
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (xen_pv_domain()) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/*
				 * Roll back: push every page taken so far —
				 * including pages[i], hence j <= i — back
				 * onto the cache before bailing out.
				 */
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
211
212 /**
213 * xen_free_unpopulated_pages - return unpopulated pages
214 * @nr_pages: Number of pages
215 * @pages: pages to return
216 */
xen_free_unpopulated_pages(unsigned int nr_pages,struct page ** pages)217 void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
218 {
219 unsigned int i;
220
221 if (!target_resource) {
222 xen_free_ballooned_pages(nr_pages, pages);
223 return;
224 }
225
226 mutex_lock(&list_lock);
227 for (i = 0; i < nr_pages; i++) {
228 pages[i]->zone_device_data = page_list;
229 page_list = pages[i];
230 list_count++;
231 }
232 mutex_unlock(&list_lock);
233 }
234 EXPORT_SYMBOL(xen_free_unpopulated_pages);
235
unpopulated_init(void)236 static int __init unpopulated_init(void)
237 {
238 int ret;
239
240 if (!xen_domain())
241 return -ENODEV;
242
243 ret = arch_xen_unpopulated_init(&target_resource);
244 if (ret) {
245 pr_err("xen:unpopulated: Cannot initialize target resource\n");
246 target_resource = NULL;
247 }
248
249 return ret;
250 }
251 early_initcall(unpopulated_init);
252