19e2369c0SRoger Pau Monne // SPDX-License-Identifier: GPL-2.0
29e2369c0SRoger Pau Monne #include <linux/errno.h>
39e2369c0SRoger Pau Monne #include <linux/gfp.h>
49e2369c0SRoger Pau Monne #include <linux/kernel.h>
59e2369c0SRoger Pau Monne #include <linux/mm.h>
69e2369c0SRoger Pau Monne #include <linux/memremap.h>
79e2369c0SRoger Pau Monne #include <linux/slab.h>
89e2369c0SRoger Pau Monne
99e2369c0SRoger Pau Monne #include <asm/page.h>
109e2369c0SRoger Pau Monne
11*d1a928eaSOleksandr Tyshchenko #include <xen/balloon.h>
129e2369c0SRoger Pau Monne #include <xen/page.h>
139e2369c0SRoger Pau Monne #include <xen/xen.h>
149e2369c0SRoger Pau Monne
/* Protects page_list and list_count below. */
static DEFINE_MUTEX(list_lock);
/* LIFO cache of unpopulated pages, singly linked through page->zone_device_data. */
static struct page *page_list;
/* Number of pages currently held on page_list. */
static unsigned int list_count;

/* Resource the unpopulated regions are allocated from; NULL means "fall back to ballooning". */
static struct resource *target_resource;
20*d1a928eaSOleksandr Tyshchenko
/*
 * If arch is not happy with system "iomem_resource" being used for
 * the region allocation it can provide its own view by creating specific
 * Xen resource with unused regions of guest physical address space provided
 * by the hypervisor.
 */
int __weak __init arch_xen_unpopulated_init(struct resource **res)
{
	/* Default: carve unpopulated regions out of the whole I/O memory space. */
	*res = &iomem_resource;

	return 0;
}
33*d1a928eaSOleksandr Tyshchenko
/*
 * Grow the cache of unpopulated pages by at least @nr_pages, rounded up to a
 * whole memory section.  A fresh region is allocated from @target_resource,
 * remapped as MEMORY_DEVICE_GENERIC device memory, and every resulting page
 * is pushed onto @page_list.
 *
 * Called with @list_lock held (see xen_alloc_unpopulated_pages()).
 * Returns 0 on success or a negative errno; on failure all intermediate
 * allocations are unwound via the goto chain at the bottom.
 */
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res, *tmp_res = NULL;
	void *vaddr;
	/* memremap works on whole sections, so round the request up. */
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	struct range mhp_range;
	int ret;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	/* Constrain the search to the range where memory may be hot-added. */
	mhp_range = mhp_get_pluggable_range(true);

	ret = allocate_resource(target_resource, res,
				alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	/*
	 * Reserve the region previously allocated from Xen resource to avoid
	 * re-using it by someone else.
	 */
	if (target_resource != &iomem_resource) {
		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
		if (!tmp_res) {
			ret = -ENOMEM;
			goto err_insert;
		}

		/* Mirror the allocated region into the system iomem tree. */
		tmp_res->name = res->name;
		tmp_res->start = res->start;
		tmp_res->end = res->end;
		tmp_res->flags = res->flags;

		ret = request_resource(&iomem_resource, tmp_res);
		if (ret < 0) {
			pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
			kfree(tmp_res);
			goto err_insert;
		}
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	/* res stays alive for the pagemap's lifetime; pgmap->owner tracks it. */
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	/* Push every newly created page onto the LIFO cache. */
	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	if (tmp_res) {
		release_resource(tmp_res);
		kfree(tmp_res);
	}
err_insert:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}
1519e2369c0SRoger Pau Monne
/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 * @return 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	/*
	 * Fallback to default behavior if we do not have any suitable resource
	 * to allocate required region from and as the result we won't be able to
	 * construct pages.
	 */
	if (!target_resource)
		return xen_alloc_ballooned_pages(nr_pages, pages);

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		/* Top the cache up so the pop loop below cannot run dry. */
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		/* Pop the head of the LIFO cache. */
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/*
				 * Roll back: push every page taken so far
				 * (including pages[i]) back onto the cache.
				 */
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
2089e2369c0SRoger Pau Monne
2099e2369c0SRoger Pau Monne /**
2109e2369c0SRoger Pau Monne * xen_free_unpopulated_pages - return unpopulated pages
2119e2369c0SRoger Pau Monne * @nr_pages: Number of pages
2129e2369c0SRoger Pau Monne * @pages: pages to return
2139e2369c0SRoger Pau Monne */
xen_free_unpopulated_pages(unsigned int nr_pages,struct page ** pages)2149e2369c0SRoger Pau Monne void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
2159e2369c0SRoger Pau Monne {
2169e2369c0SRoger Pau Monne unsigned int i;
2179e2369c0SRoger Pau Monne
218*d1a928eaSOleksandr Tyshchenko if (!target_resource) {
219*d1a928eaSOleksandr Tyshchenko xen_free_ballooned_pages(nr_pages, pages);
220*d1a928eaSOleksandr Tyshchenko return;
221*d1a928eaSOleksandr Tyshchenko }
222*d1a928eaSOleksandr Tyshchenko
2239e2369c0SRoger Pau Monne mutex_lock(&list_lock);
2249e2369c0SRoger Pau Monne for (i = 0; i < nr_pages; i++) {
225ee32f323SJuergen Gross pages[i]->zone_device_data = page_list;
226ee32f323SJuergen Gross page_list = pages[i];
2279e2369c0SRoger Pau Monne list_count++;
2289e2369c0SRoger Pau Monne }
2299e2369c0SRoger Pau Monne mutex_unlock(&list_lock);
2309e2369c0SRoger Pau Monne }
2319e2369c0SRoger Pau Monne EXPORT_SYMBOL(xen_free_unpopulated_pages);
2329e2369c0SRoger Pau Monne
unpopulated_init(void)233*d1a928eaSOleksandr Tyshchenko static int __init unpopulated_init(void)
234*d1a928eaSOleksandr Tyshchenko {
235*d1a928eaSOleksandr Tyshchenko int ret;
236*d1a928eaSOleksandr Tyshchenko
237*d1a928eaSOleksandr Tyshchenko if (!xen_domain())
238*d1a928eaSOleksandr Tyshchenko return -ENODEV;
239*d1a928eaSOleksandr Tyshchenko
240*d1a928eaSOleksandr Tyshchenko ret = arch_xen_unpopulated_init(&target_resource);
241*d1a928eaSOleksandr Tyshchenko if (ret) {
242*d1a928eaSOleksandr Tyshchenko pr_err("xen:unpopulated: Cannot initialize target resource\n");
243*d1a928eaSOleksandr Tyshchenko target_resource = NULL;
244*d1a928eaSOleksandr Tyshchenko }
245*d1a928eaSOleksandr Tyshchenko
246*d1a928eaSOleksandr Tyshchenko return ret;
247*d1a928eaSOleksandr Tyshchenko }
248*d1a928eaSOleksandr Tyshchenko early_initcall(unpopulated_init);
249