// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

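/*
 * Pool of unpopulated pages, linked through page->lru, and its current
 * size. Both are protected by list_lock.
 */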
static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list);
static unsigned int list_count;

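/*
 * Grow the pool by at least @nr_pages pages: reserve a section-aligned
 * IOMEM region and back it with device pages via memremap_pages().
 */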
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		goto err_pgmap;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so the
	 * p2m must contain invalid entries in order for the correct
	 * non-present PTEs to be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored, since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		list_add(&pg->lru, &page_list);
		list_count++;
	}

	return 0;

err_memremap:
	release_resource(res);
err_resource:
	kfree(pgmap);
err_pgmap:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - allocate unpopulated pages
 * @nr_pages: Number of pages
 * @pages: Array in which to return the allocated pages
 *
 * Return: 0 on success, a negative errno otherwise.
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = list_first_entry_or_null(&page_list,
							   struct page,
							   lru);

		BUG_ON(!pg);
		list_del(&pg->lru);
		list_count--;
		pages[i] = pg;

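		/*
		 * On PV guests, make sure the p2m tree can hold an
		 * entry for this pfn before handing the page out.
		 */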
#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				for (j = 0; j <= i; j++) {
					list_add(&pages[j]->lru, &page_list);
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages to the pool
 * @nr_pages: Number of pages
 * @pages: Pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		list_add(&pages[i]->lru, &page_list);
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);
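
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller allocates a batch of unpopulated pages, consumes their PFNs
 * (e.g. as scratch space for mapping foreign memory), and hands them
 * back when done. The function name and batch size are hypothetical.
 */
#if 0
static int example_map_with_unpopulated_pages(void)
{
	struct page *pages[4];	/* hypothetical batch size */
	int ret;

	ret = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
	if (ret)
		return ret;

	/*
	 * page_to_pfn(pages[i]) now yields PFNs that are safe targets
	 * for foreign mappings, since no RAM backs them.
	 */

	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
	return 0;
}
#endif
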
#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			list_add(&pg->lru, &page_list);
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif