// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

static DEFINE_MUTEX(list_lock);
static LIST_HEAD(page_list);
static unsigned int list_count;

/* Grow the pool by reserving a new, section-aligned unpopulated region. */
static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap)
		goto err_pgmap;

	pgmap->type = MEMORY_DEVICE_GENERIC;
	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	/* memremap_pages() rejects a pagemap without at least one range. */
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory so
	 * the p2m must contain invalid entries so the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	/* Thread every page of the new region onto the free pool. */
	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		list_add(&pg->lru, &page_list);
		list_count++;
	}

	return 0;

err_memremap:
	release_resource(res);
err_resource:
	kfree(pgmap);
err_pgmap:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 *
 * Return: 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = list_first_entry_or_null(&page_list,
							   struct page,
							   lru);

		BUG_ON(!pg);
		list_del(&pg->lru);
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/* Return everything taken so far, pages[i]
				 * included, to the pool before bailing out. */
				for (j = 0; j <= i; j++) {
					list_add(&pages[j]->lru, &page_list);
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
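/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * that needs ballooned-out backing pages, e.g. for mapping another
 * domain's grants, pairs the two exports in this file as below. The
 * mapping step is a hypothetical placeholder for the caller's own work.
 *
 *	struct page *pages[16];
 *	int rc;
 *
 *	rc = xen_alloc_unpopulated_pages(ARRAY_SIZE(pages), pages);
 *	if (rc)
 *		return rc;
 *	... map foreign frames into pages[], use them, unmap ...
 *	xen_free_unpopulated_pages(ARRAY_SIZE(pages), pages);
 */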
/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		list_add(&pages[i]->lru, &page_list);
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			list_add(&pg->lru, &page_list);
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif
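/*
 * Sizing note (illustrative, derived from fill_list() above): every
 * shortfall is rounded up to a whole memory section, so on x86-64 with
 * 4 KiB pages and 128 MiB sections (PAGES_PER_SECTION == 32768) even a
 * one-page shortfall reserves a full section of IOMEM address space:
 *
 *	round_up(1, PAGES_PER_SECTION) == 32768 pages == 128 MiB
 *
 * The surplus pages stay cached on page_list and satisfy later
 * allocations without a new reservation.
 */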