// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memremap.h>
#include <linux/slab.h>

#include <asm/page.h>

#include <xen/page.h>
#include <xen/xen.h>

static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;

static int fill_list(unsigned int nr_pages)
{
	struct dev_pagemap *pgmap;
	struct resource *res;
	void *vaddr;
	unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
	int ret = -ENOMEM;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Xen scratch";
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

	ret = allocate_resource(&iomem_resource, res,
				alloc_pages * PAGE_SIZE, 0, -1,
				PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
	if (ret < 0) {
		pr_err("Cannot allocate new IOMEM resource\n");
		goto err_resource;
	}

	pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
	if (!pgmap) {
		/* ret was cleared by allocate_resource(); set it explicitly. */
		ret = -ENOMEM;
		goto err_pgmap;
	}

	pgmap->type = MEMORY_DEVICE_GENERIC;
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,
	};
	pgmap->nr_range = 1;
	pgmap->owner = res;

#ifdef CONFIG_XEN_HAVE_PVMMU
	/*
	 * memremap will build page tables for the new memory, so
	 * the p2m must contain invalid entries so that the correct
	 * non-present PTEs will be written.
	 *
	 * If a failure occurs, the original (identity) p2m entries
	 * are not restored since this region is now known not to
	 * conflict with any devices.
	 */
	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		xen_pfn_t pfn = PFN_DOWN(res->start);

		for (i = 0; i < alloc_pages; i++) {
			if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) {
				pr_warn("set_phys_to_machine() failed, no memory added\n");
				ret = -ENOMEM;
				goto err_memremap;
			}
		}
	}
#endif

	vaddr = memremap_pages(pgmap, NUMA_NO_NODE);
	if (IS_ERR(vaddr)) {
		pr_err("Cannot remap memory range\n");
		ret = PTR_ERR(vaddr);
		goto err_memremap;
	}

	for (i = 0; i < alloc_pages; i++) {
		struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);

		BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
		pg->zone_device_data = page_list;
		page_list = pg;
		list_count++;
	}

	return 0;

err_memremap:
	kfree(pgmap);
err_pgmap:
	release_resource(res);
err_resource:
	kfree(res);
	return ret;
}

/**
 * xen_alloc_unpopulated_pages - alloc unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages returned
 *
 * Return: 0 on success, error otherwise
 */
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	int ret = 0;

	mutex_lock(&list_lock);
	if (list_count < nr_pages) {
		ret = fill_list(nr_pages - list_count);
		if (ret)
			goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *pg = page_list;

		BUG_ON(!pg);
		page_list = pg->zone_device_data;
		list_count--;
		pages[i] = pg;

#ifdef CONFIG_XEN_HAVE_PVMMU
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = xen_alloc_p2m_entry(page_to_pfn(pg));
			if (ret < 0) {
				unsigned int j;

				/* Roll back: push pages[0..i] back onto the free list. */
				for (j = 0; j <= i; j++) {
					pages[j]->zone_device_data = page_list;
					page_list = pages[j];
					list_count++;
				}
				goto out;
			}
		}
#endif
	}

out:
	mutex_unlock(&list_lock);
	return ret;
}
EXPORT_SYMBOL(xen_alloc_unpopulated_pages);

/**
 * xen_free_unpopulated_pages - return unpopulated pages
 * @nr_pages: Number of pages
 * @pages: pages to return
 */
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
	unsigned int i;

	mutex_lock(&list_lock);
	for (i = 0; i < nr_pages; i++) {
		pages[i]->zone_device_data = page_list;
		page_list = pages[i];
		list_count++;
	}
	mutex_unlock(&list_lock);
}
EXPORT_SYMBOL(xen_free_unpopulated_pages);

#ifdef CONFIG_XEN_PV
static int __init init(void)
{
	unsigned int i;

	if (!xen_domain())
		return -ENODEV;

	if (!xen_pv_domain())
		return 0;

	/*
	 * Initialize with pages from the extra memory regions (see
	 * arch/x86/xen/setup.c).
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		unsigned int j;

		for (j = 0; j < xen_extra_mem[i].n_pfns; j++) {
			struct page *pg =
				pfn_to_page(xen_extra_mem[i].start_pfn + j);

			pg->zone_device_data = page_list;
			page_list = pg;
			list_count++;
		}
	}

	return 0;
}
subsys_initcall(init);
#endif
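/*
 * Example usage (an illustrative sketch, not part of this file): a
 * hypothetical caller that needs backing frames for foreign mappings
 * could drive the exported API above as shown below. Every name in the
 * snippet other than xen_alloc_unpopulated_pages(),
 * xen_free_unpopulated_pages(), and standard kernel helpers is made up
 * for the example.
 *
 *	static int example_use_unpopulated(unsigned int nr)
 *	{
 *		struct page **pages;
 *		int ret;
 *
 *		pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
 *		if (!pages)
 *			return -ENOMEM;
 *
 *		ret = xen_alloc_unpopulated_pages(nr, pages);
 *		if (ret) {
 *			kfree(pages);
 *			return ret;
 *		}
 *
 *		... map foreign frames onto page_to_pfn(pages[i]) here ...
 *
 *		xen_free_unpopulated_pages(nr, pages);
 *		kfree(pages);
 *		return 0;
 *	}
 */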