// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual Memory Map support
 *
 * (C) 2007 sgi. Christoph Lameter.
 *
 * Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
 * virt_to_page, page_address() to be implemented as a base offset
 * calculation without memory access.
 *
 * However, virtual mappings need a page table and TLBs. Many Linux
 * architectures already map their physical space using 1-1 mappings
 * via TLBs. For those arches the virtual memory map is essentially
 * for free if we use the same page size as the 1-1 mappings. In that
 * case the overhead consists of a few additional pages that are
 * allocated to create a view of memory for vmemmap.
 *
 * The architecture is expected to provide a vmemmap_populate() function
 * to instantiate the mapping.
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

#include <asm/dma.h>
#include <asm/pgalloc.h>

/*
 * Allocate a block of memory to be used to back the virtual memory map
 * or to back the page tables that are used to create the mapping.
 * Uses the main allocators if they are available, else memblock.
 */

static void * __ref __earlyonly_bootmem_alloc(int node,
				unsigned long size,
				unsigned long align,
				unsigned long goal)
{
	return memblock_alloc_try_nid_raw(size, align, goal,
					  MEMBLOCK_ALLOC_ACCESSIBLE, node);
}

void * __meminit vmemmap_alloc_block(unsigned long size, int node)
{
	/* If the main allocator is up, use that; otherwise fall back to memblock. */
	if (slab_is_available()) {
		gfp_t gfp_mask = GFP_KERNEL|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
		int order = get_order(size);
		static bool warned;
		struct page *page;

		page = alloc_pages_node(node, gfp_mask, order);
		if (page)
			return page_address(page);

		if (!warned) {
			warn_alloc(gfp_mask & ~__GFP_NOWARN, NULL,
				   "vmemmap alloc failure: order:%u", order);
			warned = true;
		}
		return NULL;
	} else
		return __earlyonly_bootmem_alloc(node, size, size,
				__pa(MAX_DMA_ADDRESS));
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap);

/* Callers need to make sure the allocation size stays the same during the early stage. */
void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
					 struct vmem_altmap *altmap)
{
	void *ptr;

	if (altmap)
		return altmap_alloc_block_buf(size, altmap);

	ptr = sparse_buffer_alloc(size);
	if (!ptr)
		ptr = vmemmap_alloc_block(size, node);
	return ptr;
}

static unsigned long __meminit vmem_altmap_next_pfn(struct vmem_altmap *altmap)
{
	return altmap->base_pfn + altmap->reserve + altmap->alloc
		+ altmap->align;
}

static unsigned long __meminit vmem_altmap_nr_free(struct vmem_altmap *altmap)
{
	unsigned long allocated = altmap->alloc + altmap->align;

	if (altmap->free > allocated)
		return altmap->free - allocated;
	return 0;
}

static void * __meminit altmap_alloc_block_buf(unsigned long size,
					       struct vmem_altmap *altmap)
{
	unsigned long pfn, nr_pfns, nr_align;

	if (size & ~PAGE_MASK) {
		pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
				__func__, size);
		return NULL;
	}

	pfn = vmem_altmap_next_pfn(altmap);
	nr_pfns = size >> PAGE_SHIFT;
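	/*
	 * Naturally align the allocation: find_first_bit() yields the
	 * index of the lowest set bit, so 1UL << it is the largest
	 * power-of-two factor of nr_pfns. nr_align is then reused below
	 * as the number of padding pfns needed to bring pfn up to that
	 * boundary.
	 */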
	nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
	nr_align = ALIGN(pfn, nr_align) - pfn;
	if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
		return NULL;

	altmap->alloc += nr_pfns;
	altmap->align += nr_align;
	pfn += nr_align;

	pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
			__func__, pfn, altmap->alloc, altmap->align, nr_pfns);
	return __va(__pfn_to_phys(pfn));
}

void __meminit vmemmap_verify(pte_t *pte, int node,
				unsigned long start, unsigned long end)
{
	unsigned long pfn = pte_pfn(*pte);
	int actual_node = early_pfn_to_nid(pfn);

	if (node_distance(actual_node, node) > LOCAL_DISTANCE)
		pr_warn_once("[%lx-%lx] potential offnode page_structs\n",
			start, end - 1);
}

pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
				       struct vmem_altmap *altmap,
				       struct page *reuse)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte)) {
		pte_t entry;
		void *p;

		if (!reuse) {
			p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
			if (!p)
				return NULL;
		} else {
			/*
			 * When a PTE/PMD entry is freed from the init_mm
			 * there's a free_pages() call to this page allocated
			 * above. Thus this get_page() is paired with the
			 * put_page_testzero() on the freeing path.
			 * This can only be called by certain ZONE_DEVICE
			 * paths, and through vmemmap_populate_compound_pages()
			 * when slab is available.
			 */
			get_page(reuse);
			p = page_to_virt(reuse);
		}
		entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	}
	return pte;
}

static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node)
{
	void *p = vmemmap_alloc_block(size, node);

	if (!p)
		return NULL;
	memset(p, 0, size);

	return p;
}

pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pmd_populate_kernel(&init_mm, pmd, p);
	}
	return pmd;
}

pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node)
{
	pud_t *pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pud_populate(&init_mm, pud, p);
	}
	return pud;
}

p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		p4d_populate(&init_mm, p4d, p);
	}
	return p4d;
}

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
	pgd_t *pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
		if (!p)
			return NULL;
		pgd_populate(&init_mm, pgd, p);
	}
	return pgd;
}

static pte_t * __meminit vmemmap_populate_address(unsigned long addr, int node,
						  struct vmem_altmap *altmap,
						  struct page *reuse)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = vmemmap_pgd_populate(addr, node);
	if (!pgd)
		return NULL;
	p4d = vmemmap_p4d_populate(pgd, addr, node);
	if (!p4d)
		return NULL;
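	/* Walk down the remaining levels, allocating empty tables as needed. */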
	pud = vmemmap_pud_populate(p4d, addr, node);
	if (!pud)
		return NULL;
	pmd = vmemmap_pmd_populate(pud, addr, node);
	if (!pmd)
		return NULL;
	pte = vmemmap_pte_populate(pmd, addr, node, altmap, reuse);
	if (!pte)
		return NULL;
	vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);

	return pte;
}

static int __meminit vmemmap_populate_range(unsigned long start,
					    unsigned long end, int node,
					    struct vmem_altmap *altmap,
					    struct page *reuse)
{
	unsigned long addr = start;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pte = vmemmap_populate_address(addr, node, altmap, reuse);
		if (!pte)
			return -ENOMEM;
	}

	return 0;
}

int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
					 int node, struct vmem_altmap *altmap)
{
	return vmemmap_populate_range(start, end, node, altmap, NULL);
}

/*
 * For compound pages bigger than section size (e.g. x86 1G compound
 * pages with 2M subsection size) fill the rest of sections as tail
 * pages.
 *
 * Note that memremap_pages() resets @nr_range value and will increment
 * it after each successful range onlining. Thus the value of @nr_range
 * at section memmap populate corresponds to the in-progress range
 * being onlined here.
 */
static bool __meminit reuse_compound_section(unsigned long start_pfn,
					     struct dev_pagemap *pgmap)
{
	unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
	unsigned long offset = start_pfn -
		PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

	return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
}

static pte_t * __meminit compound_section_tail_page(unsigned long addr)
{
	pte_t *pte;

	addr -= PAGE_SIZE;

	/*
	 * Assuming sections are populated sequentially, the previous section's
	 * page data can be reused.
	 */
	pte = pte_offset_kernel(pmd_off_k(addr), addr);
	if (!pte)
		return NULL;

	return pte;
}

static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
						     unsigned long start,
						     unsigned long end, int node,
						     struct dev_pagemap *pgmap)
{
	unsigned long size, addr;
	pte_t *pte;
	int rc;

	if (reuse_compound_section(start_pfn, pgmap)) {
		pte = compound_section_tail_page(start);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the page that was populated in the prior iteration
		 * with just tail struct pages.
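		 * Every pte in this range then points at that one physical
		 * page; see the layout diagram in
		 * Documentation/mm/vmemmap_dedup.rst.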
		 */
		return vmemmap_populate_range(start, end, node, NULL,
					      pte_page(*pte));
	}

	size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page));
	for (addr = start; addr < end; addr += size) {
		unsigned long next, last = addr + size;

		/* Populate the head page vmemmap page */
		pte = vmemmap_populate_address(addr, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/* Populate the tail pages vmemmap page */
		next = addr + PAGE_SIZE;
		pte = vmemmap_populate_address(next, node, NULL, NULL);
		if (!pte)
			return -ENOMEM;

		/*
		 * Reuse the previous page for the rest of tail pages
		 * See layout diagram in Documentation/mm/vmemmap_dedup.rst
		 */
		next += PAGE_SIZE;
		rc = vmemmap_populate_range(next, last, node, NULL,
					    pte_page(*pte));
		if (rc)
			return -ENOMEM;
	}

	return 0;
}

struct page * __meminit __populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);
	int r;

	if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
		!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
		return NULL;

	if (is_power_of_2(sizeof(struct page)) &&
	    pgmap && pgmap_vmemmap_nr(pgmap) > 1 && !altmap)
		r = vmemmap_populate_compound_pages(pfn, start, end, nid, pgmap);
	else
		r = vmemmap_populate(start, end, nid, altmap);

	if (r < 0)
		return NULL;

	return pfn_to_page(pfn);
}
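/*
 * Illustration only, kept as a comment since it is not part of this file:
 * the header comment above says each architecture must provide
 * vmemmap_populate(). A minimal sketch of that hook, for an arch with no
 * special mapping requirements, can simply delegate to
 * vmemmap_populate_basepages() and map the memmap with base pages:
 *
 *	int __meminit vmemmap_populate(unsigned long start, unsigned long end,
 *				       int node, struct vmem_altmap *altmap)
 *	{
 *		return vmemmap_populate_basepages(start, end, node, altmap);
 *	}
 */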