Lines Matching +full:page +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0
11 * However, virtual mappings need a page table and TLBs. Many Linux
12 * architectures already map their physical space using 1-1 mappings
14 * for free if we use the same page size as the 1-1 mappings. In that
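[Editor's note] The "virtual memory map" this comment refers to is what lets pfn_to_page()/page_to_pfn() collapse into plain base-offset arithmetic. As a point of reference (recalled from include/asm-generic/memory_model.h, so treat the exact text as approximate), the generic SPARSEMEM_VMEMMAP definitions are roughly:

    /* memmap is virtually contiguous */
    #define __pfn_to_page(pfn)   (vmemmap + (pfn))
    #define __page_to_pfn(page)  (unsigned long)((page) - vmemmap)

The price for this, as the comment says, is the page-table pages needed to map the vmemmap region, which is what the allocation helpers below provide.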
40 /* Get a ref on the head page struct page, for ZONE_DEVICE compound pages */
47 * or to back the page tables that are used to create the mapping.
52 unsigned long size, in __earlyonly_bootmem_alloc() argument
56 return memmap_alloc(size, align, goal, node, false); in __earlyonly_bootmem_alloc()
59 void * __meminit vmemmap_alloc_block(unsigned long size, int node) in vmemmap_alloc_block() argument
64 int order = get_order(size); in vmemmap_alloc_block()
66 struct page *page; in vmemmap_alloc_block() local
68 page = alloc_pages_node(node, gfp_mask, order); in vmemmap_alloc_block()
69 if (page) in vmemmap_alloc_block()
70 return page_address(page); in vmemmap_alloc_block()
79 return __earlyonly_bootmem_alloc(node, size, size, in vmemmap_alloc_block()
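[Editor's note] Only the matching lines of vmemmap_alloc_block() are shown above. A hedged reconstruction of its overall shape, with the slab_is_available() check, GFP flags and bootmem goal filled in from memory (they may differ in detail from this tree):

    void * __meminit vmemmap_alloc_block(unsigned long size, int node)
    {
            /* If the main allocator is up, use it; otherwise fall back to bootmem. */
            if (slab_is_available()) {
                    gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
                    int order = get_order(size);
                    struct page *page;

                    page = alloc_pages_node(node, gfp_mask, order);
                    if (page)
                            return page_address(page);
                    return NULL;    /* upstream also warns once on failure */
            }

            /* Early boot: allocate from memblock instead. */
            return __earlyonly_bootmem_alloc(node, size, size,
                                             __pa(MAX_DMA_ADDRESS));
    }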
83 static void * __meminit altmap_alloc_block_buf(unsigned long size,
86 /* during the early (pre-slab) stage, every caller must request the same size */

87 void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node, in vmemmap_alloc_block_buf() argument
93 return altmap_alloc_block_buf(size, altmap); in vmemmap_alloc_block_buf()
95 ptr = sparse_buffer_alloc(size); in vmemmap_alloc_block_buf()
97 ptr = vmemmap_alloc_block(size, node); in vmemmap_alloc_block_buf()
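[Editor's note] Read together, the lines from vmemmap_alloc_block_buf() give the allocation fallback order: altmap first, then the preallocated sparse buffer, then a regular block allocation. A sketch of the whole function as implied by those lines (the local name ptr is taken from line 95):

    void * __meminit vmemmap_alloc_block_buf(unsigned long size, int node,
                                             struct vmem_altmap *altmap)
    {
            void *ptr;

            /* Device-provided backing store, if any, takes priority. */
            if (altmap)
                    return altmap_alloc_block_buf(size, altmap);

            /* Early boot: carve from the preallocated sparse buffer. */
            ptr = sparse_buffer_alloc(size);
            if (!ptr)
                    ptr = vmemmap_alloc_block(size, node);
            return ptr;
    }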
103 return altmap->base_pfn + altmap->reserve + altmap->alloc in vmem_altmap_next_pfn()
104 + altmap->align; in vmem_altmap_next_pfn()
109 unsigned long allocated = altmap->alloc + altmap->align; in vmem_altmap_nr_free()
111 if (altmap->free > allocated) in vmem_altmap_nr_free()
112 return altmap->free - allocated; in vmem_altmap_nr_free()
116 static void * __meminit altmap_alloc_block_buf(unsigned long size, in altmap_alloc_block_buf() argument
121 if (size & ~PAGE_MASK) { in altmap_alloc_block_buf()
123 __func__, size); in altmap_alloc_block_buf()
128 nr_pfns = size >> PAGE_SHIFT; in altmap_alloc_block_buf()
130 nr_align = ALIGN(pfn, nr_align) - pfn; in altmap_alloc_block_buf()
134 altmap->alloc += nr_pfns; in altmap_alloc_block_buf()
135 altmap->align += nr_align; in altmap_alloc_block_buf()
139 __func__, pfn, altmap->alloc, altmap->align, nr_pfns); in altmap_alloc_block_buf()
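[Editor's note] The altmap path hands out page frames from the device-provided reservation instead of system RAM. Piecing the matched lines together gives roughly the function below; the find_first_bit() alignment step and the final __va(__pfn_to_phys()) conversion are recalled from memory rather than shown above, so treat them as assumptions:

    static void * __meminit altmap_alloc_block_buf(unsigned long size,
                                                   struct vmem_altmap *altmap)
    {
            unsigned long pfn, nr_pfns, nr_align;

            /* Only whole pages can be carved out of the altmap reservation. */
            if (size & ~PAGE_MASK) {
                    pr_warn_once("%s: allocations must be multiple of PAGE_SIZE (%ld)\n",
                                 __func__, size);
                    return NULL;
            }

            pfn = vmem_altmap_next_pfn(altmap);
            nr_pfns = size >> PAGE_SHIFT;

            /* Round the start pfn up to the natural alignment of the request. */
            nr_align = 1UL << find_first_bit(&nr_pfns, BITS_PER_LONG);
            nr_align = ALIGN(pfn, nr_align) - pfn;
            if (nr_pfns + nr_align > vmem_altmap_nr_free(altmap))
                    return NULL;

            altmap->alloc += nr_pfns;
            altmap->align += nr_align;
            pfn += nr_align;

            pr_debug("%s: pfn: %#lx alloc: %ld align: %ld nr: %#lx\n",
                     __func__, pfn, altmap->alloc, altmap->align, nr_pfns);
            return __va(__pfn_to_phys(pfn));
    }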
150 pr_warn_once("[%lx-%lx] potential offnode page_structs\n", in vmemmap_verify()
151 start, end - 1); in vmemmap_verify()
163 if (ptpfn == (unsigned long)-1) { in vmemmap_pte_populate()
171 * there might be a free_pages() call to this page allocated in vmemmap_pte_populate()
187 static void * __meminit vmemmap_alloc_block_zero(unsigned long size, int node) in vmemmap_alloc_block_zero() argument
189 void *p = vmemmap_alloc_block(size, node); in vmemmap_alloc_block_zero()
193 memset(p, 0, size); in vmemmap_alloc_block_zero()
293 return -ENOMEM; in vmemmap_populate_range()
302 return vmemmap_populate_range(start, end, node, altmap, -1, 0); in vmemmap_populate_basepages()
306 * Undo populate_hvo, and replace it with a normal base page mapping.
310 * hugetlb page spans multiple zones, which can only be verified
317 * 2) The rest of the vmemmap pages are mirrors of the last head page.
328 * be accessing these page structures. in vmemmap_undo_hvo()
335 * Clear mirrored mappings for tail page structs. in vmemmap_undo_hvo()
343 * Clear and free mappings for head page and first tail page in vmemmap_undo_hvo()
346 for (maddr = addr; headpages-- > 0; maddr += PAGE_SIZE) { in vmemmap_undo_hvo()
359 * Write protect the mirrored tail page structs for HVO. This will be
363 * page structures will not be written to during initialization,
382 * Populate vmemmap pages HVO-style. The first page contains the head
383 * page and needed tail pages, the other ones are mirrors of the first
384 * page.
393 pte = vmemmap_populate_address(maddr, node, NULL, -1, 0); in vmemmap_populate_hvo()
395 return -ENOMEM; in vmemmap_populate_hvo()
399 * Reuse the last page struct page mapped above for the rest. in vmemmap_populate_hvo()
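[Editor's note] Lines 382-399 describe the HVO layout: the first headsize bytes worth of vmemmap pages are populated individually, and everything after them maps to the last of those pages. A hedged sketch of that flow; the signature and the headsize parameter name are assumptions, while the two helpers are called with the same argument shapes seen on lines 393 and 302 (headsize is assumed to be a non-zero multiple of PAGE_SIZE):

    int __meminit vmemmap_populate_hvo(unsigned long addr, unsigned long end,
                                       int node, unsigned long headsize)
    {
            unsigned long maddr;
            pte_t *pte;

            /* Populate the head page and the needed tail pages one by one. */
            for (maddr = addr; maddr < addr + headsize; maddr += PAGE_SIZE) {
                    pte = vmemmap_populate_address(maddr, node, NULL, -1, 0);
                    if (!pte)
                            return -ENOMEM;
            }

            /*
             * Reuse the last page struct page mapped above for the rest:
             * every remaining vmemmap page is a mirror of it.
             */
            return vmemmap_populate_range(maddr, end, node, NULL,
                                          pte_pfn(ptep_get(pte)), 0);
    }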
431 return -ENOMEM; in vmemmap_populate_hugepages()
435 return -ENOMEM; in vmemmap_populate_hugepages()
439 return -ENOMEM; in vmemmap_populate_hugepages()
456 * a configuration issue with the size of the altmap. in vmemmap_populate_hugepages()
458 return -ENOMEM; in vmemmap_populate_hugepages()
463 return -ENOMEM; in vmemmap_populate_hugepages()
470 * For compound pages bigger than section size (e.g. x86 1G compound
471 * pages with 2M subsection size) fill the rest of sections as tail
476 * at section memmap populate corresponds to the in-progress range
483 unsigned long offset = start_pfn - in reuse_compound_section()
484 PHYS_PFN(pgmap->ranges[pgmap->nr_range].start); in reuse_compound_section()
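[Editor's note] The offset computed on lines 483-484 decides whether this section's memmap can reuse tail pages populated for a previous section of the same compound page. A hedged reconstruction of that check (the IS_ALIGNED/PAGES_PER_SUBSECTION details are recalled from memory, not shown above):

    /*
     * Reuse is only possible when this section is not the first section
     * of the compound page, i.e. when the section start is not aligned
     * to the compound page size.
     */
    static bool __meminit reuse_compound_section(unsigned long start_pfn,
                                                 struct dev_pagemap *pgmap)
    {
            unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
            unsigned long offset = start_pfn -
                    PHYS_PFN(pgmap->ranges[pgmap->nr_range].start);

            return !IS_ALIGNED(offset, nr_pages) && nr_pages > PAGES_PER_SUBSECTION;
    }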
493 addr -= PAGE_SIZE; in compound_section_tail_page()
497 * page data can be reused. in compound_section_tail_page()
511 unsigned long size, addr; in vmemmap_populate_compound_pages() local
518 return -ENOMEM; in vmemmap_populate_compound_pages()
521 * Reuse the page that was populated in the prior iteration in vmemmap_populate_compound_pages()
529 size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page)); in vmemmap_populate_compound_pages()
530 for (addr = start; addr < end; addr += size) { in vmemmap_populate_compound_pages()
531 unsigned long next, last = addr + size; in vmemmap_populate_compound_pages()
533 /* Populate the head page vmemmap page */ in vmemmap_populate_compound_pages()
534 pte = vmemmap_populate_address(addr, node, NULL, -1, 0); in vmemmap_populate_compound_pages()
536 return -ENOMEM; in vmemmap_populate_compound_pages()
538 /* Populate the tail pages vmemmap page */ in vmemmap_populate_compound_pages()
540 pte = vmemmap_populate_address(next, node, NULL, -1, 0); in vmemmap_populate_compound_pages()
542 return -ENOMEM; in vmemmap_populate_compound_pages()
545 * Reuse the previous page for the rest of tail pages in vmemmap_populate_compound_pages()
553 return -ENOMEM; in vmemmap_populate_compound_pages()
561 struct page * __meminit __populate_section_memmap(unsigned long pfn, in __populate_section_memmap()
566 unsigned long end = start + nr_pages * sizeof(struct page); in __populate_section_memmap()
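[Editor's note] For scale, with the common x86-64 values (assumed here: 128 MiB sections, 4 KiB base pages, 64-byte struct page), the computation on line 566 works out to:

    pages per section   = 128 MiB / 4 KiB                = 32768
    memmap per section  = 32768 * sizeof(struct page)
                        = 32768 * 64 bytes               = 2 MiB

That is exactly one PMD-sized huge page, which is why the hugepage populate path above can map a whole section's memmap with a single PMD entry.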
598 * This is called just before the initialization of page structures