Lines Matching +full:4 +full:kb +full:- +full:page

1 /* SPDX-License-Identifier: GPL-2.0 */
5 #define H_PTE_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 64KB = 16MB
6 #define H_PMD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16MB = 16GB
7 #define H_PUD_INDEX_SIZE 10 // size: 8B << 10 = 8KB, maps 2^10 x 16GB = 16TB
8 #define H_PGD_INDEX_SIZE 8 // size: 8B << 8 = 2KB, maps 2^8 x 16TB = 4PB
11 * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS
12 * if we increase SECTIONS_WIDTH we will not store node details in page->flags and
13 * page_to_nid does a page->section->node lookup
37 * Define the address range of the kernel non-linear virtual area
46 #define H_PAGE_COMBO _RPAGE_RPN0 /* this is a combo 4k page */
47 #define H_PAGE_4K_PFN _RPAGE_RPN1 /* PFN is for a single 4k page */
59 * We need to differentiate between explicit huge page and THP huge
60 * page, since a THP huge page also needs to track real subpage details
67 * We use a 2K PTE page fragment and another 2K for storing
87 * uses a second "half" of the page table to encode sub-page information
88 * in order to deal with 64K made of 4K HW pages. Thus we override the
112 * shift the hidx representation by one-modulo-0xf; i.e. hidx 0 is represented
143 * PTE by using the read-side barrier smp_rmb(). __real_pte() can be in pte_set_hidx()
156 * a 16M page as well as we want only one iteration
160 unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
165 vpn += (1L << (shift - VPN_SHIFT))) { \
180 return -EINVAL; in hash__remap_4k_pfn()
220 * with 4K HPTE we need 4096 entries. Both will fit in a 4K pgtable_t.
223 * are also used as normal page PTE pointers. So if we have any pointers
247 * page. The hugetlbfs page table walking and mangling paths are totally