/linux/arch/powerpc/include/asm/nohash/32/
  pte-8xx.h
    123: unsigned long clr, unsigned long set, int huge);
    136: int huge = psize > mmu_virtual_psize ? 1 : 0;  in __ptep_set_access_flags() local
    138: pte_update(vma->vm_mm, address, ptep, clr, set, huge);  in __ptep_set_access_flags()
    175: static inline int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)  in number_of_cells_per_pte() argument
    177: if (!huge)  in number_of_cells_per_pte()
    188: unsigned long clr, unsigned long set, int huge)  in __pte_update() argument
    196: num = number_of_cells_per_pte(pmd, new, huge);  in __pte_update()
    211: unsigned long clr, unsigned long set, int huge)  in pte_update() argument
    215: if (huge && ptep_is_8m_pmdp(mm, addr, ptep)) {  in pte_update()
    218: old = __pte_update(mm, addr, pte_offset_kernel(pmdp, 0), clr, set, huge);  in pte_update()
    [all …]
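The recurring pattern in these hits is that pte_update() takes an explicit huge flag, which callers derive from the page-size index (hit 136). A loose illustrative sketch of that idiom, not kernel code: MMU_VIRTUAL_PSIZE is a stand-in constant for the kernel's mmu_virtual_psize variable.

    /*
     * Hedged sketch of the caller-side idiom at hits 136-138, with
     * simplified stand-ins; not the kernel source.
     */
    #define MMU_VIRTUAL_PSIZE 0        /* base page-size index (stand-in) */

    static inline int psize_is_huge(int psize)
    {
            /* Any page-size index above the base virtual page size
             * is treated as a huge mapping and passed to the update
             * helper as the "huge" flag. */
            return psize > MMU_VIRTUAL_PSIZE ? 1 : 0;
    }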
|
/linux/Documentation/admin-guide/mm/
  transhuge.rst
    11: using huge pages for the backing of virtual memory with huge pages
    20: the huge page size is 2M, although the actual numbers may vary
    56: prominent because the size of each page isn't as huge as the PMD-sized
    66: collapses sequences of basic pages into PMD-sized huge pages.
    125: PMD-sized huge pages unconditionally.
    196: MADV_COLLAPSE)`` can still cause transparent huge pages to be
    199: By default kernel tries to use huge, PMD-mappable zero page on read
    200: page fault to anonymous mapping. It's possible to disable huge zero
    323: swap when collapsing a group of pages into a transparent huge page::
    409: Traditionally, tmpfs only supported a single huge page size ("PMD"). Today,
    [all …]
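These transhuge.rst hits cover THP backing anonymous memory and the madvise() control points. A minimal user-space sketch using the real MADV_HUGEPAGE flag of madvise(2) to request THP for one region, with error handling kept short:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 4UL << 20;       /* 4 MiB, a multiple of a 2M PMD */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }

            /* Advise the kernel that huge pages are welcome here. */
            if (madvise(p, len, MADV_HUGEPAGE))
                    perror("madvise(MADV_HUGEPAGE)");

            ((char *)p)[0] = 1;           /* fault in; may get a 2M THP */
            munmap(p, len);
            return 0;
    }

Whether the region actually receives a PMD-sized page still depends on the global enabled/defrag settings the document describes.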
|
  concepts.rst
    79: `huge`. Usage of huge pages significantly reduces pressure on TLB,
    83: memory with the huge pages. The first one is `HugeTLB filesystem`, or
    86: the memory and mapped using huge pages. The hugetlbfs is described at
    89: Another, more recent, mechanism that enables use of the huge pages is
    92: the system memory should and can be mapped by the huge pages, THP
    201: buffer for DMA, or when THP allocates a huge page. Memory `compaction`
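concepts.rst contrasts hugetlbfs with THP. For the explicit side, a short sketch that takes one page straight from the hugetlb pool via the real MAP_HUGETLB mmap(2) flag; it fails unless huge pages were reserved first (e.g. via /proc/sys/vm/nr_hugepages):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2UL << 20;  /* one huge page, assuming a 2M default size */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
            if (p == MAP_FAILED) {
                    perror("mmap(MAP_HUGETLB)");  /* ENOMEM if the pool is empty */
                    return 1;
            }
            munmap(p, len);
            return 0;
    }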
|
/linux/tools/testing/selftests/mm/
  charge_reserved_hugetlb.sh
    54: if [[ -e /mnt/huge ]]; then
    55: rm -rf /mnt/huge/*
    56: umount /mnt/huge || echo error
    57: rmdir /mnt/huge
    262: if [[ -e /mnt/huge ]]; then
    263: rm -rf /mnt/huge/*
    264: umount /mnt/huge
    265: rmdir /mnt/huge
    292: mkdir -p /mnt/huge
    293: mount -t hugetlbfs -o pagesize=${MB}M,size=256M none /mnt/huge
    [all …]
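The selftest mounts hugetlbfs from shell at hit 293. A sketch of the same mount via mount(2) from C, assuming a 2M page size and an existing /mnt/huge directory, and requiring CAP_SYS_ADMIN:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Equivalent of:
             *   mount -t hugetlbfs -o pagesize=2M,size=256M none /mnt/huge */
            if (mount("none", "/mnt/huge", "hugetlbfs", 0,
                      "pagesize=2M,size=256M")) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }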
|
  run_vmtests.sh
    73: test transparent huge pages
    75: test hugetlbfs huge pages
    131: for huge in -t -T "-H -m $hugetlb_mb"; do
    143: $huge $test_cmd $write $share $num
|
/linux/Documentation/mm/
  hugetlbfs_reserv.rst
    9: typically preallocated for application use. These huge pages are instantiated
    10: in a task's address space at page fault time if the VMA indicates huge pages
    11: are to be used. If no huge page exists at page fault time, the task is sent
    12: a SIGBUS and often dies an unhappy death. Shortly after huge page support
    14: of huge pages at mmap() time. The idea is that if there were not enough
    15: huge pages to cover the mapping, the mmap() would fail. This was first
    17: were enough free huge pages to cover the mapping. Like most things in the
    19: 'reserve' huge pages at mmap() time to ensure that huge pages would be
    21: describe how huge page reserve processing is done in the v4.10 kernel.
    34: This is a global (per-hstate) count of reserved huge pages. Reserved
    [all …]
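The per-hstate reserve count at hit 34 surfaces in user space as the HugePages_Rsvd field of /proc/meminfo; a small sketch that prints it:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/meminfo", "r");
            if (!f) { perror("fopen"); return 1; }
            while (fgets(line, sizeof(line), f))
                    if (!strncmp(line, "HugePages_Rsvd:", 15))
                            fputs(line, stdout);  /* e.g. "HugePages_Rsvd:  128" */
            fclose(f);
            return 0;
    }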
|
  arch_pgtable_helpers.rst
    136: | pmd_set_huge | Creates a PMD huge mapping |
    138: | pmd_clear_huge | Clears a PMD huge mapping |
    187: | pud_set_huge | Creates a PUD huge mapping |
    189: | pud_clear_huge | Clears a PUD huge mapping |
|
  unevictable-lru.rst
    315: (unless it is a PTE mapping of a part of a transparent huge page). Or when
    350: hugetlbfs ranges, allocating the huge pages and populating the PTEs.
    437: A transparent huge page is represented by a single entry on an LRU list.
    441: If a user tries to mlock() part of a huge page, and no user mlock()s the
    442: whole of the huge page, we want the rest of the page to be reclaimable.
    447: We handle this by keeping PTE-mlocked huge pages on evictable LRU lists:
    450: This way the huge page is accessible for vmscan. Under memory pressure the
    455: of a transparent huge page which are mapped only by PTEs in VM_LOCKED VMAs.
    491: (unless it was a PTE mapping of a part of a transparent huge page).
    516: (unless it was a PTE mapping of a part of a transparent huge page).
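Hits 441-450 describe mlock() applied to part of a huge page. A sketch of exactly that situation from user space, locking one base page inside a region that madvise() may have backed with a PMD-sized THP:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 2UL << 20;                   /* one PMD-sized area */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (p == MAP_FAILED) { perror("mmap"); return 1; }
            madvise(p, len, MADV_HUGEPAGE);           /* best effort */

            /* Lock only the first 4 KiB: a PTE mapping of part of the
             * (possibly) huge page; the rest must stay reclaimable. */
            if (mlock(p, 4096))
                    perror("mlock");
            munmap(p, len);
            return 0;
    }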
|
/linux/arch/powerpc/include/asm/book3s/64/
  hash.h
    162: pte_t *ptep, unsigned long pte, int huge);
    190: int huge)  in hash__pte_update() argument
    196: if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && huge) {  in hash__pte_update()
    213: if (!huge)  in hash__pte_update()
    217: hpte_need_flush(mm, addr, ptep, old, huge);  in hash__pte_update()
|
  radix.h
    176: int huge)  in radix__pte_update() argument
    181: if (!huge)  in radix__pte_update()
|
/linux/arch/powerpc/include/asm/nohash/
  pgtable.h
    7: unsigned long clr, unsigned long set, int huge);
    58: unsigned long clr, unsigned long set, int huge)  in pte_update() argument
    69: if (huge)  in pte_update()
    95: if (!huge)  in pte_update()
    143: int huge = psize > mmu_virtual_psize ? 1 : 0;  in __ptep_set_access_flags() local
    145: pte_update(vma->vm_mm, address, ptep, 0, set, huge);  in __ptep_set_access_flags()
|
/linux/arch/loongarch/mm/
  init.c
    123: int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;  in vmemmap_check_pmd() local
    125: if (huge)  in vmemmap_check_pmd()
    128: return huge;  in vmemmap_check_pmd()
|
/linux/mm/
  shmem.c
    121: int huge;  member
    538: * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
    541: * disables huge pages for the mount;
    543: * enables huge pages for the mount;
    545: * only allocate huge pages if the page will be fully within i_size,
    548: * only allocate huge pages if requested with madvise();
    561: * disables huge on shm_mnt and all mounts, for emergency use;
    563: * enables huge on shm_mnt and all mounts, w/o needing option, for testing;
    643: * The huge orde…  in shmem_huge_global_enabled()
    672: int huge;  in shmem_parse_huge() local
    705: shmem_format_huge(int huge)  in shmem_format_huge() argument
    5495: int huge, err;  in shmem_enabled_store() local
    5607: int huge;  in setup_transparent_hugepage_shmem() local
    5622: int huge;  in setup_transparent_hugepage_tmpfs() local
    [all …]
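The comments quoted above document the tmpfs huge= mount option. A sketch of setting it programmatically with mount(2); /mnt/tmp is a hypothetical mount point and CAP_SYS_ADMIN is required:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* "huge=always" enables huge pages for this tmpfs mount. */
            if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0, "huge=always")) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }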
  memory-failure.c
    2664: bool huge = false;  in unpoison_memory() local
    2720: huge = true;  in unpoison_memory()
    2736: huge = true;  in unpoison_memory()
    2754: if (!huge)  in unpoison_memory()
    2777: bool huge = folio_test_hugetlb(folio);  in soft_offline_in_use_page() local
    2786: if (!huge && folio_test_large(folio)) {  in soft_offline_in_use_page()
    2805: if (!huge)  in soft_offline_in_use_page()
    2814: if (!huge && folio_test_lru(folio) && !folio_test_swapcache(folio))  in soft_offline_in_use_page()
    2843: bool release = !huge;  in soft_offline_in_use_page()
    2845: if (!page_handle_poison(page, huge, release))  in soft_offline_in_use_page()
    [all …]
|
  Kconfig
    324: providing a huge performance boost. If this option is not enabled,
    609: with the reduced number of transparent huge pages that could be used
    658: to the processors accessing. The second is when allocating huge
    659: pages as migration can relocate pages to satisfy a huge page
    794: of the huge zero folio and expand the places in the kernel
    795: that can use huge zero folios. For instance, block I/O benefits
    798: With this option enabled, the huge zero folio is allocated
    799: once and never freed. One full huge page's worth of memory shall
    815: Transparent Hugepages allows the kernel to use huge pages and
    816: huge tlb transparently to the applications whenever possible.
    [all …]
|
/linux/Documentation/driver-api/cxl/allocation/
  hugepages.rst
    19: Different huge page sizes allow different memory configurations.
    24: for use as 2MB huge pages.
|
/linux/Documentation/admin-guide/hw-vuln/
  multihit.rst
    81: * - KVM: Mitigation: Split huge pages
    111: In order to mitigate the vulnerability, KVM initially marks all huge pages
    125: The KVM hypervisor mitigation mechanism for marking huge pages as
    134: non-executable huge pages in Linux kernel KVM module. All huge
|
/linux/Documentation/core-api/
  pin_user_pages.rst
    64: severely by huge pages, because each tail page adds a refcount to the
    66: field, refcount overflows were seen in some huge page stress tests.
    68: This also means that huge pages and large folios do not suffer
    248: acquired since the system was powered on. For huge pages, the head page is
    249: pinned once for each page (head page and each tail page) within the huge page.
    250: This follows the same sort of behavior that get_user_pages() uses for huge
    251: pages: the head page is refcounted once for each tail or head page in the huge
    252: page, when get_user_pages() is applied to a huge page.
    256: PAGE_SIZE granularity, even if the original pin was applied to a huge page.
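The pinning rules above concern the kernel-internal pin_user_pages() API. A hedged kernel-side sketch of a driver taking long-term pins; the prototypes match recent kernels but vary across versions, so treat this as an outline only:

    #include <linux/mm.h>

    static int pin_user_buffer(unsigned long uaddr, unsigned long nr_pages,
                               struct page **pages)
    {
            long pinned;

            /* Each page (head or tail of a huge page) gets its own pin,
             * per the per-page accounting described above. */
            pinned = pin_user_pages(uaddr, nr_pages,
                                    FOLL_WRITE | FOLL_LONGTERM, pages);
            if (pinned < 0)
                    return pinned;
            if (pinned != (long)nr_pages) {
                    unpin_user_pages(pages, pinned);  /* release partial pins */
                    return -EFAULT;
            }
            return 0;
    }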
|
/linux/arch/alpha/lib/
  ev6-clear_user.S
    86: subq $1, 16, $4    # .. .. .. E : If < 16, we can not use the huge loop
    87: and $16, 0x3f, $2    # .. .. E .. : Forward work for huge loop
    88: subq $2, 0x40, $3    # .. E .. .. : bias counter (huge loop)
|
/linux/Documentation/arch/riscv/
  vm-layout.rst
    42: …0000004000000000 | +256 GB | ffffffbfffffffff | ~16M TB | ... huge, almost 64 bits wide hole of…
    79: …0000800000000000 | +128 TB | ffff7fffffffffff | ~16M TB | ... huge, almost 64 bits wide hole of…
    115: …0100000000000000 | +64 PB | feffffffffffffff | ~16K PB | ... huge, almost 64 bits wide hole of…
|
/linux/arch/powerpc/mm/book3s64/
  hash_tlb.c
    41: pte_t *ptep, unsigned long pte, int huge)  in hpte_need_flush() argument
    61: if (huge) {  in hpte_need_flush()
|
/linux/Documentation/admin-guide/blockdev/
  zram.rst
    145: size of the disk when not in use so a huge zram is wasteful.
    352: echo huge > /sys/block/zramX/writeback
    360: Additionally, if a user choose to writeback only huge and idle pages
    447: algorithm can, for example, be more successful compressing huge pages (those
    484: #HUGE pages recompression is activated by `huge` mode
    485: echo "type=huge" > /sys/block/zram0/recompress
    522: echo "type=huge algo=zstd" > /sys/block/zramX/recompress
    525: echo "type=huge priority=1" > /sys/block/zramX/recompress
    555: huge page
    564: and the block's state is huge so it is written back to the backing
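Hit 352 shows the writeback trigger as a shell one-liner. The same store written from C, assuming device zram0 with a backing_dev already configured:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Equivalent of: echo huge > /sys/block/zram0/writeback */
            int fd = open("/sys/block/zram0/writeback", O_WRONLY);
            if (fd < 0) { perror("open"); return 1; }
            if (write(fd, "huge", 4) != 4)    /* write back all huge pages */
                    perror("write");
            close(fd);
            return 0;
    }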
|
/linux/Documentation/arch/x86/x86_64/
  mm.rst
    36: …0000800000000000 | +128 TB | 7fffffffffffffff | ~8 EB | ... huge, almost 63 bits wide hole of…
    47: …8000000000000000 | -8 EB | ffff7fffffffffff | ~8 EB | ... huge, almost 63 bits wide hole of…
    107: …0100000000000000 | +64 PB | 7fffffffffffffff | ~8 EB | ... huge, almost 63 bits wide hole of…
    117: …8000000000000000 | -8 EB | feffffffffffffff | ~8 EB | ... huge, almost 63 bits wide hole of…
|
/linux/Documentation/features/vm/huge-vmap/
  arch-support.txt
    2: # Feature name: huge-vmap
|
/linux/Documentation/filesystems/ext4/
  bigalloc.rst
    9: exceeds the page size. However, for a filesystem of mostly huge files,
|