
Searched refs: HPAGE_PMD_NR (Results 1 – 21 of 21), sorted by relevance

/linux/mm/
khugepaged.c
255 if (err || max_ptes_none > HPAGE_PMD_NR - 1) in max_ptes_none_store()
280 if (err || max_ptes_swap > HPAGE_PMD_NR - 1) in max_ptes_swap_store()
306 if (err || max_ptes_shared > HPAGE_PMD_NR - 1) in max_ptes_shared_store()
377 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8; in khugepaged_init()
378 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1; in khugepaged_init()
379 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8; in khugepaged_init()
380 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2; in khugepaged_init()
542 for (_pte = pte; _pte < pte + HPAGE_PMD_NR; in __collapse_huge_page_isolate()
682 for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte += nr_ptes, in __collapse_huge_page_copy_succeeded()
755 release_pte_pages(pte, pte + HPAGE_PMD_NR, compound_pagelist); in __collapse_huge_page_copy_failed()
[all …]
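
For context: the khugepaged tunables above are all expressed in units of HPAGE_PMD_NR, the number of base pages spanned by one PMD-sized huge page. A minimal sketch of the resulting defaults, assuming a common x86-64 configuration where HPAGE_PMD_NR is 512 (4 KiB base pages, 2 MiB PMD huge pages); other configurations give other values:

/* Sketch only: mirrors the khugepaged_init() defaults shown above,
 * assuming HPAGE_PMD_NR == 512. */
#define HPAGE_PMD_NR 512
static unsigned int khugepaged_pages_to_scan   = HPAGE_PMD_NR * 8; /* 4096 */
static unsigned int khugepaged_max_ptes_none   = HPAGE_PMD_NR - 1; /* 511  */
static unsigned int khugepaged_max_ptes_swap   = HPAGE_PMD_NR / 8; /* 64   */
static unsigned int khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2; /* 256  */
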
huge_memory.c
283 return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0; in shrink_huge_zero_folio_count()
294 return HPAGE_PMD_NR; in shrink_huge_zero_folio_scan()
1317 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in map_anon_folio_pmd_pf()
1570 add_mm_counter(mm, mm_counter_file(fop.folio), HPAGE_PMD_NR); in insert_pmd()
1841 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); in copy_huge_non_present_pmd()
1935 add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); in copy_huge_pmd()
2236 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); in do_huge_pmd_numa_page()
2257 task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); in do_huge_pmd_numa_page()
2396 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR); in zap_huge_pmd()
2401 -HPAGE_PMD_NR); in zap_huge_pmd()
[all …]
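
For context: the huge_memory.c hits follow a single pattern: per-mm RSS counters are kept in base-page units, so installing or zapping one PMD-mapped THP adjusts the relevant counter by plus or minus HPAGE_PMD_NR. A minimal sketch of that bookkeeping, with add_mm_counter() replaced by a hypothetical stub and HPAGE_PMD_NR assumed to be 512:

/* Sketch only: illustrates the +/- HPAGE_PMD_NR counter accounting above.
 * anon_rss and the helpers below are hypothetical stand-ins, not kernel API. */
#define HPAGE_PMD_NR 512
static long anon_rss; /* stand-in for the MM_ANONPAGES counter */

static void account_map_anon_thp(void) { anon_rss += HPAGE_PMD_NR; } /* fault/copy paths */
static void account_zap_anon_thp(void) { anon_rss -= HPAGE_PMD_NR; } /* zap_huge_pmd() path */
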
page_vma_mapped.c
142 if ((pfn + HPAGE_PMD_NR - 1) < pvmw->pfn) in check_pmd()
287 (pvmw->nr_pages >= HPAGE_PMD_NR)) { in page_vma_mapped_walk()
migrate_device.c
875 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in migrate_vma_insert_huge_pmd_page()
903 for (i = 0; i < HPAGE_PMD_NR; i++) in migrate_vma_insert_huge_pmd_page()
925 for (i = 1; i < HPAGE_PMD_NR; i++) in migrate_vma_split_unmapped_folio()
952 nr = HPAGE_PMD_NR; in migrate_vma_nr_pages()
swap.h
12 #define SWAPFILE_CLUSTER HPAGE_PMD_NR
mprotect.c
495 if (ret == HPAGE_PMD_NR) { in change_pmd_range()
496 pages += HPAGE_PMD_NR; in change_pmd_range()
rmap.c
1614 __folio_add_anon_rmap(folio, page, HPAGE_PMD_NR, vma, address, flags, in folio_add_anon_rmap_pmd()
1743 __folio_add_file_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); in folio_add_file_rmap_pmd()
1911 __folio_remove_rmap(folio, page, HPAGE_PMD_NR, vma, PGTABLE_LEVEL_PMD); in folio_remove_rmap_pmd()
vmstat.c
1762 pages /= HPAGE_PMD_NR; in is_zone_first_populated()
1905 v[i] /= HPAGE_PMD_NR; in vmstat_start()
filemap.c
3355 ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1); in do_sync_mmap_readahead()
3356 ra->size = HPAGE_PMD_NR; in do_sync_mmap_readahead()
3363 ra->async_size = HPAGE_PMD_NR; in do_sync_mmap_readahead()
memory-failure.c
727 if (pfn <= hwp->pfn && hwp->pfn < pfn + HPAGE_PMD_NR) { in check_hwpoisoned_pmd_entry()
migrate.c
1583 #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
gup.c
741 *page_mask = HPAGE_PMD_NR - 1; in follow_huge_pmd()
shmem.c
1968 if (pages == HPAGE_PMD_NR) in shmem_alloc_and_add_folio()
1991 if (pages == HPAGE_PMD_NR) { in shmem_alloc_and_add_folio()
memory.c
5456 flush_icache_pages(vma, page, HPAGE_PMD_NR); in do_set_pmd()
5462 add_mm_counter(vma->vm_mm, mm_counter_file(folio), HPAGE_PMD_NR); in do_set_pmd()
vmscan.c
1315 if (nr_pages >= HPAGE_PMD_NR) { in shrink_folio_list()
/linux/lib/
test_hmm.c
580 if (is_large && IS_ALIGNED(pfn, HPAGE_PMD_NR) in dmirror_allocate_chunk()
581 && (pfn + HPAGE_PMD_NR <= pfn_last)) { in dmirror_allocate_chunk()
584 pfn += HPAGE_PMD_NR; in dmirror_allocate_chunk()
602 mdevice->calloc += HPAGE_PMD_NR; in dmirror_allocate_chunk()
721 nr = HPAGE_PMD_NR; in dmirror_migrate_alloc_and_copy()
/linux/include/linux/
huge_mm.h
118 #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER) macro
537 return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1)); in is_huge_zero_pfn()
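
For context: huge_mm.h line 118 above is the definition site; HPAGE_PMD_ORDER is the PMD-to-base-page order (PMD_SHIFT - PAGE_SHIFT), so HPAGE_PMD_NR counts the base pages in one PMD-mapped huge page, and the mask at line 537 rounds a pfn down to the start of such a page. A minimal worked example, assuming x86-64 with 4 KiB base pages (PAGE_SHIFT 12, PMD_SHIFT 21); hpage_align_pfn() is a hypothetical helper named here only for illustration:

/* Sketch only: assumes PAGE_SHIFT == 12 and PMD_SHIFT == 21 (x86-64, 4 KiB pages). */
#define PAGE_SHIFT      12
#define PMD_SHIFT       21
#define HPAGE_PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)   /* 9 */
#define HPAGE_PMD_NR    (1 << HPAGE_PMD_ORDER)     /* 512 */

/* Round a pfn down to its huge-page start, the idiom used by is_huge_zero_pfn(). */
static unsigned long hpage_align_pfn(unsigned long pfn)
{
	return pfn & ~((unsigned long)HPAGE_PMD_NR - 1);
}
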
/linux/drivers/gpu/drm/nouveau/
nouveau_dmem.c
848 if (max > (unsigned long)HPAGE_PMD_NR) in nouveau_dmem_migrate_vma()
849 max = (unsigned long)HPAGE_PMD_NR; in nouveau_dmem_migrate_vma()
/linux/arch/arc/mm/
tlb.c
534 update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR); in update_mmu_cache_pmd()
/linux/drivers/base/
node.c
612 pages /= HPAGE_PMD_NR; in node_read_vmstat()
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_vram_mgr.c
472 pages_per_block = HPAGE_PMD_NR; in amdgpu_vram_mgr_new()