| /linux/include/linux/ |
| H A D | pagewalk.h | 131 int walk_page_range(struct mm_struct *mm, unsigned long start,
|
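Every caller below follows the calling convention declared here: pass the target mm, a [start, end) range, a struct mm_walk_ops table of callbacks, and a private cookie that the walker hands back through walk->private. A minimal sketch of that pattern, assuming a recent kernel where the caller holds mmap_lock for read and the ops table carries a walk_lock annotation; the callback, ops table, and helper names are illustrative, not taken from any file in this list.

    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    /* Illustrative callback: count present PTEs in the walked range. */
    static int count_present_pte(pte_t *pte, unsigned long addr,
                                 unsigned long next, struct mm_walk *walk)
    {
            unsigned long *nr_present = walk->private;

            if (pte_present(ptep_get(pte)))
                    (*nr_present)++;
            return 0;       /* 0 = keep walking */
    }

    static const struct mm_walk_ops count_present_ops = {
            .pte_entry = count_present_pte,
            .walk_lock = PGWALK_RDLOCK,     /* recent kernels: declare locking */
    };

    /* Hypothetical helper: count present PTEs in [start, end) of @mm. */
    static long count_present_pages(struct mm_struct *mm,
                                    unsigned long start, unsigned long end)
    {
            unsigned long nr_present = 0;
            int err;

            mmap_read_lock(mm);
            err = walk_page_range(mm, start, end, &count_present_ops,
                                  &nr_present);
            mmap_read_unlock(mm);

            if (err)
                    return err;
            return nr_present;
    }
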
| /linux/mm/damon/ |
| H A D | vaddr.c | 393 walk_page_range(mm, addr, addr + 1, &damon_mkold_ops, NULL); in damon_va_mkold()
|       |         | 528 walk_page_range(mm, addr, addr + 1, &damon_young_ops, &arg); in damon_va_young()
|       |         | 838 walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv); in damos_va_migrate()
|       |         | 945 walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv); in damos_va_stat()
|
| /linux/mm/ |
| H A D | mincore.c | 262 err = walk_page_range(vma->vm_mm, addr, end, &mincore_walk_ops, vec); in do_mincore()
|
| H A D | mlock.c | 448 walk_page_range(vma->vm_mm, start, end, &mlock_walk_ops, NULL); in mlock_vma_pages_range()
|
| H A D | hmm.c | 675 ret = walk_page_range(mm, hmm_vma_walk.last, range->end, in hmm_range_fault()
|
| H A D | pagewalk.c | 578 int walk_page_range(struct mm_struct *mm, unsigned long start, in walk_page_range() function
|
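This is the walker core itself. Several callers in this list (do_mincore(), hmm_range_fault(), queue_pages_range(), walk_mm()) check the return value because the walk stops as soon as a callback returns non-zero and walk_page_range() passes that value back: a negative return signals an error, while a positive one can serve as an early-exit marker. A hedged sketch of that early-stop pattern under the same assumptions as above; the "first present address" callback and helper are invented for illustration.

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    /*
     * Illustrative callback: stop the walk at the first present PTE by
     * returning a non-zero value, which walk_page_range() passes back.
     */
    static int find_first_present(pte_t *pte, unsigned long addr,
                                  unsigned long next, struct mm_walk *walk)
    {
            unsigned long *found = walk->private;

            if (!pte_present(ptep_get(pte)))
                    return 0;               /* keep going */

            *found = addr;
            return 1;                       /* positive: stop, not an error */
    }

    static const struct mm_walk_ops find_first_ops = {
            .pte_entry = find_first_present,
            .walk_lock = PGWALK_RDLOCK,
    };

    /* Hypothetical helper: return the first mapped address, or -ENOENT. */
    static long first_present_addr(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
    {
            unsigned long found = 0;
            int ret;

            mmap_read_lock(mm);
            ret = walk_page_range(mm, start, end, &find_first_ops, &found);
            mmap_read_unlock(mm);

            if (ret < 0)
                    return ret;             /* callback or core error */
            if (!ret)
                    return -ENOENT;         /* walk finished without a hit */
            return found;
    }
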
| H A D | mprotect.c | 724 error = walk_page_range(current->mm, start, end, in mprotect_fixup()
|
| H A D | migrate_device.c | 522 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, in migrate_vma_collect()
|
| H A D | memory-failure.c | 836 ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops, in kill_accessing_process()
|
| H A D | mempolicy.c | 991 err = walk_page_range(mm, start, end, ops, &qp); in queue_pages_range()
|
| H A D | vmscan.c | 3792 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); in walk_mm()
|
| /linux/Documentation/mm/ |
| H A D | multigen_lru.rst | 121 and calls ``walk_page_range()`` with each ``mm_struct`` on this list
|       |                  | 167 ``walk_page_range()`` with each ``mm_struct`` on this list to scan
|
| H A D | unevictable-lru.rst | 302 mlock_pte_range() via walk_page_range() via mlock_vma_pages_range().
|       |                     | 377 mlock_pte_range() via walk_page_range() via mlock_vma_pages_range() - the same
|
| /linux/arch/s390/mm/ |
| H A D | gmap.c | 2285 walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL); in s390_enable_skey()
|       |        | 2311 walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL); in s390_reset_cmma()
|       |        | 2388 r = walk_page_range(mm, state.next, end, &gather_pages_ops, &state); in __s390_uv_destroy_range()
|
| /linux/fs/proc/ |
| H A D | task_mmu.c | 1310 walk_page_range(vma->vm_mm, start, vma->vm_end, ops, mss); in smap_gather_stats()
|       |            | 1814 walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp); in clear_refs_write()
|       |            | 2270 ret = walk_page_range(mm, start_vaddr, end, &pagemap_ops, &pm); in pagemap_read()
|       |            | 3047 ret = walk_page_range(mm, walk_start, p.arg.end, in do_pagemap_scan()
|