Lines matching defs:walk in mm/pagewalk.c
31 unsigned long end, struct mm_walk *walk)
33 const struct mm_walk_ops *ops = walk->ops;
41 walk);
45 set_pte_at(walk->mm, addr, pte, new_pte);
47 if (!WARN_ON_ONCE(walk->no_vma))
48 update_mmu_cache(walk->vma, addr, pte);
50 err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
63 struct mm_walk *walk)
69 if (walk->no_vma) {
74 * and CONFIG_EFI_PGT_DUMP efi_mm goes so far as to walk them.
76 if (walk->mm == &init_mm || addr >= TASK_SIZE)
81 err = walk_pte_range_inner(pte, addr, end, walk);
82 if (walk->mm != &init_mm && addr < TASK_SIZE)
86 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
88 err = walk_pte_range_inner(pte, addr, end, walk);
93 walk->action = ACTION_AGAIN;
98 struct mm_walk *walk)
102 const struct mm_walk_ops *ops = walk->ops;
114 err = __pte_alloc(walk->mm, pmd);
116 err = ops->pte_hole(addr, next, depth, walk);
123 walk->action = ACTION_SUBTREE;
130 err = ops->pmd_entry(pmd, addr, next, walk);
134 if (walk->action == ACTION_AGAIN)
136 if (walk->action == ACTION_CONTINUE)
151 if (walk->vma)
152 split_huge_pmd(walk->vma, pmd, addr);
156 err = walk_pte_range(pmd, addr, next, walk);
160 if (walk->action == ACTION_AGAIN)
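The walk->action values seen above (ACTION_SUBTREE, ACTION_CONTINUE, ACTION_AGAIN) are how a pmd_entry or pud_entry callback steers the walker: descend into the lower level, skip it, or retry the same entry. Below is a minimal caller-side sketch of a pmd_entry callback that drives walk->action; the function name and the private counter are hypothetical, and the locking a real callback would take around the PMD is elided.

    /* Hypothetical pmd_entry callback: count huge PMD mappings and tell
     * the walker not to descend to the PTE level for them. */
    static int count_thp_pmd_entry(pmd_t *pmd, unsigned long addr,
                                   unsigned long next, struct mm_walk *walk)
    {
            unsigned long *nr_thp = walk->private;

            if (pmd_trans_huge(*pmd)) {
                    (*nr_thp)++;
                    /* Handled here: skip the PTE level under this PMD. */
                    walk->action = ACTION_CONTINUE;
            }
            /* Otherwise the default ACTION_SUBTREE descends as usual. */
            return 0;
    }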
169 struct mm_walk *walk)
173 const struct mm_walk_ops *ops = walk->ops;
185 err = __pmd_alloc(walk->mm, pud, addr);
187 err = ops->pte_hole(addr, next, depth, walk);
194 walk->action = ACTION_SUBTREE;
197 err = ops->pud_entry(pud, addr, next, walk);
201 if (walk->action == ACTION_AGAIN)
203 if (walk->action == ACTION_CONTINUE)
218 if (walk->vma)
219 split_huge_pud(walk->vma, pud, addr);
226 err = walk_pmd_range(pud, addr, next, walk);
235 struct mm_walk *walk)
239 const struct mm_walk_ops *ops = walk->ops;
250 err = __pud_alloc(walk->mm, p4d, addr);
252 err = ops->pte_hole(addr, next, depth, walk);
259 err = ops->p4d_entry(p4d, addr, next, walk);
264 err = walk_pud_range(p4d, addr, next, walk);
273 struct mm_walk *walk)
277 const struct mm_walk_ops *ops = walk->ops;
283 if (walk->pgd)
284 pgd = walk->pgd + pgd_index(addr);
286 pgd = pgd_offset(walk->mm, addr);
291 err = __p4d_alloc(walk->mm, pgd, addr);
293 err = ops->pte_hole(addr, next, 0, walk);
300 err = ops->pgd_entry(pgd, addr, next, walk);
305 err = walk_p4d_range(pgd, addr, next, walk);
322 struct mm_walk *walk)
324 struct vm_area_struct *vma = walk->vma;
330 const struct mm_walk_ops *ops = walk->ops;
338 err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
340 err = ops->pte_hole(addr, next, -1, walk);
351 struct mm_walk *walk)
359 * Decide whether we really walk over the current vma on [@start, @end)
360 * or skip it via the returned value. Return 0 if we do walk over the
362 * error, where we abort the current walk.
365 struct mm_walk *walk)
367 struct vm_area_struct *vma = walk->vma;
368 const struct mm_walk_ops *ops = walk->ops;
371 return ops->test_walk(start, end, walk);
375 * range, so we don't walk over it as we do for normal vmas. However,
384 err = ops->pte_hole(start, end, -1, walk);
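As the walk_page_test() comment above spells out, a test_walk callback returns 1 to skip the current VMA, 0 to walk it, and a negative value to abort the whole walk. A sketch of such a callback, with an illustrative name and filter:

    /* Hypothetical test_walk callback: skip I/O and PFN-mapped VMAs. */
    static int skip_special_test_walk(unsigned long start, unsigned long end,
                                      struct mm_walk *walk)
    {
            struct vm_area_struct *vma = walk->vma;

            if (vma->vm_flags & (VM_PFNMAP | VM_IO))
                    return 1;       /* skip this VMA */
            return 0;               /* walk it */
    }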
391 struct mm_walk *walk)
394 struct vm_area_struct *vma = walk->vma;
395 const struct mm_walk_ops *ops = walk->ops;
403 err = ops->pre_vma(start, end, walk);
410 err = walk_hugetlb_range(start, end, walk);
412 err = walk_pgd_range(start, end, walk);
415 ops->post_vma(walk);
449 * operation, but sets no restrictions on how the walk proceeds.
461 struct mm_walk walk = {
470 if (!walk.mm)
473 process_mm_walk_lock(walk.mm, ops->walk_lock);
475 vma = find_vma(walk.mm, start);
478 walk.vma = NULL;
481 err = ops->pte_hole(start, next, -1, &walk);
483 walk.vma = NULL;
486 err = ops->pte_hole(start, next, -1, &walk);
489 walk.vma = vma;
493 err = walk_page_test(start, next, &walk);
505 err = __walk_page_range(start, next, &walk);
514 * Determine if the walk operations specified are permitted to be used for a
515 * page table walk.
517 * This check is performed on all functions which are parameterised by walk
538 * walk_page_range - walk page table with caller specific callbacks
539 * @mm: mm_struct representing the target process of page table walk
542 * @ops: operation to call during the walk
545 * Recursively walk the page table tree of the process represented by @mm
553 * end address yet, continue to walk.
559 * Before starting to walk page table, some callers want to check whether
560 * they really want to walk over the current vma, typically by checking
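A typical walk_page_range() caller packages its callbacks in a const struct mm_walk_ops, takes the mmap lock in the mode named by walk_lock, and passes per-walk state through the private pointer. The sketch below counts present PTEs in a range; the function names and the counter are hypothetical, and the walk_lock field assumes a kernel that has the PGWALK_RDLOCK annotation.

    #include <linux/pagewalk.h>

    /* Hypothetical pte_entry callback: count present PTEs. Called with
     * the PTE page table lock already held by the walker. */
    static int count_pte_entry(pte_t *pte, unsigned long addr,
                               unsigned long next, struct mm_walk *walk)
    {
            unsigned long *nr_present = walk->private;

            if (pte_present(ptep_get(pte)))
                    (*nr_present)++;
            return 0;
    }

    static const struct mm_walk_ops count_walk_ops = {
            .pte_entry      = count_pte_entry,
            .walk_lock      = PGWALK_RDLOCK,
    };

    static unsigned long count_present_ptes(struct mm_struct *mm,
                                            unsigned long start,
                                            unsigned long end)
    {
            unsigned long nr_present = 0;

            mmap_read_lock(mm);
            walk_page_range(mm, start, end, &count_walk_ops, &nr_present);
            mmap_read_unlock(mm);

            return nr_present;
    }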
588 * walk_page_range_novma - walk a range of pagetables not backed by a vma
589 * @mm: mm_struct representing the target process of page table walk
592 * @ops: operation to call during the walk
593 * @pgd: pgd to walk if different from mm->pgd
596 * Similar to walk_page_range() but can walk any page tables even if they are
601 * Note: Be careful when walking the kernel page tables; the caller may need to
611 struct mm_walk walk = {
619 if (start >= end || !walk.mm)
628 * tables during the walk. However a read lock is insufficient to
644 mmap_assert_locked(walk.mm);
646 mmap_assert_write_locked(walk.mm);
648 return walk_pgd_range(start, end, &walk);
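For page tables that have no VMAs behind them, walk_page_range_novma() takes the mm (and optionally an explicit pgd) directly; as the assertions above show, init_mm may be walked with its mmap lock held while any other mm must be write-locked. A rough sketch, with an illustrative callback, of dumping non-empty PMDs in a kernel virtual range:

    /* Hypothetical pmd_entry callback for a kernel-range dump. */
    static int dump_pmd_entry(pmd_t *pmd, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
    {
            if (!pmd_none(*pmd))
                    pr_info("pmd %#lx: %#llx\n", addr, (u64)pmd_val(*pmd));
            return 0;
    }

    static const struct mm_walk_ops dump_ops = {
            .pmd_entry = dump_pmd_entry,
    };

    static void dump_kernel_range(unsigned long start, unsigned long end)
    {
            mmap_read_lock(&init_mm);
            /* NULL pgd means the walk starts from init_mm->pgd. */
            walk_page_range_novma(&init_mm, start, end, &dump_ops,
                                  NULL, NULL);
            mmap_read_unlock(&init_mm);
    }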
655 struct mm_walk walk = {
662 if (start >= end || !walk.mm)
669 process_mm_walk_lock(walk.mm, ops->walk_lock);
671 return __walk_page_range(start, end, &walk);
677 struct mm_walk walk = {
684 if (!walk.mm)
689 process_mm_walk_lock(walk.mm, ops->walk_lock);
691 return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
695 * walk_page_mapping - walk all memory areas mapped into a struct address_space.
699 * @ops: operation to call during the walk
703 * The walk is limited to only the given page-size index range, but if
728 struct mm_walk walk = {
756 walk.vma = vma;
757 walk.mm = vma->vm_mm;
759 err = walk_page_test(vma->vm_start, vma->vm_end, &walk);
766 err = __walk_page_range(start_addr, end_addr, &walk);
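walk_page_mapping() iterates the VMAs in mapping->i_mmap that intersect the given page-index range, so the caller holds the i_mmap lock rather than any single mmap lock. A sketch, reusing the hypothetical count_walk_ops from the walk_page_range() example above and mirroring how existing callers take the lock:

    /* Hypothetical: count present PTEs across every mapping of a
     * pagecache index range. */
    static void scan_mapping_range(struct address_space *mapping,
                                   pgoff_t first_index, pgoff_t nr)
    {
            unsigned long nr_present = 0;

            i_mmap_lock_read(mapping);
            walk_page_mapping(mapping, first_index, nr,
                              &count_walk_ops, &nr_present);
            i_mmap_unlock_read(mapping);

            pr_info("%lu present PTEs in range\n", nr_present);
    }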
775 * folio_walk_start - walk the page tables to a folio
778 * @addr: the virtual address to use for the page table walk.
779 * @flags: flags modifying which folios to walk to.
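folio_walk_start() is the single-address counterpart: in kernels that provide it, it walks the page tables to the folio mapped at one address and returns with the relevant page table lock held, so the mapping stays stable until folio_walk_end(). A sketch under that assumption; the VMA must be kept stable by the caller (e.g. mmap lock held for read), and the helper name is illustrative:

    /* Hypothetical helper: is a large folio mapped at addr in this VMA? */
    static bool addr_maps_large_folio(struct vm_area_struct *vma,
                                      unsigned long addr)
    {
            struct folio_walk fw;
            struct folio *folio;
            bool large = false;

            folio = folio_walk_start(&fw, vma, addr, 0);
            if (folio) {
                    /* Page table lock held until folio_walk_end(). */
                    large = folio_test_large(folio);
                    folio_walk_end(&fw, vma);
            }
            return large;
    }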