Lines matching defs:pmc (struct pagetable_move_control usage; the matched source is the kernel's mm/mremap.c)
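
Taken together, the fields these matches dereference imply a control structure along the following lines. This is a sketch inferred from the listing below, not the authoritative kernel definition; field order and comments are guesses.

	/* Inferred from the accesses matched below; struct vm_area_struct
	 * comes from <linux/mm_types.h>. */
	struct pagetable_move_control {
		struct vm_area_struct *old;	/* source VMA */
		struct vm_area_struct *new;	/* destination VMA */

		unsigned long old_addr;		/* current source cursor */
		unsigned long old_end;		/* exclusive end of source range */
		unsigned long new_addr;		/* current destination cursor */
		unsigned long len_in;		/* length requested by the caller */

		bool need_rmap_locks;		/* bracket moves with rmap locks? */
		bool for_stack;			/* moving the early process stack? */
	};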

193 static int move_ptes(struct pagetable_move_control *pmc,
196 struct vm_area_struct *vma = pmc->old;
204 unsigned long old_addr = pmc->old_addr;
205 unsigned long new_addr = pmc->new_addr;
230 if (pmc->need_rmap_locks)
312 if (pmc->need_rmap_locks)
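
Lines 230 and 312 are the two halves of one guard: move_ptes() presumably takes the rmap locks before walking the PTEs and drops them afterwards. A hedged sketch of that bracket, with the walk elided:

	if (pmc->need_rmap_locks)
		take_rmap_locks(vma);

	/* ... map the old and new PTE tables, move each present PTE from
	 *     old_addr to new_addr ... */

	if (pmc->need_rmap_locks)
		drop_rmap_locks(vma);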
326 static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
341 return !vma_has_uffd_without_event_remap(pmc->old) &&
342 !vma_has_uffd_without_event_remap(pmc->new);
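
Lines 326-342 assemble into a single predicate. The rationale in the comment below is my inference: moving a whole page table would bypass the per-PTE notifications that a userfaultfd registration without UFFD_FEATURE_EVENT_REMAP relies on, so such VMAs must fall back to per-PTE moves.

	static inline bool uffd_supports_page_table_move(struct pagetable_move_control *pmc)
	{
		/* Inferred: if either side has uffd registered without the
		 * EVENT_REMAP feature, the move must stay visible at PTE
		 * granularity, so table-level moves are refused. */
		return !vma_has_uffd_without_event_remap(pmc->old) &&
		       !vma_has_uffd_without_event_remap(pmc->new);
	}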
346 static bool move_normal_pmd(struct pagetable_move_control *pmc,
350 struct vm_area_struct *vma = pmc->old;
357 if (!uffd_supports_page_table_move(pmc))
406 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PMD_SIZE);
415 static inline bool move_normal_pmd(struct pagetable_move_control *pmc,
423 static bool move_normal_pud(struct pagetable_move_control *pmc,
427 struct vm_area_struct *vma = pmc->old;
433 if (!uffd_supports_page_table_move(pmc))
458 flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + PUD_SIZE);
466 static inline bool move_normal_pud(struct pagetable_move_control *pmc,
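
move_normal_pmd() (lines 346-406) and move_normal_pud() (lines 423-458) share one shape: bail out unless userfaultfd permits table-level moves, transplant the entry, then flush the old range. A condensed illustration of that common skeleton; the helper below is illustrative only, not a kernel function, and locking details are elided:

	static bool move_normal_entry_sketch(struct pagetable_move_control *pmc,
					     unsigned long size)
	{
		struct vm_area_struct *vma = pmc->old;

		if (!uffd_supports_page_table_move(pmc))
			return false;	/* caller falls back to per-PTE moves */

		/* ... take the page table locks, clear the entry at
		 *     pmc->old_addr, install it at pmc->new_addr ... */

		flush_tlb_range(vma, pmc->old_addr, pmc->old_addr + size);
		return true;
	}

Here size stands in for PMD_SIZE or PUD_SIZE, matching the flushes on lines 406 and 458; the inline variants on lines 415 and 466 are presumably the !CONFIG fallback stubs.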
474 static bool move_huge_pud(struct pagetable_move_control *pmc,
478 struct vm_area_struct *vma = pmc->old;
506 set_pud_at(mm, pmc->new_addr, new_pud, pud);
507 flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);
515 static bool move_huge_pud(struct pagetable_move_control *pmc,
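
Unlike the table movers, move_huge_pud() relocates a leaf entry: lines 506-507 show the PUD being installed at the destination and the old range flushed at HPAGE_PUD_SIZE granularity. A condensed, hedged sketch of that core; the clear-and-reinstall pairing is inferred:

	/* Sketch: detach the huge PUD from the old slot, reinstall it at the
	 * new address, then flush the stale translation. */
	pud = pudp_huge_get_and_clear(mm, pmc->old_addr, old_pud);
	set_pud_at(mm, pmc->new_addr, new_pud, pud);
	flush_pud_tlb_range(vma, pmc->old_addr, pmc->old_addr + HPAGE_PUD_SIZE);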
538 struct pagetable_move_control *pmc)
541 unsigned long old_addr = pmc->old_addr;
542 unsigned long old_end = pmc->old_end;
543 unsigned long new_addr = pmc->new_addr;
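
The match on line 538 shows only a trailing parameter, so the listing hides the enclosing function's name; the locals on lines 541-543 feed an extent computation (get_extent() in mainline, as the calls on lines 823 and 842 suggest). The step is the distance to the next PMD/PUD boundary from old_addr, clamped by old_end and by the boundary from new_addr. A hedged sketch:

	/* mask/size are PMD_MASK/PMD_SIZE or PUD_MASK/PUD_SIZE. */
	next = (old_addr + size) & mask;	/* next boundary past old_addr */
	extent = next - old_addr;
	if (extent > old_end - old_addr)	/* don't overshoot the range end */
		extent = old_end - old_addr;
	next = (new_addr + size) & mask;	/* the destination constrains too */
	if (extent > next - new_addr)
		extent = next - new_addr;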
576 static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
584 return pmc->need_rmap_locks;
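
Line 584 is only one arm of should_take_rmap_locks(); reconstructed as a whole, the other entry types presumably always take the locks (a sketch consistent with move_pgt_entry() below, not a verbatim copy):

	static bool should_take_rmap_locks(struct pagetable_move_control *pmc,
					   enum pgt_entry entry)
	{
		switch (entry) {
		case NORMAL_PMD:
		case NORMAL_PUD:
			/* Needed only when old and new ranges can overlap. */
			return pmc->need_rmap_locks;
		default:
			/* Huge-entry moves always take the rmap locks. */
			return true;
		}
	}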
592 static bool move_pgt_entry(struct pagetable_move_control *pmc,
596 bool need_rmap_locks = should_take_rmap_locks(pmc, entry);
600 take_rmap_locks(pmc->old);
604 moved = move_normal_pmd(pmc, old_entry, new_entry);
607 moved = move_normal_pud(pmc, old_entry, new_entry);
611 move_huge_pmd(pmc->old, pmc->old_addr, pmc->new_addr, old_entry,
616 move_huge_pud(pmc, old_entry, new_entry);
625 drop_rmap_locks(pmc->old);
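
Lines 592-625 outline a dispatcher: decide on rmap locking once, fan out by entry type, then unlock. Assembled into one sketch; the case labels are inferred from the callees on lines 604-616, and config-dependent guards and error handling are elided:

	static bool move_pgt_entry(struct pagetable_move_control *pmc,
				   enum pgt_entry entry,
				   void *old_entry, void *new_entry)
	{
		bool moved = false;
		bool need_rmap_locks = should_take_rmap_locks(pmc, entry);

		if (need_rmap_locks)
			take_rmap_locks(pmc->old);

		switch (entry) {
		case NORMAL_PMD:
			moved = move_normal_pmd(pmc, old_entry, new_entry);
			break;
		case NORMAL_PUD:
			moved = move_normal_pud(pmc, old_entry, new_entry);
			break;
		case HPAGE_PMD:
			moved = move_huge_pmd(pmc->old, pmc->old_addr,
					      pmc->new_addr, old_entry,
					      new_entry);
			break;
		case HPAGE_PUD:
			moved = move_huge_pud(pmc, old_entry, new_entry);
			break;
		default:
			break;
		}

		if (need_rmap_locks)
			drop_rmap_locks(pmc->old);

		return moved;
	}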
636 static bool can_align_down(struct pagetable_move_control *pmc,
647 if (!pmc->for_stack && vma->vm_start != addr_to_align)
651 if (pmc->for_stack && addr_masked >= vma->vm_start)
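
In mainline, the two guards on lines 647-651 are followed by a final check that this listing doesn't match because it never names pmc: the aligned-down address must not land inside a neighbouring mapping. A hedged reconstruction of that tail:

	/* Sketch: permit realignment only when no other VMA occupies
	 * [addr_masked, vma->vm_start). */
	return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL;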
665 static bool can_realign_addr(struct pagetable_move_control *pmc,
669 unsigned long old_align = pmc->old_addr & align_mask;
670 unsigned long new_align = pmc->new_addr & align_mask;
694 if (pmc->len_in < old_align_next)
706 if (!can_align_down(pmc, pmc->old, pmc->old_addr, pagetable_mask) ||
707 !can_align_down(pmc, pmc->new, pmc->new_addr, pagetable_mask))
724 * . pmc->old_addr . pmc->old_end
732 * The idea here is simply to align pmc->old_addr, pmc->new_addr down to the
740 * pmc->old_addr . pmc->old_end
744 static void try_realign_addr(struct pagetable_move_control *pmc,
748 if (!can_realign_addr(pmc, pagetable_mask))
753 * pmc->old_end value, and since the move_page_tables() operation spans
757 pmc->old_addr &= pagetable_mask;
758 pmc->new_addr &= pagetable_mask;
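
A worked example of the offset comparison behind lines 669-670 and the rounding on lines 757-758. The addresses are hypothetical and the 2 MiB PMD unit assumes x86-64:

	#define EX_PMD_SIZE	(1UL << 21)		/* 2 MiB, x86-64 assumption */
	#define EX_PMD_MASK	(~(EX_PMD_SIZE - 1))

	static void pmd_realign_example(void)
	{
		unsigned long old_addr = 0x7f1200345000UL;	/* hypothetical */
		unsigned long new_addr = 0x7f3400745000UL;	/* hypothetical */

		/* Lines 669-670: both addresses sit 0x145000 bytes into their
		 * PMD unit, so old and new can be realigned in lockstep. */

		/* Lines 757-758: round both cursors down to the boundary so
		 * whole PMD tables can be moved instead of individual PTEs. */
		old_addr &= EX_PMD_MASK;	/* now 0x7f1200200000 */
		new_addr &= EX_PMD_MASK;	/* now 0x7f3400600000 */
		(void)old_addr;
		(void)new_addr;
	}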
762 static bool pmc_done(struct pagetable_move_control *pmc)
764 return pmc->old_addr >= pmc->old_end;
768 static void pmc_next(struct pagetable_move_control *pmc, unsigned long extent)
770 pmc->old_addr += extent;
771 pmc->new_addr += extent;
778 static unsigned long pmc_progress(struct pagetable_move_control *pmc)
780 unsigned long orig_old_addr = pmc->old_end - pmc->len_in;
781 unsigned long old_addr = pmc->old_addr;
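
pmc_progress()'s return statement is not matched above because it never names pmc; the locals on lines 780-781 imply it reports how far old_addr advanced past its original start. A sketch of the likely tail:

	/* Sketch: if realignment pulled old_addr below the original start and
	 * the loop bailed out before moving anything, report zero progress
	 * rather than underflowing. */
	return old_addr < orig_old_addr ? 0 : old_addr - orig_old_addr;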
791 unsigned long move_page_tables(struct pagetable_move_control *pmc)
797 struct mm_struct *mm = pmc->old->vm_mm;
799 if (!pmc->len_in)
802 if (is_vm_hugetlb_page(pmc->old))
803 return move_hugetlb_page_tables(pmc->old, pmc->new, pmc->old_addr,
804 pmc->new_addr, pmc->len_in);
810 try_realign_addr(pmc, PMD_MASK);
812 flush_cache_range(pmc->old, pmc->old_addr, pmc->old_end);
814 pmc->old_addr, pmc->old_end);
817 for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
823 extent = get_extent(NORMAL_PUD, pmc);
825 old_pud = get_old_pud(mm, pmc->old_addr);
828 new_pud = alloc_new_pud(mm, pmc->new_addr);
833 move_pgt_entry(pmc, HPAGE_PUD, old_pud, new_pud);
838 if (move_pgt_entry(pmc, NORMAL_PUD, old_pud, new_pud))
842 extent = get_extent(NORMAL_PMD, pmc);
843 old_pmd = get_old_pmd(mm, pmc->old_addr);
846 new_pmd = alloc_new_pmd(mm, pmc->new_addr);
852 move_pgt_entry(pmc, HPAGE_PMD, old_pmd, new_pmd))
854 split_huge_pmd(pmc->old, old_pmd, pmc->old_addr);
861 if (move_pgt_entry(pmc, NORMAL_PMD, old_pmd, new_pmd))
866 if (pte_alloc(pmc->new->vm_mm, new_pmd))
868 if (move_ptes(pmc, extent, old_pmd, new_pmd) < 0)
874 return pmc_progress(pmc);
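
The matches from lines 817-868 trace the loop's descent order; condensed into a skeleton (MMU-notifier setup on lines 812-814, locking, and error paths elided; everything not shown in the matches is inferred):

	for (; !pmc_done(pmc); pmc_next(pmc, extent)) {
		/* 1. Whole-PUD moves first: a huge PUD leaf (line 833) or a
		 *    normal PUD table (line 838). */
		extent = get_extent(NORMAL_PUD, pmc);
		/* ... move_pgt_entry(pmc, HPAGE_PUD or NORMAL_PUD, ...) ... */

		/* 2. Then whole-PMD moves; a huge PMD that can't be moved
		 *    intact is split first (line 854). */
		extent = get_extent(NORMAL_PMD, pmc);
		/* ... move_pgt_entry(pmc, HPAGE_PMD or NORMAL_PMD, ...) ... */

		/* 3. Last resort: allocate the destination PTE table and copy
		 *    individual PTEs (lines 866-868). */
	}
	return pmc_progress(pmc);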
1191 PAGETABLE_MOVE(pmc, NULL, NULL, vrm->addr, vrm->new_addr, vrm->old_len);
1194 &pmc.need_rmap_locks);
1205 pmc.old = vma;
1206 pmc.new = new_vma;
1208 moved_len = move_page_tables(&pmc);
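
Line 1191 creates the control with NULL VMAs, and the real ones are patched in on lines 1205-1206 once the destination VMA exists, which suggests PAGETABLE_MOVE() is a designated-initializer wrapper. A guess at its shape, not the kernel's exact definition:

	#define PAGETABLE_MOVE(name, old_, new_, old_addr_, new_addr_, len_)	\
		struct pagetable_move_control name = {				\
			.old = (old_),						\
			.new = (new_),						\
			.old_addr = (old_addr_),				\
			.new_addr = (new_addr_),				\
			.len_in = (len_),					\
		}

Note that vrm->old_len on line 1191 would populate len_in, matching move_page_tables()'s early exit on pmc->len_in at line 799.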