
Searched refs:vm_mm (Results 1 – 25 of 124) sorted by relevance
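Nearly every hit below follows the same pattern: vm_mm is the back-pointer in struct vm_area_struct to the address space (struct mm_struct) that a VMA belongs to, so arch TLB-flush helpers, the core mm/ walkers, and even BPF programs all recover the owning mm through vma->vm_mm. As a minimal sketch of that relationship (plain user-space C with stripped-down stand-in types, not the real kernel definitions, which live in include/linux/mm_types.h and carry far more fields):

#include <stdio.h>

/* Stand-in for the kernel's struct mm_struct: one per address space. */
struct mm_struct {
	unsigned long start_stack;      /* where the process stack begins */
};

/* Stand-in for struct vm_area_struct: one per mapped region. */
struct vm_area_struct {
	unsigned long vm_start, vm_end; /* [vm_start, vm_end) in the mm */
	struct mm_struct *vm_mm;        /* back-pointer to the owning mm */
};

/* The shape shared by most results below: a helper is handed a VMA and
 * reaches the address space it must operate on via vma->vm_mm. */
static void flush_range_for_vma(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/* a real arch helper would invalidate TLB entries for mm here */
	printf("flush mm=%p range [%#lx, %#lx)\n", (void *)mm, start, end);
}

int main(void)
{
	struct mm_struct mm = { .start_stack = 0x7ffe00000000UL };
	struct vm_area_struct vma = {
		.vm_start = 0x400000, .vm_end = 0x401000, .vm_mm = &mm,
	};

	flush_range_for_vma(&vma, vma.vm_start, vma.vm_end);
	return 0;
}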


/linux/arch/powerpc/mm/book3s64/
radix_hugetlbpage.c
16 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__flush_hugetlb_page()
25 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__local_flush_hugetlb_page()
39 radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
41 radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
42 mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end); in radix__flush_hugetlb_tlb_range()
49 struct mm_struct *mm = vma->vm_mm; in radix__huge_ptep_modify_prot_commit()
62 set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); in radix__huge_ptep_modify_prot_commit()
/linux/arch/mips/mm/
tlb-r3k.c
71 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
152 if (cpu_context(cpu, vma->vm_mm) != 0) { in local_flush_tlb_page()
157 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); in local_flush_tlb_page()
159 newpid = cpu_context(cpu, vma->vm_mm) & asid_mask; in local_flush_tlb_page()
188 if (current->active_mm != vma->vm_mm) in __update_tlb()
194 if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) { in __update_tlb()
196 (cpu_context(cpu, vma->vm_mm)), pid); in __update_tlb()
/linux/arch/sh/mm/
tlbflush_32.c
19 if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) { in local_flush_tlb_page()
24 asid = cpu_asid(cpu, vma->vm_mm); in local_flush_tlb_page()
28 if (vma->vm_mm != current->mm) { in local_flush_tlb_page()
42 struct mm_struct *mm = vma->vm_mm; in local_flush_tlb_range()
/linux/arch/arc/mm/
tlb.c
222 local_flush_tlb_mm(vma->vm_mm); in local_flush_tlb_range()
235 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_range()
237 tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_range()
288 if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) { in local_flush_tlb_page()
289 tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu)); in local_flush_tlb_page()
351 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1); in flush_tlb_page()
363 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1); in flush_tlb_range()
376 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1); in flush_pmd_tlb_range()
425 if (current->active_mm != vma->vm_mm) in create_tlb()
547 if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) { in local_flush_pmd_tlb_range()
[all …]
/linux/arch/s390/include/asm/
hugetlb.h
59 return __huge_ptep_get_and_clear(vma->vm_mm, address, ptep); in huge_ptep_clear_flush()
67 int changed = !pte_same(huge_ptep_get(vma->vm_mm, addr, ptep), pte); in huge_ptep_set_access_flags()
70 __huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); in huge_ptep_set_access_flags()
71 __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
/linux/mm/
memory.c
499 pgd_t *pgd = pgd_offset(vma->vm_mm, addr); in print_bad_pte()
755 set_pte_at(vma->vm_mm, address, ptep, pte); in restore_exclusive_pte()
944 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); in copy_present_page()
952 struct mm_struct *src_mm = src_vma->vm_mm; in __copy_present_ptes()
968 set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr); in __copy_present_ptes()
1082 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pte_range()
1083 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pte_range()
1237 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pmd_range()
1238 struct mm_struct *src_mm = src_vma->vm_mm; in copy_pmd_range()
1274 struct mm_struct *dst_mm = dst_vma->vm_mm; in copy_pud_range()
[all …]
mremap.c
142 struct mm_struct *mm = vma->vm_mm; in move_ptes()
196 flush_tlb_batched_pending(vma->vm_mm); in move_ptes()
261 struct mm_struct *mm = vma->vm_mm; in move_normal_pmd()
306 old_ptl = pmd_lock(vma->vm_mm, old_pmd); in move_normal_pmd()
345 struct mm_struct *mm = vma->vm_mm; in move_normal_pud()
370 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_normal_pud()
403 struct mm_struct *mm = vma->vm_mm; in move_huge_pud()
417 old_ptl = pud_lock(vma->vm_mm, old_pud); in move_huge_pud()
565 return find_vma_intersection(vma->vm_mm, addr_masked, vma->vm_start) == NULL; in can_align_down()
618 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm, in move_page_tables()
[all …]
huge_memory.c
123 if (!vma->vm_mm) /* vdso */ in __thp_vma_allowable_orders()
1174 if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) { in vma_alloc_anon_folio_pmd()
1210 set_pmd_at(vma->vm_mm, haddr, pmd, entry); in map_anon_folio_pmd()
1212 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in map_anon_folio_pmd()
1215 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); in map_anon_folio_pmd()
1230 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
1236 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
1240 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
1248 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
1253 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
[all …]
pgtable-generic.c
74 set_pte_at(vma->vm_mm, address, ptep, entry); in ptep_set_access_flags()
97 struct mm_struct *mm = (vma)->vm_mm; in ptep_clear_flush()
116 set_pmd_at(vma->vm_mm, address, pmdp, entry); in pmdp_set_access_flags()
144 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_huge_clear_flush()
157 pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp); in pudp_huge_clear_flush()
229 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_collapse_flush()
madvise.c
104 mmap_assert_locked(vma->vm_mm); in anon_vma_name()
150 struct mm_struct *mm = vma->vm_mm; in madvise_update_vma()
194 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in swapin_walk_pmd_entry()
273 struct mm_struct *mm = vma->vm_mm; in madvise_willneed()
280 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma); in madvise_willneed()
442 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in madvise_cold_or_pageout_pte_range()
579 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_cold_page_range()
592 struct mm_struct *mm = vma->vm_mm; in madvise_cold()
617 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private); in madvise_pageout_page_range()
625 struct mm_struct *mm = vma->vm_mm; in madvise_pageout()
[all …]
/linux/arch/arm/mm/
fault-armv.c
57 set_pte_at(vma->vm_mm, address, ptep, entry); in do_adjust_pte()
76 pgd = pgd_offset(vma->vm_mm, address); in adjust_pte()
98 pte = pte_offset_map_rw_nolock(vma->vm_mm, pmd, address, &pmdval, &ptl); in adjust_pte()
129 struct mm_struct *mm = vma->vm_mm; in make_coherent()
157 if (mpnt->vm_mm != mm || mpnt == vma) in make_coherent()
/linux/arch/riscv/mm/
pgtable.c
16 __set_pte_at(vma->vm_mm, ptep, entry); in ptep_set_access_flags()
25 __set_pte_at(vma->vm_mm, ptep, entry); in ptep_set_access_flags()
142 pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); in pmdp_collapse_flush()
154 flush_tlb_mm(vma->vm_mm); in pmdp_collapse_flush()
/linux/arch/arm/kernel/
smp_tlb.c
202 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, in flush_tlb_page()
206 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_page()
228 on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, in flush_tlb_range()
232 broadcast_tlb_mm_a15_erratum(vma->vm_mm); in flush_tlb_range()
/linux/arch/m68k/include/asm/
tlbflush.h
86 if (vma->vm_mm == current->active_mm) in flush_tlb_page()
93 if (vma->vm_mm == current->active_mm) in flush_tlb_range()
171 sun3_put_context(vma->vm_mm->context); in flush_tlb_page()
188 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
/linux/arch/um/include/asm/
tlbflush.h
41 um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE); in flush_tlb_page()
47 um_tlb_mark_sync(vma->vm_mm, start, end); in flush_tlb_range()
/linux/arch/parisc/include/asm/
tlbflush.h
20 __flush_tlb_range((vma)->vm_mm->context.space_id, start, end)
67 purge_tlb_entries(vma->vm_mm, addr); in flush_tlb_page()
/linux/arch/hexagon/mm/
vm_tlb.c
29 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
69 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
/linux/arch/mips/kernel/
smp.c
583 struct mm_struct *mm = vma->vm_mm; in flush_tlb_range()
663 write_c0_memorymapid(cpu_asid(0, vma->vm_mm)); in flush_tlb_page()
670 } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || in flush_tlb_page()
671 (current->mm != vma->vm_mm)) { in flush_tlb_page()
689 if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) in flush_tlb_page()
690 set_cpu_context(cpu, vma->vm_mm, 1); in flush_tlb_page()
/linux/drivers/xen/
xlate_mmu.c
138 set_pte_at(info->vma->vm_mm, addr, ptep, pte); in remap_pte_fn()
168 err = apply_to_page_range(vma->vm_mm, addr, range, in xen_xlate_remap_gfn_array()
288 .mm = vma->vm_mm, in xen_remap_vma_range()
293 return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r); in xen_remap_vma_range()
/linux/arch/alpha/include/asm/
tlbflush.h
86 struct mm_struct *mm = vma->vm_mm; in flush_tlb_page()
100 flush_tlb_mm(vma->vm_mm); in flush_tlb_range()
/linux/arch/powerpc/mm/book3s32/
tlb.c
93 hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end); in hash__flush_tlb_mm()
102 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; in hash__flush_tlb_page()
/linux/arch/hexagon/kernel/
vdso.c
92 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
/linux/arch/sh/kernel/vsyscall/
vsyscall.c
97 if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) in arch_vma_name()
/linux/arch/mips/include/asm/
hugetlb.h
53 pte = huge_ptep_get_and_clear(vma->vm_mm, addr, ptep, sz); in huge_ptep_clear_flush()
74 set_pte_at(vma->vm_mm, addr, ptep, pte); in huge_ptep_set_access_flags()
/linux/tools/testing/selftests/bpf/progs/
lsm.c
98 is_stack = (vma->vm_start <= vma->vm_mm->start_stack && in BPF_PROG()
99 vma->vm_end >= vma->vm_mm->start_stack); in BPF_PROG()
121 bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start); in BPF_PROG()
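The lsm.c selftest above is the one hit that reads vm_mm from BPF program context rather than from kernel code proper: it classifies a VMA as the process stack. Its idiom, restated as a stand-alone C helper (same stand-in types as the sketch at the top of this page; the real check runs against the kernel's own structs via BTF):

/* A VMA is treated as the stack if it straddles mm->start_stack,
 * mirroring lines 98-99 of the lsm.c selftest above. */
static int vma_is_stack(const struct vm_area_struct *vma)
{
	unsigned long sp = vma->vm_mm->start_stack;

	return vma->vm_start <= sp && vma->vm_end >= sp;
}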
