/linux/arch/powerpc/mm/book3s64/
  radix_hugetlbpage.c
    16  radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__flush_hugetlb_page()
    25  radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__local_flush_hugetlb_page()
    39  radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    41  radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    42  mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);  in radix__flush_hugetlb_tlb_range()
    49  struct mm_struct *mm = vma->vm_mm;  in radix__huge_ptep_modify_prot_commit()
    62  set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);  in radix__huge_ptep_modify_prot_commit()

/linux/arch/mips/mm/
  tlb-r3k.c
    71   struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
    152  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
    157  printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);  in local_flush_tlb_page()
    159  newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;  in local_flush_tlb_page()
    188  if (current->active_mm != vma->vm_mm)  in __update_tlb()
    194  if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {  in __update_tlb()
    196  (cpu_context(cpu, vma->vm_mm)), pid);  in __update_tlb()

  tlb-r4k.c
    111  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
    217  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
    229  write_c0_memorymapid(cpu_asid(cpu, vma->vm_mm));  in local_flush_tlb_page()
    231  write_c0_entryhi(page | cpu_asid(cpu, vma->vm_mm));  in local_flush_tlb_page()
    308  if (current->active_mm != vma->vm_mm)  in __update_tlb()
    321  pgdp = pgd_offset(vma->vm_mm, address);  in __update_tlb()

/linux/arch/riscv/mm/
  tlbflush.c
    166  __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),  in flush_tlb_page()
    199  __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),  in flush_tlb_range()
    213  __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),  in flush_pmd_tlb_range()
    220  __flush_tlb_range(vma->vm_mm, mm_cpumask(vma->vm_mm),  in flush_pud_tlb_range()

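A pattern worth calling out across these entries: the arch flush helpers receive only a struct vm_area_struct * and reach the owning address space through vma->vm_mm, typically pairing it with mm_cpumask(vma->vm_mm) so the flush targets only CPUs that may cache translations for that mm; the tlbflush.c hits at 166, 199, 213, and 220 above are the cleanest instance. The sketch below shows just that shape in stand-alone C; the demo_* names, the flat cpu_bitmap, and the fixed 4096-byte page are illustrative stand-ins, not kernel API.

/* Schematic of the vma->vm_mm flush pattern above; every type and
 * function here is a stand-in, not the kernel's own. */
#include <stdio.h>

struct demo_mm_struct {
	unsigned long cpu_bitmap;	/* stand-in for mm_cpumask(mm) */
	unsigned int context_id;	/* stand-in for the per-mm ASID */
};

struct demo_vm_area_struct {
	struct demo_mm_struct *vm_mm;	/* back-pointer, as in the kernel */
	unsigned long vm_start, vm_end;
};

/* Mirrors the shape of __flush_tlb_range(mm, cpumask, start, end). */
static void demo_flush_tlb_range(struct demo_mm_struct *mm,
				 unsigned long cpus,
				 unsigned long start, unsigned long end)
{
	printf("flush asid=%u cpus=%#lx range=[%#lx, %#lx)\n",
	       mm->context_id, cpus, start, end);
}

static void demo_flush_tlb_page(struct demo_vm_area_struct *vma,
				unsigned long addr)
{
	/* Everything mm-wide (ASID, CPU mask) is reached via vma->vm_mm;
	 * the vma itself stores none of it. */
	demo_flush_tlb_range(vma->vm_mm, vma->vm_mm->cpu_bitmap,
			     addr, addr + 4096);
}

int main(void)
{
	struct demo_mm_struct mm = { .cpu_bitmap = 0x3, .context_id = 42 };
	struct demo_vm_area_struct vma = {
		.vm_mm = &mm, .vm_start = 0x1000, .vm_end = 0x9000,
	};

	demo_flush_tlb_page(&vma, 0x2000);
	return 0;
}

The design point the listing keeps repeating: per-mm state is never duplicated into each vma, so a vma pointer is always enough context for a flush.
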
/linux/arch/sh/mm/
  tlbflush_32.c
    19  if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {  in local_flush_tlb_page()
    24  asid = cpu_asid(cpu, vma->vm_mm);  in local_flush_tlb_page()
    28  if (vma->vm_mm != current->mm) {  in local_flush_tlb_page()
    42  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()

  cache-sh4.c
    231  if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)  in sh4_flush_cache_page()
    234  pmd = pmd_off(vma->vm_mm, address);  in sh4_flush_cache_page()
    241  if ((vma->vm_mm == current->active_mm))  in sh4_flush_cache_page()
    293  if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)  in sh4_flush_cache_range()

/linux/arch/arc/mm/
  tlb.c
    222  local_flush_tlb_mm(vma->vm_mm);  in local_flush_tlb_range()
    235  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_range()
    237  tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_range()
    288  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_page()
    289  tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_page()
    351  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);  in flush_tlb_page()
    363  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);  in flush_tlb_range()
    376  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);  in flush_pmd_tlb_range()
    425  if (current->active_mm != vma->vm_mm)  in create_tlb()
    547  if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {  in local_flush_pmd_tlb_range()
    [all …]

/linux/mm/
  memory.c
    614   __print_bad_page_map_pgtable(vma->vm_mm, addr);  in print_bad_page_map()
    902   set_pte_at(vma->vm_mm, address, ptep, pte);  in restore_exclusive_pte()
    1091  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  in copy_present_page()
    1099  struct mm_struct *src_mm = src_vma->vm_mm;  in __copy_present_ptes()
    1115  set_ptes(dst_vma->vm_mm, addr, dst_pte, pte, nr);  in __copy_present_ptes()
    1225  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pte_range()
    1226  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pte_range()
    1380  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pmd_range()
    1381  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pmd_range()
    1417  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pud_range()
    [all …]

  pgtable-generic.c
    76   set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    99   struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
    118  set_pmd_at(vma->vm_mm, address, pmdp, entry);  in pmdp_set_access_flags()
    145  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_huge_clear_flush()
    158  pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);  in pudp_huge_clear_flush()
    230  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_collapse_flush()

  migrate_device.c
    254  struct mm_struct *mm = vma->vm_mm;  in migrate_vma_collect_pmd()
    518  migrate->vma->vm_mm, migrate->start, migrate->end,  in migrate_vma_collect()
    522  walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,  in migrate_vma_collect()
    779  * migrate_vma_insert_huge_pmd_page: Insert a huge folio into @migrate->vma->vm_mm
    823  if (mem_cgroup_charge(folio, migrate->vma->vm_mm, gfp)) {  in migrate_vma_insert_huge_pmd_page()
    832  pgtable = pte_alloc_one(vma->vm_mm);  in migrate_vma_insert_huge_pmd_page()
    856  ptl = pmd_lock(vma->vm_mm, pmdp);  in migrate_vma_insert_huge_pmd_page()
    857  csa_ret = check_stable_address_space(vma->vm_mm);  in migrate_vma_insert_huge_pmd_page()
    875  add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in migrate_vma_insert_huge_pmd_page()
    882  pte_free(vma->vm_mm, pgtable);  in migrate_vma_insert_huge_pmd_page()
    [all …]

  huge_memory.c
    125   if (!vma->vm_mm) /* vdso */  in __thp_vma_allowable_orders()
    1272  if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {  in vma_alloc_anon_folio_pmd()
    1308  set_pmd_at(vma->vm_mm, haddr, pmd, entry);  in map_anon_folio_pmd_nopf()
    1317  add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in map_anon_folio_pmd_pf()
    1320  count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);  in map_anon_folio_pmd_pf()
    1335  pgtable = pte_alloc_one(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    1341  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
    1345  ret = check_stable_address_space(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
    1353  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
    1358  pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);  in __do_huge_pmd_anonymous_page()
    [all …]

  mprotect.c
    228  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in change_pte_range()
    235  flush_tlb_batched_pending(vma->vm_mm);  in change_pte_range()
    315  set_pte_at(vma->vm_mm, addr, pte,  in change_pte_range()
    363  pte_clear(vma->vm_mm, addr, pte);  in change_pte_range()
    377  set_pte_at(vma->vm_mm, addr, pte, newpte);  in change_pte_range()
    429  if (pte_alloc(vma->vm_mm, pmd)) \
    444  low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
    549  vma->vm_mm, addr, end);  in change_pud_range()
    607  struct mm_struct *mm = vma->vm_mm;  in change_protection_range()
    699  struct mm_struct *mm = vma->vm_mm;  in mprotect_fixup()
    [all …]

  mmap_lock.c
    111  mmap_assert_write_locked(vma->vm_mm);  in __vma_start_write()
    126  err = rcuwait_wait_event(&vma->vm_mm->vma_writer_wait,
    248  if (unlikely(vma->vm_mm != mm))  in lock_vma_under_rcu()
    280  other_mm = vma->vm_mm; /* use a copy as vma can be freed after we drop vm_refcnt */  in lock_vma_under_rcu()

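The core-mm hits show the matching convention on the page-table side: functions either open with struct mm_struct *mm = vma->vm_mm; (copy_pte_range, change_protection_range) or pass vma->vm_mm straight into helpers such as pmd_lock(), set_pte_at(), and add_mm_counter(). Below is a minimal stand-alone sketch of that lock, update, account sequence; a pthread mutex stands in for the page-table lock, and all demo_* names are purely hypothetical.

/* Schematic of the core-mm convention visible in memory.c and
 * mprotect.c above: cache vma->vm_mm once, then hand the mm
 * explicitly to every page-table helper. Stand-in types only. */
#include <assert.h>
#include <pthread.h>

struct demo_mm {
	pthread_mutex_t page_table_lock;	/* stand-in for the ptl */
	long anon_pages;			/* stand-in for MM_ANONPAGES */
};

struct demo_vma {
	struct demo_mm *vm_mm;
};

/* Mirrors the shape of set_pte_at(vma->vm_mm, ...) followed by
 * add_mm_counter(vma->vm_mm, ...), both under the mm's lock. */
static void demo_install_page(struct demo_vma *vma, long *ptep, long pte)
{
	struct demo_mm *mm = vma->vm_mm;	/* the common opening line */

	pthread_mutex_lock(&mm->page_table_lock);
	*ptep = pte;				/* stand-in for set_pte_at() */
	mm->anon_pages++;			/* stand-in for add_mm_counter() */
	pthread_mutex_unlock(&mm->page_table_lock);
}

int main(void)
{
	struct demo_mm mm = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct demo_vma vma = { &mm };
	long pte = 0;

	demo_install_page(&vma, &pte, 0xABC);
	assert(pte == 0xABC && mm.anon_pages == 1);
	return 0;
}

Caching the pointer once at the top keeps repeated vma->vm_mm dereferences out of the loop bodies, which is presumably why the pattern recurs throughout memory.c and mprotect.c above.
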
/linux/arch/arm/mm/
  fault-armv.c
    57   set_pte_at(vma->vm_mm, address, ptep, entry);  in do_adjust_pte()
    76   pgd = pgd_offset(vma->vm_mm, address);  in adjust_pte()
    98   pte = pte_offset_map_rw_nolock(vma->vm_mm, pmd, address, &pmdval, &ptl);  in adjust_pte()
    129  struct mm_struct *mm = vma->vm_mm;  in make_coherent()
    157  if (mpnt->vm_mm != mm || mpnt == vma)  in make_coherent()

/linux/arch/arm/kernel/
  smp_tlb.c
    202  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,  in flush_tlb_page()
    206  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_page()
    228  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,  in flush_tlb_range()
    232  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_range()

/linux/arch/m68k/include/asm/
  tlbflush.h
    86   if (vma->vm_mm == current->active_mm)  in flush_tlb_page()
    93   if (vma->vm_mm == current->active_mm)  in flush_tlb_range()
    171  sun3_put_context(vma->vm_mm->context);  in flush_tlb_page()
    188  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()

/linux/arch/um/include/asm/
  tlbflush.h
    41  um_tlb_mark_sync(vma->vm_mm, address, address + PAGE_SIZE);  in flush_tlb_page()
    47  um_tlb_mark_sync(vma->vm_mm, start, end);  in flush_tlb_range()

/linux/arch/parisc/include/asm/
  tlbflush.h
    20  __flush_tlb_range((vma)->vm_mm->context.space_id, start, end)
    67  purge_tlb_entries(vma->vm_mm, addr);  in flush_tlb_page()

/linux/arch/hexagon/mm/
  vm_tlb.c
    29  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
    69  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_page()

/linux/arch/mips/kernel/
  smp.c
    601  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
    681  write_c0_memorymapid(cpu_asid(0, vma->vm_mm));  in flush_tlb_page()
    688  } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||  in flush_tlb_page()
    689  (current->mm != vma->vm_mm)) {  in flush_tlb_page()
    707  if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))  in flush_tlb_page()
    708  set_cpu_context(cpu, vma->vm_mm, 1);  in flush_tlb_page()

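The smp.c hits at 688-689 (and the near-identical sh smp.c hits at 441-442 further down) encode the classic local-versus-broadcast decision: a page flush may stay on the current CPU only if the mm has a single user and is the current task's own mm; otherwise other CPUs may hold stale entries and must be reached. A hedged stand-alone rendering of just that predicate, with hypothetical demo_* names:

/* Schematic of the "local or broadcast?" test from the MIPS/SH smp.c
 * hits above; the demo_* names are stand-ins, not kernel API. */
#include <stdbool.h>
#include <stdio.h>

struct demo_mm { int mm_users; };
struct demo_vma { struct demo_mm *vm_mm; };

/* Mirrors: (atomic_read(&vma->vm_mm->mm_users) != 1) ||
 *          (current->mm != vma->vm_mm)                  */
static bool demo_needs_broadcast(const struct demo_vma *vma,
				 const struct demo_mm *current_mm)
{
	return vma->vm_mm->mm_users != 1 || current_mm != vma->vm_mm;
}

int main(void)
{
	struct demo_mm mine = { .mm_users = 1 };
	struct demo_mm shared = { .mm_users = 3 };
	struct demo_vma v1 = { &mine }, v2 = { &shared };

	printf("own, single-user mm: %s\n",
	       demo_needs_broadcast(&v1, &mine) ? "broadcast" : "local");
	printf("shared mm          : %s\n",
	       demo_needs_broadcast(&v2, &mine) ? "broadcast" : "local");
	return 0;
}
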
/linux/drivers/xen/
  xlate_mmu.c
    138  set_pte_at(info->vma->vm_mm, addr, ptep, pte);  in remap_pte_fn()
    168  err = apply_to_page_range(vma->vm_mm, addr, range,  in xen_xlate_remap_gfn_array()
    288  .mm = vma->vm_mm,  in xen_remap_vma_range()
    293  return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);  in xen_remap_vma_range()

/linux/arch/hexagon/kernel/
  vdso.c
    92  if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)  in arch_vma_name()

/linux/tools/testing/selftests/bpf/progs/
  lsm.c
    92   struct mm_struct *mm = vma->vm_mm;  in BPF_PROG()
    123  bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start);  in BPF_PROG()

/linux/arch/sh/kernel/
  smp.c
    395  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()
    441  if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||  in flush_tlb_page()
    442  (current->mm != vma->vm_mm)) {  in flush_tlb_page()
    452  cpu_context(i, vma->vm_mm) = 0;  in flush_tlb_page()

/linux/arch/powerpc/mm/book3s32/
  tlb.c
    93   hash__flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);  in hash__flush_tlb_mm()
    102  mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;  in hash__flush_tlb_page()