| /linux/mm/ |
| mmap_lock.c |
 53  static inline int __vma_enter_locked(struct vm_area_struct *vma,
 59          mmap_assert_write_locked(vma->vm_mm);
 69          if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt))
 72          rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
 73          err = rcuwait_wait_event(&vma->vm_mm->vma_writer_wait,
 74                  refcount_read(&vma->vm_refcnt) == tgt_refcnt,
 77          if (refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt)) {
 85          rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
 88          lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
 93  static inline void __vma_exit_locked(struct vm_area_struct *vma, bool *detached)
[all …]
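The mmap_lock.c hits above are the writer side of the per-VMA reference-count lock: the writer adds VMA_LOCK_OFFSET to vm_refcnt and waits on vma_writer_wait until readers drain. Below is a minimal user-space sketch of that pattern, for illustration only; the type, function names and constant are invented for the sketch and are not the kernel's API, and a plain spin-wait stands in for the kernel's rcuwait.

#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

#define WRITER_OFFSET 0x40000000u          /* stand-in for VMA_LOCK_OFFSET */

struct fake_vma {
	atomic_uint refcnt;                /* 1 while attached, like vm_refcnt */
};

/* Reader: take a reference only if the object is attached and not write-locked. */
static bool fake_read_trylock(struct fake_vma *v)
{
	unsigned int old = atomic_load_explicit(&v->refcnt, memory_order_relaxed);

	while (old != 0 && old < WRITER_OFFSET) {
		if (atomic_compare_exchange_weak_explicit(&v->refcnt, &old, old + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;
	}
	return false;                      /* detached or writer present: back off */
}

static void fake_read_unlock(struct fake_vma *v)
{
	atomic_fetch_sub_explicit(&v->refcnt, 1, memory_order_release);
}

/* Writer: exclude new readers, then wait for existing readers to drain. */
static void fake_write_lock(struct fake_vma *v)
{
	atomic_fetch_add_explicit(&v->refcnt, WRITER_OFFSET, memory_order_acquire);
	while (atomic_load_explicit(&v->refcnt, memory_order_acquire) !=
	       WRITER_OFFSET + 1)
		sched_yield();             /* the kernel sleeps on rcuwait instead */
}

static void fake_write_unlock(struct fake_vma *v)
{
	atomic_fetch_sub_explicit(&v->refcnt, WRITER_OFFSET, memory_order_release);
}

int main(void)
{
	struct fake_vma v = { .refcnt = 1 };   /* "attached" */

	if (fake_read_trylock(&v))
		fake_read_unlock(&v);
	fake_write_lock(&v);
	fake_write_unlock(&v);
	return 0;
}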
|
| vma.c |
 77  static bool vma_had_uncowed_parents(struct vm_area_struct *vma)
 83          return vma && vma->anon_vma && !list_is_singular(&vma->anon_vma_chain);
 88          struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev;   [in is_mergeable_vma()]
 90          if (!mpol_equal(vmg->policy, vma_policy(vma)))
 92          if ((vma->vm_flags ^ vmg->vm_flags) & ~VM_IGNORE_MERGE)
 94          if (vma->vm_file != vmg->file)
 96          if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx))
 98          if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name))
135          struct vm_area_struct *vma,   [in init_multi_vma_prep()]
142          vp->vma = vma;
[all …]
|
| nommu.c |
 92          struct vm_area_struct *vma;   [in kobjsize()]
 94          vma = find_vma(current->mm, (unsigned long)objp);
 95          if (vma)
 96                  return vma->vm_end - vma->vm_start;
148          struct vm_area_struct *vma;   [in __vmalloc_user_flags()]
151          vma = find_vma(current->mm, (unsigned long)ret);
152          if (vma)
153                  vm_flags_set(vma, VM_USERMAP);
345  int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
352  int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
[all …]
|
| mprotect.c |
 41  static bool maybe_change_pte_writable(struct vm_area_struct *vma, pte_t pte)
 43          if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
 51          if (pte_needs_soft_dirty_wp(vma, pte))
 55          if (userfaultfd_pte_wp(vma, pte))
 61  static bool can_change_private_pte_writable(struct vm_area_struct *vma,
 66          if (!maybe_change_pte_writable(vma, pte))
 75          page = vm_normal_page(vma, addr, pte);
 79  static bool can_change_shared_pte_writable(struct vm_area_struct *vma,
 82          if (!maybe_change_pte_writable(vma, pte))
 97  bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
[all …]
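The helpers above decide when mprotect() and related paths may mark a PTE writable immediately instead of leaving it to a later write fault. From user space the operation is a plain mprotect() call; a minimal sketch:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Writing now would fault; upgrade the protection first. */
	if (mprotect(p, len, PROT_READ | PROT_WRITE)) {
		perror("mprotect");
		return 1;
	}
	strcpy(p, "now writable");
	puts(p);
	munmap(p, len);
	return 0;
}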
|
| vma.h |
 14          struct vm_area_struct *vma;
 36          struct vm_area_struct *vma;             /* The first vma to munmap */
161  static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
164          return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
205          struct vm_area_struct *vma,
209          struct vm_area_struct *vma, gfp_t gfp)   [in vma_iter_store_gfp()]
213              ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
216          __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
217          mas_store_gfp(&vmi->mas, vma, gfp);
221          vma_mark_attached(vma);
[all …]
|
| madvise.c |
 80          struct vm_area_struct *vma;
110  struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
112          if (!rwsem_is_locked(&vma->vm_mm->mmap_lock))
113                  vma_assert_locked(vma);
115          return vma->anon_name;
119  static int replace_anon_vma_name(struct vm_area_struct *vma,
122          struct anon_vma_name *orig_name = anon_vma_name(vma);
125                  vma->anon_name = NULL;
133          vma->anon_name = anon_vma_name_reuse(anon_name);
139  static int replace_anon_vma_name(struct vm_area_struct *vma,
[all …]
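replace_anon_vma_name() above backs prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...), which labels an anonymous mapping. A user-space sketch, assuming a kernel built with CONFIG_ANON_VMA_NAME; the fallback defines mirror the UAPI values in case <sys/prctl.h> is too old to provide them:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* The mapping then shows up as "[anon:demo_buffer]" in /proc/self/maps. */
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, "demo_buffer"))
		perror("prctl(PR_SET_VMA)");

	return 0;
}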
|
| memory.c |
107          if (!userfaultfd_wp(vmf->vma))   [in vmf_orig_pte_uffd_wp()]
374                  struct vm_area_struct *vma, unsigned long floor,   [in free_pgtables()]
382          unsigned long addr = vma->vm_start;
398                  vma_start_write(vma);
399                  unlink_anon_vmas(vma);
402                  unlink_file_vma_batch_add(&vb, vma);
407                  while (next && next->vm_start <= vma->vm_end + PMD_SIZE) {
408                          vma = next;
413                          vma_start_write(vma);
414                          unlink_anon_vmas(vma);
[all …]
|
| mremap.c |
 64          struct vm_area_struct *vma;
143  static void take_rmap_locks(struct vm_area_struct *vma)
145          if (vma->vm_file)
146                  i_mmap_lock_write(vma->vm_file->f_mapping);
147          if (vma->anon_vma)
148                  anon_vma_lock_write(vma->anon_vma);
151  static void drop_rmap_locks(struct vm_area_struct *vma)
153          if (vma->anon_vma)
154                  anon_vma_unlock_write(vma->anon_vma);
155          if (vma->vm_file)
[all …]
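take_rmap_locks()/drop_rmap_locks() above are taken around the page-table moves done for mremap(). The user-space side of the same operation, as a minimal sketch:

#define _GNU_SOURCE                        /* for MREMAP_MAYMOVE */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 4096, new_len = 8192;
	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "contents survive the move");

	/* Grow the mapping; the kernel may relocate it to a new address. */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	printf("%p -> %p: %s\n", (void *)p, (void *)q, q);
	return 0;
}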
|
| vma_exec.c |
 19  int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
 32          struct mm_struct *mm = vma->vm_mm;
 33          unsigned long old_start = vma->vm_start;
 34          unsigned long old_end = vma->vm_end;
 39          VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
 42          PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length);
 50          if (vma != vma_next(&vmi))
 57          vmg.target = vma;
 91          return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
112          struct vm_area_struct *vma = vm_area_alloc(mm);   [in create_init_stack_vma()]
[all …]
|
| rmap.c |
149  static void anon_vma_chain_link(struct vm_area_struct *vma,
153          avc->vma = vma;
155          list_add(&avc->same_vma, &vma->anon_vma_chain);
185  int __anon_vma_prepare(struct vm_area_struct *vma)
187          struct mm_struct *mm = vma->vm_mm;
198                  anon_vma = find_mergeable_anon_vma(vma);
211          if (likely(!vma->anon_vma)) {
212                  vma->anon_vma = anon_vma;
213                  anon_vma_chain_link(vma, avc, anon_vma);
333  int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
[all …]
|
| pagewalk.c |
 48                  update_mmu_cache(walk->vma, addr, pte);   [in walk_pte_range_inner()]
150                  if (walk->vma)   [in walk_pmd_range()]
151                          split_huge_pmd(walk->vma, pmd, addr);
216                  if (walk->vma)   [in walk_pud_range()]
217                          split_huge_pud(walk->vma, pud, addr);
322          struct vm_area_struct *vma = walk->vma;   [in walk_hugetlb_range()]
323          struct hstate *h = hstate_vma(vma);
331          hugetlb_vma_lock_read(vma);
334                  pte = hugetlb_walk(vma, addr & hmask, sz);
342          hugetlb_vma_unlock_read(vma);
[all …]
|
| mmap.c |
 81  void vma_set_page_prot(struct vm_area_struct *vma)
 83          vm_flags_t vm_flags = vma->vm_flags;
 86          vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
 87          if (vma_wants_writenotify(vma, vm_page_prot)) {
 92          WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
693          struct vm_area_struct *vma, *prev;   [in generic_get_unmapped_area()]
705          vma = find_vma_prev(mm, addr, &prev);
707              (!vma || addr + len <= vm_start_gap(vma)) &&
741          struct vm_area_struct *vma, *prev;   [in generic_get_unmapped_area_topdown()]
756          vma = find_vma_prev(mm, addr, &prev);
[all …]
|
| mseal.c |
 41          struct vm_area_struct *vma;   [in range_contains_unmapped()]
 45          for_each_vma_range(vmi, vma, end) {
 46                  if (vma->vm_start > prev_end)
 49                  prev_end = vma->vm_end;
 58          struct vm_area_struct *vma, *prev;   [in mseal_apply()]
 63          vma = vma_iter_load(&vmi);
 65          if (start > vma->vm_start)
 66                  prev = vma;
 68          for_each_vma_range(vmi, vma, end) {
 69                  const unsigned long curr_end = MIN(vma->vm_end, end);
[all …]
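mseal_apply() above implements the mseal(2) system call, which makes a mapping's layout immutable. A user-space sketch, assuming a 6.10+ kernel whose headers define __NR_mseal (the flags argument must currently be 0):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

#ifdef __NR_mseal
	if (syscall(__NR_mseal, p, len, 0UL)) {
		perror("mseal");
	} else if (munmap(p, len)) {
		/* Expected: a sealed range can no longer be unmapped. */
		printf("munmap on sealed range failed: %s\n", strerror(errno));
	}
#endif
	return 0;
}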
|
| /linux/drivers/gpu/drm/i915/ |
| i915_gem_evict.c |
 41  static bool dying_vma(struct i915_vma *vma)
 43          return !kref_read(&vma->obj->base.refcount);
 67  static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
 73          if (i915_gem_object_get_rcu(vma->obj)) {
 74                  if (!i915_gem_object_trylock(vma->obj, ww)) {
 75                          i915_gem_object_put(vma->obj);
 80                  atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
 86  static void ungrab_vma(struct i915_vma *vma)
 88          if (dying_vma(vma))
 91          i915_gem_object_unlock(vma->obj);
[all …]
|
| /linux/tools/testing/vma/ |
| vma.c |
 18  #define vma_iter_prealloc(vmi, vma) \
 19          (fail_prealloc ? -ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL))
 72          struct vm_area_struct *vma = vm_area_alloc(mm);   [in alloc_vma()]
 74          if (vma == NULL)
 77          vma->vm_start = start;
 78          vma->vm_end = end;
 79          vma->vm_pgoff = pgoff;
 80          vm_flags_reset(vma, vm_flags);
 81          vma_assert_detached(vma);
 83          return vma;
[all …]
|
| vma_internal.h |
385  #define vma_policy(vma) NULL
584          int (*success_hook)(const struct vm_area_struct *vma);
762          int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
781          int (*access)(struct vm_area_struct *vma, unsigned long addr,
787          const char *(*name)(struct vm_area_struct *vma);
797          int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
809          struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
824          struct page *(*find_normal_page)(struct vm_area_struct *vma,
883  static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
885          return is_shared_maywrite(vma->vm_flags);
[all …]
|
| /linux/include/linux/ |
| userfaultfd_k.h |
131  extern long uffd_wp_range(struct vm_area_struct *vma,
145  static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
148          return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
162  static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
164          return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
174  static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
176          return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
179  static inline bool userfaultfd_missing(struct vm_area_struct *vma)
181          return vma->vm_flags & VM_UFFD_MISSING;
184  static inline bool userfaultfd_wp(struct vm_area_struct *vma)
[all …]
|
| mmap_lock.h |
113  static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt)
118          lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0);
121                  refcount_set(&vma->vm_refcnt, 0);
122          vma->vm_lock_seq = UINT_MAX;
136  static inline void vma_refcount_put(struct vm_area_struct *vma)
139          struct mm_struct *mm = vma->vm_mm;
142          rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
143          if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) {
156  static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass)
160          mmap_assert_locked(vma->vm_mm);
[all …]
|
| hugetlb.h |
108          struct vm_area_struct *vma;
123  void hugetlb_dup_vma_private(struct vm_area_struct *vma);
124  void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
125  int move_hugetlb_page_tables(struct vm_area_struct *vma,
135                          struct vm_area_struct *vma,
142  vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
166  pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
169                  struct vm_area_struct *vma,
201  pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
243  int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
[all …]
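The hugetlb.h declarations above are the kernel-internal interface; from user space a hugetlb-backed region is requested with MAP_HUGETLB. A sketch, assuming huge pages have been reserved (for example via /proc/sys/vm/nr_hugepages) and a 2 MiB default huge page size:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;    /* one default-sized huge page */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");   /* fails if no huge pages are reserved */
		return 1;
	}
	strcpy(p, "backed by a huge page");
	puts(p);
	munmap(p, len);
	return 0;
}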
|
| /linux/drivers/gpu/drm/nouveau/ |
| nouveau_vmm.c |
 29  nouveau_vma_unmap(struct nouveau_vma *vma)
 31          if (vma->mem) {
 32                  nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
 33                  vma->mem = NULL;
 38  nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
 40          struct nvif_vma tmp = { .addr = vma->addr };
 41          int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
 44          vma->mem = mem;
 51          struct nouveau_vma *vma;   [in nouveau_vma_find()]
 53          list_for_each_entry(vma, &nvbo->vma_list, head) {
[all …]
|
| /linux/drivers/gpu/drm/xe/ |
| xe_trace_bo.h |
 21  #define __dev_name_vma(vma) __dev_name_vm(xe_vma_vm(vma))
 89          TP_PROTO(struct xe_vma *vma),
 90          TP_ARGS(vma),
 93                  __string(dev, __dev_name_vma(vma))
 94                  __field(struct xe_vma *, vma)
104                  __entry->vma = vma;
105                  __entry->vm = xe_vma_vm(vma);
106                  __entry->asid = xe_vma_vm(vma)->usm.asid;
107                  __entry->start = xe_vma_start(vma);
108                  __entry->end = xe_vma_end(vma) - 1;
[all …]
|
| xe_vm.h |
 69  bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);
112  static inline u64 xe_vma_start(struct xe_vma *vma)
114          return vma->gpuva.va.addr;
117  static inline u64 xe_vma_size(struct xe_vma *vma)
119          return vma->gpuva.va.range;
122  static inline u64 xe_vma_end(struct xe_vma *vma)
124          return xe_vma_start(vma) + xe_vma_size(vma);
127  static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
129          return vma->gpuva.gem.offset;
132  static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
[all …]
|
| /linux/drivers/pci/ |
| mmap.c |
 25                               struct vm_area_struct *vma,   [in pci_mmap_resource_range()]
 32          if (vma->vm_pgoff + vma_pages(vma) > size)
 36                  vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 38                  vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
 41                  ret = pci_iobar_pfn(pdev, bar, vma);
 45                  vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT);
 47          vma->vm_ops = &pci_phys_vm_ops;
 49          return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 50                                    vma->vm_end - vma->vm_start,
 51                                    vma->vm_page_prot);
[all …]
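pci_mmap_resource_range() above is what ultimately services mmap() of a BAR, e.g. through the sysfs resourceN files. A user-space sketch, assuming a hypothetical device at 0000:03:00.0 with a mappable memory BAR0 and sufficient privileges:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/resource0";
	size_t len = 4096;                 /* map the first page of BAR0 */
	int fd = open(path, O_RDWR | O_SYNC);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	volatile uint32_t *regs = mmap(NULL, len, PROT_READ | PROT_WRITE,
				       MAP_SHARED, fd, 0);
	if (regs == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("first register word: %#x\n", (unsigned int)regs[0]);
	munmap((void *)regs, len);
	close(fd);
	return 0;
}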
|
| /linux/fs/proc/ |
| task_mmu.c |
186          struct vm_area_struct *vma;   [in get_next_vma()]
192          vma = lock_next_vma(lock_ctx->mm, &priv->iter, last_pos);
193          if (!IS_ERR_OR_NULL(vma))
194                  lock_ctx->locked_vma = vma;
196          return vma;
246          struct vm_area_struct *vma;   [in proc_get_vma()]
249          vma = get_next_vma(priv, *ppos);
251          if (IS_ERR(vma)) {
252                  if (PTR_ERR(vma) == -EAGAIN && fallback_to_mmap_lock(priv, *ppos))
255          return vma;
[all …]
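task_mmu.c implements /proc/<pid>/maps and friends; get_next_vma() above walks the VMAs that end up as lines of that file. A minimal reader of the resulting text format:

#include <stdio.h>

int main(void)
{
	char line[512], perms[8];
	unsigned long start, end;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;

	/* Each line: start-end perms offset dev inode [path] */
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%lx-%lx %7s", &start, &end, perms) == 3)
			printf("%#lx-%#lx %s\n", start, end, perms);
	}
	fclose(f);
	return 0;
}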
|
| /linux/drivers/gpu/drm/i915/gt/ |
| intel_ring.c |
 37          struct i915_vma *vma = ring->vma;   [in intel_ring_pin()]
 46          flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
 48          if (i915_gem_object_is_stolen(vma->obj))
 53          ret = i915_ggtt_pin(vma, ww, 0, flags);
 57          if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
 58                  addr = (void __force *)i915_vma_pin_iomap(vma);
 60                  int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);
 62                  addr = i915_gem_object_pin_map(vma->obj, type);
 70          i915_vma_make_unshrinkable(vma);
 79          i915_vma_unpin(vma);
[all …]
|