
Searched refs:vma (Results 1 – 25 of 823) sorted by relevance


/linux/drivers/gpu/drm/i915/
i915_vma.h
51 static inline bool i915_vma_is_active(const struct i915_vma *vma) in i915_vma_is_active() argument
53 return !i915_active_is_idle(&vma->active); in i915_vma_is_active()
60 int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
65 i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, in i915_vma_move_to_active() argument
68 return _i915_vma_move_to_active(vma, rq, &rq->fence, flags); in i915_vma_move_to_active()
73 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) in i915_vma_is_ggtt() argument
75 return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); in i915_vma_is_ggtt()
78 static inline bool i915_vma_is_dpt(const struct i915_vma *vma) in i915_vma_is_dpt() argument
80 return i915_is_dpt(vma->vm); in i915_vma_is_dpt()
83 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma) in i915_vma_has_ggtt_write() argument
[all …]
i915_gem_evict.c
41 static bool dying_vma(struct i915_vma *vma)
43 return !kref_read(&vma->obj->base.refcount);
67 static bool grab_vma(struct i915_vma *vma, struct i915_gem_ww_ctx *ww) in grab_vma() argument
73 if (i915_gem_object_get_rcu(vma->obj)) { in grab_vma()
74 if (!i915_gem_object_trylock(vma->obj, ww)) { in grab_vma()
75 i915_gem_object_put(vma->obj); in grab_vma()
80 atomic_and(~I915_VMA_PIN_MASK, &vma->flags); in grab_vma()
86 static void ungrab_vma(struct i915_vma *vma) in ungrab_vma() argument
88 if (dying_vma(vma)) in ungrab_vma()
91 i915_gem_object_unlock(vma->obj); in ungrab_vma()
[all …]
/linux/mm/
vma.c
55 .vma = vma_, \
63 struct vm_area_struct *vma = merge_next ? vmg->next : vmg->prev; in is_mergeable_vma() local
65 if (!mpol_equal(vmg->policy, vma_policy(vma))) in is_mergeable_vma()
75 if ((vma->vm_flags ^ vmg->flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
77 if (vma->vm_file != vmg->file) in is_mergeable_vma()
79 if (!is_mergeable_vm_userfaultfd_ctx(vma, vmg->uffd_ctx)) in is_mergeable_vma()
81 if (!anon_vma_name_eq(anon_vma_name(vma), vmg->anon_name)) in is_mergeable_vma()
87 struct anon_vma *anon_vma2, struct vm_area_struct *vma) in is_mergeable_anon_vma() argument
93 if ((!anon_vma1 || !anon_vma2) && (!vma || in is_mergeable_anon_vma()
94 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
[all …]
mremap.c
72 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pud() argument
86 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma, in alloc_new_pmd() argument
92 pud = alloc_new_pud(mm, vma, addr); in alloc_new_pmd()
105 static void take_rmap_locks(struct vm_area_struct *vma) in take_rmap_locks() argument
107 if (vma->vm_file) in take_rmap_locks()
108 i_mmap_lock_write(vma->vm_file->f_mapping); in take_rmap_locks()
109 if (vma->anon_vma) in take_rmap_locks()
110 anon_vma_lock_write(vma->anon_vma); in take_rmap_locks()
113 static void drop_rmap_locks(struct vm_area_struct *vma) in drop_rmap_locks() argument
115 if (vma->anon_vma) in drop_rmap_locks()
[all …]
nommu.c
99 struct vm_area_struct *vma; in kobjsize() local
101 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
102 if (vma) in kobjsize()
103 return vma->vm_end - vma->vm_start; in kobjsize()
154 struct vm_area_struct *vma; in __vmalloc_user_flags() local
157 vma = find_vma(current->mm, (unsigned long)ret); in __vmalloc_user_flags()
158 if (vma) in __vmalloc_user_flags()
159 vm_flags_set(vma, VM_USERMAP); in __vmalloc_user_flags()
335 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
342 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, in vm_insert_pages() argument
[all …]
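
The nommu.c hits above center on find_vma(), which maps a raw user address back to the VMA that covers it. For comparison, a minimal sketch of that lookup on an MMU kernel might look like this; vma_len_at() is a hypothetical helper rather than code from the results, and since find_vma() only guarantees vm_end > addr, the caller still checks vm_start:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: length of the VMA covering addr, or 0 if unmapped. */
static unsigned long vma_len_at(unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long len = 0;

        mmap_read_lock(mm);                  /* keep the VMA tree stable while we look */
        vma = find_vma(mm, addr);            /* first VMA with vm_end > addr */
        if (vma && vma->vm_start <= addr)    /* it may start above addr, i.e. a gap */
                len = vma->vm_end - vma->vm_start;
        mmap_read_unlock(mm);

        return len;
}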
madvise.c
102 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma) in anon_vma_name() argument
104 mmap_assert_locked(vma->vm_mm); in anon_vma_name()
106 return vma->anon_name; in anon_vma_name()
110 static int replace_anon_vma_name(struct vm_area_struct *vma, in replace_anon_vma_name() argument
113 struct anon_vma_name *orig_name = anon_vma_name(vma); in replace_anon_vma_name()
116 vma->anon_name = NULL; in replace_anon_vma_name()
124 vma->anon_name = anon_vma_name_reuse(anon_name); in replace_anon_vma_name()
130 static int replace_anon_vma_name(struct vm_area_struct *vma, in replace_anon_vma_name() argument
145 static int madvise_update_vma(struct vm_area_struct *vma, in madvise_update_vma() argument
150 struct mm_struct *mm = vma->vm_mm; in madvise_update_vma()
[all …]
memory.c
116 if (!userfaultfd_wp(vmf->vma)) in vmf_orig_pte_uffd_wp()
364 struct vm_area_struct *vma, unsigned long floor, in free_pgtables() argument
370 unsigned long addr = vma->vm_start; in free_pgtables()
386 vma_start_write(vma); in free_pgtables()
387 unlink_anon_vmas(vma); in free_pgtables()
389 if (is_vm_hugetlb_page(vma)) { in free_pgtables()
390 unlink_file_vma(vma); in free_pgtables()
391 hugetlb_free_pgd_range(tlb, addr, vma->vm_end, in free_pgtables()
395 unlink_file_vma_batch_add(&vb, vma); in free_pgtables()
400 while (next && next->vm_start <= vma->vm_end + PMD_SIZE in free_pgtables()
[all …]
mmap.c
81 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
83 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
86 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
87 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
92 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
696 struct vm_area_struct *vma, *prev; in generic_get_unmapped_area() local
708 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area()
710 (!vma || addr + len <= vm_start_gap(vma)) && in generic_get_unmapped_area()
744 struct vm_area_struct *vma, *prev; in generic_get_unmapped_area_topdown() local
759 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area_topdown()
[all …]
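
generic_get_unmapped_area() above accepts a caller-supplied address hint only if the requested range fits in the gap around the neighbouring VMAs. A stripped-down sketch of that test follows; range_is_unmapped() is an invented name, the real code also checks mmap_min_addr and the address-space limit, and the caller is assumed to hold the mmap lock:

#include <linux/mm.h>

/* Sketch: is [addr, addr + len) free in mm, guard gaps included? */
static bool range_is_unmapped(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma, *prev;

        vma = find_vma_prev(mm, addr, &prev);  /* vma: first VMA with vm_end > addr */
        return (!vma || addr + len <= vm_start_gap(vma)) &&
               (!prev || addr >= vm_end_gap(prev));
}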
mlock.c
322 struct vm_area_struct *vma, unsigned long start, in allow_mlock_munlock() argument
334 if (!(vma->vm_flags & VM_LOCKED)) in allow_mlock_munlock()
342 if (!folio_within_range(folio, vma, start, end)) in allow_mlock_munlock()
356 struct vm_area_struct *vma = walk->vma; in mlock_pte_range() local
364 ptl = pmd_trans_huge_lock(pmd, vma); in mlock_pte_range()
371 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
378 start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in mlock_pte_range()
388 folio = vm_normal_folio(vma, addr, ptent); in mlock_pte_range()
393 if (!allow_mlock_munlock(folio, vma, start, end, step)) in mlock_pte_range()
396 if (vma->vm_flags & VM_LOCKED) in mlock_pte_range()
[all …]
huge_memory.c
86 static inline bool file_thp_enabled(struct vm_area_struct *vma) in file_thp_enabled() argument
93 if (!vma->vm_file) in file_thp_enabled()
96 inode = file_inode(vma->vm_file); in file_thp_enabled()
101 unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, in __thp_vma_allowable_orders() argument
112 if (vma_is_anonymous(vma)) in __thp_vma_allowable_orders()
114 else if (vma_is_special_huge(vma)) in __thp_vma_allowable_orders()
123 if (!vma->vm_mm) /* vdso */ in __thp_vma_allowable_orders()
126 if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags)) in __thp_vma_allowable_orders()
130 if (vma_is_dax(vma)) in __thp_vma_allowable_orders()
153 addr = vma->vm_end - (PAGE_SIZE << order); in __thp_vma_allowable_orders()
[all …]
/linux/include/linux/
userfaultfd_k.h
135 extern long uffd_wp_range(struct vm_area_struct *vma,
149 static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, in is_mergeable_vm_userfaultfd_ctx() argument
152 return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx; in is_mergeable_vm_userfaultfd_ctx()
166 static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma) in uffd_disable_huge_pmd_share() argument
168 return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_huge_pmd_share()
178 static inline bool uffd_disable_fault_around(struct vm_area_struct *vma) in uffd_disable_fault_around() argument
180 return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR); in uffd_disable_fault_around()
183 static inline bool userfaultfd_missing(struct vm_area_struct *vma) in userfaultfd_missing() argument
185 return vma->vm_flags & VM_UFFD_MISSING; in userfaultfd_missing()
188 static inline bool userfaultfd_wp(struct vm_area_struct *vma) in userfaultfd_wp() argument
[all …]
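
The userfaultfd_k.h helpers above are thin wrappers around vma->vm_flags tests; fault-handling code uses them to decide whether a fault should be handed to userspace. A simplified sketch of that call shape, assuming CONFIG_USERFAULTFD (maybe_defer_to_userfaultfd() is hypothetical, and the real do_anonymous_page() path drops its page-table lock before calling handle_userfault()):

#include <linux/mm.h>
#include <linux/userfaultfd_k.h>

static vm_fault_t maybe_defer_to_userfaultfd(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;

        /* no page present and userspace registered for MISSING faults */
        if (userfaultfd_missing(vma))
                return handle_userfault(vmf, VM_UFFD_MISSING);

        return 0;       /* fall through to the normal fault path */
}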
hugetlb.h
107 struct vm_area_struct *vma; member
122 void hugetlb_dup_vma_private(struct vm_area_struct *vma);
123 void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
124 int move_hugetlb_page_tables(struct vm_area_struct *vma,
134 struct vm_area_struct *vma,
141 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
152 struct vm_area_struct *vma,
166 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
169 struct vm_area_struct *vma,
196 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
[all …]
mm.h
261 void __vm_area_free(struct vm_area_struct *vma);
562 struct vm_area_struct *vma; /* Target VMA */ member
626 int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
645 int (*access)(struct vm_area_struct *vma, unsigned long addr,
651 const char *(*name)(struct vm_area_struct *vma);
661 int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
673 struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
681 struct page *(*find_special_page)(struct vm_area_struct *vma,
686 static inline void vma_numab_state_init(struct vm_area_struct *vma) in vma_numab_state_init() argument
688 vma->numab_state = NULL; in vma_numab_state_init()
[all …]
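
mm.h above declares the vm_operations_struct callbacks (fault, access, name, the mempolicy hooks, and so on) that sit behind every VMA's vm_ops pointer. A bare-bones sketch of how a character driver might wire a few of them up is shown below; every my_-prefixed name is invented for the illustration, and a real driver would also provide a .fault handler or pre-populate the range from its mmap method:

#include <linux/fs.h>
#include <linux/mm.h>

static void my_vm_open(struct vm_area_struct *vma)
{
        pr_debug("mapping opened at %#lx\n", vma->vm_start);
}

static void my_vm_close(struct vm_area_struct *vma)
{
        pr_debug("mapping closed at %#lx\n", vma->vm_start);
}

static const char *my_vm_name(struct vm_area_struct *vma)
{
        return "[my_device]";   /* what /proc/<pid>/maps will show for the region */
}

static const struct vm_operations_struct my_vm_ops = {
        .open  = my_vm_open,
        .close = my_vm_close,
        .name  = my_vm_name,
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &my_vm_ops;
        vma->vm_private_data = file->private_data;
        return 0;
}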
/linux/drivers/gpu/drm/nouveau/
nouveau_vmm.c
29 nouveau_vma_unmap(struct nouveau_vma *vma) in nouveau_vma_unmap() argument
31 if (vma->mem) { in nouveau_vma_unmap()
32 nvif_vmm_unmap(&vma->vmm->vmm, vma->addr); in nouveau_vma_unmap()
33 vma->mem = NULL; in nouveau_vma_unmap()
38 nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem) in nouveau_vma_map() argument
40 struct nvif_vma tmp = { .addr = vma->addr }; in nouveau_vma_map()
41 int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp); in nouveau_vma_map()
44 vma->mem = mem; in nouveau_vma_map()
51 struct nouveau_vma *vma; in nouveau_vma_find() local
53 list_for_each_entry(vma, &nvbo->vma_list, head) { in nouveau_vma_find()
[all …]
/linux/drivers/gpu/drm/msm/
msm_gem_vma.c
42 void msm_gem_vma_purge(struct msm_gem_vma *vma) in msm_gem_vma_purge() argument
44 struct msm_gem_address_space *aspace = vma->aspace; in msm_gem_vma_purge()
45 unsigned size = vma->node.size; in msm_gem_vma_purge()
48 if (!vma->mapped) in msm_gem_vma_purge()
51 aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size); in msm_gem_vma_purge()
53 vma->mapped = false; in msm_gem_vma_purge()
58 msm_gem_vma_map(struct msm_gem_vma *vma, int prot, in msm_gem_vma_map() argument
61 struct msm_gem_address_space *aspace = vma->aspace; in msm_gem_vma_map()
64 if (GEM_WARN_ON(!vma->iova)) in msm_gem_vma_map()
67 if (vma->mapped) in msm_gem_vma_map()
[all …]
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
vmm.c
802 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); in nvkm_vma_new() local
803 if (vma) { in nvkm_vma_new()
804 vma->addr = addr; in nvkm_vma_new()
805 vma->size = size; in nvkm_vma_new()
806 vma->page = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
807 vma->refd = NVKM_VMA_PAGE_NONE; in nvkm_vma_new()
809 return vma; in nvkm_vma_new()
813 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail) in nvkm_vma_tail() argument
817 BUG_ON(vma->size == tail); in nvkm_vma_tail()
819 if (!(new = nvkm_vma_new(vma->addr + (vma->size - tail), tail))) in nvkm_vma_tail()
[all …]
/linux/drivers/gpu/drm/i915/selftests/
i915_gem_gtt.c
397 struct i915_vma *vma; in close_object_list() local
399 vma = i915_vma_instance(obj, vm, NULL); in close_object_list()
400 if (!IS_ERR(vma)) in close_object_list()
401 ignored = i915_vma_unbind_unlocked(vma); in close_object_list()
420 struct i915_vma *vma; in fill_hole() local
461 vma = i915_vma_instance(obj, vm, NULL); in fill_hole()
462 if (IS_ERR(vma)) in fill_hole()
471 err = i915_vma_pin(vma, 0, 0, offset | flags); in fill_hole()
478 if (!drm_mm_node_allocated(&vma->node) || in fill_hole()
479 i915_vma_misplaced(vma, 0, 0, offset | flags)) { in fill_hole()
[all …]
i915_vma.c
37 static bool assert_vma(struct i915_vma *vma, in assert_vma() argument
43 if (vma->vm != ctx->vm) { in assert_vma()
48 if (vma->size != obj->base.size) { in assert_vma()
50 vma->size, obj->base.size); in assert_vma()
54 if (vma->gtt_view.type != I915_GTT_VIEW_NORMAL) { in assert_vma()
56 vma->gtt_view.type); in assert_vma()
68 struct i915_vma *vma; in checked_vma_instance() local
71 vma = i915_vma_instance(obj, vm, view); in checked_vma_instance()
72 if (IS_ERR(vma)) in checked_vma_instance()
73 return vma; in checked_vma_instance()
[all …]
/linux/drivers/gpu/drm/xe/
xe_trace_bo.h
21 #define __dev_name_vma(vma) __dev_name_vm(xe_vma_vm(vma)) argument
84 TP_PROTO(struct xe_vma *vma),
85 TP_ARGS(vma),
88 __string(dev, __dev_name_vma(vma))
89 __field(struct xe_vma *, vma)
98 __entry->vma = vma;
99 __entry->asid = xe_vma_vm(vma)->usm.asid;
100 __entry->start = xe_vma_start(vma);
101 __entry->end = xe_vma_end(vma) - 1;
102 __entry->ptr = xe_vma_userptr(vma);
[all …]
xe_vm.h
107 static inline u64 xe_vma_start(struct xe_vma *vma) in xe_vma_start() argument
109 return vma->gpuva.va.addr; in xe_vma_start()
112 static inline u64 xe_vma_size(struct xe_vma *vma) in xe_vma_size() argument
114 return vma->gpuva.va.range; in xe_vma_size()
117 static inline u64 xe_vma_end(struct xe_vma *vma) in xe_vma_end() argument
119 return xe_vma_start(vma) + xe_vma_size(vma); in xe_vma_end()
122 static inline u64 xe_vma_bo_offset(struct xe_vma *vma) in xe_vma_bo_offset() argument
124 return vma->gpuva.gem.offset; in xe_vma_bo_offset()
127 static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma) in xe_vma_bo() argument
129 return !vma->gpuva.gem.obj ? NULL : in xe_vma_bo()
[all …]
/linux/drivers/pci/
mmap.c
25 struct vm_area_struct *vma, in pci_mmap_resource_range() argument
32 if (vma->vm_pgoff + vma_pages(vma) > size) in pci_mmap_resource_range()
36 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); in pci_mmap_resource_range()
38 vma->vm_page_prot = pgprot_device(vma->vm_page_prot); in pci_mmap_resource_range()
41 ret = pci_iobar_pfn(pdev, bar, vma); in pci_mmap_resource_range()
45 vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); in pci_mmap_resource_range()
47 vma->vm_ops = &pci_phys_vm_ops; in pci_mmap_resource_range()
49 return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, in pci_mmap_resource_range()
50 vma->vm_end - vma->vm_start, in pci_mmap_resource_range()
51 vma->vm_page_prot); in pci_mmap_resource_range()
[all …]
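
pci_mmap_resource_range() above follows the usual device-mmap recipe: bounds-check vm_pgoff, adjust vm_page_prot, then hand the physical range to (io_)remap_pfn_range(). Below is a hedged sketch of the same shape for a driver exposing one physically contiguous, page-aligned buffer; my_buf_phys and my_buf_size are hypothetical values a probe routine would have filled in:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pfn.h>

/* hypothetical: physical base and page-aligned size, set at probe time */
static phys_addr_t my_buf_phys;
static size_t my_buf_size;

static int my_buf_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn = PHYS_PFN(my_buf_phys) + vma->vm_pgoff;

        /* reject offsets/lengths that run past the end of the buffer */
        if (vma->vm_pgoff + vma_pages(vma) > PHYS_PFN(my_buf_size))
                return -EINVAL;

        /* write-combined access is the common choice for device-style memory */
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start, pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}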
/linux/fs/proc/
task_mmu.c
133 struct vm_area_struct *vma = vma_next(&priv->iter); in proc_get_vma() local
135 if (vma) { in proc_get_vma()
136 *ppos = vma->vm_start; in proc_get_vma()
139 vma = get_gate_vma(priv->mm); in proc_get_vma()
142 return vma; in proc_get_vma()
243 static void get_vma_name(struct vm_area_struct *vma, in get_vma_name() argument
248 struct anon_vma_name *anon_name = vma->vm_mm ? anon_vma_name(vma) : NULL; in get_vma_name()
258 if (vma->vm_file) { in get_vma_name()
267 *path = file_user_path(vma->vm_file); in get_vma_name()
272 if (vma->vm_ops && vma->vm_ops->name) { in get_vma_name()
[all …]
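
proc_get_vma() above advances through the address space one VMA at a time via a vma_iterator. Here is a short sketch of the same walk using the VMA_ITERATOR()/for_each_vma() helpers; total_mapped_bytes() is an invented example and the caller is assumed to hold mmap_read_lock() on the mm:

#include <linux/mm.h>

/* Sketch: sum the sizes of every VMA in an address space. */
static unsigned long total_mapped_bytes(struct mm_struct *mm)
{
        VMA_ITERATOR(vmi, mm, 0);       /* start the walk at address 0 */
        struct vm_area_struct *vma;
        unsigned long bytes = 0;

        for_each_vma(vmi, vma)
                bytes += vma->vm_end - vma->vm_start;

        return bytes;
}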
/linux/drivers/gpu/drm/i915/gt/
intel_ring.c
37 struct i915_vma *vma = ring->vma; in intel_ring_pin() local
46 flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); in intel_ring_pin()
48 if (i915_gem_object_is_stolen(vma->obj)) in intel_ring_pin()
53 ret = i915_ggtt_pin(vma, ww, 0, flags); in intel_ring_pin()
57 if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) { in intel_ring_pin()
58 addr = (void __force *)i915_vma_pin_iomap(vma); in intel_ring_pin()
60 int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false); in intel_ring_pin()
62 addr = i915_gem_object_pin_map(vma->obj, type); in intel_ring_pin()
70 i915_vma_make_unshrinkable(vma); in intel_ring_pin()
79 i915_vma_unpin(vma); in intel_ring_pin()
[all …]
/linux/arch/powerpc/mm/book3s64/
radix_hugetlbpage.c
10 void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) in radix__flush_hugetlb_page() argument
13 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_page()
16 radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__flush_hugetlb_page()
19 void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr) in radix__local_flush_hugetlb_page() argument
22 struct hstate *hstate = hstate_file(vma->vm_file); in radix__local_flush_hugetlb_page()
25 radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize); in radix__local_flush_hugetlb_page()
28 void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start, in radix__flush_hugetlb_tlb_range() argument
32 struct hstate *hstate = hstate_file(vma->vm_file); in radix__flush_hugetlb_tlb_range()
39 radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
41 radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize); in radix__flush_hugetlb_tlb_range()
[all …]
/linux/drivers/gpu/drm/i915/gem/
i915_gem_mman.c
30 __vma_matches(struct vm_area_struct *vma, struct file *filp, in __vma_matches() argument
33 if (vma->vm_file != filp) in __vma_matches()
36 return vma->vm_start == addr && in __vma_matches()
37 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size); in __vma_matches()
107 struct vm_area_struct *vma; in i915_gem_mmap_ioctl() local
113 vma = find_vma(mm, addr); in i915_gem_mmap_ioctl()
114 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) in i915_gem_mmap_ioctl()
115 vma->vm_page_prot = in i915_gem_mmap_ioctl()
116 pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); in i915_gem_mmap_ioctl()
252 struct vm_area_struct *area = vmf->vma; in vm_fault_cpu()
[all …]
