Lines matching refs: vma (the leading number on each line is the line number in the source file)

28 __vma_matches(struct vm_area_struct *vma, struct file *filp,
31 if (vma->vm_file != filp)
34 return vma->vm_start == addr &&
35 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
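
Pieced together, those fragments are the whole of the __vma_matches() helper. Reconstructed from the surrounding context (the "static bool" signature and the "return false" path are inferred, since they do not contain the matched symbol):

static bool
__vma_matches(struct vm_area_struct *vma, struct file *filp,
              unsigned long addr, unsigned long size)
{
        /* The vma must be backed by the same shmem file as the object. */
        if (vma->vm_file != filp)
                return false;

        /* ...and cover exactly the requested [addr, addr + size) span. */
        return vma->vm_start == addr &&
               (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
}
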
105 struct vm_area_struct *vma;
111 vma = find_vma(mm, addr);
112 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
113 vma->vm_page_prot =
114 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
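
Lines 105-114 sit inside the mmap ioctl's write-combining fixup: after vm_mmap() returns, the vma is re-looked-up under the mmap write lock and only flipped to WC if __vma_matches() confirms it is still the mapping just created. A condensed sketch of that sequence (error labels abbreviated; mm is current->mm):

        if (mmap_write_lock_killable(mm)) {
                addr = -EINTR;
                goto err;
        }

        /*
         * Only upgrade to write-combining if the vma at addr is still
         * exactly the mapping vm_mmap() created for this object.
         */
        vma = find_vma(mm, addr);
        if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                addr = -ENOMEM;        /* raced with userspace; it is gone */
        mmap_write_unlock(mm);
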
253 struct vm_area_struct *area = vmf->vma;
297 struct i915_vma *vma,
313 vma_size = vma->size >> PAGE_SHIFT;
322 start += vma->gtt_view.partial.offset;
332 *pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
334 *pfn += obj_offset - vma->gtt_view.partial.offset;
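
Lines 297-334 come from the helper that clamps the faulting user range to the bound GGTT vma and derives the first pfn to insert. A condensed sketch of the arithmetic (start/end are longs holding page numbers; obj_offset and gtt_view.partial.offset are in pages):

        /* Work in whole pages so the offset arithmetic cannot lose bits. */
        start = (long)(area->vm_start >> PAGE_SHIFT);
        start -= obj_offset;                        /* user's mmap offset */
        start += vma->gtt_view.partial.offset;      /* partial-view base */
        end = min_t(long, start + (vma->size >> PAGE_SHIFT),
                    area->vm_end >> PAGE_SHIFT);
        start = max_t(long, start, area->vm_start >> PAGE_SHIFT);

        *start_vaddr = (unsigned long)start << PAGE_SHIFT;
        *end_vaddr = (unsigned long)end << PAGE_SHIFT;

        /* pfn = aperture base plus this vma's offset into the GGTT... */
        *pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
        /* ...advanced to where the clamped range starts in the mapping... */
        *pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
        /* ...and corrected for the partial view's own page offset. */
        *pfn += obj_offset - vma->gtt_view.partial.offset;
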
340 struct vm_area_struct *area = vmf->vma;
352 struct i915_vma *vma;
387 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
391 if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
406 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
407 if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
410 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
418 if (vma == ERR_PTR(-ENOSPC)) {
426 vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0, flags);
429 if (IS_ERR(vma)) {
430 ret = PTR_ERR(vma);
449 ret = i915_vma_pin_fence(vma);
458 set_address_limits(area, vma, obj_offset, ggtt->gmadr.start,
470 if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
474 /* Track the mmo associated with the fenced vma */
475 vma->mmo = mmo;
483 i915_vma_set_ggtt_write(vma);
488 i915_vma_unpin_fence(vma);
490 __i915_vma_unpin(vma);
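
Lines 340-490 are the GTT fault handler. The repeated i915_gem_object_ggtt_pin_ww() calls at 387-426 form a fallback ladder (whole object, then a partial view, then after evicting), all inside a ww-mutex retry loop where ERR_PTR(-EDEADLK) is not a failure but a request to back off and rerun. A heavily condensed skeleton of that pattern (fence, pfn insertion, and unpin paths elided):

        i915_gem_ww_ctx_init(&ww, true);
retry:
        ret = i915_gem_object_lock(obj, &ww);
        if (ret)
                goto err;

        /* Try to pin the whole object into the mappable aperture first. */
        vma = i915_gem_object_ggtt_pin_ww(obj, &ww, NULL, 0, 0,
                                          PIN_MAPPABLE | PIN_NONBLOCK |
                                          PIN_NOEVICT);
        if (IS_ERR(vma) && vma != ERR_PTR(-EDEADLK)) {
                /* Object too big for the aperture: fall back to a
                 * partial view around the faulting page. */
                struct i915_gtt_view view =
                        compute_partial_view(obj, page_offset,
                                             MIN_CHUNK_PAGES);

                vma = i915_gem_object_ggtt_pin_ww(obj, &ww, &view, 0, 0,
                                                  PIN_MAPPABLE);
        }
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto err;
        }
        /* ... pin fence, insert pfns, track userfault, unpin ... */
err:
        if (ret == -EDEADLK) {
                /* ww deadlock: drop all held locks and rerun the block. */
                ret = i915_gem_ww_ctx_backoff(&ww);
                if (!ret)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
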
560 struct i915_vma *vma;
564 for_each_ggtt_vma(vma, obj)
565 i915_vma_revoke_mmap(vma);
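
The loop at 564-565 is the core of mmap revocation: tearing down the CPU PTEs of every GGTT binding forces the next user access to refault through the GTT fault handler. Roughly (the surrounding userfault bookkeeping omitted):

static void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj)
{
        struct i915_vma *vma;

        /* Zap the PTEs of every GGTT binding of this object. */
        for_each_ggtt_vma(vma, obj)
                i915_vma_revoke_mmap(vma);
}
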
920 static void vm_open(struct vm_area_struct *vma)
922 struct i915_mmap_offset *mmo = vma->vm_private_data;
929 static void vm_close(struct vm_area_struct *vma)
931 struct i915_mmap_offset *mmo = vma->vm_private_data;
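
vm_open()/vm_close() at 920-931 pair an object reference get/put so a duplicated or split vma (fork, mremap) keeps the GEM object alive for as long as any mapping exists. Filled out from context:

static void vm_open(struct vm_area_struct *vma)
{
        struct i915_mmap_offset *mmo = vma->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;

        GEM_BUG_ON(!obj);
        i915_gem_object_get(obj);       /* each vma holds a reference */
}

static void vm_close(struct vm_area_struct *vma)
{
        struct i915_mmap_offset *mmo = vma->vm_private_data;
        struct drm_i915_gem_object *obj = mmo->obj;

        GEM_BUG_ON(!obj);
        i915_gem_object_put(obj);       /* dropped on unmap or exit */
}
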
991 struct vm_area_struct *vma)
998 if (vma->vm_flags & VM_WRITE) {
1002 vm_flags_clear(vma, VM_MAYWRITE);
1011 vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
1015 * vma->vm_file->f_mapping, see vma_link(), for later revocation.
1021 vma_set_file(vma, anon);
1022 /* Drop the initial creation reference, the vma is now holding one. */
1026 vma->vm_page_prot = pgprot_decrypted(vm_get_page_prot(vma->vm_flags));
1027 vma->vm_ops = obj->ops->mmap_ops;
1028 vma->vm_private_data = obj->base.vma_node.driver_private;
1032 vma->vm_private_data = mmo;
1036 vma->vm_page_prot =
1037 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1038 vma->vm_ops = &vm_ops_cpu;
1045 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1046 vma->vm_ops = &vm_ops_cpu;
1050 vma->vm_page_prot =
1051 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
1052 vma->vm_ops = &vm_ops_cpu;
1056 vma->vm_page_prot =
1057 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1058 vma->vm_ops = &vm_ops_gtt;
1061 vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
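
The assignments at 1036-1058 are the arms of a switch on mmo->mmap_type; the case labels themselves contain no "vma" and so were filtered out of the match. With them restored, the tail of the mmap setup reads roughly:

        switch (mmo->mmap_type) {
        case I915_MMAP_TYPE_WC:
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_ops = &vm_ops_cpu;
                break;

        case I915_MMAP_TYPE_FIXED:
                GEM_WARN_ON(1);
                fallthrough;
        case I915_MMAP_TYPE_WB:
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                vma->vm_ops = &vm_ops_cpu;
                break;

        case I915_MMAP_TYPE_UC:
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
                vma->vm_ops = &vm_ops_cpu;
                break;

        case I915_MMAP_TYPE_GTT:
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
                vma->vm_ops = &vm_ops_gtt;
                break;
        }
        /* Applied to every case, see line 1061. */
        vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
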
1068 * drm_gem_object as the vma->vm_private_data. Since we need to
1072 int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
1086 vma->vm_pgoff,
1087 vma_pages(vma));
1091 * destroyed and will be invalid when the vma manager lock
1112 return i915_gem_object_mmap(obj, mmo, vma);
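
From userspace, this entry point is reached by first asking the driver for a fake offset via the MMAP_OFFSET ioctl and then calling mmap() on the DRM fd at that offset. A minimal sketch (map_wc is an illustrative helper, not part of the source; include paths vary between kernel and libdrm headers; error handling and EINTR restart omitted):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *map_wc(int fd, uint32_t handle, size_t size)
{
        struct drm_i915_gem_mmap_offset arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.flags = I915_MMAP_OFFSET_WC;   /* becomes mmo->mmap_type */

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
                return MAP_FAILED;

        /* The fake offset routes this mmap() into i915_gem_mmap(),
         * which looks up the mmo and applies the WC page protection. */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, arg.offset);
}
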
1115 int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
1132 vma->vm_pgoff += drm_vma_node_start(&obj->base.vma_node);
1140 vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
1150 return i915_gem_object_mmap(obj, mmo, vma);