Lines matching refs:vma
139 struct vm_area_struct *vma; member
208 io_remap_pfn_range(struct vm_area_struct *vma, in io_remap_pfn_range() argument
212 vma->vm_page_prot = prot; in io_remap_pfn_range()
213 vma->vm_pfn = pfn; in io_remap_pfn_range()
214 vma->vm_len = size; in io_remap_pfn_range()
220 lkpi_vmf_insert_pfn_prot_locked(struct vm_area_struct *vma, unsigned long addr,
224 vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_prot() argument
229 VM_OBJECT_WLOCK(vma->vm_obj); in vmf_insert_pfn_prot()
230 ret = lkpi_vmf_insert_pfn_prot_locked(vma, addr, pfn, prot); in vmf_insert_pfn_prot()
231 VM_OBJECT_WUNLOCK(vma->vm_obj); in vmf_insert_pfn_prot()
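Under LinuxKPI, vmf_insert_pfn_prot() simply takes the backing VM object lock around lkpi_vmf_insert_pfn_prot_locked(), as the lines above show. On the driver side it is normally called from a vm_operations_struct .fault handler. A minimal sketch, where my_dev, base_pfn and the private-data wiring are illustrative assumptions and not anything defined in this header:

    #include <linux/mm.h>

    struct my_dev {                         /* hypothetical driver state */
            unsigned long base_pfn;         /* first page frame of the region */
    };

    static vm_fault_t
    my_fault(struct vm_fault *vmf)
    {
            struct vm_area_struct *vma = vmf->vma;
            struct my_dev *dev = vma->vm_private_data;
            unsigned long pfn = dev->base_pfn + vmf->pgoff;

            /* Insert the page frame at the faulting address, using the
             * protection already attached to the VMA. */
            return (vmf_insert_pfn_prot(vma, vmf->address, pfn,
                vma->vm_page_prot));
    }

    static const struct vm_operations_struct my_vm_ops = {
            .fault = my_fault,
    };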
246 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
249 int lkpi_remap_pfn_range(struct vm_area_struct *vma,
254 remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
257 return (lkpi_remap_pfn_range(vma, addr, pfn, size, prot)); in remap_pfn_range()
261 vma_pages(struct vm_area_struct *vma) in vma_pages() argument
263 return ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); in vma_pages()
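remap_pfn_range() (a thin wrapper over lkpi_remap_pfn_range() here) and vma_pages() are typically paired in a character device .mmap handler that maps a physically contiguous region in one call. A minimal sketch, where MY_PHYS_BASE stands in for a real device address and is not part of this header:

    #include <linux/mm.h>

    #define MY_PHYS_BASE    0xfd000000UL    /* hypothetical device base address */

    static int
    my_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            unsigned long pfn = MY_PHYS_BASE >> PAGE_SHIFT;
            unsigned long size = vma_pages(vma) << PAGE_SHIFT;  /* whole VMA */

            /* Wire the caller's mapping straight to the device pages. */
            return (remap_pfn_range(vma, vma->vm_start, pfn, size,
                vma->vm_page_prot));
    }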
386 vm_flags_set(struct vm_area_struct *vma, unsigned long flags) in vm_flags_set() argument
388 vma->vm_flags |= flags; in vm_flags_set()
392 vm_flags_clear(struct vm_area_struct *vma, unsigned long flags) in vm_flags_clear() argument
394 vma->vm_flags &= ~flags; in vm_flags_clear()
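vm_flags_set() and vm_flags_clear() exist so that callers do not poke vma->vm_flags directly (newer Linux kernels seal the field); in this header they reduce to the bitwise updates shown above. Typical use in an mmap path, assuming the usual Linux flag names are available:

    /* PFN-backed device memory: no core dumps, no VMA expansion. */
    vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
    /* Keep a read-only window from ever being mprotect'ed writable. */
    vm_flags_clear(vma, VM_MAYWRITE);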
434 void vma_set_file(struct vm_area_struct *vma, struct linux_file *file);
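Only the prototype of vma_set_file() appears in this range. In Linux it swaps the file backing a VMA while keeping the reference counts balanced, and its usual callers are dma-buf/DRM mmap paths that redirect a mapping to another file before delegating. A sketch of that pattern, with struct my_obj and its backing_file field being assumptions for illustration:

    #include <linux/mm.h>

    struct my_obj {                         /* hypothetical object */
            struct file *backing_file;
    };

    static int
    my_redirect_mmap(struct my_obj *obj, struct vm_area_struct *vma)
    {
            /* vma_set_file() takes a reference on the new backing file
             * and drops the reference held on the previous vm_file. */
            vma_set_file(vma, obj->backing_file);
            vma->vm_pgoff = 0;      /* offsets now relative to the new file */
            return (0);
    }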