Searched refs:vma_lock (Results 1 – 7 of 7) sorted by relevance
mm/hugetlb.c
   264   * hugetlb vma_lock helper routines
   269  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_vma_lock_read()
   271  down_read(&vma_lock->rw_sema);                               in hugetlb_vma_lock_read()
   282  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_vma_unlock_read()
   284  up_read(&vma_lock->rw_sema);                                 in hugetlb_vma_unlock_read()
   295  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_vma_lock_write()
   297  down_write(&vma_lock->rw_sema);                              in hugetlb_vma_lock_write()
   308  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_vma_unlock_write()
   310  up_write(&vma_lock->rw_sema);                                in hugetlb_vma_unlock_write()
   322  struct hugetlb_vma_lock *vma_lock                            in hugetlb_vma_trylock_write()
   337  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_vma_assert_locked()
   349  struct hugetlb_vma_lock *vma_lock = container_of(kref,      in hugetlb_vma_lock_release()
   355  __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)   (argument)
   373  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in __hugetlb_vma_unlock_write_free()
   393  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_vma_lock_free()
   402  struct hugetlb_vma_lock *vma_lock;                           in hugetlb_vma_lock_alloc()
  1209  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_dup_vma_private()
  5198  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_vm_op_open()
  [all...]
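Together these helpers form the read/write API around the per-VMA rw_sema that serializes hugetlb page-table walkers against shared-PMD teardown. A minimal sketch of the read-side pattern, assuming a hypothetical caller (hugetlb_pte_present is not a kernel function; hugetlb_vma_lock_read/unlock_read and hugetlb_walk are from the hits in this page):

    /*
     * Sketch only: hold the hugetlb vma_lock for read across a page
     * table walk so huge_pmd_unshare() (a writer) cannot free a
     * shared PMD page underneath us.
     */
    static bool hugetlb_pte_present(struct vm_area_struct *vma,
                                    unsigned long addr, unsigned long sz)
    {
        pte_t *ptep;

        hugetlb_vma_lock_read(vma);
        ptep = hugetlb_walk(vma, addr, sz);  /* asserts locking, see hugetlb.h hit */
        hugetlb_vma_unlock_read(vma);

        return ptep != NULL;
    }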
drivers/vfio/pci/vfio_pci_core.c
  1613  /* Return 1 on zap and vma_lock acquired, 0 on contention (only with @try) */   in vfio_pci_zap_and_vma_lock()
  1620   * vma_lock is nested under mmap_lock for vm_ops callback paths.                in vfio_pci_zap_and_vma_lock()
  1625   * When zapping vmas we need to maintain the mmap_lock => vma_lock              in vfio_pci_zap_and_vma_lock()
  1626   * ordering, which requires using vma_lock to walk vma_list to                  in vfio_pci_zap_and_vma_lock()
  1627   * acquire an mm, then dropping vma_lock to get the mmap_lock and               in vfio_pci_zap_and_vma_lock()
  1628   * reacquiring vma_lock. This logic is derived from similar                     in vfio_pci_zap_and_vma_lock()
  1635   * vma_lock, thus memory_lock is nested under vma_lock.                         in vfio_pci_zap_and_vma_lock()
  1637   * This enables the vm_ops.fault callback to acquire vma_lock,                  in vfio_pci_zap_and_vma_lock()
  1645  if (!mutex_trylock(&vdev->vma_lock))                                            in vfio_pci_zap_and_vma_lock()
  [all...]
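The comment block describes a lock-ordering workaround: mmap_lock must be taken before this driver's vma_lock, yet vma_list is only stable under vma_lock, so an mm found under vma_lock can only be locked after dropping and reacquiring. A condensed, non-verbatim sketch of that loop (struct vfio_pci_mmap_vma and the field names are assumed from the same file; revalidation and the zap itself are elided):

    static int vfio_pci_zap_sketch(struct vfio_pci_core_device *vdev, bool try)
    {
        struct vfio_pci_mmap_vma *mmap_vma;
        struct mm_struct *mm;

        if (try) {
            if (!mutex_trylock(&vdev->vma_lock))
                return 0;                 /* contention, only with @try */
        } else {
            mutex_lock(&vdev->vma_lock);
        }

        while (!list_empty(&vdev->vma_list)) {
            mmap_vma = list_first_entry(&vdev->vma_list,
                                        struct vfio_pci_mmap_vma, vma_next);
            mm = mmap_vma->vma->vm_mm;

            if (!mmget_not_zero(mm)) {
                /* mm is exiting; just drop the stale entry */
                list_del(&mmap_vma->vma_next);
                kfree(mmap_vma);
                continue;
            }

            /* Drop and retake to honor mmap_lock => vma_lock ordering */
            mutex_unlock(&vdev->vma_lock);
            mmap_read_lock(mm);
            mutex_lock(&vdev->vma_lock);

            /* ... revalidate the entry, zap_vma_ptes(), unlink it ... */

            mutex_unlock(&vdev->vma_lock);
            mmap_read_unlock(mm);
            mmput(mm);
            mutex_lock(&vdev->vma_lock);
        }

        return 1;                         /* zapped, vma_lock still held */
    }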
fs/hugetlbfs/inode.c
   473  struct hugetlb_vma_lock *vma_lock;                           in hugetlb_unmap_file_folio()
   485  vma_lock = NULL;                                             in hugetlb_unmap_file_folio()
   494  vma_lock = vma->vm_private_data;                             in hugetlb_unmap_file_folio()
   498   * take a ref on the vma_lock structure so that              in hugetlb_unmap_file_folio()
   502  kref_get(&vma_lock->refs);                                   in hugetlb_unmap_file_folio()
   513  if (vma_lock) {                                              in hugetlb_unmap_file_folio()
   515   * Wait on vma_lock. We know it is still valid as we have    in hugetlb_unmap_file_folio()
   517   * not know if vma_lock is still attached to vma.            in hugetlb_unmap_file_folio()
   519  down_write(&vma_lock->rw_sema);                              in hugetlb_unmap_file_folio()
   522  vma = vma_lock                                               in hugetlb_unmap_file_folio()
   476  struct hugetlb_vma_lock *vma_lock;                           hugetlb_unmap_file_folio() local
  [all...]
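The kref_get() here buys lifetime, not mutual exclusion: once the i_mmap lock is dropped, the vma may detach and free its lock structure, so the caller pins it first and only then sleeps on the semaphore. A sketch of that pattern under those assumptions (the wrapper function is hypothetical; hugetlb_vma_lock_release is the kref release shown in the mm/hugetlb.c result):

    static void wait_for_hugetlb_vma_lock(struct vm_area_struct *vma,
                                          struct address_space *mapping)
    {
        struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

        kref_get(&vma_lock->refs);      /* keep it alive past i_mmap unlock */
        i_mmap_unlock_write(mapping);

        down_write(&vma_lock->rw_sema); /* sleep until current holders drain */
        up_write(&vma_lock->rw_sema);

        kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
    }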
include/linux/vfio_pci_core.h (likely; member of struct vfio_pci_core_device)
    96  struct mutex vma_lock;                                       member
include/linux/mm_types.h
   601  struct vma_lock {                                            global struct
   602  struct vma_lock {                                            (argument)
   693  struct vma_lock *vm_lock;                                    in struct vm_area_struct
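These hits are the generic per-VMA lock, distinct from the hugetlb-specific hugetlb_vma_lock above: a slab-allocated wrapper around an rw_semaphore hung off vm_area_struct::vm_lock. A sketch of the read-side trylock used by lockless page faults, modeled on vma_start_read() with the sequence-count revalidation reduced to a comment (names assumed from the hits):

    struct vma_lock {
        struct rw_semaphore lock;
    };

    /* Sketch: opportunistically lock one vma instead of the whole mmap_lock */
    static bool vma_read_trylock_sketch(struct vm_area_struct *vma)
    {
        if (!down_read_trylock(&vma->vm_lock->lock))
            return false;   /* writer active: fall back to mmap_lock */
        /*
         * The real code re-checks vma->vm_lock_seq against
         * mm->mm_lock_seq here and backs out on a mismatch.
         */
        return true;
    }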
include/linux/hugetlb.h
  1278  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   hugetlb_walk() local
  1287  struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;   in hugetlb_walk()
  1297  WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&        in hugetlb_walk()
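This assertion is the contract that ties the other results together: hugetlb_walk() warns under lockdep unless the caller holds either the hugetlb vma_lock or i_mmap_rwsem, since either one blocks huge_pmd_unshare(). An abridged, assumed reconstruction from the visible fragment, with the config guards elided:

    static inline pte_t *hugetlb_walk(struct vm_area_struct *vma,
                                      unsigned long addr, unsigned long sz)
    {
        struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

        /* Either lock blocks huge_pmd_unshare(), so either suffices */
        WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
                     !lockdep_is_held(&vma->vm_file->f_mapping->i_mmap_rwsem));

        return huge_pte_offset(vma->vm_mm, addr, sz);
    }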
kernel/fork.c
  3186  vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT);   in proc_caches_init()
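This cache backs the struct vma_lock from the mm_types.h result above. A sketch of the allocation side, modeled on kernel/fork.c's vma_lock_alloc() (names and the vm_lock_seq reset are assumed from the hits and the surrounding per-VMA-lock code):

    static struct kmem_cache *vma_lock_cachep;  /* created in proc_caches_init() */

    static bool vma_lock_alloc(struct vm_area_struct *vma)
    {
        vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL);
        if (!vma->vm_lock)
            return false;

        init_rwsem(&vma->vm_lock->lock);
        vma->vm_lock_seq = -1;  /* no reader can match mm->mm_lock_seq yet */
        return true;
    }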