/linux/include/linux/
mmap_lock.h
    18   .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock),
    71   rwsem_assert_held(&mm->mmap_lock);  in mmap_assert_locked()
    76   rwsem_assert_held_write(&mm->mmap_lock);  in mmap_assert_write_locked()
    409  if (lockdep_is_held(&vma->vm_mm->mmap_lock))  in vma_assert_stabilised()
    412  if (rwsem_is_locked(&vma->vm_mm->mmap_lock))  in vma_assert_stabilised()
    536  down_write(&mm->mmap_lock);  in mmap_write_lock()
    544  down_write_nested(&mm->mmap_lock, subclass);  in mmap_write_lock_nested()
    554  ret = down_write_killable(&mm->mmap_lock);  in mmap_write_lock_killable()
    579  up_write(&mm->mmap_lock);  in mmap_write_unlock()
    586  downgrade_write(&mm->mmap_lock);  in mmap_write_downgrade()
    [all …]
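The hits above span both the lockdep assertions and the write-side API. A minimal sketch of how those wrappers compose, assuming a hypothetical caller (the function name and error handling are illustrative, not taken from mmap_lock.h):

    #include <linux/mmap_lock.h>
    #include <linux/mm_types.h>

    /* Hypothetical caller: take mmap_lock for write, then downgrade so
     * concurrent readers can make progress during the read-only tail. */
    static int update_address_space(struct mm_struct *mm)
    {
            if (mmap_write_lock_killable(mm))
                    return -EINTR;          /* interrupted by a fatal signal */

            mmap_assert_write_locked(mm);   /* the line-76 assertion above */
            /* ... modify VMAs here ... */

            mmap_write_downgrade(mm);       /* write -> read, with no unlocked window */
            /* ... read-only walk here ... */
            mmap_read_unlock(mm);
            return 0;
    }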
|
io_uring_types.h
    501  struct mutex mmap_lock;  (member)
|
/linux/drivers/media/common/videobuf2/
videobuf2-core.c
    588   lockdep_assert_held(&q->mmap_lock);  in __vb2_queue_free()
    894   mutex_lock(&q->mmap_lock);  in vb2_core_reqbufs()
    906   mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
    927   mutex_lock(&q->mmap_lock);  in vb2_core_reqbufs()
    930   mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
    999   mutex_lock(&q->mmap_lock);  in vb2_core_reqbufs()
    1008  mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
    1011  mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
    1024  mutex_lock(&q->mmap_lock);  in vb2_core_reqbufs()
    1026  mutex_unlock(&q->mmap_lock);  in vb2_core_reqbufs()
    [all …]
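The pattern in these hits is the usual lock/assert split: the public entry point (vb2_core_reqbufs()) owns the mutex_lock()/mutex_unlock() bracket, while the internal helper (__vb2_queue_free()) only asserts the lock. A sketch with hypothetical names, assuming that split:

    #include <linux/mutex.h>
    #include <linux/lockdep.h>

    struct queue {                          /* stand-in for struct vb2_queue */
            struct mutex mmap_lock;
    };

    /* Internal helper: runs only with mmap_lock held; under lockdep this
     * is enforced, otherwise it is documentation. */
    static void __queue_free(struct queue *q)
    {
            lockdep_assert_held(&q->mmap_lock);
            /* ... free the queue's buffers ... */
    }

    /* Public entry point owns the lock/unlock bracket. */
    static int queue_reqbufs(struct queue *q)
    {
            mutex_lock(&q->mmap_lock);
            __queue_free(q);
            mutex_unlock(&q->mmap_lock);
            return 0;
    }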
|
/linux/io_uring/
memmap.c
    261  lockdep_assert_held(&ctx->mmap_lock);  in io_region_validate_mmap()
    304  guard(mutex)(&ctx->mmap_lock);  in io_uring_mmap()
    336  guard(mutex)(&ctx->mmap_lock);  in io_uring_get_unmapped_area()
    397  guard(mutex)(&ctx->mmap_lock);  in io_uring_mmap()
    430  guard(mutex)(&ctx->mmap_lock);  in io_uring_get_unmapped_area()
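guard(mutex) here comes from <linux/cleanup.h>: it takes the mutex and registers the unlock as a scope-exit cleanup, so every return path drops the lock without an explicit mutex_unlock(). A sketch, assuming a hypothetical context struct and limit:

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    #define MAX_REGION_LEN  (1UL << 20)     /* hypothetical limit */

    struct ctx {                            /* hypothetical context */
            struct mutex mmap_lock;
    };

    static int validate_mmap(struct ctx *ctx, unsigned long len)
    {
            guard(mutex)(&ctx->mmap_lock);  /* unlocked at every scope exit */

            if (len > MAX_REGION_LEN)       /* early return still unlocks */
                    return -EINVAL;
            /* ... look up and validate the region under the lock ... */
            return 0;
    }

The scoped_guard(mutex, &ctx->mmap_lock) { ... } form seen in kbuf.c and zcrx.c below is the block-scoped variant: the lock is held only for the braced region rather than to the end of the function.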
|
kbuf.c
    95   guard(mutex)(&ctx->mmap_lock);  in io_buffer_add_list()
    462  scoped_guard(mutex, &ctx->mmap_lock) {  in io_destroy_buffers()
    475  scoped_guard(mutex, &ctx->mmap_lock)  in io_destroy_bl()
    721  scoped_guard(mutex, &ctx->mmap_lock)  in io_unregister_pbuf_ring()
    756  lockdep_assert_held(&ctx->mmap_lock);  in io_pbuf_get_region()
|
zcrx.c
    674  lockdep_assert_held(&ctx->mmap_lock);  in io_zcrx_get_region()
    759  scoped_guard(mutex, &ctx->mmap_lock) {  in import_zcrx()
    772  scoped_guard(mutex, &ctx->mmap_lock) {  in import_zcrx()
    780  scoped_guard(mutex, &ctx->mmap_lock)  in import_zcrx()
    888  scoped_guard(mutex, &ctx->mmap_lock) {  in io_register_zcrx()
    913  scoped_guard(mutex, &ctx->mmap_lock) {  in io_register_zcrx()
    930  scoped_guard(mutex, &ctx->mmap_lock)  in io_register_zcrx()
    955  scoped_guard(mutex, &ctx->mmap_lock)  in io_terminate_zcrx()
    974  scoped_guard(mutex, &ctx->mmap_lock) {  in io_unregister_zcrx()
|
register.c
    586  mutex_lock(&ctx->mmap_lock);  in io_register_resize_rings()
    680  mutex_unlock(&ctx->mmap_lock);  in io_register_resize_rings()
|
io_uring.c
    295  mutex_init(&ctx->mmap_lock);  in io_ring_ctx_alloc()
|
/linux/kernel/bpf/
mmap_unlock_work.h
    60  rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);  in bpf_mmap_unlock_mm()
|
/linux/mm/
init-mm.c
    33  .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
|
vma.c
    2149  down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);  in vm_lock_anon_vma()
    2179  down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);  in vm_lock_mapping()
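down_write_nest_lock() is the lockdep-aware variant used when many locks of one class are taken while a single outer lock is already held for write (the mm_take_all_locks() situation): declaring mmap_lock as the nest lock stops lockdep from reporting the repeated same-class acquisitions as recursion. A sketch of the i_mmap_rwsem case from the second hit:

    #include <linux/mm.h>
    #include <linux/rwsem.h>

    /* Caller holds mm->mmap_lock for write and is serialising every file
     * mapping's rmap lock under it, as vm_lock_mapping() does above. */
    static void lock_one_mapping(struct mm_struct *mm,
                                 struct address_space *mapping)
    {
            mmap_assert_write_locked(mm);
            down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
    }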
|
memory.c
    6844  lockdep_is_held(&vma->vm_mm->mmap_lock));  in pfnmap_lockdep_assert()
    6846  lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));  in pfnmap_lockdep_assert()
    7324  might_lock_read(&current->mm->mmap_lock);  in __might_fault()
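Both hits are debug-only annotations rather than locking: lockdep_is_held() answers whether the current task holds the lock in any mode, and might_lock_read() tells lockdep that a read acquisition could occur on this path even when this particular call never blocks. A sketch of each, with hypothetical wrapper names:

    #include <linux/lockdep.h>
    #include <linux/mm.h>
    #include <linux/sched.h>

    /* In the spirit of pfnmap_lockdep_assert(): the caller must hold the
     * VMA's mmap_lock in some mode (read or write). */
    static void assert_mm_locked(struct vm_area_struct *vma)
    {
            lockdep_assert(lockdep_is_held(&vma->vm_mm->mmap_lock));
    }

    /* In the spirit of __might_fault(): report a potential read
     * acquisition of mmap_lock so lock-order bugs surface even on runs
     * where no fault actually happens. */
    static void might_fault_here(void)
    {
            might_lock_read(&current->mm->mmap_lock);
    }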
|
/linux/drivers/infiniband/hw/cxgb4/
provider.c
    89   spin_lock_init(&context->mmap_lock);  in c4iw_alloc_ucontext()
    103  spin_lock(&context->mmap_lock);  in c4iw_alloc_ucontext()
    106  spin_unlock(&context->mmap_lock);  in c4iw_alloc_ucontext()
|
cq.c
    1110  spin_lock(&ucontext->mmap_lock);  in c4iw_create_cq()
    1120  spin_unlock(&ucontext->mmap_lock);  in c4iw_create_cq()
|
qp.c
    2265  spin_lock(&ucontext->mmap_lock);  in c4iw_create_qp()
    2282  spin_unlock(&ucontext->mmap_lock);  in c4iw_create_qp()
    2774  spin_lock(&ucontext->mmap_lock);  in c4iw_create_srq()
    2779  spin_unlock(&ucontext->mmap_lock);  in c4iw_create_srq()
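In cxgb4, mmap_lock is a per-ucontext spinlock unrelated to the mm-wide rwsem of the same name: the create paths above use it to hand out unique mmap offsets ("keys") that userspace later passes to mmap(). A sketch under that assumption (the field names and PAGE_SIZE stride are modeled on the driver, not quoted from it):

    #include <linux/mm.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct ucontext {                       /* hypothetical ucontext */
            spinlock_t mmap_lock;
            u32 key;                        /* next free mmap offset */
    };

    /* Hand out a unique, page-aligned mmap key per object. */
    static u32 alloc_mmap_key(struct ucontext *uctx)
    {
            u32 key;

            spin_lock(&uctx->mmap_lock);
            key = uctx->key;
            uctx->key += PAGE_SIZE;
            spin_unlock(&uctx->mmap_lock);
            return key;
    }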
|
/linux/tools/perf/util/bpf_skel/vmlinux/
vmlinux.h
    97  struct rw_semaphore mmap_lock;  (member)
|
/linux/tools/perf/util/bpf_skel/
lock_contention.bpf.c
    175  struct rw_semaphore mmap_lock;  (member)
    377  if (bpf_core_field_exists(mm_new->mmap_lock)) {  in check_lock_type()
    378  if (&mm_new->mmap_lock == (void *)lock)  in check_lock_type()
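The ___new suffix on the struct in lock_contention.bpf.c is the CO-RE "flavor" convention: libbpf strips everything from the triple underscore onward and matches the local definition against the running kernel's struct mm_struct, so bpf_core_field_exists() can distinguish kernels where the field is mmap_lock from older ones where it was mmap_sem (renamed in v5.8). A sketch of the check, assuming the usual vmlinux.h skeleton setup:

    #include "vmlinux.h"
    #include <bpf/bpf_core_read.h>

    /* Minimal local flavor: only the field we probe for. */
    struct mm_struct___new {
            struct rw_semaphore mmap_lock;
    } __attribute__((preserve_access_index));

    /* True if @lock points at this mm's mmap_lock on the running kernel. */
    static int lock_is_mmap_lock(void *lock, void *mm)
    {
            struct mm_struct___new *mm_new = mm;

            if (bpf_core_field_exists(mm_new->mmap_lock))
                    return &mm_new->mmap_lock == lock;
            return 0;
    }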
|
/linux/Documentation/admin-guide/mm/
multigen_lru.rst
    41  theoretically worsen lock contention (mmap_lock). If it is
|
userfaultfd.rst
    37  ``userfaultfd`` runtime load never takes the mmap_lock for writing).
|
/linux/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
    673  might_lock_read(&current->mm->mmap_lock);  in etnaviv_gem_userptr_get_pages()
|
/linux/Documentation/gpu/
drm-vm-bind-locking.rst
    82   correspond to the mmap_lock. An rwsem allows several readers to walk
    419  mmap_lock that is grabbed when resolving a CPU pagefault. This means
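The first hit is making a structural point: a GPU notifier lock is an rwsem precisely so that, like mmap_lock itself, many fault-time readers can walk the GPU VM concurrently while invalidation takes the lock for write. A minimal sketch of that reader/writer split, with hypothetical names:

    #include <linux/rwsem.h>

    /* Many concurrent readers: per-fault walks of the GPU VM. */
    static void gpuvm_walk(struct rw_semaphore *notifier_lock)
    {
            down_read(notifier_lock);
            /* ... look up and validate mappings ... */
            up_read(notifier_lock);
    }

    /* One writer: invalidation excludes all walkers. */
    static void gpuvm_invalidate(struct rw_semaphore *notifier_lock)
    {
            down_write(notifier_lock);
            /* ... zap mappings ... */
            up_write(notifier_lock);
    }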
|
/linux/kernel/
fork.c
    1068  init_rwsem(&mm->mmap_lock);  in mmap_init_lock()
    1079  mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);  in mm_init()
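These two hits (plus the MTREE_INIT_EXT line in init-mm.c above) state the same contract twice: the VMA maple tree carries MT_FLAGS_LOCK_EXTERN, so it never takes a lock of its own and instead points lockdep at mmap_lock. A sketch of the pairing, using a hypothetical container struct:

    #include <linux/maple_tree.h>
    #include <linux/rwsem.h>

    struct fake_mm {                        /* hypothetical stand-in for mm_struct */
            struct maple_tree mt;
            struct rw_semaphore mmap_lock;
    };

    static void fake_mm_init(struct fake_mm *mm)
    {
            init_rwsem(&mm->mmap_lock);     /* as mmap_init_lock() does */
            mt_init_flags(&mm->mt, MT_FLAGS_LOCK_EXTERN | MT_FLAGS_USE_RCU);
            mt_set_external_lock(&mm->mt, &mm->mmap_lock); /* as mm_init() does */
    }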
|
/linux/
MAINTAINERS
    17142  F: include/linux/mmap_lock.h
    17143  F: include/trace/events/mmap_lock.h
    17144  F: mm/mmap_lock.c
|