/linux/drivers/media/common/videobuf2/
videobuf2-core.c
     588  lockdep_assert_held(&q->mmap_lock);   in __vb2_queue_free()
     894  mutex_lock(&q->mmap_lock);            in vb2_core_reqbufs()
     906  mutex_unlock(&q->mmap_lock);          in vb2_core_reqbufs()
     927  mutex_lock(&q->mmap_lock);            in vb2_core_reqbufs()
     930  mutex_unlock(&q->mmap_lock);          in vb2_core_reqbufs()
     999  mutex_lock(&q->mmap_lock);            in vb2_core_reqbufs()
    1008  mutex_unlock(&q->mmap_lock);          in vb2_core_reqbufs()
    1011  mutex_unlock(&q->mmap_lock);          in vb2_core_reqbufs()
    1024  mutex_lock(&q->mmap_lock);            in vb2_core_reqbufs()
    1026  mutex_unlock(&q->mmap_lock);          in vb2_core_reqbufs()
    [all …]
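The videobuf2 hits all follow one pattern: every path that allocates, frees, or mmap()s buffers takes the queue's private q->mmap_lock mutex, and the internal free helper documents that requirement with lockdep_assert_held(). A minimal sketch of that pattern, with hypothetical helper names (not the real vb2 functions):

  #include <linux/lockdep.h>
  #include <linux/mutex.h>
  #include <media/videobuf2-core.h>

  /* Hypothetical helpers sketching the vb2 locking pattern above. */
  static void __demo_queue_free(struct vb2_queue *q, unsigned int count)
  {
          lockdep_assert_held(&q->mmap_lock);     /* caller holds q->mmap_lock */
          /* ... free buffers that a concurrent mmap() could otherwise touch ... */
  }

  static int demo_reqbufs(struct vb2_queue *q, unsigned int count)
  {
          mutex_lock(&q->mmap_lock);
          __demo_queue_free(q, count);
          mutex_unlock(&q->mmap_lock);
          return 0;
  }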
|
/linux/io_uring/
memmap.c
     261  lockdep_assert_held(&ctx->mmap_lock);   in io_region_validate_mmap()
     304  guard(mutex)(&ctx->mmap_lock);          in io_uring_mmap()
     336  guard(mutex)(&ctx->mmap_lock);          in io_uring_get_unmapped_area()
     386  guard(mutex)(&ctx->mmap_lock);          in io_uring_get_unmapped_area()
|
kbuf.c
      95  guard(mutex)(&ctx->mmap_lock);            in io_buffer_add_list()
     462  scoped_guard(mutex, &ctx->mmap_lock) {    in io_destroy_buffers()
     475  scoped_guard(mutex, &ctx->mmap_lock)      in io_destroy_bl()
     716  scoped_guard(mutex, &ctx->mmap_lock)      in io_unregister_pbuf_ring()
     751  lockdep_assert_held(&ctx->mmap_lock);     in io_pbuf_get_region()
|
zcrx.c
     641  lockdep_assert_held(&ctx->mmap_lock);     in io_zcrx_get_region()
     726  scoped_guard(mutex, &ctx->mmap_lock) {    in import_zcrx()
     739  scoped_guard(mutex, &ctx->mmap_lock) {    in import_zcrx()
     747  scoped_guard(mutex, &ctx->mmap_lock)      in import_zcrx()
     811  scoped_guard(mutex, &ctx->mmap_lock) {    in io_register_zcrx_ifq()
     852  scoped_guard(mutex, &ctx->mmap_lock) {    in io_register_zcrx_ifq()
     871  scoped_guard(mutex, &ctx->mmap_lock)      in io_register_zcrx_ifq()
     895  scoped_guard(mutex, &ctx->mmap_lock) {    in io_unregister_zcrx_ifqs()
|
register.c
     578  mutex_lock(&ctx->mmap_lock);      in io_register_resize_rings()
     652  mutex_unlock(&ctx->mmap_lock);    in io_register_resize_rings()
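Most of the io_uring hits use the scope-based lock guards from <linux/cleanup.h> rather than explicit lock/unlock pairs: guard(mutex)(...) holds ctx->mmap_lock until the enclosing function returns, scoped_guard(mutex, ...) holds it only for the braced block, while register.c above still uses plain mutex_lock()/mutex_unlock(). A minimal sketch of the two guard forms, using a hypothetical context type in place of struct io_ring_ctx:

  #include <linux/cleanup.h>
  #include <linux/mutex.h>

  struct demo_ctx {                       /* stand-in for struct io_ring_ctx */
          struct mutex mmap_lock;
  };

  static void demo_whole_function(struct demo_ctx *ctx)
  {
          guard(mutex)(&ctx->mmap_lock);  /* released automatically on return */
          /* ... every path below runs with mmap_lock held ... */
  }

  static void demo_short_section(struct demo_ctx *ctx)
  {
          scoped_guard(mutex, &ctx->mmap_lock) {
                  /* ... short critical section ... */
          }                               /* lock already released here */
  }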
|
/linux/kernel/bpf/
mmap_unlock_work.h
      60  rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);    in bpf_mmap_unlock_mm()
|
/linux/mm/
init-mm.c
      33  .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, init_mm.mmap_lock),
|
Makefile
      58  debug.o gup.o mmap_lock.o vma_init.o $(mmu-y)
|
mmu_notifier.c
     979  might_lock(&mm->mmap_lock);    in mmu_interval_notifier_insert()
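might_lock() is a lockdep-only annotation: it tells the validator that this function may acquire mm->mmap_lock, even on calls where the locking path is never reached, so ordering problems are reported deterministically rather than only on the runs that happen to take the lock. A hedged sketch with a hypothetical function name:

  #include <linux/lockdep.h>
  #include <linux/mm_types.h>

  static int demo_interval_insert(struct mm_struct *mm)
  {
          /* lockdep records "this path can take mmap_lock" on every call */
          might_lock(&mm->mmap_lock);

          /* ... a slow path here might actually do mmap_read_lock(mm) ... */
          return 0;
  }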
|
vma.c
    2125  down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);    in vm_lock_anon_vma()
    2155  down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);    in vm_lock_mapping()
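The vma.c hits show the lockdep nesting annotation: with mm->mmap_lock already held for write, each anon_vma root rwsem and i_mmap_rwsem is taken with down_write_nest_lock(), which records that the inner rwsem is intentionally nested under mmap_lock instead of being flagged as a possible deadlock. A minimal sketch (hypothetical wrapper name):

  #include <linux/mmap_lock.h>
  #include <linux/rmap.h>
  #include <linux/rwsem.h>

  static void demo_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
  {
          mmap_assert_write_locked(mm);   /* the outer lock must already be held */
          down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
  }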
|
/linux/drivers/infiniband/hw/cxgb4/
provider.c
      89  spin_lock_init(&context->mmap_lock);    in c4iw_alloc_ucontext()
     103  spin_lock(&context->mmap_lock);         in c4iw_alloc_ucontext()
     106  spin_unlock(&context->mmap_lock);       in c4iw_alloc_ucontext()
|
cq.c
    1110  spin_lock(&ucontext->mmap_lock);      in c4iw_create_cq()
    1120  spin_unlock(&ucontext->mmap_lock);    in c4iw_create_cq()
|
qp.c
    2265  spin_lock(&ucontext->mmap_lock);      in c4iw_create_qp()
    2282  spin_unlock(&ucontext->mmap_lock);    in c4iw_create_qp()
    2774  spin_lock(&ucontext->mmap_lock);      in c4iw_create_srq()
    2779  spin_unlock(&ucontext->mmap_lock);    in c4iw_create_srq()
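Note that the cxgb4 mmap_lock is unrelated to mm->mmap_lock: it is a per-ucontext spinlock that serialises handing out unique mmap offsets ("keys") to user space when CQs, QPs, and SRQs are created. A hedged sketch of that gist, with hypothetical type and helper names:

  #include <linux/mm.h>
  #include <linux/spinlock.h>
  #include <linux/types.h>

  struct demo_ucontext {          /* stand-in for the driver's ucontext */
          spinlock_t mmap_lock;
          u32 key;
  };

  static u32 demo_alloc_mmap_key(struct demo_ucontext *uctx)
  {
          u32 key;

          spin_lock(&uctx->mmap_lock);
          key = uctx->key;        /* hand out the current offset ... */
          uctx->key += PAGE_SIZE; /* ... and reserve the next one */
          spin_unlock(&uctx->mmap_lock);

          return key;
  }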
|
/linux/tools/perf/util/bpf_skel/vmlinux/
vmlinux.h
      97  struct rw_semaphore mmap_lock;    member
|
/linux/tools/perf/util/bpf_skel/
lock_contention.bpf.c
     175  struct rw_semaphore mmap_lock;    member
     377  if (bpf_core_field_exists(mm_new->mmap_lock)) {    in check_lock_type()
     378  if (&mm_new->mmap_lock == (void *)lock)            in check_lock_type()
|
/linux/Documentation/admin-guide/mm/
numa_memory_policy.rst
     381  task's mm's mmap_lock for read during the query.  The set_mempolicy() and
     382  mbind() APIs [see below] always acquire the mmap_lock for write when
     388  we hold the mmap_lock for read.  Again, because replacing the task or vma
     389  policy requires that the mmap_lock be held for write, the policy can't be
     393  shared memory policy while another task, with a distinct mmap_lock, is
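The memory-policy documentation above describes the basic rule for mm->mmap_lock: queries only need the rwsem for read, while anything that replaces a task or VMA policy must take it for write so the policy cannot change under a concurrent reader. A minimal sketch of that split using the wrappers from <linux/mmap_lock.h> (hypothetical helper names, not the real mempolicy code):

  #include <linux/mmap_lock.h>

  static void demo_query_policy(struct mm_struct *mm)
  {
          mmap_read_lock(mm);
          /* ... look up the VMA and read its policy ... */
          mmap_read_unlock(mm);
  }

  static void demo_replace_policy(struct mm_struct *mm)
  {
          mmap_write_lock(mm);
          /* ... install the new policy on the VMA(s) ... */
          mmap_write_unlock(mm);
  }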
|
multigen_lru.rst
      41  theoretically worsen lock contention (mmap_lock).  If it is
|
/linux/include/media/
videobuf2-core.h
     564  * @mmap_lock: private mutex used when buffers are allocated/freed/mmapped
     636  struct mutex mmap_lock;
     646  struct mutex mmap_lock;    global() member
|
/linux/include/linux/
io_uring_types.h
     485  struct mutex mmap_lock;    member
|
mm_types.h
    1196  struct rw_semaphore mmap_lock;    member
|
/linux/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
     673  might_lock_read(&current->mm->mmap_lock);    in etnaviv_gem_userptr_get_pages()
|
/linux/Documentation/filesystems/
locking.rst
     604  ops            mmap_lock       PageLocked(page)
     629  The mmap_lock may not be held when this method is called.
|
/linux/drivers/firmware/efi/
efi.c
      69  .mm_mt = MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
|
/linux/Documentation/gpu/
drm-vm-bind-locking.rst
      82  correspond to the mmap_lock.  An rwsem allows several readers to walk
     419  mmap_lock that is grabbed when resolving a CPU pagefault.  This means
|
/linux/kernel/
fork.c
    1065  init_rwsem(&mm->mmap_lock);                          in mmap_init_lock()
    1076  mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);    in mm_init()
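The fork.c hits show how the lock is wired up when a new mm is created: mm->mmap_lock is initialised as a plain rwsem, then registered as the external lock protecting the VMA maple tree, the same pairing that MTREE_INIT_EXT() expresses statically for init_mm and efi_mm in the entries listed earlier. A condensed sketch of what those two matches do together (hypothetical wrapper name; the real mmap_init_lock() does additional setup):

  #include <linux/maple_tree.h>
  #include <linux/mm_types.h>
  #include <linux/rwsem.h>

  static void demo_mm_lock_init(struct mm_struct *mm)
  {
          init_rwsem(&mm->mmap_lock);
          /* lockdep treats mmap_lock as the lock guarding the VMA maple tree */
          mt_set_external_lock(&mm->mm_mt, &mm->mmap_lock);
  }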
|