Home
last modified time | relevance | path

Searched refs:xa_lock (Results 1 – 25 of 63) sorted by relevance

123

/linux/Documentation/translations/zh_CN/core-api/
H A Dxarray.rst162 内部使用xa_lock:
186 假设进入时持有xa_lock:
195 如果你想利用锁来保护你存储在XArray中的数据结构,你可以在调用xa_load()之前调用xa_lock(),然后在
225 xa_lock(&foo->array);
234 上面的例子还显示了一个常见的模式,即希望在存储端扩展xa_lock的覆盖范围,以保护与数组相关的一些统计
238 在进程上下文中使用xa_lock_irq(),在中断处理程序中使用xa_lock()。一些更常见的模式有一些辅助函数,
242 意味着你有权使用像__xa_erase()这样的函数而不占用xa_lock。xa_lock是用来进行lockdep验证的,将来也
252 你需要在修改数组的时候使用xa_lock。在对数组进行只读操作时,你可以选择使用xa_lock或RCU锁。你可以在
267 存在xa_state中供下一次尝试。这个想法是,你拿着xa_lock,尝试操作,然后放弃锁。该操作试图在持有锁的情
295 - 这个条目目前正在被一个拥有xa_lock的线程修改。在这个RCU周期结束时,包含该条目的节点可能会被释放。
[all …]
/linux/include/linux/
H A Dxarray.h301 spinlock_t xa_lock; member
308 .xa_lock = __SPIN_LOCK_UNLOCKED(name.xa_lock), \
384 spin_lock_init(&xa->xa_lock); in xa_init_flags()
535 #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock)
536 #define xa_lock(xa) spin_lock(&(xa)->xa_lock) macro
537 #define xa_unlock(xa) spin_unlock(&(xa)->xa_lock)
538 #define xa_lock_bh(xa) spin_lock_bh(&(xa)->xa_lock)
539 #define xa_unlock_bh(xa) spin_unlock_bh(&(xa)->xa_lock)
540 #define xa_lock_irq(xa) spin_lock_irq(&(xa)->xa_lock)
541 #define xa_unlock_irq(xa) spin_unlock_irq(&(xa)->xa_lock)
[all …]
H A Didr.h102 #define idr_lock(idr) xa_lock(&(idr)->idr_rt)
H A Dbacking-dev.h242 !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) && in inode_to_wb()
/linux/tools/testing/radix-tree/
H A Dregression1.c128 xa_lock(&mt_tree); in regression1_fn()
133 xa_lock(&mt_tree); in regression1_fn()
137 xa_lock(&mt_tree); in regression1_fn()
145 xa_lock(&mt_tree); in regression1_fn()
/linux/drivers/infiniband/core/
H A Dib_core_uverbs.c129 xa_lock(&ucontext->mmap_xa); in rdma_user_mmap_entry_get_pgoff()
194 xa_lock(&ucontext->mmap_xa); in rdma_user_mmap_entry_free()
242 xa_lock(&entry->ucontext->mmap_xa); in rdma_user_mmap_entry_remove()
311 xa_lock(&ucontext->mmap_xa); in rdma_user_mmap_entry_insert_range()
H A Drestrack.c72 xa_lock(&rt->xa); in rdma_restrack_count()
251 xa_lock(&rt->xa); in rdma_restrack_get_byid()
H A Ducma.c145 xa_lock(&ctx_table); in ucma_get_ctx()
367 xa_lock(&ctx_table); in ucma_event_handler()
498 xa_lock(&multicast_table); in ucma_cleanup_multicast()
611 xa_lock(&ctx_table); in ucma_destroy_id()
1543 xa_lock(&multicast_table); in ucma_process_join()
1578 xa_lock(&multicast_table); in ucma_process_join()
1642 xa_lock(&multicast_table); in ucma_leave_multicast()
1712 xa_lock(&ctx_table); in ucma_migrate_id()
/linux/drivers/iommu/iommufd/
H A Dvfio_compat.c19 xa_lock(&ictx->objects); in get_compat_ioas()
59 xa_lock(&ictx->objects); in iommufd_vfio_compat_set_no_iommu()
89 xa_lock(&ictx->objects); in iommufd_vfio_compat_ioas_create()
143 xa_lock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
150 xa_lock(&ucmd->ictx->objects); in iommufd_vfio_ioas()
H A Dmain.c108 xa_lock(&ictx->objects); in iommufd_object_finalize()
121 xa_lock(&ictx->objects); in iommufd_object_abort()
174 xa_lock(&ictx->objects); in iommufd_get_object()
235 xa_lock(&ictx->objects); in iommufd_object_remove()
H A Ddriver.c98 lockdep_assert_held(&viommu->vdevs.xa_lock); in iommufd_viommu_find_dev()
116 xa_lock(&viommu->vdevs); in iommufd_viommu_get_vdev_id()
H A Dioas.c412 xa_lock(&ictx->objects); in iommufd_take_all_iova_rwsem()
434 xa_lock(&ictx->objects); in iommufd_take_all_iova_rwsem()
603 xa_lock(&ictx->objects); in iommufd_option_rlimit_mode()
H A Ddevice.c81 xa_lock(&ictx->groups); in iommufd_get_group()
114 xa_lock(&ictx->groups); in iommufd_get_group()
311 xa_lock(&ictx->objects); in iommufd_ctx_has_group()
1308 xa_lock(&ioas->iopt.access_list); in iommufd_access_notify_unmap()
1318 xa_lock(&ioas->iopt.access_list); in iommufd_access_notify_unmap()
/linux/drivers/gpu/drm/imagination/
H A Dpvr_free_list.h142 xa_lock(&pvr_file->free_list_handles); in pvr_free_list_lookup()
165 xa_lock(&pvr_dev->free_list_ids); in pvr_free_list_lookup_id()
H A Dpvr_hwrt.h104 xa_lock(&pvr_file->hwrt_handles); in pvr_hwrt_dataset_lookup()
/linux/drivers/iommu/amd/
H A Dnested.c72 xa_lock(xa); in gdom_info_load_or_alloc_locked()
139 xa_lock(&aviommu->gdomid_array); in amd_iommu_alloc_domain_nested()
264 xa_lock(&aviommu->gdomid_array); in nested_domain_free()
/linux/fs/cachefiles/
H A Dondemand.c34 xa_lock(&cache->reqs); in cachefiles_ondemand_fd_release()
130 xa_lock(&cache->reqs); in cachefiles_ondemand_fd_ioctl()
191 xa_lock(&cache->reqs); in cachefiles_ondemand_copen()
423 xa_lock(&cache->reqs); in cachefiles_ondemand_daemon_read()
716 xa_lock(&cache->reqs); in cachefiles_ondemand_clean_object()
/linux/rust/helpers/
H A Dxarray.c22 return xa_lock(xa); in rust_helper_xa_lock()
/linux/arch/arm64/kernel/
H A Dhibernate.c246 xa_lock(&mte_pages); in swsusp_mte_free_storage()
303 xa_lock(&mte_pages); in swsusp_mte_restore_tags()
/linux/drivers/block/
H A Dbrd.c95 xa_lock(&brd->brd_pages); in brd_insert_page()
190 xa_lock(&brd->brd_pages); in brd_do_discard()
/linux/drivers/infiniband/hw/mlx4/
H A Dcm.c366 xa_lock(&sriov->xa_rej_tmout); in alloc_rej_tmout()
411 xa_lock(&sriov->xa_rej_tmout); in lookup_rej_tmout_slave()
501 xa_lock(&sriov->xa_rej_tmout); in rej_tmout_xa_cleanup()
/linux/rust/kernel/
H A Dxarray.rs137 unsafe { bindings::xa_lock(self.xa.get()) }; in lock()
/linux/net/shaper/
H A Dshaper.c341 xa_lock(&hierarchy->shapers); in net_shaper_pre_insert()
370 xa_lock(&hierarchy->shapers); in net_shaper_commit()
398 xa_lock(&hierarchy->shapers); in net_shaper_rollback()
1363 xa_lock(&hierarchy->shapers); in net_shaper_flush()
/linux/net/devlink/
H A Dregion.c283 xa_lock(&devlink->snapshot_ids); in __devlink_snapshot_id_increment()
325 xa_lock(&devlink->snapshot_ids); in __devlink_snapshot_id_decrement()
367 xa_lock(&devlink->snapshot_ids); in __devlink_snapshot_id_insert()
/linux/lib/
H A Dxarray.c329 __must_hold(xas->xa->xa_lock) in __xas_nomem()
1669 xa_lock(xa); in xa_erase()
1733 xa_lock(xa); in xa_store()
2148 xa_lock(xa); in xa_set_mark()
2166 xa_lock(xa); in xa_clear_mark()

123