| /linux/drivers/dma-buf/ |
| st-dma-resv.c |
|    40  struct dma_resv resv;  in sanitycheck() local |
|    53  dma_resv_init(&resv);  in sanitycheck() |
|    54  r = dma_resv_lock(&resv, NULL);  in sanitycheck() |
|    58  dma_resv_unlock(&resv);  in sanitycheck() |
|    59  dma_resv_fini(&resv);  in sanitycheck() |
|    66  struct dma_resv resv;  in test_signaling() local |
|    76  dma_resv_init(&resv);  in test_signaling() |
|    77  r = dma_resv_lock(&resv, NULL);  in test_signaling() |
|    83  r = dma_resv_reserve_fences(&resv, 1);  in test_signaling() |
|    89  dma_resv_add_fence(&resv, f, usage);  in test_signaling() |
|    [all …] |
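The selftest above exercises the canonical dma_resv lifecycle: init, lock, reserve a fence slot, add the fence, unlock, fini. A minimal sketch of the same sequence, assuming a fence f obtained elsewhere (the real selftest uses a private stub fence):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    static int resv_add_one_fence(struct dma_fence *f, enum dma_resv_usage usage)
    {
            struct dma_resv resv;
            int r;

            dma_resv_init(&resv);

            r = dma_resv_lock(&resv, NULL);  /* NULL: no ww_acquire_ctx for a single lock */
            if (r)
                    goto out_fini;

            /* A slot must be reserved before a fence may be added ... */
            r = dma_resv_reserve_fences(&resv, 1);
            if (!r)
                    /* ... so that this call cannot fail or allocate. */
                    dma_resv_add_fence(&resv, f, usage);

            dma_resv_unlock(&resv);
    out_fini:
            dma_resv_fini(&resv);
            return r;
    }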
|
| /linux/fs/xfs/scrub/ |
| newbt.c |
|   100  enum xfs_ag_resv_type resv)  in xrep_newbt_init_ag() argument |
|   106  xnr->resv = resv;  in xrep_newbt_init_ag() |
|   195  struct xrep_newbt_resv *resv;  in xrep_newbt_add_blocks() local |
|   198  resv = kmalloc(sizeof(struct xrep_newbt_resv), XCHK_GFP_FLAGS);  in xrep_newbt_add_blocks() |
|   199  if (!resv)  in xrep_newbt_add_blocks() |
|   202  INIT_LIST_HEAD(&resv->list);  in xrep_newbt_add_blocks() |
|   203  resv->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);  in xrep_newbt_add_blocks() |
|   204  resv->len = args->len;  in xrep_newbt_add_blocks() |
|   205  resv…  in xrep_newbt_add_blocks() |
|   422  xrep_newbt_free_extent(struct xrep_newbt *xnr, struct xrep_newbt_resv *resv, bool btree_committed)  in xrep_newbt_free_extent() argument |
|   480  struct xrep_newbt_resv *resv, *n;  in xrep_newbt_free() local |
|   566  struct xrep_newbt_resv *resv;  in xrep_newbt_claim_block() local |
|   607  struct xrep_newbt_resv *resv;  in xrep_newbt_unused_blocks() local |
|   [all …] |
| alloc_repair.c |
|   533  struct xrep_newbt_resv *resv)  in xrep_abt_dispose_one() argument |
|   537  xfs_agblock_t free_agbno = resv->agbno + resv->used;  in xrep_abt_dispose_one() |
|   538  xfs_extlen_t free_aglen = resv->len - resv->used;  in xrep_abt_dispose_one() |
|   541  ASSERT(pag == resv->pag);  in xrep_abt_dispose_one() |
|   544  if (resv->used > 0)  in xrep_abt_dispose_one() |
|   546  xfs_agbno_to_fsb(pag, resv->agbno), resv->used,  in xrep_abt_dispose_one() |
|   557  trace_xrep_newbt_free_blocks(resv…  in xrep_abt_dispose_one() |
|   579  struct xrep_newbt_resv *resv, *n;  in xrep_abt_dispose_reservations() local |
|   [all …] |
| /linux/fs/xfs/libxfs/ |
| xfs_ag_resv.c |
|   135  struct xfs_ag_resv *resv;  in __xfs_ag_resv_free() local |
|   140  resv = xfs_perag_resv(pag, type);  in __xfs_ag_resv_free() |
|   142  pag_mount(pag)->m_ag_max_usable += resv->ar_asked;  in __xfs_ag_resv_free() |
|   149  oldresv = resv->ar_orig_reserved;  in __xfs_ag_resv_free() |
|   151  oldresv = resv->ar_reserved;  in __xfs_ag_resv_free() |
|   153  resv->ar_reserved = 0;  in __xfs_ag_resv_free() |
|   154  resv->ar_asked = 0;  in __xfs_ag_resv_free() |
|   155  resv->ar_orig_reserved = 0;  in __xfs_ag_resv_free() |
|   175  struct xfs_ag_resv *resv;  in __xfs_ag_resv_init() local |
|   226  resv…  in __xfs_ag_resv_init() |
|   342  struct xfs_ag_resv *resv;  in xfs_ag_resv_alloc_extent() local |
|   387  struct xfs_ag_resv *resv;  in xfs_ag_resv_free_extent() local |
|   [all …] |
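The __xfs_ag_resv_free() lines above show the accounting contract: the blocks the reservation asked for go back into the mount's usable-space ceiling, whatever is still held back goes back to free space, and the counters are zeroed. A standalone restatement of just that arithmetic (the condition choosing between ar_orig_reserved and ar_reserved is elided in the excerpt, so a flag stands in for it here; field names follow the excerpt):

    struct ag_resv_sketch {
            unsigned long ar_asked;          /* blocks requested when the resv was set up */
            unsigned long ar_reserved;       /* blocks currently held out of free space */
            unsigned long ar_orig_reserved;  /* blocks held out at init time */
    };

    /* Returns how many blocks the caller must hand back to free space. */
    static unsigned long ag_resv_free_sketch(struct ag_resv_sketch *resv,
                                             unsigned long *ag_max_usable,
                                             int return_original)
    {
            unsigned long oldresv;

            *ag_max_usable += resv->ar_asked;  /* usable ceiling grows again */

            oldresv = return_original ? resv->ar_orig_reserved
                                      : resv->ar_reserved;

            resv->ar_reserved = 0;
            resv->ar_asked = 0;
            resv->ar_orig_reserved = 0;

            return oldresv;
    }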
| /linux/drivers/gpu/drm/ |
| drm_gem_shmem_helper.c |
|   175  dma_resv_lock(shmem->base.resv, NULL);  in drm_gem_shmem_create_with_mnt() |
|   191  dma_resv_unlock(shmem->base.resv);  in drm_gem_shmem_release() |
|   217  dma_resv_assert_held(shmem->base.resv); |
|   256  dma_resv_assert_held(shmem->base.resv);  in drm_gem_shmem_get_pages_locked() |
|   276  dma_resv_assert_held(shmem->base.resv);  in drm_gem_shmem_put_pages_locked() |
|   293  dma_resv_assert_held(shmem->base.resv);  in drm_gem_shmem_pin_locked() |
|   320  ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); |
|   324  dma_resv_unlock(shmem->base.resv); |
|   346  dma_resv_lock(shmem->base.resv, NULL); |
|   348  dma_resv_unlock(shmem->base.resv); |
|   [all …] |
| drm_gem.c |
|   231  if (!obj->resv) |
|   232  obj->resv = &obj->_resv; |
|   898  ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),  in drm_gem_flink_ioctl() |
|  1381  dma_resv_assert_held(obj->resv);  in drm_gem_lock_reservations() |
|  1398  dma_resv_assert_held(obj->resv);  in drm_gem_lock_reservations() |
|  1413  dma_resv_lock(obj->resv, NULL); |
|  1419  dma_resv_unlock(obj->resv); |
|  1427  dma_resv_lock(obj->resv, NULL); |
|  1429  dma_resv_unlock(obj->resv); |
|  1437  dma_resv_lock(obj->resv, NULL);  in drm_gem_lru_remove_locked() |
|   [all …] |
| /linux/drivers/gpu/drm/ttm/ |
| ttm_bo.c |
|    49  #include <linux/dma-resv.h> |
|    81  dma_resv_assert_held(bo->base.resv);  in ttm_bo_move_to_lru_tail() |
|   105  dma_resv_assert_held(bo->base.resv);  in ttm_bo_set_bulk_move() |
|   153  ret = dma_resv_reserve_fences(bo->base.resv, 1);  in ttm_bo_handle_move_mem() |
|   195  if (bo->base.resv == &bo->base._resv)  in ttm_bo_individualize_resv() |
|   200  r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);  in ttm_bo_individualize_resv() |
|   208  * the resv object while holding the lru_lock.  in ttm_bo_individualize_resv() |
|   211  bo->base.resv = &bo->base._resv;  in ttm_bo_individualize_resv() |
|   220  struct dma_resv *resv = &bo->base._resv;  in ttm_bo_flush_all_fences() local |
|   224  dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP…  in ttm_bo_flush_all_fences() |
|   934  ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t alignment, struct ttm_operation_ctx *ctx, struct sg_table *sg, struct dma_resv *resv, void (*destroy)(struct ttm_buffer_object *))  in ttm_bo_init_reserved() argument |
|  1027  ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t alignment, bool interruptible, struct sg_table *sg, struct dma_resv *resv, void (*destroy)(struct ttm_buffer_object *))  in ttm_bo_init_validate() argument |
|   [all …] |
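The ttm_bo_flush_all_fences() lines show the unlocked fence-iterator pattern. A minimal sketch, assuming the caller just wants every fence on the object to start signalling (which is what the real function does via dma_fence_enable_sw_signaling()):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    static void flush_all_fences_sketch(struct dma_resv *resv)
    {
            struct dma_resv_iter cursor;
            struct dma_fence *fence;

            /* BOOKKEEP is the widest usage class, so this visits every fence. */
            dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
            dma_resv_for_each_fence_unlocked(&cursor, fence)
                    dma_fence_enable_sw_signaling(fence);
            dma_resv_iter_end(&cursor);
    }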
| ttm_bo_vm.c |
|    51  if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_KERNEL))  in ttm_bo_vm_fault_idle() |
|    65  (void)dma_resv_wait_timeout(bo->base.resv,  in ttm_bo_vm_fault_idle() |
|    68  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_fault_idle() |
|    76  err = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_KERNEL, true,  in ttm_bo_vm_fault_idle() |
|   127  if (unlikely(!dma_resv_trylock(bo->base.resv))) {  in ttm_bo_vm_reserve() |
|   137  if (!dma_resv_lock_interruptible(bo->base.resv,  in ttm_bo_vm_reserve() |
|   139  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve() |
|   146  if (dma_resv_lock_interruptible(bo->base.resv, NULL))  in ttm_bo_vm_reserve() |
|   156  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_reserve() |
|   343  dma_resv_unlock(bo->base.resv);  in ttm_bo_vm_fault() |
|   [all …] |
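ttm_bo_vm_reserve() above layers its locking for the page-fault path: an opportunistic trylock first, then a sleeping interruptible lock. A simplified sketch of that shape (the VM_FAULT_RETRY/mmap_lock handoff in the real code is omitted here):

    #include <linux/dma-resv.h>
    #include <linux/mm.h>

    static vm_fault_t fault_lock_sketch(struct dma_resv *resv)
    {
            /* Fast path: don't sleep under mmap_lock if the lock is free. */
            if (likely(dma_resv_trylock(resv)))
                    return 0;

            /* Slow path: sleep, but let a signal abort so the fault retries. */
            if (dma_resv_lock_interruptible(resv, NULL))
                    return VM_FAULT_NOPAGE;

            return 0;
    }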
| ttm_bo_util.c |
|   254  fbo->base.base.resv = &fbo->base.base._resv;  in ttm_buffer_object_transfer() |
|   503  dma_resv_assert_held(bo->base.resv);  in ttm_bo_vmap() |
|   568  dma_resv_assert_held(bo->base.resv); |
|   588  ret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,  in ttm_bo_wait_free_node() |
|   711  dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);  in ttm_bo_move_accel_cleanup() |
|   770  if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {  in ttm_bo_pipeline_gutting() |
|   804  ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);  in ttm_bo_pipeline_gutting() |
|   807  dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP,  in ttm_bo_pipeline_gutting() |
|   828  if (dma_resv_trylock(bo->base.resv)) {  in ttm_lru_walk_trylock() |
|   833  if (bo->base.resv…  in ttm_lru_walk_trylock() |
|   851  struct dma_resv *resv = bo->base.resv;  in ttm_lru_walk_ticketlock() local |
|   [all …] |
| /linux/drivers/gpu/drm/virtio/ |
| virtgpu_prime.c |
|   134  exp_info.resv = obj->resv;  in virtgpu_gem_prime_export() |
|   155  dma_resv_assert_held(attach->dmabuf->resv);  in virtgpu_dma_buf_import_sgt() |
|   157  ret = dma_resv_wait_timeout(attach->dmabuf->resv,  in virtgpu_dma_buf_import_sgt() |
|   190  dma_resv_assert_held(attach->dmabuf->resv);  in virtgpu_dma_buf_unmap() |
|   212  dma_resv_lock(dmabuf->resv, NULL);  in virtgpu_dma_buf_free_obj() |
|   214  dma_resv_unlock(dmabuf->resv);  in virtgpu_dma_buf_free_obj() |
|   234  struct dma_resv *resv = attach->dmabuf->resv;  in virtgpu_dma_buf_init_obj() local |
|   245  dma_resv_lock(resv, NULL);  in virtgpu_dma_buf_init_obj() |
|   265  dma_resv_unlock(resv);  in virtgpu_dma_buf_init_obj() |
|   272  dma_resv_unlock(resv);  in virtgpu_dma_buf_init_obj() |
|   [all …] |
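Line 134 above is the key move when exporting a GEM object as a dma-buf: the exporter hands its own reservation object to the new buffer, so importer and exporter serialize on a single fence list. A sketch of that export step, with the dma_buf_ops and surrounding driver glue left as placeholders:

    #include <linux/dma-buf.h>
    #include <drm/drm_gem.h>

    static struct dma_buf *prime_export_sketch(struct drm_gem_object *obj,
                                               const struct dma_buf_ops *ops,
                                               int flags)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            exp_info.ops = ops;          /* the driver's dma_buf_ops */
            exp_info.size = obj->size;
            exp_info.flags = flags;
            exp_info.priv = obj;
            exp_info.resv = obj->resv;   /* share the GEM object's resv */

            return dma_buf_export(&exp_info);
    }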
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_dma_buf.c |
|   103  r = dma_resv_lock(bo->tbo.base.resv, NULL);  in amdgpu_dma_buf_attach() |
|   109  dma_resv_unlock(bo->tbo.base.resv);  in amdgpu_dma_buf_attach() |
|   127  dma_resv_assert_held(dmabuf->resv);  in amdgpu_dma_buf_pin() |
|   422  struct dma_resv *resv = dma_buf->resv;  in amdgpu_dma_buf_create_obj() local |
|   429  dma_resv_lock(resv, NULL);  in amdgpu_dma_buf_create_obj() |
|   442  ttm_bo_type_sg, resv, &gobj, 0); |
|   450  dma_resv_unlock(resv);  in amdgpu_dma_buf_move_notify() |
|   454  dma_resv_unlock(resv);  in amdgpu_dma_buf_move_notify() |
|   470  struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);  in amdgpu_dma_buf_move_notify() |
|   475  struct dma_resv *resv = vm->root.bo->tbo.base.resv;  in amdgpu_dma_buf_move_notify() local |
|   [all …] |
| amdgpu_eviction_fence.c |
|   201  struct dma_resv *resv = bo->tbo.base.resv;  in amdgpu_eviction_fence_attach() local |
|   204  if (!resv)  in amdgpu_eviction_fence_attach() |
|   207  ret = dma_resv_reserve_fences(resv, 1);  in amdgpu_eviction_fence_attach() |
|   216  dma_resv_add_fence(resv, &ev_fence->base, DMA_RESV_USAGE_BOOKKEEP);  in amdgpu_eviction_fence_attach() |
|   227  dma_resv_replace_fences(bo->tbo.base.resv, evf_mgr->ev_fence_ctx,  in amdgpu_eviction_fence_detach() |
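The detach path above (line 227) uses dma_resv_replace_fences() to swap out every fence belonging to one fence context in a single call. A sketch, assuming the replacement is the kernel's always-signaled stub fence:

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    static void eviction_fence_detach_sketch(struct dma_resv *resv, u64 ev_fence_ctx)
    {
            struct dma_fence *stub = dma_fence_get_stub();

            dma_resv_lock(resv, NULL);
            /* Every fence whose context == ev_fence_ctx is replaced at once. */
            dma_resv_replace_fences(resv, ev_fence_ctx, stub,
                                    DMA_RESV_USAGE_BOOKKEEP);
            dma_resv_unlock(resv);

            dma_fence_put(stub);
    }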
|
| /linux/drivers/gpu/drm/radeon/ |
| radeon_prime.c |
|    49  struct dma_resv *resv = attach->dmabuf->resv;  in radeon_gem_prime_import_sg_table() local |
|    54  dma_resv_lock(resv, NULL);  in radeon_gem_prime_import_sg_table() |
|    56  RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);  in radeon_gem_prime_import_sg_table() |
|    57  dma_resv_unlock(resv);  in radeon_gem_prime_import_sg_table() |
|
| radeon_benchmark.c |
|    38  struct dma_resv *resv)  in radeon_benchmark_do_move() argument |
|    51  resv);  in radeon_benchmark_do_move() |
|    56  resv);  in radeon_benchmark_do_move() |
|   125  dobj->tbo.base.resv);  in radeon_benchmark_move() |
|   136  dobj->tbo.base.resv);  in radeon_benchmark_move() |
|
| /linux/tools/include/uapi/linux/ |
| io_uring.h |
|   493  __u32 resv[3];  member |
|   577  __u32 resv;  member |
|   597  __u32 resv;  member |
|   603  __u32 resv;  member |
|   617  __u8 resv;  member |
|   625  __u16 resv;  member |
|   637  __u8 resv;  member |
|   645  __u16 resv;  member |
|   684  __u64 resv[3];  member |
|   733  __u64 resv;  member |
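Every resv member above is reserved uapi padding: for most io_uring entry points the kernel rejects calls that leave these fields non-zero, which is what lets them gain meaning later without breaking old binaries. The standard userspace defense is to zero the whole struct before filling in defined fields:

    #include <string.h>
    #include <linux/io_uring.h>

    static void init_params(struct io_uring_params *p, unsigned int flags)
    {
            memset(p, 0, sizeof(*p));  /* clears resv[] and any future fields */
            p->flags = flags;          /* then set only what is defined today */
    }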
|
| /linux/drivers/gpu/drm/ttm/tests/ |
| ttm_bo_validate_test.c |
|    66  struct dma_resv *resv,  in dma_resv_kunit_active_fence_init() argument |
|    74  dma_resv_lock(resv, NULL);  in dma_resv_kunit_active_fence_init() |
|    75  dma_resv_reserve_fences(resv, 1);  in dma_resv_kunit_active_fence_init() |
|    76  dma_resv_add_fence(resv, fence, usage);  in dma_resv_kunit_active_fence_init() |
|    77  dma_resv_unlock(resv);  in dma_resv_kunit_active_fence_init() |
|   127  dma_resv_unlock(bo->base.resv);  in ttm_bo_init_reserved_sys_man() |
|   139  KUNIT_EXPECT_NOT_NULL(test, (void *)bo->base.resv->fences);  in ttm_bo_init_reserved_sys_man() |
|   176  dma_resv_unlock(bo->base.resv);  in ttm_bo_init_reserved_mock_man() |
|   202  struct dma_resv resv;  in ttm_bo_init_reserved_resv() local |
|   212  dma_resv_init(&resv);  in ttm_bo_init_reserved_resv() |
|   576  struct dma_resv *resv = bo->base.resv;  in threaded_dma_resv_signal() local |
|   [all …] |
| ttm_tt_test.c |
|   161  dma_resv_lock(bo->base.resv, NULL);  in ttm_tt_create_basic() |
|   163  dma_resv_unlock(bo->base.resv);  in ttm_tt_create_basic() |
|   180  dma_resv_lock(bo->base.resv, NULL);  in ttm_tt_create_invalid_bo_type() |
|   182  dma_resv_unlock(bo->base.resv);  in ttm_tt_create_invalid_bo_type() |
|   204  dma_resv_lock(bo->base.resv, NULL);  in ttm_tt_create_ttm_exists() |
|   206  dma_resv_unlock(bo->base.resv);  in ttm_tt_create_ttm_exists() |
|   234  dma_resv_lock(bo->base.resv, NULL);  in ttm_tt_create_failed() |
|   236  dma_resv_unlock(bo->base.resv);  in ttm_tt_create_failed() |
|   249  dma_resv_lock(bo->base.resv, NULL);  in ttm_tt_destroy_basic() |
|   251  dma_resv_unlock(bo->base.resv);  in ttm_tt_destroy_basic() |
|
| /linux/kernel/irq/ |
| affinity.c |
|   112  unsigned int resv = affd->pre_vectors + affd->post_vectors;  in irq_calc_affinity_vectors() local |
|   115  if (resv > minvec)  in irq_calc_affinity_vectors() |
|   119  set_vecs = maxvec - resv;  in irq_calc_affinity_vectors() |
|   126  return resv + min(set_vecs, maxvec - resv);  in irq_calc_affinity_vectors() |
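The four lines above are the whole vector-budget calculation: the reserved (pre + post) vectors are set aside, and the spreadable remainder is capped at maxvec - resv. A standalone, runnable restatement with a worked example (set_vecs stands in for the CPU-derived value the kernel computes):

    #include <stdio.h>

    static unsigned int calc_affinity_vectors(unsigned int minvec,
                                              unsigned int maxvec,
                                              unsigned int pre, unsigned int post,
                                              unsigned int set_vecs)
    {
            unsigned int resv = pre + post;

            if (resv > minvec)
                    return 0;  /* can't even satisfy the reserved vectors */

            if (set_vecs > maxvec - resv)
                    set_vecs = maxvec - resv;  /* min(set_vecs, maxvec - resv) */

            return resv + set_vecs;
    }

    int main(void)
    {
            /* 1 pre + 1 post reserved, 8 CPUs, 16 vectors max: 2 + min(8, 14) = 10 */
            printf("%u\n", calc_affinity_vectors(2, 16, 1, 1, 8));
            return 0;
    }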
|
| /linux/drivers/infiniband/core/ |
| umem_dmabuf.c |
|     7  #include <linux/dma-resv.h> |
|    24  dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_map_pages() |
|    75  ret = dma_resv_wait_timeout(umem_dmabuf->attach->dmabuf->resv,  in ib_umem_dmabuf_map_pages() |
|    88  dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_unmap_pages() |
|   213  dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);  in ib_umem_dmabuf_get_pinned_with_dma_device() |
|   222  dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_get_pinned_with_dma_device() |
|   229  dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);  in ib_umem_dmabuf_get_pinned_with_dma_device() |
|   249  dma_resv_lock(dmabuf->resv, NULL);  in ib_umem_dmabuf_revoke() |
|   259  dma_resv_unlock(dmabuf->resv);  in ib_umem_dmabuf_revoke() |
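ib_umem_dmabuf_map_pages() above waits on DMA_RESV_USAGE_KERNEL fences with the reservation lock held, i.e. it lets any in-flight migration of the buffer finish before the new mapping is used. The core of that pattern, sketched:

    #include <linux/dma-resv.h>
    #include <linux/sched.h>

    static int wait_for_moves_sketch(struct dma_resv *resv)
    {
            long ret;

            dma_resv_assert_held(resv);

            /* Block (uninterruptibly) until all kernel/move fences signal. */
            ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL,
                                        false, MAX_SCHEDULE_TIMEOUT);
            return ret < 0 ? ret : 0;
    }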
|
| /linux/include/uapi/linux/ |
| io_uring.h |
|   614  __u32 resv[3];  member |
|   734  __u32 resv;  member |
|   779  __u32 resv;  member |
|   785  __u32 resv;  member |
|   799  __u8 resv;  member |
|   807  __u16 resv;  member |
|   819  __u8 resv;  member |
|   826  __u32 resv[3];  member |
|   853  __u16 resv;  member |
|   860  * ring tail is overlaid with the io_uring_buf->resv field |
|   878  __u64 resv[3];  member |
|   885  __u32 resv[8];  member |
|   921  __u32 resv;  member |
|   993  __u64 resv;  member |
|   [all …] |
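The comment at line 860 refers to struct io_uring_buf_ring: the ring's tail is stored where the first entry's resv field would otherwise sit, so the ring metadata costs no extra space. Userspace publishes new entries by bumping that tail with release semantics, roughly what liburing's io_uring_buf_ring_advance() does:

    #include <linux/io_uring.h>

    static void buf_ring_advance_sketch(struct io_uring_buf_ring *br, int count)
    {
            unsigned short new_tail = br->tail + count;

            /* Entries must be visible before the tail that publishes them. */
            __atomic_store_n(&br->tail, new_tail, __ATOMIC_RELEASE);
    }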
| /linux/drivers/vfio/pci/ |
| vfio_pci_dmabuf.c |
|     6  #include <linux/dma-resv.h> |
|    53  dma_resv_assert_held(priv->dmabuf->resv);  in vfio_pci_dma_buf_map() |
|   113  dma_resv_assert_held(attachment->dmabuf->resv);  in vfio_pci_dma_buf_iommufd_map() |
|   293  dma_resv_lock(priv->dmabuf->resv, NULL);  in vfio_pci_core_feature_dma_buf() |
|   296  dma_resv_unlock(priv->dmabuf->resv);  in vfio_pci_core_feature_dma_buf() |
|   333  dma_resv_lock(priv->dmabuf->resv, NULL);  in vfio_pci_dma_buf_move() |
|   336  dma_resv_unlock(priv->dmabuf->resv);  in vfio_pci_dma_buf_move() |
|   352  dma_resv_lock(priv->dmabuf->resv, NULL);  in vfio_pci_dma_buf_cleanup() |
|   357  dma_resv_unlock(priv->dmabuf->resv);  in vfio_pci_dma_buf_cleanup() |
|
| /linux/mm/ |
| hugetlb.c |
|   460  get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)  in get_file_region_entry_from_cache() argument |
|   464  VM_BUG_ON(resv->region_cache_count <= 0);  in get_file_region_entry_from_cache() |
|   466  resv->region_cache_count--; |
|   467  nrg = list_first_entry(&resv->region_cache, struct file_region, link); |
|   490  record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h, struct resv_map *resv, struct file_region *nrg)  in record_hugetlb_cgroup_uncharge_info() argument |
|   509  if (!resv->pages_per_hpage)  in record_hugetlb_cgroup_uncharge_info() |
|   510  resv->pages_per_hpage = pages_per_huge_page(h);  in record_hugetlb_cgroup_uncharge_info() |
|   514  VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));  in record_hugetlb_cgroup_uncharge_info() |
|   542  static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)  in coalesce_file_region() |
|   547  if (&prg->link != &resv…  in coalesce_file_region() |
|   605  add_reservation_in_range(struct resv_map *resv, long f, long t, struct hugetlb_cgroup *h_cg, struct hstate *h, long *regions_needed)  in add_reservation_in_range() argument |
|   667  allocate_file_region_entries(struct resv_map *resv, int regions_needed)  in allocate_file_region_entries() argument |
|   738  region_add(struct resv_map *resv, long f, long t, long in_regions_needed, struct hstate *h, struct hugetlb_cgroup *h_cg)  in region_add() argument |
|   805  region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed)  in region_chg() argument |
|   841  region_abort(struct resv_map *resv, long f, long t, long regions_needed)  in region_abort() argument |
|   864  region_del(struct resv_map *resv, long f, long t)  in region_del() argument |
|   991  region_count(struct resv_map *resv, long f, long t)  in region_count() argument |
|  2514  struct resv_map *resv;  in __vma_reservation_common() local |
|  4692  struct resv_map *resv = vma_resv_map(vma);  in hugetlb_vm_op_open() local |
|  4731  struct resv_map *resv;  in hugetlb_vm_op_close() local |
|   [all …] |
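The region_chg()/region_add()/region_abort() declarations above form a two-phase protocol: estimate and pre-allocate file_region map entries first (the map lock may be dropped to allocate), then either commit or undo. These helpers are static inside mm/hugetlb.c, so the caller below is a hedged illustration using the signatures listed; back_with_pages() is a hypothetical stand-in for the page/quota step that sits between the two phases:

    /* Hypothetical stand-in for reserving backing huge pages or quota. */
    static long back_with_pages(long nr_pages)
    {
            return nr_pages >= 0 ? 0 : -ENOMEM;
    }

    static long reserve_range_sketch(struct resv_map *resv, long f, long t,
                                     struct hstate *h,
                                     struct hugetlb_cgroup *h_cg)
    {
            long regions_needed, chg;

            /* Phase 1: how many pages does [f, t) still need? May allocate
             * file_region entries into resv->region_cache. */
            chg = region_chg(resv, f, t, &regions_needed);
            if (chg < 0)
                    return chg;

            if (back_with_pages(chg) < 0) {
                    /* Undo the phase-1 bookkeeping. */
                    region_abort(resv, f, t, regions_needed);
                    return -ENOMEM;
            }

            /* Phase 2: commit the reservation into the map. */
            return region_add(resv, f, t, regions_needed, h, h_cg);
    }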
| /linux/include/linux/sunrpc/ |
| svc.h |
|   525  struct kvec *resv = buf->head;  in svcxdr_init_encode() local |
|   530  xdr->iov = resv;  in svcxdr_init_encode() |
|   531  xdr->p = resv->iov_base + resv->iov_len;  in svcxdr_init_encode() |
|   532  xdr->end = resv->iov_base + PAGE_SIZE;  in svcxdr_init_encode() |
|   533  buf->len = resv->iov_len;  in svcxdr_init_encode() |
|   572  struct kvec *resv = buf->head;  in svcxdr_set_auth_slack() local |
|   579  WARN_ON(xdr->iov != resv);  in svcxdr_set_auth_slack() |
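Note that resv here is sunrpc's shorthand for the result/response kvec (buf->head), not a reservation object. The excerpt's assignments point an xdr_stream at the current end of that head buffer so encoding appends to it; a restatement with each assignment's intent spelled out:

    #include <linux/sunrpc/xdr.h>

    static void svcxdr_init_encode_sketch(struct xdr_buf *buf,
                                          struct xdr_stream *xdr)
    {
            struct kvec *resv = buf->head;  /* response head vector */

            xdr->iov = resv;
            xdr->p = resv->iov_base + resv->iov_len;  /* append position */
            xdr->end = resv->iov_base + PAGE_SIZE;    /* head-page limit */
            buf->len = resv->iov_len;                 /* bytes encoded so far */
    }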
|
| /linux/drivers/net/ethernet/netronome/nfp/crypto/ |
| fw.h |
|    17  u8 resv[2];  member |
|    39  u8 resv[3];  member |
|    92  u8 resv[3];  member |
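In a firmware ABI header like this, the resv byte arrays are explicit padding: they pin later fields to their documented offsets and are expected to be zero on the wire. A hypothetical illustration of the layout idiom (not nfp's actual message format):

    #include <linux/types.h>

    struct fw_msg_sketch {
            u8 opcode;
            u8 resv[3];   /* pad to a 4-byte boundary; written as zero */
            __le32 arg;   /* lands at offset 4 on every architecture */
    };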
|
| /linux/drivers/gpu/drm/imagination/ |
| pvr_gem.c |
|   214  dma_resv_lock(obj->resv, NULL);  in pvr_gem_object_vmap() |
|   230  dma_resv_unlock(obj->resv);  in pvr_gem_object_vmap() |
|   235  dma_resv_unlock(obj->resv);  in pvr_gem_object_vmap() |
|   258  dma_resv_lock(obj->resv, NULL);  in pvr_gem_object_vunmap() |
|   272  dma_resv_unlock(obj->resv);  in pvr_gem_object_vunmap() |
|