| /linux/drivers/gpu/drm/i915/ |
| H A D | i915_deps.c |
    38  if (deps->fences != &deps->single)  in i915_deps_reset_fences()
    39      kfree(deps->fences);  in i915_deps_reset_fences()
    42  deps->fences = &deps->single;  in i915_deps_reset_fences()
    52  deps->fences = NULL;  in i915_deps_init()
    69  dma_fence_put(deps->fences[i]);  in i915_deps_fini()
    71  if (deps->fences != &deps->single)  in i915_deps_fini()
    72      kfree(deps->fences);  in i915_deps_fini()
    89  memcpy(new_fences, deps->fences,  in i915_deps_grow()
    91  swap(new_fences, deps->fences);  in i915_deps_grow()
    96  deps->fences[deps->num_deps++] = dma_fence_get(fence);  in i915_deps_grow()
    [all …]
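
The i915_deps hits above show a small-vector pattern: the container embeds a single inline fence slot and only switches to a heap-allocated array once a second dependency arrives. A minimal sketch of that pattern, not the i915 code itself; struct my_deps and my_deps_add() are hypothetical names:

#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_deps {
        struct dma_fence **fences;      /* initially points at &single */
        struct dma_fence *single;
        unsigned int num_deps;          /* initially 0 */
        unsigned int fences_size;       /* initially 1 */
};

static int my_deps_add(struct my_deps *deps, struct dma_fence *fence)
{
        if (deps->num_deps >= deps->fences_size) {
                unsigned int new_size = 2 * deps->fences_size;
                struct dma_fence **new_fences;

                new_fences = kmalloc_array(new_size, sizeof(*new_fences),
                                           GFP_KERNEL);
                if (!new_fences)
                        return -ENOMEM;

                memcpy(new_fences, deps->fences,
                       deps->num_deps * sizeof(*new_fences));
                /* free the old array unless it was the inline slot */
                if (deps->fences != &deps->single)
                        kfree(deps->fences);
                deps->fences = new_fences;
                deps->fences_size = new_size;
        }

        /* take a reference; the fini path (hits at 69-72) drops it */
        deps->fences[deps->num_deps++] = dma_fence_get(fence);
        return 0;
}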
|
| H A D | i915_deps.h | 26 struct dma_fence **fences; member
|
| /linux/drivers/dma-buf/ |
| H A D | dma-fence-unwrap.c |
    96  int dma_fence_dedup_array(struct dma_fence **fences, int num_fences)  in dma_fence_dedup_array()  argument
    100 sort(fences, num_fences, sizeof(*fences), fence_cmp, NULL);  in dma_fence_dedup_array()
    107 if (fences[i]->context == fences[j]->context)  in dma_fence_dedup_array()
    108     dma_fence_put(fences[i]);  in dma_fence_dedup_array()
    110 fences[++j] = fences[i];  in dma_fence_dedup_array()
    119 struct dma_fence **fences,  in __dma_fence_unwrap_merge()  argument
    130 dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {  in __dma_fence_unwrap_merge()
    164 dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {  in __dma_fence_unwrap_merge()
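
dma_fence_unwrap_for_each() in the hits above walks every leaf fence behind a possibly nested chain/array container. A minimal sketch of iterating leaves with that helper; count_leaf_fences() is a hypothetical wrapper:

#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>

/* Count the leaf fences behind a possibly nested container fence. */
static unsigned int count_leaf_fences(struct dma_fence *head)
{
        struct dma_fence_unwrap iter;
        struct dma_fence *leaf;
        unsigned int count = 0;

        dma_fence_unwrap_for_each(leaf, &iter, head)
                count++;        /* leaf is borrowed; no dma_fence_put() here */

        return count;
}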
|
| H A D | st-dma-fence-chain.c |
    102 struct dma_fence **fences;  member
    123 fc->fences = kvmalloc_objs(*fc->fences, count, GFP_KERNEL | __GFP_ZERO);  in fence_chains_init()
    124 if (!fc->fences) {  in fence_chains_init()
    131 fc->fences[i] = mock_fence();  in fence_chains_init()
    132 if (!fc->fences[i]) {  in fence_chains_init()
    138 fc->fences[i],  in fence_chains_init()
    155 dma_fence_put(fc->fences[i]);  in fence_chains_init()
    158 kvfree(fc->fences);  in fence_chains_init()
    169 dma_fence_signal(fc->fences[i]);  in fence_chains_fini()
    170 dma_fence_put(fc->fences[i]);  in fence_chains_fini()
    [all …]
|
| H A D | st-dma-resv.c |
    228 cursor.fences = (void*)~0;  in test_for_each_unlocked()
    247 struct dma_fence *f, **fences = NULL;  in test_get_fences()  local
    274 r = dma_resv_get_fences(&resv, usage, &i, &fences);  in test_get_fences()
    280 if (i != 1 || fences[0] != f) {  in test_get_fences()
    288 dma_fence_put(fences[i]);  in test_get_fences()
    289 kfree(fences);  in test_get_fences()
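
A usage sketch for dma_resv_get_fences() as exercised by the self-test above: the call allocates the fences array and takes a reference on each entry, so the caller must drop both, exactly as the hits at lines 288-289 do. The wrapper name and the DMA_RESV_USAGE_READ filter are illustrative choices:

#include <linux/dma-resv.h>
#include <linux/slab.h>

static int snapshot_resv_fences(struct dma_resv *resv)
{
        struct dma_fence **fences;
        unsigned int count, i;
        int ret;

        ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_READ,
                                  &count, &fences);
        if (ret)
                return ret;

        /* ... inspect or wait on the snapshot here ... */

        for (i = 0; i < count; i++)
                dma_fence_put(fences[i]);
        kfree(fences);
        return 0;
}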
|
| H A D | st-dma-fence-unwrap.c |
    53  struct dma_fence **fences;  in mock_array()  local
    57  fences = kzalloc_objs(*fences, num_fences);  in mock_array()
    58  if (!fences)  in mock_array()
    63  fences[i] = va_arg(valist, typeof(*fences));  in mock_array()
    66  array = dma_fence_array_create(num_fences, fences,  in mock_array()
    74  kfree(fences);  in mock_array()
    79  dma_fence_put(va_arg(valist, typeof(*fences)));  in mock_array()
|
| H A D | dma-fence.c |
    861 dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,  in dma_fence_test_signaled_any()  argument
    867 struct dma_fence *fence = fences[i];  in dma_fence_test_signaled_any()
    898 dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,  in dma_fence_wait_any_timeout()  argument
    905 if (WARN_ON(!fences || !count || timeout < 0))  in dma_fence_wait_any_timeout()
    910 if (dma_fence_is_signaled(fences[i])) {  in dma_fence_wait_any_timeout()
    926 struct dma_fence *fence = fences[i];  in dma_fence_wait_any_timeout()
    944 if (dma_fence_test_signaled_any(fences, count, idx))  in dma_fence_wait_any_timeout()
    957 dma_fence_remove_callback(fences[i], &cb[i].base);  in dma_fence_wait_any_timeout()
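
A usage sketch for dma_fence_wait_any_timeout() as declared above: it returns the remaining timeout on success, 0 on timeout, and a negative error code otherwise, with *idx naming the first fence that signaled. The wrapper name and the one-second budget are illustrative:

#include <linux/dma-fence.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

static int wait_for_first_fence(struct dma_fence **fences, u32 count)
{
        signed long ret;
        u32 first;

        ret = dma_fence_wait_any_timeout(fences, count, true,
                                         msecs_to_jiffies(1000), &first);
        if (ret < 0)
                return ret;             /* interrupted or error */
        if (ret == 0)
                return -ETIMEDOUT;      /* none signaled within 1s */

        pr_debug("fence %u signaled first\n", first);
        return 0;
}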
|
| /linux/drivers/gpu/host1x/ |
| H A D | intr.c |
    35  if (!list_empty(&sp->fences.list)) {  in host1x_intr_update_hw_state()
    36  fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);  in host1x_intr_update_hw_state()
    47  struct host1x_fence_list *fence_list = &fence->sp->fences;  in host1x_intr_add_fence_locked()
    57  struct host1x_fence_list *fence_list = &fence->sp->fences;  in host1x_intr_remove_fence()
    83  spin_lock(&sp->fences.lock);  in host1x_intr_handle_interrupt()
    85  list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {  in host1x_intr_handle_interrupt()
    98  spin_unlock(&sp->fences.lock);  in host1x_intr_handle_interrupt()
    110 spin_lock_init(&syncpt->fences.lock);  in host1x_intr_init()
    111 INIT_LIST_HEAD(&syncpt->fences.list);  in host1x_intr_init()
|
| H A D | syncpt.h | 43 struct host1x_fence_list fences; member
|
| H A D | fence.c | 138 dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock, in host1x_fence_create()
|
| /linux/Documentation/driver-api/ |
| H A D | sync_file.rst |
    9   the fences(struct dma_fence) that are needed to synchronize between drivers or
    29  in-fences and out-fences
    33  the driver to userspace we call the fences it contains 'out-fences'. They are
    37  Out-fences are fences that the driver creates.
    40  userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
    42  the in-fences.
    72  of the Sync File to the kernel. The kernel can then retrieve the fences
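
The excerpt describes out-fences as fences the driver creates and hands to userspace inside a sync_file. A sketch of the common driver pattern for exporting one, using sync_file_create() and the standard fd helpers; export_out_fence() is a hypothetical name:

#include <linux/sync_file.h>
#include <linux/file.h>

static int export_out_fence(struct dma_fence *fence)
{
        struct sync_file *sync_file;
        int fd;

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                return fd;

        /* sync_file_create() takes its own reference on the fence */
        sync_file = sync_file_create(fence);
        if (!sync_file) {
                put_unused_fd(fd);
                return -ENOMEM;
        }

        fd_install(fd, sync_file->file);
        return fd;      /* out-fence fd handed back to userspace */
}

The matching in-fence direction is sync_file_get_fence(fd), which resolves a userspace-provided fd back into a struct dma_fence the driver can wait on.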
|
| /linux/drivers/gpu/drm/xe/ |
| H A D | xe_sync.c |
    335 struct dma_fence **fences = NULL;  in xe_sync_in_fence_get()  local
    358 fences = kmalloc_objs(*fences, num_fence);  in xe_sync_in_fence_get()
    359 if (!fences)  in xe_sync_in_fence_get()
    362 fences[current_fence++] =  in xe_sync_in_fence_get()
    365 fences[current_fence++] =  in xe_sync_in_fence_get()
    369 fences[current_fence++] =  in xe_sync_in_fence_get()
    372 fences[current_fence++] =  in xe_sync_in_fence_get()
    377 cf = dma_fence_array_create(num_fence, fences,  in xe_sync_in_fence_get()
    391 dma_fence_put(fences[--current_fence]);  in xe_sync_in_fence_get()
    392 kfree(fences);  in xe_sync_in_fence_get()
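
The xe hits collapse the collected fences into one composite via dma_fence_array_create(). A hedged sketch of that step: on success the array object takes ownership of the fences[] allocation and its references, while on failure the caller still owns everything, which is why xe_sync.c drops the references one by one before freeing. merge_fences() is a hypothetical wrapper:

#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static struct dma_fence *merge_fences(struct dma_fence **fences,
                                      int num_fences)
{
        struct dma_fence_array *array;

        /* signal_on_any=false: the composite signals once all inputs do */
        array = dma_fence_array_create(num_fences, fences,
                                       dma_fence_context_alloc(1), 1,
                                       false);
        if (!array) {
                while (num_fences--)
                        dma_fence_put(fences[num_fences]);
                kfree(fences);
                return NULL;
        }

        return &array->base;
}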
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| H A D | amdgpu_userq_fence.c |
    83  INIT_LIST_HEAD(&fence_drv->fences);  in amdgpu_userq_fence_driver_alloc()
    151 list_for_each_entry(userq_fence, &fence_drv->fences, link) {  in amdgpu_userq_fence_driver_process()
    156 list_cut_before(&to_be_signaled, &fence_drv->fences,  in amdgpu_userq_fence_driver_process()
    184 list_for_each_entry_safe(fence, tmp, &fence_drv->fences, link) {  in amdgpu_userq_fence_driver_destroy()
    273 list_add_tail(&userq_fence->link, &fence_drv->fences);  in amdgpu_userq_fence_create()
    704 struct dma_fence **fences, unsigned int *num_fences,  in amdgpu_userq_wait_add_fence()  argument
    711 fences[(*num_fences)++] = dma_fence_get(fence);  in amdgpu_userq_wait_add_fence()
    728 struct dma_fence **fences, *fence, *f;  in amdgpu_userq_wait_return_fence_info()  local
    740 fences = kmalloc_array(wait_info->num_fences, sizeof(*fences),  in amdgpu_userq_wait_return_fence_info()
    742 if (!fences) {  in amdgpu_userq_wait_return_fence_info()
    [all …]
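
A hedged sketch of the signal-and-cut pattern visible in amdgpu_userq_fence_driver_process(): walk the seqno-ordered fence list up to the first fence the hardware has not reached, then move the finished prefix onto a local list with list_cut_before() so it can be signaled outside the lock. struct my_fence, struct my_drv and process_fences() are hypothetical stand-ins, not the amdgpu types:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_fence {
        u64 seqno;
        struct list_head link;
};

struct my_drv {
        spinlock_t lock;
        struct list_head fences;        /* ordered by seqno */
        u64 hw_seqno;                   /* last value the hardware completed */
};

static void process_fences(struct my_drv *drv)
{
        struct my_fence *f;
        LIST_HEAD(done);

        spin_lock(&drv->lock);
        list_for_each_entry(f, &drv->fences, link)
                if (f->seqno > drv->hw_seqno)
                        break;
        /* if the loop ran off the end, &f->link == &drv->fences: cut all */
        list_cut_before(&done, &drv->fences, &f->link);
        spin_unlock(&drv->lock);

        /* signal the entries on 'done' here, without holding drv->lock */
}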
|
| H A D | amdgpu_userq_fence.h | 56 struct list_head fences; member
|
| H A D | amdgpu_jpeg.c |
    113 unsigned int fences = 0;  in amdgpu_jpeg_idle_work_handler()  local
    121 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]);  in amdgpu_jpeg_idle_work_handler()
    124 if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) {  in amdgpu_jpeg_idle_work_handler()
|
| H A D | amdgpu_gfx.c |
    2229 u32 i, idx, fences = 0;  in amdgpu_gfx_enforce_isolation_handler()  local
    2242 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);  in amdgpu_gfx_enforce_isolation_handler()
    2246 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);  in amdgpu_gfx_enforce_isolation_handler()
    2248 if (fences) {  in amdgpu_gfx_enforce_isolation_handler()
    2403 u32 i, fences = 0;  in amdgpu_gfx_profile_idle_work_handler()  local
    2412 fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);  in amdgpu_gfx_profile_idle_work_handler()
    2414 fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);  in amdgpu_gfx_profile_idle_work_handler()
    2415 if (!fences && !atomic_read(&adev->gfx.total_submission_cnt)) {  in amdgpu_gfx_profile_idle_work_handler()
|
| H A D | amdgpu_vcn.c |
    467 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};  in amdgpu_vcn_idle_work_handler()  local
    491 fences += fence[i];  in amdgpu_vcn_idle_work_handler()
    493 if (!fences && !atomic_read(&vcn_inst->total_submission_cnt)) {  in amdgpu_vcn_idle_work_handler()
    525 unsigned int fences = 0;  in amdgpu_vcn_ring_begin_use()  local
    529 fences += amdgpu_fence_count_emitted(&vcn_inst->ring_enc[i]);  in amdgpu_vcn_ring_begin_use()
    531 if (fences || atomic_read(&vcn_inst->dpg_enc_submission_cnt))  in amdgpu_vcn_ring_begin_use()
|
| H A D | vcn_v2_5.c |
    117 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};  in vcn_v2_5_idle_work_handler()  local
    144 fences += fence[i];  in vcn_v2_5_idle_work_handler()
    148 if (!fences && !atomic_read(&adev->vcn.inst[0].total_submission_cnt)) {  in vcn_v2_5_idle_work_handler()
    183 unsigned int fences = 0;  in vcn_v2_5_ring_begin_use()  local
    187 fences += amdgpu_fence_count_emitted(&v->ring_enc[i]);  in vcn_v2_5_ring_begin_use()
    189 if (fences || atomic_read(&v->dpg_enc_submission_cnt))  in vcn_v2_5_ring_begin_use()
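
The amdgpu jpeg/gfx/vcn hits above all share one idle-work pattern: a delayed work item re-counts the fences still emitted on every ring and only powers the block down once nothing is in flight. A hedged sketch of that pattern; all my_* names and MY_IDLE_TIMEOUT are hypothetical stand-ins:

#include <linux/workqueue.h>
#include <linux/atomic.h>

struct my_block {
        struct delayed_work idle_work;
        struct my_ring *ring;
        unsigned int num_rings;
        atomic_t total_submission_cnt;  /* submissions not yet on a ring */
};

static void my_idle_work_handler(struct work_struct *work)
{
        struct my_block *blk = container_of(work, struct my_block,
                                            idle_work.work);
        unsigned int fences = 0;
        unsigned int i;

        /* count fences emitted but not yet signaled across all rings */
        for (i = 0; i < blk->num_rings; i++)
                fences += my_fence_count_emitted(&blk->ring[i]);

        if (!fences && !atomic_read(&blk->total_submission_cnt))
                my_power_off(blk);
        else
                /* still busy: check again after the idle timeout */
                schedule_delayed_work(&blk->idle_work, MY_IDLE_TIMEOUT);
}

The ring_begin_use() hits are the other half of the handshake: they cancel or re-arm this work and bump the submission counters so the block cannot power down while new work is being queued.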
|
| /linux/Documentation/gpu/ |
| H A D | drm-vm-bind-async.rst |
    20  synchronization objects can be either generic, like dma-fences or
    31  understanding of dma-fences is required to digest this
    38  the GPU and CPU. Memory fences are sometimes referred to as
    39  user-fences, userspace-fences or gpu futexes and do not necessarily obey
    41  The kernel should thus avoid waiting for memory fences with locks held.
    46  a certain mode that disallows completion dma-fences.
    72  IOCTL returns. A synchronous VM_BIND takes neither in-fences nor
    73  out-fences. Synchronous VM_BIND may block and wait for GPU operations;
    94  Since asynchronous VM_BIND operations may use dma-fences embedded in
    96  memory fences given as VM_BIND in-fences need to be awaited
    [all …]
|
| H A D | drm-compute.rst | 8 that cannot use fences.
|
| /linux/drivers/gpu/drm/i915/gem/ |
| H A D | i915_gem_execbuffer.c |
    313  struct eb_fence *fences;  member
    2777 __free_fence_array(struct eb_fence *fences, unsigned int n)  in __free_fence_array()  argument
    2780 drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2));  in __free_fence_array()
    2781 dma_fence_put(fences[n].dma_fence);  in __free_fence_array()
    2782 dma_fence_chain_free(fences[n].chain_fence);  in __free_fence_array()
    2784 kvfree(fences);  in __free_fence_array()
    2816 f = krealloc(eb->fences,  in add_timeline_fence_array()
    2822 eb->fences = f;  in add_timeline_fence_array()
    2945 f = krealloc(eb->fences,  in add_fence_array()
    2951 eb->fences = f;  in add_fence_array()
    [all …]
|
| /linux/drivers/gpu/drm/virtio/ |
| H A D | virtgpu_fence.c |
    94  list_add_tail(&fence->node, &drv->fences);  in virtio_gpu_fence_emit()
    119 list_for_each_entry_safe(curr, tmp, &drv->fences, node) {  in virtio_gpu_fence_event_process()
    129 list_for_each_entry_safe(curr, tmp, &drv->fences, node) {  in virtio_gpu_fence_event_process()
|
| /linux/drivers/gpu/drm/radeon/ |
| H A D | radeon_trace.h |
    36  __field(u32, fences)
    42  __entry->fences = radeon_fence_count_emitted(
    47  __entry->fences)
|
| /linux/tools/memory-model/ |
| H A D | linux-kernel.cat |
    55  * A-cumulative release fences of lock-release ensure that any stores that
    97  (* Propagation: Ordering from release operations and strong fences. *)
    107 * No fences needed here for prop because relation confined to one process.
|
| /linux/tools/memory-model/Documentation/ |
| H A D | explanation.txt |
    301 fences), such as calls to smp_rmb() or rcu_read_lock().
    786 only internal operations. However, loads, stores, and fences involve
    826 about the fence. However, fences do constrain the way CPUs and the
    833 Strong fences, including smp_mb() and synchronize_rcu(), force
    843 Acquire fences, such as smp_load_acquire(), force the CPU to
    848 Release fences, such as smp_store_release(), force the CPU to
    869 The propagation ordering enforced by release fences and strong fences
    872 fence. We describe this property by saying that release fences and
    873 strong fences are A-cumulative. By contrast, smp_wmb() fences are not
    878 rcu_read_lock(), rcu_read_unlock(), and synchronize_rcu() fences have
    [all …]
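
The acquire/release hits above describe the classic message-passing pattern. A minimal litmus test in the tools/memory-model style illustrating it (the test name is ours); LKMM forbids the stale-read outcome in the exists clause because the release fence on P0 pairs with the acquire fence on P1:

C MP+rel+acq

{}

P0(int *x, int *flag)
{
	WRITE_ONCE(*x, 1);		/* data store ...           */
	smp_store_release(flag, 1);	/* ... ordered by release   */
}

P1(int *x, int *flag)
{
	int r0;
	int r1;

	r0 = smp_load_acquire(flag);	/* acquire pairs with release */
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0) (* forbidden: flag observed but data stale *)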
|