| /linux/drivers/gpu/drm/i915/ |
| H A D | i915_deps.c |
|    38  if (deps->fences != &deps->single) in i915_deps_reset_fences()
|    39  kfree(deps->fences); in i915_deps_reset_fences()
|    42  deps->fences = &deps->single; in i915_deps_reset_fences()
|    52  deps->fences = NULL; in i915_deps_init()
|    69  dma_fence_put(deps->fences[i]); in i915_deps_fini()
|    71  if (deps->fences != &deps->single) in i915_deps_fini()
|    72  kfree(deps->fences); in i915_deps_fini()
|    89  memcpy(new_fences, deps->fences, in i915_deps_grow()
|    91  swap(new_fences, deps->fences); in i915_deps_grow()
|    96  deps->fences[deps->num_deps++] = dma_fence_get(fence); in i915_deps_grow()
|   [all …]
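The hits above sketch a grow-on-demand fence array: it starts from an embedded single-slot buffer and reallocates as dependencies are added. A minimal sketch of that append path follows; the fences_size field, growth factor, and GFP flags are assumptions, not the actual i915 code.

    /* Hypothetical append helper mirroring the i915_deps_grow() pattern:
     * copy into a larger array, swap it in, free the old one unless it is
     * the embedded single-slot buffer, then take a reference on the fence. */
    static int deps_append(struct i915_deps *deps, struct dma_fence *fence)
    {
            if (deps->num_deps == deps->fences_size) {      /* assumed field */
                    unsigned int n = deps->fences_size * 2; /* assumed policy */
                    struct dma_fence **new_fences;

                    new_fences = kmalloc_array(n, sizeof(*new_fences), GFP_KERNEL);
                    if (!new_fences)
                            return -ENOMEM;

                    memcpy(new_fences, deps->fences,
                           deps->num_deps * sizeof(*new_fences));
                    swap(new_fences, deps->fences);
                    if (new_fences != &deps->single)
                            kfree(new_fences);
                    deps->fences_size = n;
            }

            deps->fences[deps->num_deps++] = dma_fence_get(fence);
            return 0;
    }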
|
| H A D | i915_deps.h | 26 struct dma_fence **fences; member
|
| /linux/drivers/dma-buf/ |
| H A D | dma-resv.c |
|   142  RCU_INIT_POINTER(obj->fences, NULL); in dma_resv_init()
|   156  dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); in dma_resv_fini()
|   164  return rcu_dereference_check(obj->fences, dma_resv_held(obj)); in dma_resv_fences_list()
|   235  rcu_assign_pointer(obj->fences, new); in dma_resv_reserve_fences()
|   265  struct dma_resv_list *fences = dma_resv_fences_list(obj); in dma_resv_reset_max_fences() local
|   270  if (fences) in dma_resv_reset_max_fences()
|   271  fences->max_fences = fences->num_fences; in dma_resv_reset_max_fences()
|   371  cursor->fences = dma_resv_fences_list(cursor->obj); in dma_resv_iter_restart_unlocked()
|   372  if (cursor->fences) in dma_resv_iter_restart_unlocked()
|   373  cursor->num_fences = cursor->fences->num_fences; in dma_resv_iter_restart_unlocked()
|   [all …]
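Taken together, these hits show dma_resv keeping its fences in an RCU-managed dma_resv_list that is resized under the reservation lock. A minimal sketch of the client-side protocol, with error handling trimmed; dma_resv_reserve_fences() is what drives the rcu_assign_pointer(obj->fences, new) path seen above:

    #include <linux/dma-resv.h>

    /* Reserve a fence slot while holding the reservation lock, then
     * publish the fence with write semantics. */
    static int attach_fence(struct dma_resv *resv, struct dma_fence *fence)
    {
            int ret;

            ret = dma_resv_lock(resv, NULL);
            if (ret)
                    return ret;

            ret = dma_resv_reserve_fences(resv, 1);
            if (!ret)
                    dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

            dma_resv_unlock(resv);
            return ret;
    }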
|
| H A D | dma-fence-array.c |
|    87  if (dma_fence_add_callback(array->fences[i], &cb[i].cb, in dma_fence_array_enable_signaling()
|    89  int error = array->fences[i]->error; in dma_fence_array_enable_signaling()
|   130  if (dma_fence_is_signaled(array->fences[i]) && !--num_pending) in dma_fence_array_signaled()
|   146  dma_fence_put(array->fences[i]); in dma_fence_array_release()
|   148  kfree(array->fences); in dma_fence_array_release()
|   159  dma_fence_set_deadline(array->fences[i], deadline); in dma_fence_array_set_deadline()
|   199  int num_fences, struct dma_fence **fences, in dma_fence_array_init() argument
|   203  WARN_ON(!num_fences || !fences); in dma_fence_array_init()
|   213  array->fences = fences; in dma_fence_array_init()
|   229  WARN_ON(dma_fence_is_container(fences[num_fences])); in dma_fence_array_init()
|   [all …]
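dma_fence_array collapses many fences into one container fence; note from the release path above that it takes ownership of both the kmalloc'ed fences[] array and the individual fence references. A sketch of typical construction (the helper name is illustrative):

    #include <linux/dma-fence-array.h>

    /* Merge num_fences fences into one fence that signals when all of
     * them have signaled; on failure the caller still owns everything. */
    static struct dma_fence *merge_fences(struct dma_fence **fences,
                                          int num_fences)
    {
            struct dma_fence_array *array;

            array = dma_fence_array_create(num_fences, fences,
                                           dma_fence_context_alloc(1), 1,
                                           false /* signal on all, not any */);
            return array ? &array->base : NULL;
    }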
|
| H A D | st-dma-fence-chain.c |
|   102  struct dma_fence **fences; member
|   124  fc->fences = kvmalloc_array(count, sizeof(*fc->fences), in fence_chains_init()
|   126  if (!fc->fences) { in fence_chains_init()
|   133  fc->fences[i] = mock_fence(); in fence_chains_init()
|   134  if (!fc->fences[i]) { in fence_chains_init()
|   140  fc->fences[i], in fence_chains_init()
|   157  dma_fence_put(fc->fences[i]); in fence_chains_init()
|   160  kvfree(fc->fences); in fence_chains_init()
|   171  dma_fence_signal(fc->fences[i]); in fence_chains_fini()
|   172  dma_fence_put(fc->fences[i]); in fence_chains_fini()
|   [all …]
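This selftest drives dma-fence-chain, the ordered sibling of dma-fence-array. A sketch of appending one link, assuming the caller manages seqno ordering; dma_fence_chain_init() consumes the references to both prev and fence:

    #include <linux/dma-fence-chain.h>

    /* Append 'fence' to the chain headed by 'prev' at position 'seqno';
     * returns the new chain head, or NULL on allocation failure. */
    static struct dma_fence *chain_append(struct dma_fence *prev,
                                          struct dma_fence *fence, u64 seqno)
    {
            struct dma_fence_chain *chain = dma_fence_chain_alloc();

            if (!chain)
                    return NULL;

            dma_fence_chain_init(chain, prev, fence, seqno);
            return &chain->base;
    }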
|
| H A D | st-dma-resv.c |
|   228  cursor.fences = (void*)~0; in test_for_each_unlocked()
|   247  struct dma_fence *f, **fences = NULL; in test_get_fences() local
|   274  r = dma_resv_get_fences(&resv, usage, &i, &fences); in test_get_fences()
|   280  if (i != 1 || fences[0] != f) { in test_get_fences()
|   288  dma_fence_put(fences[i]); in test_get_fences()
|   289  kfree(fences); in test_get_fences()
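The test exercises the dma_resv_get_fences() snapshot contract visible in the hits: the function allocates the array and takes a reference on each entry, so the caller must drop every reference and kfree the array. A minimal sketch:

    /* Snapshot the fences on a reservation object, report how many there
     * were, and release the snapshot again. */
    static int count_resv_fences(struct dma_resv *resv, unsigned int *count)
    {
            struct dma_fence **snapshot;
            unsigned int i;
            int ret;

            ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_READ,
                                      count, &snapshot);
            if (ret)
                    return ret;

            for (i = 0; i < *count; i++)
                    dma_fence_put(snapshot[i]);
            kfree(snapshot);
            return 0;
    }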
|
| H A D | dma-fence.c |
|   826  dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count, in dma_fence_test_signaled_any() argument
|   832  struct dma_fence *fence = fences[i]; in dma_fence_test_signaled_any()
|   863  dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count, in dma_fence_wait_any_timeout() argument
|   870  if (WARN_ON(!fences || !count || timeout < 0)) in dma_fence_wait_any_timeout()
|   875  if (dma_fence_is_signaled(fences[i])) { in dma_fence_wait_any_timeout()
|   891  struct dma_fence *fence = fences[i]; in dma_fence_wait_any_timeout()
|   909  if (dma_fence_test_signaled_any(fences, count, idx)) in dma_fence_wait_any_timeout()
|   922  dma_fence_remove_callback(fences[i], &cb[i].base); in dma_fence_wait_any_timeout()
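dma_fence_wait_any_timeout() returns the remaining timeout in jiffies, 0 on timeout, or a negative error, and reports which fence signaled first through 'idx'. A small hedged wrapper (the 100 ms budget is arbitrary):

    #include <linux/dma-fence.h>
    #include <linux/jiffies.h>

    /* Wait interruptibly for the first of 'count' fences to signal. */
    static long wait_first(struct dma_fence **fences, uint32_t count,
                           uint32_t *idx)
    {
            long t = dma_fence_wait_any_timeout(fences, count,
                                                true /* interruptible */,
                                                msecs_to_jiffies(100), idx);
            if (t < 0)
                    return t;               /* e.g. -ERESTARTSYS */
            return t ? 0 : -ETIMEDOUT;      /* 0 jiffies left == timed out */
    }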
|
| H A D | st-dma-fence.c |
|   446  struct dma_fence __rcu **fences; member
|   477  rcu_assign_pointer(t->fences[t->id], f1); in thread_signal_callback()
|   482  f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); in thread_signal_callback()
|   514  rcu_assign_pointer(t->fences[t->id], NULL); in thread_signal_callback()
|   538  t[i].fences = f; in race_signal_callback()
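The race being tested is the publish/consume pattern for an __rcu fence pointer; dma_fence_get_rcu_safe() retries until it holds a reference on a stable snapshot, even while the pointer is being swapped underneath it. A minimal sketch:

    #include <linux/dma-fence.h>
    #include <linux/rcupdate.h>

    static struct dma_fence __rcu *slot; /* illustrative shared pointer */

    static void publish(struct dma_fence *f)
    {
            rcu_assign_pointer(slot, f);
    }

    static struct dma_fence *consume(void)
    {
            struct dma_fence *f;

            rcu_read_lock();
            f = dma_fence_get_rcu_safe(&slot); /* NULL or a held reference */
            rcu_read_unlock();
            return f;
    }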
|
| /linux/drivers/gpu/host1x/ |
| H A D | intr.c |
|    35  if (!list_empty(&sp->fences.list)) { in host1x_intr_update_hw_state()
|    36  fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list); in host1x_intr_update_hw_state()
|    47  struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_add_fence_locked()
|    57  struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_remove_fence()
|    83  spin_lock(&sp->fences.lock); in host1x_intr_handle_interrupt()
|    85  list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) { in host1x_intr_handle_interrupt()
|    98  spin_unlock(&sp->fences.lock); in host1x_intr_handle_interrupt()
|   110  spin_lock_init(&syncpt->fences.lock); in host1x_intr_init()
|   111  INIT_LIST_HEAD(&syncpt->fences.list); in host1x_intr_init()
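host1x keeps pending syncpoint fences on a per-syncpoint list so the interrupt handler can stop at the first unexpired entry. A simplified sketch of that walk; the 'threshold' field, the sorted-list invariant, and the wrap-safe comparison are assumptions, not the literal host1x code:

    /* Signal every fence whose threshold the syncpoint value has passed;
     * the list is assumed sorted by threshold, so stop at the first miss. */
    static void signal_expired(struct host1x_syncpt *sp, u32 value)
    {
            struct host1x_syncpt_fence *fence, *tmp;

            spin_lock(&sp->fences.lock);
            list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
                    if ((s32)(value - fence->threshold) < 0) /* assumed test */
                            break;
                    list_del_init(&fence->list);
                    dma_fence_signal(&fence->base);
            }
            spin_unlock(&sp->fences.lock);
    }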
|
| H A D | syncpt.h | 43 struct host1x_fence_list fences; member
|
| H A D | fence.c | 138 dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock, in host1x_fence_create()
|
| /linux/Documentation/driver-api/ |
| H A D | sync_file.rst |
|     9  the fences(struct dma_fence) that are needed to synchronize between drivers or
|    29  in-fences and out-fences
|    33  the driver to userspace we call the fences it contains 'out-fences'. They are
|    37  Out-fences are fences that the driver creates.
|    40  userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
|    42  the in-fences.
|    72  of the Sync File to the kernel. The kernel can then retrieve the fences
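On the out-fence side this boils down to wrapping a driver fence in a sync_file and installing an fd; the in-fence direction is the reverse, retrieving the fence from a userspace fd with sync_file_get_fence(fd). A sketch of the export path, with error unwinding trimmed:

    #include <linux/sync_file.h>
    #include <linux/file.h>

    /* Wrap 'fence' in a sync_file and return an fd userspace can pass
     * around (and later hand back to a driver as an in-fence). */
    static int export_out_fence(struct dma_fence *fence)
    {
            struct sync_file *sync_file = sync_file_create(fence);
            int fd;

            if (!sync_file)
                    return -ENOMEM;

            fd = get_unused_fd_flags(O_CLOEXEC);
            if (fd < 0) {
                    fput(sync_file->file);
                    return fd;
            }

            fd_install(fd, sync_file->file);
            return fd;
    }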
|
| H A D | dma-buf.rst |
|    21  - dma-resv, which manages a set of dma-fences for a particular dma-buf
|   169  :doc: DMA fences overview
|   243  * Future fences, used in HWC1 to signal when a buffer isn't used by the display
|   247  * Proxy fences, proposed to handle &drm_syncobj for which the fence has not yet
|   250  * Userspace fences or gpu futexes, fine-grained locking within a command buffer
|   256  batch DMA fences for memory management instead of context preemption DMA
|   257  fences which get reattached when the compute job is rescheduled.
|   260  fences and controls when they fire. Mixing indefinite fences with normal
|   261  in-kernel DMA fences does not work, even when a fallback timeout is included to
|   267  * Only userspace knows about all dependencies in indefinite fences and when
|   [all …]
|
| /linux/drivers/gpu/drm/i915/selftests/ |
| H A D | i915_sw_fence.c |
|   453  struct i915_sw_fence **fences; in test_chain() local
|   457  fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL); in test_chain()
|   458  if (!fences) in test_chain()
|   462  fences[i] = alloc_fence(); in test_chain()
|   463  if (!fences[i]) { in test_chain()
|   470  ret = i915_sw_fence_await_sw_fence_gfp(fences[i], in test_chain()
|   471  fences[i - 1], in test_chain()
|   478  i915_sw_fence_commit(fences[i]); in test_chain()
|   484  if (i915_sw_fence_done(fences[i])) { in test_chain()
|   490  i915_sw_fence_commit(fences[0]); in test_chain()
|   [all …]
|
| /linux/Documentation/gpu/ |
| H A D | drm-vm-bind-async.rst |
|    20  synchronization objects can be either generic, like dma-fences or
|    31  understanding of dma-fences is required to digest this
|    38  the GPU and CPU. Memory fences are sometimes referred to as
|    39  user-fences, userspace-fences or gpu futexes and do not necessarily obey
|    41  The kernel should thus avoid waiting for memory fences with locks held.
|    46  a certain mode that disallows completion dma-fences.
|    72  IOCTL returns. A synchronous VM_BIND takes neither in-fences nor
|    73  out-fences. Synchronous VM_BIND may block and wait for GPU operations;
|    94  Since asynchronous VM_BIND operations may use dma-fences embedded in
|    96  memory fences given as VM_BIND in-fences need to be awaited
|   [all …]
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| H A D | amdgpu_fence.c |
|   123  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
|   236  ptr = &drv->fences[last_seq]; in amdgpu_fence_process()
|   294  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
|   371  fence = drv->fences[last_seq]; in amdgpu_fence_last_unsignaled_time_us()
|   395  fence = drv->fences[seq]; in amdgpu_fence_update_start_timestamp()
|   471  ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *), in amdgpu_fence_driver_init_ring()
|   474  if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring()
|   608  dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_sw_fini()
|   609  kfree(ring->fence_drv.fences); in amdgpu_fence_driver_sw_fini()
|   610  ring->fence_drv.fences = NULL; in amdgpu_fence_driver_sw_fini()
|   [all …]
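The recurring `seq & ring->fence_drv.num_fences_mask` expression is power-of-two ring indexing: the slot array holds num_hw_submission * 2 entries, so the mask is the size minus one and maps a monotonically increasing sequence number onto a reusable slot. A one-function sketch, assuming that mask invariant:

    /* Sketch: map a monotonic fence seqno onto its reusable slot, assuming
     * num_fences_mask == (num_hw_submission * 2) - 1 per the kcalloc above. */
    static struct dma_fence **slot_for_seq(struct amdgpu_fence_driver *drv,
                                           u32 seq)
    {
            return &drv->fences[seq & drv->num_fences_mask];
    }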
|
| H A D | amdgpu_ctx.c |
|   198  res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i])); in amdgpu_ctx_entity_time()
|   215  entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), in amdgpu_ctx_init_entity()
|   282  res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i])); in amdgpu_ctx_fini_entity()
|   283  dma_fence_put(entity->fences[i]); in amdgpu_ctx_fini_entity()
|   767  other = centity->fences[idx]; in amdgpu_ctx_add_fence()
|   773  centity->fences[idx] = fence; in amdgpu_ctx_add_fence()
|   807  fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); in amdgpu_ctx_get_fence()
|   869  other = dma_fence_get(centity->fences[idx]); in amdgpu_ctx_wait_prev_fence()
|
| H A D | amdgpu_cs.c |
|   1636  struct drm_amdgpu_fence *fences) in amdgpu_cs_wait_all_fences() argument
|   1646  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); in amdgpu_cs_wait_all_fences()
|   1681  struct drm_amdgpu_fence *fences) in amdgpu_cs_wait_any_fence() argument
|   1699  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]); in amdgpu_cs_wait_any_fence()
|   1747  struct drm_amdgpu_fence *fences; in amdgpu_cs_wait_fences_ioctl() local
|   1751  fences = memdup_array_user(u64_to_user_ptr(wait->in.fences), in amdgpu_cs_wait_fences_ioctl()
|   1754  if (IS_ERR(fences)) in amdgpu_cs_wait_fences_ioctl()
|   1755  return PTR_ERR(fences); in amdgpu_cs_wait_fences_ioctl()
|   1758  r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); in amdgpu_cs_wait_fences_ioctl()
|   1760  r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); in amdgpu_cs_wait_fences_ioctl()
|   [all …]
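The ioctl path above shows the memdup_array_user() idiom: copy the user's descriptor array in a single overflow-checked allocation, dispatch on the wait mode, then free the copy. A condensed sketch of that shape; the uapi field names are taken from the hits and should be treated as assumptions here:

    /* Copy the user's fence descriptors, wait on all or any of them,
     * and free the kernel copy again. */
    static int wait_fences(struct amdgpu_device *adev, struct drm_file *filp,
                           union drm_amdgpu_wait_fences *wait, bool wait_all)
    {
            struct drm_amdgpu_fence *fences;
            int r;

            fences = memdup_array_user(u64_to_user_ptr(wait->in.fences),
                                       wait->in.fence_count, sizeof(*fences));
            if (IS_ERR(fences))
                    return PTR_ERR(fences);

            r = wait_all ? amdgpu_cs_wait_all_fences(adev, filp, wait, fences)
                         : amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

            kfree(fences);
            return r;
    }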
|
| H A D | amdgpu_ctx.h | 42 struct dma_fence *fences[]; member
|
| H A D | amdgpu_jpeg.c |
|   113  unsigned int fences = 0; in amdgpu_jpeg_idle_work_handler() local
|   121  fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec[j]); in amdgpu_jpeg_idle_work_handler()
|   124  if (!fences && !atomic_read(&adev->jpeg.total_submission_cnt)) { in amdgpu_jpeg_idle_work_handler()
|
| H A D | amdgpu_vpe.c |
|   349  unsigned int fences = 0; in vpe_idle_work_handler() local
|   351  fences += amdgpu_fence_count_emitted(&adev->vpe.ring); in vpe_idle_work_handler()
|   352  if (fences) in vpe_idle_work_handler()
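Both the jpeg and vpe handlers follow the same idle-work pattern: count the fences still outstanding on the block's rings, then either re-arm the delayed work or power the block down. A hedged sketch; the idle_work field, VPE_IDLE_TIMEOUT, and vpe_power_off() are assumed names, not confirmed by the hits:

    /* Delayed-work handler: stay powered while fences are outstanding. */
    static void idle_work_handler(struct work_struct *work)
    {
            struct amdgpu_device *adev =
                    container_of(work, struct amdgpu_device,
                                 vpe.idle_work.work); /* assumed field */
            unsigned int fences = amdgpu_fence_count_emitted(&adev->vpe.ring);

            if (fences)
                    schedule_delayed_work(&adev->vpe.idle_work,
                                          VPE_IDLE_TIMEOUT); /* assumed */
            else
                    vpe_power_off(adev); /* hypothetical helper */
    }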
|
| /linux/include/linux/ |
| H A D | dma-fence-array.h |
|    44  struct dma_fence **fences; member
|    84  int num_fences, struct dma_fence **fences,
|    89  struct dma_fence **fences,
|
| /linux/drivers/gpu/drm/radeon/ |
| H A D | radeon_trace.h |
|    36  __field(u32, fences)
|    42  __entry->fences = radeon_fence_count_emitted(
|    47  __entry->fences)
|
| /linux/tools/memory-model/ |
| H A D | linux-kernel.cat |
|    55  * A-cumulative release fences of lock-release ensure that any stores that
|    97  (* Propagation: Ordering from release operations and strong fences. *)
|   107  * No fences needed here for prop because relation confined to one process.
|
| /linux/tools/memory-model/Documentation/ |
| H A D | explanation.txt |
|   301  fences), such as calls to smp_rmb() or rcu_read_lock().
|   786  only internal operations. However, loads, stores, and fences involve
|   826  about the fence. However, fences do constrain the way CPUs and the
|   833  Strong fences, including smp_mb() and synchronize_rcu(), force
|   843  Acquire fences, such as smp_load_acquire(), force the CPU to
|   848  Release fences, such as smp_store_release(), force the CPU to
|   869  The propagation ordering enforced by release fences and strong fences
|   872  fence. We describe this property by saying that release fences and
|   873  strong fences are A-cumulative. By contrast, smp_wmb() fences are not
|   878  rcu_read_lock(), rcu_read_unlock(), and synchronize_rcu() fences have
|   [all …]
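The acquire/release vocabulary in explanation.txt maps directly onto the message-passing litmus pattern: a release store cannot be reordered before earlier accesses, and an acquire load cannot be reordered after later ones. A minimal two-CPU sketch:

    /* If cpu1's load-acquire of 'flag' observes 1, it is guaranteed to
     * observe cpu0's earlier store of data == 42. */
    int data;
    int flag;

    void cpu0(void)
    {
            WRITE_ONCE(data, 42);
            smp_store_release(&flag, 1); /* orders the store above before it */
    }

    void cpu1(void)
    {
            if (smp_load_acquire(&flag)) /* orders the load below after it */
                    BUG_ON(READ_ONCE(data) != 42);
    }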
|