
Searched refs:fences (Results 1 – 25 of 55) sorted by relevance


/linux/drivers/gpu/drm/i915/
i915_deps.c
38 if (deps->fences != &deps->single) in i915_deps_reset_fences()
39 kfree(deps->fences); in i915_deps_reset_fences()
42 deps->fences = &deps->single; in i915_deps_reset_fences()
52 deps->fences = NULL; in i915_deps_init()
69 dma_fence_put(deps->fences[i]); in i915_deps_fini()
71 if (deps->fences != &deps->single) in i915_deps_fini()
72 kfree(deps->fences); in i915_deps_fini()
89 memcpy(new_fences, deps->fences, in i915_deps_grow()
91 swap(new_fences, deps->fences); in i915_deps_grow()
96 deps->fences[deps->num_deps++] = dma_fence_get(fence); in i915_deps_grow()
[all …]
i915_deps.h
26 struct dma_fence **fences; member
/linux/drivers/dma-buf/
dma-resv.c
142 RCU_INIT_POINTER(obj->fences, NULL); in dma_resv_init()
156 dma_resv_list_free(rcu_dereference_protected(obj->fences, true)); in dma_resv_fini()
164 return rcu_dereference_check(obj->fences, dma_resv_held(obj)); in dma_resv_fences_list()
235 rcu_assign_pointer(obj->fences, new); in dma_resv_reserve_fences()
265 struct dma_resv_list *fences = dma_resv_fences_list(obj); in dma_resv_reset_max_fences() local
270 if (fences) in dma_resv_reset_max_fences()
271 fences->max_fences = fences->num_fences; in dma_resv_reset_max_fences()
370 cursor->fences = dma_resv_fences_list(cursor->obj); in dma_resv_iter_restart_unlocked()
371 if (cursor->fences) in dma_resv_iter_restart_unlocked()
372 cursor->num_fences = cursor->fences->num_fences; in dma_resv_iter_restart_unlocked()
[all …]
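
The dma-resv hits above trace the lifecycle of the shared fence list: RCU-published at init, grown with dma_resv_reserve_fences(), and walked with the cursor iterator. A minimal sketch of how a driver typically publishes a fence through this API (the helper name and calling context are hypothetical, not from the results above):

    #include <linux/dma-fence.h>
    #include <linux/dma-resv.h>

    /* Attach a write fence to a buffer's reservation object. */
    static int attach_write_fence(struct dma_resv *resv, struct dma_fence *fence)
    {
    	int ret;

    	ret = dma_resv_lock(resv, NULL);	/* takes the ww_mutex */
    	if (ret)
    		return ret;

    	/* make room for one more slot before adding, as the hits above do */
    	ret = dma_resv_reserve_fences(resv, 1);
    	if (!ret)
    		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

    	dma_resv_unlock(resv);
    	return ret;
    }
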
st-dma-fence-chain.c
102 struct dma_fence **fences; member
124 fc->fences = kvmalloc_array(count, sizeof(*fc->fences), in fence_chains_init()
126 if (!fc->fences) { in fence_chains_init()
133 fc->fences[i] = mock_fence(); in fence_chains_init()
134 if (!fc->fences[i]) { in fence_chains_init()
140 fc->fences[i], in fence_chains_init()
157 dma_fence_put(fc->fences[i]); in fence_chains_init()
160 kvfree(fc->fences); in fence_chains_init()
171 dma_fence_signal(fc->fences[i]); in fence_chains_fini()
172 dma_fence_put(fc->fences[i]); in fence_chains_fini()
[all …]
st-dma-fence-unwrap.c
49 struct dma_fence **fences; in mock_array() local
53 fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL); in mock_array()
54 if (!fences) in mock_array()
59 fences[i] = va_arg(valist, typeof(*fences)); in mock_array()
62 array = dma_fence_array_create(num_fences, fences, in mock_array()
70 kfree(fences); in mock_array()
75 dma_fence_put(va_arg(valist, typeof(*fences))); in mock_array()
st-dma-resv.c
228 cursor.fences = (void*)~0; in test_for_each_unlocked()
247 struct dma_fence *f, **fences = NULL; in test_get_fences() local
274 r = dma_resv_get_fences(&resv, usage, &i, &fences); in test_get_fences()
280 if (i != 1 || fences[0] != f) { in test_get_fences()
288 dma_fence_put(fences[i]); in test_get_fences()
289 kfree(fences); in test_get_fences()
st-dma-fence.c
446 struct dma_fence __rcu **fences; member
477 rcu_assign_pointer(t->fences[t->id], f1); in thread_signal_callback()
482 f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]); in thread_signal_callback()
514 rcu_assign_pointer(t->fences[t->id], NULL); in thread_signal_callback()
538 t[i].fences = f; in race_signal_callback()
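
The st-dma-fence.c hits exercise the lockless lookup pattern: one thread publishes a fence with rcu_assign_pointer() while another takes a reference with dma_fence_get_rcu_safe(), which retries internally if the pointer is swapped or the fence is released mid-read. A sketch of the reader side, with 'slot' standing in for the test's shared array entry:

    #include <linux/dma-fence.h>
    #include <linux/rcupdate.h>

    static struct dma_fence __rcu *slot;

    static struct dma_fence *peek_fence(void)
    {
    	/* safe against concurrent rcu_assign_pointer(slot, ...) writers */
    	return dma_fence_get_rcu_safe(&slot);
    }
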
/linux/drivers/gpu/host1x/
intr.c
35 if (!list_empty(&sp->fences.list)) { in host1x_intr_update_hw_state()
36 fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list); in host1x_intr_update_hw_state()
47 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_add_fence_locked()
57 struct host1x_fence_list *fence_list = &fence->sp->fences; in host1x_intr_remove_fence()
83 spin_lock(&sp->fences.lock); in host1x_intr_handle_interrupt()
85 list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) { in host1x_intr_handle_interrupt()
98 spin_unlock(&sp->fences.lock); in host1x_intr_handle_interrupt()
112 spin_lock_init(&syncpt->fences.lock); in host1x_intr_init()
113 INIT_LIST_HEAD(&syncpt->fences.list); in host1x_intr_init()
debug.c
96 spin_lock_irqsave(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
97 list_for_each(pos, &m->syncpt[i].fences.list) in show_syncpts()
99 spin_unlock_irqrestore(&m->syncpt[i].fences.lock, irqflags); in show_syncpts()
/linux/Documentation/driver-api/
sync_file.rst
9 the fences(struct dma_fence) that are needed to synchronize between drivers or
29 in-fences and out-fences
33 the driver to userspace we call the fences it contains 'out-fences'. They are
37 Out-fences are fences that the driver creates.
40 userspace we call these fence(s) 'in-fences'. Receiving in-fences means that
42 the in-fences.
72 of the Sync File to the kernel. The kernel can then retrieve the fences
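
To make the in-fence/out-fence flow concrete, here is a hedged kernel-side sketch using the sync_file API referenced above; the helper names are illustrative and error unwinding is abbreviated:

    #include <linux/dma-fence.h>
    #include <linux/fcntl.h>
    #include <linux/file.h>
    #include <linux/sync_file.h>

    /* out-fence: wrap a driver-created fence and hand an fd to userspace */
    static int export_out_fence(struct dma_fence *fence)
    {
    	struct sync_file *sync_file = sync_file_create(fence);
    	int fd;

    	if (!sync_file)
    		return -ENOMEM;

    	fd = get_unused_fd_flags(O_CLOEXEC);
    	if (fd < 0) {
    		fput(sync_file->file);
    		return fd;
    	}
    	fd_install(fd, sync_file->file);
    	return fd;		/* userspace waits on or passes along this fd */
    }

    /* in-fence: resolve an fd received from userspace back into a fence */
    static struct dma_fence *import_in_fence(int fd)
    {
    	return sync_file_get_fence(fd);	/* NULL if fd is not a sync_file */
    }
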
dma-buf.rst
21 - dma-resv, which manages a set of dma-fences for a particular dma-buf
169 :doc: DMA fences overview
243 * Future fences, used in HWC1 to signal when a buffer isn't used by the display
247 * Proxy fences, proposed to handle &drm_syncobj for which the fence has not yet
250 * Userspace fences or gpu futexes, fine-grained locking within a command buffer
256 batch DMA fences for memory management instead of context preemption DMA
257 fences which get reattached when the compute job is rescheduled.
260 fences and controls when they fire. Mixing indefinite fences with normal
261 in-kernel DMA fences does not work, even when a fallback timeout is included to
267 * Only userspace knows about all dependencies in indefinite fences and when
[all …]
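
For contrast with the indefinite fences discussed above, a normal in-kernel DMA fence commits, once published, to signaling in bounded time. A bare-bones illustrative fence (the names here are placeholders, not from the results):

    #include <linux/dma-fence.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static const char *demo_driver_name(struct dma_fence *f) { return "demo"; }
    static const char *demo_timeline_name(struct dma_fence *f) { return "demo-tl"; }

    static const struct dma_fence_ops demo_fence_ops = {
    	.get_driver_name   = demo_driver_name,
    	.get_timeline_name = demo_timeline_name,
    };

    static DEFINE_SPINLOCK(demo_fence_lock);

    static struct dma_fence *demo_fence_emit(u64 context, u64 seqno)
    {
    	struct dma_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

    	if (!f)
    		return NULL;
    	dma_fence_init(f, &demo_fence_ops, &demo_fence_lock, context, seqno);
    	/* the driver must guarantee a later dma_fence_signal(f) in bounded time */
    	return f;
    }
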
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_sync.c
54 hash_init(sync->fences); in amdgpu_sync_create()
137 hash_for_each_possible(sync->fences, e, node, f->context) { in amdgpu_sync_add_later()
169 hash_add(sync->fences, &e->node, f->context); in amdgpu_sync_fence()
317 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_peek_fence()
357 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_get_fence()
388 hash_for_each_safe(source->fences, i, tmp, e, node) { in amdgpu_sync_clone()
416 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_push_to_job()
439 hash_for_each_safe(sync->fences, i, tmp, e, node) { in amdgpu_sync_wait()
463 hash_for_each_safe(sync->fences, i, tmp, e, node) in amdgpu_sync_free()
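
amdgpu_sync keeps at most one fence per dma_fence context, replacing an entry only when a later fence from the same context arrives. An illustrative (non-amdgpu) version of that dedup pattern with a kernel hashtable:

    #include <linux/dma-fence.h>
    #include <linux/hashtable.h>
    #include <linux/slab.h>

    struct sync_entry {
    	struct hlist_node node;
    	struct dma_fence *fence;
    };

    static DEFINE_HASHTABLE(sync_hash, 4);

    static int sync_add(struct dma_fence *f)
    {
    	struct sync_entry *e;

    	/* one slot per fence context: keep only the later fence */
    	hash_for_each_possible(sync_hash, e, node, f->context) {
    		if (e->fence->context != f->context)
    			continue;
    		if (dma_fence_is_later(f, e->fence)) {
    			dma_fence_put(e->fence);
    			e->fence = dma_fence_get(f);
    		}
    		return 0;
    	}

    	e = kmalloc(sizeof(*e), GFP_KERNEL);
    	if (!e)
    		return -ENOMEM;
    	e->fence = dma_fence_get(f);
    	hash_add(sync_hash, &e->node, f->context);
    	return 0;
    }
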
amdgpu_ids.c
204 struct dma_fence **fences; in amdgpu_vmid_grab_idle() local
212 fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL); in amdgpu_vmid_grab_idle()
213 if (!fences) in amdgpu_vmid_grab_idle()
223 fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r); in amdgpu_vmid_grab_idle()
224 if (!fences[i]) in amdgpu_vmid_grab_idle()
238 dma_fence_get(fences[j]); in amdgpu_vmid_grab_idle()
240 array = dma_fence_array_create(i, fences, fence_context, in amdgpu_vmid_grab_idle()
244 dma_fence_put(fences[j]); in amdgpu_vmid_grab_idle()
245 kfree(fences); in amdgpu_vmid_grab_idle()
254 kfree(fences); in amdgpu_vmid_grab_idle()
amdgpu_fence.c
184 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_emit()
298 ptr = &drv->fences[last_seq]; in amdgpu_fence_process()
349 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; in amdgpu_fence_wait_empty()
426 fence = drv->fences[last_seq]; in amdgpu_fence_last_unsignaled_time_us()
450 fence = drv->fences[seq]; in amdgpu_fence_update_start_timestamp()
526 ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *), in amdgpu_fence_driver_init_ring()
529 if (!ring->fence_drv.fences) in amdgpu_fence_driver_init_ring()
663 dma_fence_put(ring->fence_drv.fences[j]); in amdgpu_fence_driver_sw_fini()
664 kfree(ring->fence_drv.fences); in amdgpu_fence_driver_sw_fini()
665 ring->fence_drv.fences = NULL; in amdgpu_fence_driver_sw_fini()
[all …]
amdgpu_ctx.c
198 res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i])); in amdgpu_ctx_entity_time()
215 entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs), in amdgpu_ctx_init_entity()
282 res = ktime_add(res, amdgpu_ctx_fence_time(entity->fences[i])); in amdgpu_ctx_fini_entity()
283 dma_fence_put(entity->fences[i]); in amdgpu_ctx_fini_entity()
767 other = centity->fences[idx]; in amdgpu_ctx_add_fence()
773 centity->fences[idx] = fence; in amdgpu_ctx_add_fence()
807 fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]); in amdgpu_ctx_get_fence()
869 other = dma_fence_get(centity->fences[idx]); in amdgpu_ctx_wait_prev_fence()
amdgpu_debugfs.c
1815 struct dma_fence **fences) in amdgpu_ib_preempt_fences_swap() argument
1831 ptr = &drv->fences[last_seq]; in amdgpu_ib_preempt_fences_swap()
1839 fences[last_seq] = fence; in amdgpu_ib_preempt_fences_swap()
1844 static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences, in amdgpu_ib_preempt_signal_fences() argument
1851 fence = fences[i]; in amdgpu_ib_preempt_signal_fences()
1892 ptr = &drv->fences[preempt_seq]; in amdgpu_ib_preempt_mark_partial_job()
1916 struct dma_fence **fences = NULL; in amdgpu_debugfs_ib_preempt() local
1933 fences = kcalloc(length, sizeof(void *), GFP_KERNEL); in amdgpu_debugfs_ib_preempt()
1934 if (!fences) in amdgpu_debugfs_ib_preempt()
1961 amdgpu_ib_preempt_fences_swap(ring, fences); in amdgpu_debugfs_ib_preempt()
[all …]
/linux/drivers/gpu/drm/i915/selftests/
i915_sw_fence.c
453 struct i915_sw_fence **fences; in test_chain() local
457 fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL); in test_chain()
458 if (!fences) in test_chain()
462 fences[i] = alloc_fence(); in test_chain()
463 if (!fences[i]) { in test_chain()
470 ret = i915_sw_fence_await_sw_fence_gfp(fences[i], in test_chain()
471 fences[i - 1], in test_chain()
478 i915_sw_fence_commit(fences[i]); in test_chain()
484 if (i915_sw_fence_done(fences[i])) { in test_chain()
490 i915_sw_fence_commit(fences[0]); in test_chain()
[all …]
/linux/drivers/gpu/drm/
drm_suballoc.c
225 struct dma_fence **fences, in drm_suballoc_next_hole() argument
248 fences[i] = NULL; in drm_suballoc_next_hole()
257 fences[i] = sa->fence; in drm_suballoc_next_hole()
316 struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES]; in drm_suballoc_new() local
353 } while (drm_suballoc_next_hole(sa_manager, fences, tries)); in drm_suballoc_new()
356 if (fences[i]) in drm_suballoc_new()
357 fences[count++] = dma_fence_get(fences[i]); in drm_suballoc_new()
363 t = dma_fence_wait_any_timeout(fences, count, intr, in drm_suballoc_new()
367 dma_fence_put(fences[i]); in drm_suballoc_new()
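
drm_suballoc_new() gathers one fence per queue and then blocks in dma_fence_wait_any_timeout(), which returns as soon as the first fence signals. A small sketch of that wait-for-any call (the helper name is illustrative):

    #include <linux/dma-fence.h>
    #include <linux/jiffies.h>

    static long wait_for_first(struct dma_fence **fences, u32 count)
    {
    	u32 idx;
    	signed long t;

    	/* interruptible, 1s timeout; idx reports which fence signaled */
    	t = dma_fence_wait_any_timeout(fences, count, true,
    				       msecs_to_jiffies(1000), &idx);
    	if (t < 0)
    		return t;		/* e.g. -ERESTARTSYS */
    	if (t == 0)
    		return -ETIMEDOUT;
    	return idx;
    }
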
/linux/drivers/gpu/drm/xe/
xe_sync.c
283 struct dma_fence **fences = NULL; in xe_sync_in_fence_get() local
305 fences = kmalloc_array(num_in_fence + 1, sizeof(*fences), GFP_KERNEL); in xe_sync_in_fence_get()
306 if (!fences) in xe_sync_in_fence_get()
311 fences[current_fence++] = sync[i].fence; in xe_sync_in_fence_get()
314 fences[current_fence++] = xe_exec_queue_last_fence_get(q, vm); in xe_sync_in_fence_get()
315 cf = dma_fence_array_create(num_in_fence, fences, in xe_sync_in_fence_get()
328 dma_fence_put(fences[--current_fence]); in xe_sync_in_fence_get()
329 kfree(fences); in xe_sync_in_fence_get()
/linux/Documentation/gpu/
drm-vm-bind-async.rst
20 synchronization objects can be either generic, like dma-fences or
31 understanding of dma-fences is required to digest this
38 the GPU and CPU. Memory fences are sometimes referred to as
39 user-fences, userspace-fences or gpu futexes and do not necessarily obey
41 The kernel should thus avoid waiting for memory fences with locks held.
46 a certain mode that disallows completion dma-fences.
72 IOCTL returns. A synchronous VM_BIND takes neither in-fences nor
73 out-fences. Synchronous VM_BIND may block and wait for GPU operations;
94 Since asynchronous VM_BIND operations may use dma-fences embedded in
96 memory fences given as VM_BIND in-fences need to be awaited
[all …]
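
On the userspace side, an asynchronous VM_BIND's out-fence is typically delivered through a drm_syncobj. A hedged libdrm sketch of waiting for the bind to complete before using the mapping (the handle and fd come from the caller; this is not tied to any one driver's uAPI):

    #include <stdint.h>
    #include <xf86drm.h>

    int wait_vm_bind(int drm_fd, uint32_t out_syncobj, int64_t abs_timeout_ns)
    {
    	/*
    	 * Blocks until the syncobj's fence signals; the timeout is an
    	 * absolute CLOCK_MONOTONIC timestamp in nanoseconds.
    	 */
    	return drmSyncobjWait(drm_fd, &out_syncobj, 1, abs_timeout_ns,
    			      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT, NULL);
    }
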
/linux/include/linux/
dma-fence-array.h
44 struct dma_fence **fences; member
84 int num_fences, struct dma_fence **fences,
89 struct dma_fence **fences,
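
dma_fence_array_create(), declared above, merges several fences into one aggregate fence. It takes ownership of both the kmalloc'd pointer array and the fence references, which shapes the error handling; a sketch assuming a hypothetical merge_two() helper:

    #include <linux/dma-fence-array.h>
    #include <linux/slab.h>

    static struct dma_fence *merge_two(struct dma_fence *a, struct dma_fence *b)
    {
    	struct dma_fence **fences;
    	struct dma_fence_array *array;

    	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
    	if (!fences)
    		return NULL;

    	fences[0] = dma_fence_get(a);
    	fences[1] = dma_fence_get(b);

    	array = dma_fence_array_create(2, fences,
    				       dma_fence_context_alloc(1), 1,
    				       false /* signal when all signal */);
    	if (!array) {
    		dma_fence_put(fences[0]);
    		dma_fence_put(fences[1]);
    		kfree(fences);
    		return NULL;
    	}
    	return &array->base;
    }
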
dma-fence-unwrap.h
52 struct dma_fence **fences,
/linux/drivers/gpu/drm/i915/gem/
i915_gem_execbuffer.c
313 struct eb_fence *fences; member
2789 __free_fence_array(struct eb_fence *fences, unsigned int n) in __free_fence_array() argument
2792 drm_syncobj_put(ptr_mask_bits(fences[n].syncobj, 2)); in __free_fence_array()
2793 dma_fence_put(fences[n].dma_fence); in __free_fence_array()
2794 dma_fence_chain_free(fences[n].chain_fence); in __free_fence_array()
2796 kvfree(fences); in __free_fence_array()
2828 f = krealloc(eb->fences, in add_timeline_fence_array()
2834 eb->fences = f; in add_timeline_fence_array()
2957 f = krealloc(eb->fences, in add_fence_array()
2963 eb->fences = f; in add_fence_array()
[all …]
/linux/drivers/gpu/drm/virtio/
virtgpu_fence.c
111 list_add_tail(&fence->node, &drv->fences); in virtio_gpu_fence_emit()
136 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
146 list_for_each_entry_safe(curr, tmp, &drv->fences, node) { in virtio_gpu_fence_event_process()
/linux/drivers/gpu/drm/radeon/
radeon_trace.h
36 __field(u32, fences)
42 __entry->fences = radeon_fence_count_emitted(
47 __entry->fences)
