Lines matching +full:embedded +full:- +full:sync in drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c. Only the matched lines survive in this excerpt; lines the search did not hit are elided below as "...".
// SPDX-License-Identifier: MIT
...
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
...

#include <linux/dma-fence-chain.h>
/**
 * amdgpu_sync_create - zero init sync object
 *
 * @sync: sync object to initialize
 *
 * Just clear the sync object for now.
 */
void amdgpu_sync_create(struct amdgpu_sync *sync)
{
    hash_init(sync->fences);
}
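Everything in this file hangs off sync->fences, which hash_init() treats as a kernel hashtable. A minimal sketch of the assumed layout, keyed by dma_fence context (the entry type matches the amdgpu_sync_entry identifier visible further down; the table size is an assumption, not taken from this excerpt):

struct amdgpu_sync_entry {
    struct hlist_node node;   /* linkage in sync->fences */
    struct dma_fence *fence;  /* latest fence seen for this context */
};

struct amdgpu_sync {
    DECLARE_HASHTABLE(fences, 4);  /* 2^4 buckets; size assumed */
};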
/**
 * amdgpu_sync_same_dev - test if a fence belongs to us
 * ...
 */
    ...
    ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
    return ring->adev == adev;
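Only the scheduler-fence path survives in the hits. A plausible reconstruction of the whole helper (the signature and the to_drm_sched_fence() guard are assumptions inferred from how the two surviving lines are used):

static bool amdgpu_sync_same_dev(struct amdgpu_device *adev,
                                 struct dma_fence *f)
{
    struct drm_sched_fence *s_fence = to_drm_sched_fence(f);

    if (s_fence) {
        struct amdgpu_ring *ring;

        /* Rings embed their scheduler, so the scheduler pointer
         * identifies the ring and therefore the device. */
        ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
        return ring->adev == adev;
    }

    return false;
}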
/**
 * amdgpu_sync_get_owner - extract the owner of a fence
 * ...
 */
    ...
    return s_fence->owner;
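A sketch of what the rest of the function presumably looks like: scheduler fences carry an explicit owner, anything else falls back to a sentinel (the sentinel name comes from amdgpu's fence-owner conventions and is an assumption relative to this excerpt):

static void *amdgpu_sync_get_owner(struct dma_fence *f)
{
    struct drm_sched_fence *s_fence;

    if (!f)
        return AMDGPU_FENCE_OWNER_UNDEFINED;

    s_fence = to_drm_sched_fence(f);
    if (s_fence)
        return s_fence->owner;

    return AMDGPU_FENCE_OWNER_UNDEFINED;
}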
/**
 * amdgpu_sync_keep_later - Keep the later fence
 * ...
 */
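Only the kerneldoc headline survives, but the semantics ("keep whichever fence is later") and the call site in amdgpu_sync_add_later() below suggest a body along these lines (a sketch, not the file's verbatim code):

static void amdgpu_sync_keep_later(struct dma_fence **keep,
                                   struct dma_fence *fence)
{
    /* Nothing to do if the tracked fence is already the later one. */
    if (*keep && dma_fence_is_later(*keep, fence))
        return;

    /* Otherwise swap it in, keeping the reference counts balanced. */
    dma_fence_put(*keep);
    *keep = dma_fence_get(fence);
}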
/**
 * amdgpu_sync_add_later - add the fence to the hash
 *
 * @sync: sync object to add the fence to
 * ...
 */
static bool amdgpu_sync_add_later(struct amdgpu_sync *sync, struct dma_fence *f)
{
    struct amdgpu_sync_entry *e;

    hash_for_each_possible(sync->fences, e, node, f->context) {
        if (unlikely(e->fence->context != f->context))
            continue;

        amdgpu_sync_keep_later(&e->fence, f);
        return true;
    }

    return false;
}
/**
 * amdgpu_sync_fence - remember to sync to this fence
 *
 * @sync: sync object to add fence to
 * @f: fence to sync to
 *
 * Add the fence to the sync object.
 */
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
    ...
    if (amdgpu_sync_add_later(sync, f))
    ...
        return -ENOMEM;
    ...
    hash_add(sync->fences, &e->node, f->context);
    e->fence = dma_fence_get(f);
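The hits skip the middle of the function, but the -ENOMEM return implies an entry allocation sitting between the add-later fast path and the hash insertion. A plausible reconstruction (the slab cache, the GFP flags, and the NULL-fence early-out are assumptions):

int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f)
{
    struct amdgpu_sync_entry *e;

    if (!f)
        return 0;

    /* Fast path: a fence from this context is already tracked,
     * just keep the later of the two. */
    if (amdgpu_sync_add_later(sync, f))
        return 0;

    e = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
    if (!e)
        return -ENOMEM;

    hash_add(sync->fences, &e->node, f->context);
    e->fence = dma_fence_get(f);
    return 0;
}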
/* Determine based on the owner and mode if we should sync to a fence or not */
    ...
    /* Always sync to moves, no matter what */
    ...
    /* Never sync to VM updates either. */
    ...
    /* Ignore fences depending on the sync mode */
    ...
         "Adding eviction fence to sync obj");
/**
 * amdgpu_sync_resv - sync to a reservation object
 *
 * @adev: amdgpu device
 * @sync: sync object to add fences from reservation object to
 * @resv: reservation object with embedded fences
 * @mode: how owner affects which fences we sync to
 * ...
 *
 * Sync to all fences in the reservation object that the owner/mode
 * filter selects.
 */
int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
    ...
        return -EINVAL;
    ...
        r = amdgpu_sync_fence(sync, f);
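A sketch of the loop these hits come from: iterate every fence attached to the reservation object and add the ones the filter selects (the dma_resv iterator usage class is an assumption; the dma-fence-chain.h include at the top of the file suggests the real loop additionally unwraps fence chains):

int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
                     struct dma_resv *resv, enum amdgpu_sync_mode mode,
                     void *owner)
{
    struct dma_resv_iter cursor;
    struct dma_fence *f;
    int r;

    if (resv == NULL)
        return -EINVAL;

    dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
        if (!amdgpu_sync_test_fence(adev, mode, owner, f))
            continue;

        r = amdgpu_sync_fence(sync, f);
        if (r)
            return r;
    }

    return 0;
}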
/**
 * amdgpu_sync_kfd - sync to KFD fences
 *
 * @sync: sync object to add KFD fences to
 * @resv: reservation object to extract the KFD fences from
 *
 * Extract all KFD fences and add them to the sync object.
 */
int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
    ...
        r = amdgpu_sync_fence(sync, f);
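Presumably the same reservation-object walk as amdgpu_sync_resv(), restricted to fences whose owner marks them as KFD fences; a sketch under that assumption:

int amdgpu_sync_kfd(struct amdgpu_sync *sync, struct dma_resv *resv)
{
    struct dma_resv_iter cursor;
    struct dma_fence *f;
    int r = 0;

    dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
        /* Only pick up fences attached by KFD. */
        if (amdgpu_sync_get_owner(f) != AMDGPU_FENCE_OWNER_KFD)
            continue;

        r = amdgpu_sync_fence(sync, f);
        if (r)
            break;
    }

    return r;
}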
static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
{
    hash_del(&e->node);
    dma_fence_put(e->fence);
    ...
}
/**
 * amdgpu_sync_peek_fence - get the next fence not signaled yet
 *
 * @sync: the sync object
 * ...
 *
 * Returns the next fence not signaled yet without removing it from the sync
 * object.
 */
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
    ...
    hash_for_each_safe(sync->fences, i, tmp, e, node) {
        struct dma_fence *f = e->fence;
        ...
        if (s_fence->sched == &ring->sched) {
            if (dma_fence_is_signaled(&s_fence->scheduled))
                ...

            return &s_fence->scheduled;
        }
    ...
/**
 * amdgpu_sync_get_fence - get the next fence from the sync object
 *
 * @sync: sync object to use
 *
 * Gets and removes the next not-yet-signaled fence from the sync object.
 */
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
    ...
    hash_for_each_safe(sync->fences, i, tmp, e, node) {
        ...
        f = e->fence;
        ...
        hash_del(&e->node);
        ...
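A sketch of the full loop: every visited entry is unlinked, signaled fences are dropped and the walk continues, and the first unsignaled fence is handed to the caller, who inherits the entry's reference (the slab free is an assumption):

struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
{
    struct amdgpu_sync_entry *e;
    struct hlist_node *tmp;
    struct dma_fence *f;
    int i;

    hash_for_each_safe(sync->fences, i, tmp, e, node) {
        f = e->fence;

        hash_del(&e->node);
        kmem_cache_free(amdgpu_sync_slab, e);

        if (!dma_fence_is_signaled(f))
            return f;  /* caller takes over the reference */

        dma_fence_put(f);
    }

    return NULL;
}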
/**
 * amdgpu_sync_clone - clone a sync object
 *
 * @source: sync object to clone
 * @clone: pointer to destination sync object
 * ...
 */
    ...
    hash_for_each_safe(source->fences, i, tmp, e, node) {
        f = e->fence;
        ...
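A plausible body: unsignaled fences are added to the clone, while signaled ones are dropped from the source as a side effect (a sketch; the exact cleanup policy is an assumption):

int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
{
    struct amdgpu_sync_entry *e;
    struct hlist_node *tmp;
    struct dma_fence *f;
    int i, r;

    hash_for_each_safe(source->fences, i, tmp, e, node) {
        f = e->fence;
        if (!dma_fence_is_signaled(f)) {
            r = amdgpu_sync_fence(clone, f);
            if (r)
                return r;
        } else {
            amdgpu_sync_entry_free(e);
        }
    }

    return 0;
}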
/**
 * amdgpu_sync_push_to_job - push fences into job
 * @sync: sync object to get the fences from
 * @job: job to add the fences to
 *
 * Add all unsignaled fences from sync to job.
 */
int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
    ...
    hash_for_each_safe(sync->fences, i, tmp, e, node) {
        f = e->fence;
        ...
        r = drm_sched_job_add_dependency(&job->base, f);
        ...
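drm_sched_job_add_dependency() takes over one fence reference, so the loop presumably grabs an extra reference before handing each unsignaled fence to the scheduler job; a sketch under that assumption:

int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
{
    struct amdgpu_sync_entry *e;
    struct hlist_node *tmp;
    struct dma_fence *f;
    int i, r;

    hash_for_each_safe(sync->fences, i, tmp, e, node) {
        f = e->fence;
        if (dma_fence_is_signaled(f)) {
            amdgpu_sync_entry_free(e);
            continue;
        }

        dma_fence_get(f);  /* reference consumed by the scheduler */
        r = drm_sched_job_add_dependency(&job->base, f);
        if (r) {
            dma_fence_put(f);
            return r;
        }
    }

    return 0;
}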
int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
    ...
    hash_for_each_safe(sync->fences, i, tmp, e, node) {
        r = dma_fence_wait(e->fence, intr);
        ...
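Completing the picture: wait on every tracked fence, optionally interruptibly, freeing entries as they complete (a sketch):

int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
{
    struct amdgpu_sync_entry *e;
    struct hlist_node *tmp;
    int i, r;

    hash_for_each_safe(sync->fences, i, tmp, e, node) {
        r = dma_fence_wait(e->fence, intr);
        if (r)
            return r;  /* e.g. -ERESTARTSYS when interrupted */

        amdgpu_sync_entry_free(e);
    }

    return 0;
}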
/**
 * amdgpu_sync_free - free the sync object
 *
 * @sync: sync object to use
 *
 * Free the sync object.
 */
void amdgpu_sync_free(struct amdgpu_sync *sync)
{
    struct amdgpu_sync_entry *e;
    struct hlist_node *tmp;
    int i;

    hash_for_each_safe(sync->fences, i, tmp, e, node)
        amdgpu_sync_entry_free(e);
}
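A typical lifecycle of the object, as a usage sketch (the adev, bo, job, and owner variables stand in for a real submission path and are assumptions):

    struct amdgpu_sync sync;
    int r;

    amdgpu_sync_create(&sync);

    /* Collect the fences a job must wait for from a BO's
     * reservation object, then push them into the job. */
    r = amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv,
                         AMDGPU_SYNC_NE_OWNER, owner);
    if (!r)
        r = amdgpu_sync_push_to_job(&sync, job);

    amdgpu_sync_free(&sync);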
/**
 * amdgpu_sync_init - init sync object subsystem
 * ...
 */
        return -ENOMEM;
    ...

/**
 * amdgpu_sync_fini - fini sync object subsystem
 * ...
 */
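The -ENOMEM in amdgpu_sync_init() points at the creation of the slab cache the entries are allocated from, and the fini counterpart would destroy it. A sketch (the cache name and flags are assumptions):

static struct kmem_cache *amdgpu_sync_slab;

int amdgpu_sync_init(void)
{
    amdgpu_sync_slab = kmem_cache_create("amdgpu_sync",
                                         sizeof(struct amdgpu_sync_entry),
                                         0, SLAB_HWCACHE_ALIGN, NULL);
    if (!amdgpu_sync_slab)
        return -ENOMEM;

    return 0;
}

void amdgpu_sync_fini(void)
{
    kmem_cache_destroy(amdgpu_sync_slab);
}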