// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * Fence context counter: each execution context should have its own
 * fence context. This allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
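 *
 * As a minimal sketch of that life cycle (the ``my_fence_ops``, ``my_ctx``
 * and ``job`` names are illustrative only, not part of this API)::
 *
 *     // once per engine: a context all its fences will share
 *     my_ctx->context = dma_fence_context_alloc(1);
 *
 *     // at submission: publish a new fence on that context
 *     dma_fence_init(&job->fence, &my_fence_ops, &my_ctx->lock,
 *                    my_ctx->context, ++my_ctx->seqno);
 *
 *     // on completion (e.g. from the hardware interrupt handler)
 *     dma_fence_signal(&job->fence);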
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
 */

/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware does not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second-guess timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there are no deadlocks of dma_fence_wait() against other locks,
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
 *   This means any code required for fence completion cannot acquire a
 *   &dma_resv lock. Note that this also pulls in the entire established
 *   locking hierarchy around dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 *
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
 *   or &mmu_interval_notifier callbacks. This means any code required
 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 *   Only GFP_ATOMIC is permissible, which might fail.
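 *
 * An illustrative consequence of these rules (``struct my_job`` and its
 * members are hypothetical): anything the completion path needs should be
 * allocated up front at submission time, while GFP_KERNEL is still allowed::
 *
 *     // at submission, before the fence is published: GFP_KERNEL is fine
 *     job->done_report = kzalloc(sizeof(*job->done_report), GFP_KERNEL);
 *     if (!job->done_report)
 *             return -ENOMEM;
 *
 *     // on the completion path no GFP_KERNEL/GFP_NOFS/GFP_NOIO allocation,
 *     // only preallocated memory (or GFP_ATOMIC, which may fail)
 *     fill_done_report(job->done_report);  // hypothetical helper
 *     dma_fence_signal(&job->fence);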
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 */

static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_get_name,
	.get_timeline_name = dma_fence_stub_get_name,
};

static int __init dma_fence_init_stub(void)
{
	dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
		       &dma_fence_stub_lock, 0, 0);

	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
		&dma_fence_stub.flags);

	dma_fence_signal(&dma_fence_stub);
	return 0;
}
subsys_initcall(dma_fence_init_stub);

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's timestamp
 * corresponds to the initialisation time of the Linux kernel.
 */
struct dma_fence *dma_fence_get_stub(void)
{
	return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 * @timestamp: timestamp when the fence was signaled
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
	struct dma_fence *fence;

	fence = kzalloc_obj(*fence);
	if (fence == NULL)
		return NULL;

	dma_fence_init(fence,
		       &dma_fence_stub_ops,
		       &dma_fence_stub_lock,
		       0, 0);

	set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
		&fence->flags);

	dma_fence_signal_timestamp(fence, timestamp);

	return fence;
}
EXPORT_SYMBOL(dma_fence_allocate_private_stub);

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function returns the first index of the newly allocated range of fence
 * contexts. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
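 *
 * A brief illustrative example (the ``ring`` array is hypothetical): a driver
 * with several independently executing engines would typically allocate one
 * context per engine in a single call::
 *
 *     u64 base = dma_fence_context_alloc(num_rings);
 *
 *     // each ring gets its own context, so its fences stay fully ordered
 *     // among themselves, but are unordered against other rings
 *     for (i = 0; i < num_rings; i++)
 *             ring[i].fence_context = base + i;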
 */
u64 dma_fence_context_alloc(unsigned num)
{
	WARN_ON(!num);
	return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);

/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *     cookie = dma_fence_begin_signalling();
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *     dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 *   and due to the very strict rules and many corner cases it is infeasible to
 *   catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as soon
 *   as the new fences are installed, even before dma_resv_unlock() is called.
 *
 * * The only exception are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested, as long
 *   as the overall locking hierarchy is consistent. The annotations also work
 *   both in interrupt and process context. Due to implementation details this
 *   requires that callers pass an opaque cookie from
 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
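 *
 * As a short illustrative sketch (``struct my_job`` and ``job_lock`` are
 * hypothetical driver names), a job-completion worker would be annotated
 * like this::
 *
 *     static void my_job_done_work(struct work_struct *work)
 *     {
 *             struct my_job *job = container_of(work, struct my_job, work);
 *             bool cookie;
 *
 *             // all locks taken in here become part of the lockdep
 *             // hierarchy checked against every dma_fence_wait()
 *             cookie = dma_fence_begin_signalling();
 *             spin_lock(&job->sched->job_lock);
 *             list_del(&job->node);
 *             spin_unlock(&job->sched->job_lock);
 *             dma_fence_signal(&job->fence);
 *             dma_fence_end_signalling(cookie);
 *     }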
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
	.name = "dma_fence_map"
};

/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections is annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
	/* explicitly nesting ... */
	if (lock_is_held_type(&dma_fence_lockdep_map, 1))
		return true;

	/* rely on might_sleep check for soft/hardirq locks */
	if (in_atomic())
		return true;

	/* ... and non-recursive successful read_trylock */
	lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_);

	return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);

/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
	if (cookie)
		return;

	lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);

void __dma_fence_might_wait(void)
{
	bool tmp;

	tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
	if (tmp)
		lock_release(&dma_fence_lockdep_map, _THIS_IP_);
	lock_map_acquire(&dma_fence_lockdep_map);
	lock_map_release(&dma_fence_lockdep_map);
	if (tmp)
		lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_);
}
#endif


/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Sets the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 */
void dma_fence_signal_timestamp_locked(struct dma_fence *fence,
				       ktime_t timestamp)
{
	struct dma_fence_cb *cur, *tmp;
	struct list_head cb_list;

	lockdep_assert_held(fence->lock);

	if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				      &fence->flags)))
		return;

	/* Stash the cb_list before replacing it with the timestamp */
	list_replace(&fence->cb_list, &cb_list);

	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
	trace_dma_fence_signaled(fence);

	list_for_each_entry_safe(cur, tmp, &cb_list, node) {
		INIT_LIST_HEAD(&cur->node);
		cur->func(fence, cur);
	}
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);

/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Sets the timestamp provided as the fence
 * signal timestamp.
 */
void dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
	unsigned long flags;

	if (WARN_ON(!fence))
		return;

	spin_lock_irqsave(fence->lock, flags);
	dma_fence_signal_timestamp_locked(fence, timestamp);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 */
void dma_fence_signal_locked(struct dma_fence *fence)
{
	dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);

/**
 * dma_fence_check_and_signal_locked - signal the fence if it's not yet signaled
 * @fence: the fence to check and signal
 *
 * Checks whether a fence was signaled and signals it if it was not yet signaled.
 *
 * Unlike dma_fence_check_and_signal(), this function must be called with
 * &struct dma_fence.lock being held.
 *
 * Return: true if fence has been signaled already, false otherwise.
 */
bool dma_fence_check_and_signal_locked(struct dma_fence *fence)
{
	bool ret;

	ret = dma_fence_test_signaled_flag(fence);
	dma_fence_signal_locked(fence);

	return ret;
}
EXPORT_SYMBOL(dma_fence_check_and_signal_locked);

/**
 * dma_fence_check_and_signal - signal the fence if it's not yet signaled
 * @fence: the fence to check and signal
 *
 * Checks whether a fence was signaled and signals it if it was not yet signaled.
 * All this is done in a race-free manner.
 *
 * Return: true if fence has been signaled already, false otherwise.
 */
bool dma_fence_check_and_signal(struct dma_fence *fence)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);
	ret = dma_fence_check_and_signal_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_check_and_signal);

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence. This will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 */
void dma_fence_signal(struct dma_fence *fence)
{
	unsigned long flags;
	bool tmp;

	if (WARN_ON(!fence))
		return;

	tmp = dma_fence_begin_signalling();

	spin_lock_irqsave(fence->lock, flags);
	dma_fence_signal_timestamp_locked(fence, ktime_get());
	spin_unlock_irqrestore(fence->lock, flags);

	dma_fence_end_signalling(tmp);
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
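 *
 * A brief usage sketch, assuming the caller already holds a reference to
 * @fence and picks a hypothetical 100ms budget::
 *
 *     signed long ret;
 *
 *     ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *     if (ret == 0)
 *             return -ETIMEDOUT;  // fence still not signaled
 *     else if (ret < 0)
 *             return ret;         // e.g. -ERESTARTSYS when interrupted
 *     // else: signaled, with ret jiffies of the budget left over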
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
	signed long ret;

	if (WARN_ON(timeout < 0))
		return -EINVAL;

	might_sleep();

	__dma_fence_might_wait();

	dma_fence_enable_sw_signaling(fence);

	if (trace_dma_fence_wait_start_enabled()) {
		rcu_read_lock();
		trace_dma_fence_wait_start(fence);
		rcu_read_unlock();
	}
	if (fence->ops->wait)
		ret = fence->ops->wait(fence, intr, timeout);
	else
		ret = dma_fence_default_wait(fence, intr, timeout);
	if (trace_dma_fence_wait_end_enabled()) {
		rcu_read_lock();
		trace_dma_fence_wait_end(fence);
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
	struct dma_fence *fence =
		container_of(kref, struct dma_fence, refcount);

	rcu_read_lock();
	trace_dma_fence_destroy(fence);

	if (!list_empty(&fence->cb_list) &&
	    !dma_fence_test_signaled_flag(fence)) {
		const char __rcu *timeline;
		const char __rcu *driver;
		unsigned long flags;

		driver = dma_fence_driver_name(fence);
		timeline = dma_fence_timeline_name(fence);

		WARN(1,
		     "Fence %s:%s:%llx:%llx released with pending signals!\n",
		     rcu_dereference(driver), rcu_dereference(timeline),
		     fence->context, fence->seqno);

		/*
		 * Failed to signal before release, likely a refcounting issue.
		 *
		 * This should never happen, but if it does make sure that we
		 * don't leave chains dangling. We set the error flag first
		 * so that the callbacks know this signal is due to an error.
		 */
		spin_lock_irqsave(fence->lock, flags);
		fence->error = -EDEADLK;
		dma_fence_signal_locked(fence);
		spin_unlock_irqrestore(fence->lock, flags);
	}

	rcu_read_unlock();

	if (fence->ops->release)
		fence->ops->release(fence);
	else
		dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
	kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
	bool was_set;

	lockdep_assert_held(fence->lock);

	was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
				   &fence->flags);

	if (dma_fence_test_signaled_flag(fence))
		return false;

	if (!was_set && fence->ops->enable_signaling) {
		trace_dma_fence_enable_signal(fence);

		if (!fence->ops->enable_signaling(fence)) {
			dma_fence_signal_locked(fence);
			return false;
		}
	}

	return true;
}

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
	unsigned long flags;

	spin_lock_irqsave(fence->lock, flags);
	__dma_fence_enable_signaling(fence);
	spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(), no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
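 *
 * A short illustrative sketch (``struct my_waiter`` is a hypothetical name):
 * embed the &dma_fence_cb in a driver structure and recover it with
 * container_of() in the callback, as dma_fence_default_wait_cb() below does::
 *
 *     struct my_waiter {
 *             struct dma_fence_cb cb;
 *             struct completion done;
 *     };
 *
 *     static void my_waiter_func(struct dma_fence *fence,
 *                                struct dma_fence_cb *cb)
 *     {
 *             struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *             complete(&w->done);  // may run in atomic/irq context
 *     }
 *
 *     if (dma_fence_add_callback(fence, &w->cb, my_waiter_func) == -ENOENT)
 *             complete(&w->done);  // already signaled, callback not called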
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
			   dma_fence_func_t func)
{
	unsigned long flags;
	int ret = 0;

	if (WARN_ON(!fence || !func))
		return -EINVAL;

	if (dma_fence_test_signaled_flag(fence)) {
		INIT_LIST_HEAD(&cb->node);
		return -ENOENT;
	}

	spin_lock_irqsave(fence->lock, flags);

	if (__dma_fence_enable_signaling(fence)) {
		cb->func = func;
		list_add_tail(&cb->node, &fence->cb_list);
	} else {
		INIT_LIST_HEAD(&cb->node);
		ret = -ENOENT;
	}

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has completed in error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(fence->lock, flags);
	status = dma_fence_get_status_locked(fence);
	spin_unlock_irqrestore(fence->lock, flags);

	return status;
}
EXPORT_SYMBOL(dma_fence_get_status);

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done during hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(fence->lock, flags);

	ret = !list_empty(&cb->node);
	if (ret)
		list_del_init(&cb->node);

	spin_unlock_irqrestore(fence->lock, flags);

	return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct default_wait_cb *wait =
		container_of(cb, struct default_wait_cb, base);

	wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled, for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
	struct default_wait_cb cb;
	unsigned long flags;
	signed long ret = timeout ? timeout : 1;

	spin_lock_irqsave(fence->lock, flags);

	if (dma_fence_test_signaled_flag(fence))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	if (!timeout) {
		ret = 0;
		goto out;
	}

	cb.base.func = dma_fence_default_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &fence->cb_list);

	while (!dma_fence_test_signaled_flag(fence) && ret > 0) {
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(fence->lock, flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(fence->lock, flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(fence->lock, flags);
	return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
			    uint32_t *idx)
{
	int i;

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];
		if (dma_fence_test_signaled_flag(fence)) {
			if (idx)
				*idx = i;
			return true;
		}
	}
	return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 * positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
			   bool intr, signed long timeout, uint32_t *idx)
{
	struct default_wait_cb *cb;
	signed long ret = timeout;
	unsigned i;

	if (WARN_ON(!fences || !count || timeout < 0))
		return -EINVAL;

	if (timeout == 0) {
		for (i = 0; i < count; ++i)
			if (dma_fence_is_signaled(fences[i])) {
				if (idx)
					*idx = i;
				return 1;
			}

		return 0;
	}

	cb = kzalloc_objs(struct default_wait_cb, count);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto err_free_cb;
	}

	for (i = 0; i < count; ++i) {
		struct dma_fence *fence = fences[i];

		cb[i].task = current;
		if (dma_fence_add_callback(fence, &cb[i].base,
					   dma_fence_default_wait_cb)) {
			/* This fence is already signaled */
			if (idx)
				*idx = i;
			goto fence_rm_cb;
		}
	}

	while (ret > 0) {
		if (intr)
			set_current_state(TASK_INTERRUPTIBLE);
		else
			set_current_state(TASK_UNINTERRUPTIBLE);

		if (dma_fence_test_signaled_any(fences, count, idx))
			break;

		ret = schedule_timeout(ret);

		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

fence_rm_cb:
	while (i-- > 0)
		dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
	kfree(cb);

	return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

/**
 * DOC: deadline hints
 *
 * In an ideal world, it would be possible to pipeline a workload sufficiently
 * that a utilization based device frequency governor could arrive at a minimum
 * frequency that meets the requirements of the use-case, in order to minimize
 * power consumption. But in the real world there are many workloads which
 * defy this ideal. For example, but not limited to:
 *
 * * Workloads that ping-pong between device and CPU, with alternating periods
 *   of CPU waiting for device, and device waiting on CPU. This can result in
 *   devfreq and cpufreq seeing idle time in their respective domains and as a
 *   result reduce frequency.
 *
 * * Workloads that interact with a periodic time based deadline, such as double
 *   buffered GPU rendering vs vblank sync'd page flipping. In this scenario,
 *   missing a vblank deadline results in an *increase* in idle time on the GPU
 *   (since it has to wait an additional vblank period), sending a signal to
 *   the GPU's devfreq to reduce frequency, when in fact the opposite is what is
 *   needed.
 *
 * To this end, deadline hint(s) can be set on a &dma_fence via &dma_fence_set_deadline
 * (or indirectly via userspace facing ioctls like &sync_set_deadline).
 * The deadline hint provides a way for the waiting driver, or userspace, to
 * convey an appropriate sense of urgency to the signaling driver.
 *
 * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
 * facing APIs). The time could either be some point in the future (such as
 * the vblank based deadline for page-flipping, or the start of a compositor's
 * composition cycle), or the current time to indicate an immediate deadline
 * hint (i.e. forward progress cannot be made until this fence is signaled).
 *
 * Multiple deadlines may be set on a given fence, even in parallel. See the
 * documentation for &dma_fence_ops.set_deadline.
 *
 * The deadline hint is just that, a hint. The driver that created the fence
 * may react by increasing frequency, making different scheduling choices, etc.
 * Or doing nothing at all.
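 *
 * As a brief illustrative sketch (the ``next_vblank_time`` bookkeeping is
 * hypothetical), a waiter flipping a buffer at the next vblank might hint::
 *
 *     // the render fence should signal before the upcoming vblank
 *     dma_fence_set_deadline(fence, next_vblank_time);
 *
 *     // or: the waiter is blocked right now, boost immediately
 *     dma_fence_set_deadline(fence, ktime_get());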
 */

/**
 * dma_fence_set_deadline - set desired fence-wait deadline hint
 * @fence: the fence that is to be waited on
 * @deadline: the time by which the waiter hopes for the fence to be
 * signaled
 *
 * Give the fence signaler a hint about an upcoming deadline, such as
 * vblank, by which point the waiter would prefer the fence to be
 * signaled. This is intended to give feedback to the fence signaler
 * to aid in power management decisions, such as boosting GPU frequency
 * if a periodic vblank deadline is approaching but the fence is not
 * yet signaled.
 */
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
		fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);

/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
	const char __rcu *timeline = "";
	const char __rcu *driver = "";
	const char *signaled = "";

	rcu_read_lock();

	if (!dma_fence_is_signaled(fence)) {
		timeline = dma_fence_timeline_name(fence);
		driver = dma_fence_driver_name(fence);
		signaled = "un";
	}

	seq_printf(seq, "%llu:%llu %s %s %ssignalled\n",
		   fence->context, fence->seqno, timeline, driver,
		   signaled);

	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_describe);

static void
__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
		 spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
{
	BUG_ON(!lock);
	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

	kref_init(&fence->refcount);
	fence->ops = ops;
	INIT_LIST_HEAD(&fence->cb_list);
	fence->lock = lock;
	fence->context = context;
	fence->seqno = seqno;
	fence->flags = flags;
	fence->error = 0;

	trace_dma_fence_init(fence);
}

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * the caller to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
	       spinlock_t *lock, u64 context, u64 seqno)
{
	__dma_fence_init(fence, ops, lock, context, seqno, 0UL);
}
EXPORT_SYMBOL(dma_fence_init);

/**
 * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linear increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * Context and seqno are used for easy comparison between fences, allowing
 * the caller to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
		 spinlock_t *lock, u64 context, u64 seqno)
{
	__dma_fence_init(fence, ops, lock, context, seqno,
			 BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
}
EXPORT_SYMBOL(dma_fence_init64);

/**
 * dma_fence_driver_name - Access the driver name
 * @fence: the fence to query
 *
 * Returns a driver name backing the dma-fence implementation.
 *
 * IMPORTANT CONSIDERATION:
 * Dma-fence contract stipulates that access to driver provided data (data not
 * directly embedded into the object itself), such as the &dma_fence.lock and
 * memory potentially accessed by the &dma_fence.ops functions, is forbidden
 * after the fence has been signalled. Drivers are allowed to free that data,
 * and some do.
 *
 * To allow safe access drivers are mandated to guarantee an RCU grace period
 * between signalling the fence and freeing said data.
 *
 * As such access to the driver name is only valid inside an RCU locked
 * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
 * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
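 *
 * An illustrative sketch of the required pattern, mirroring the use in
 * dma_fence_release() above::
 *
 *     const char __rcu *driver;
 *
 *     rcu_read_lock();
 *     driver = dma_fence_driver_name(fence);
 *     pr_info("fence driver: %s\n", rcu_dereference(driver));
 *     rcu_read_unlock();
 *     // the dereferenced pointer must not be used past rcu_read_unlock()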
 */
const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "RCU protection is required for safe access to returned string");

	if (!dma_fence_test_signaled_flag(fence))
		return fence->ops->get_driver_name(fence);
	else
		return "detached-driver";
}
EXPORT_SYMBOL(dma_fence_driver_name);

/**
 * dma_fence_timeline_name - Access the timeline name
 * @fence: the fence to query
 *
 * Returns a timeline name provided by the dma-fence implementation.
 *
 * IMPORTANT CONSIDERATION:
 * Dma-fence contract stipulates that access to driver provided data (data not
 * directly embedded into the object itself), such as the &dma_fence.lock and
 * memory potentially accessed by the &dma_fence.ops functions, is forbidden
 * after the fence has been signalled. Drivers are allowed to free that data,
 * and some do.
 *
 * To allow safe access drivers are mandated to guarantee an RCU grace period
 * between signalling the fence and freeing said data.
 *
 * As such access to the timeline name is only valid inside an RCU locked
 * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
 * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
 */
const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
			 "RCU protection is required for safe access to returned string");

	if (!dma_fence_test_signaled_flag(fence))
		return fence->ops->get_timeline_name(fence);
	else
		return "signaled-timeline";
}
EXPORT_SYMBOL(dma_fence_timeline_name);
