// SPDX-License-Identifier: GPL-2.0-only
/*
 * Fence mechanism for dma-buf and to allow for asynchronous dma access
 *
 * Copyright (C) 2012 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/sched/signal.h>
#include <linux/seq_file.h>

#define CREATE_TRACE_POINTS
#include <trace/events/dma_fence.h>

EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);

static DEFINE_SPINLOCK(dma_fence_stub_lock);
static struct dma_fence dma_fence_stub;

/*
 * fence context counter: each execution context should have its own
 * fence context; this allows checking if fences belong to the same
 * context or not. One device can have multiple separate contexts,
 * and they're used if some engine can run independently of another.
 */
static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);

/**
 * DOC: DMA fences overview
 *
 * DMA fences, represented by &struct dma_fence, are the kernel internal
 * synchronization primitive for DMA operations like GPU rendering, video
 * encoding/decoding, or displaying buffers on a screen.
 *
 * A fence is initialized using dma_fence_init() and completed using
 * dma_fence_signal(). Fences are associated with a context, allocated through
 * dma_fence_context_alloc(), and all fences on the same context are
 * fully ordered.
 *
 * Since the purpose of fences is to facilitate cross-device and
 * cross-application synchronization, there are multiple ways to use one:
 *
 * - Individual fences can be exposed as a &sync_file, accessed as a file
 *   descriptor from userspace, created by calling sync_file_create(). This is
 *   called explicit fencing, since userspace passes around explicit
 *   synchronization points.
 *
 * - Some subsystems also have their own explicit fencing primitives, like
 *   &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
 *   fence to be updated.
 *
 * - Then there's also implicit fencing, where the synchronization points are
 *   implicitly passed around as part of shared &dma_buf instances. Such
 *   implicit fences are stored in &struct dma_resv through the
 *   &dma_buf.resv pointer.
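 *
 * As a minimal sketch of that basic lifecycle (my_fence_ops, my_lock, ctx
 * and seqno are illustrative assumptions, not part of the core API)::
 *
 *     struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);
 *
 *     dma_fence_init(fence, &my_fence_ops, &my_lock, ctx, ++seqno);
 *     // ... publish the fence and start the asynchronous work ...
 *     dma_fence_signal(fence);        // on completion, e.g. from an IRQ
 *     dma_fence_put(fence);           // drop the initial reference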
 */

/**
 * DOC: fence cross-driver contract
 *
 * Since &dma_fence provides a cross-driver contract, all drivers must follow
 * the same rules:
 *
 * * Fences must complete in a reasonable time. Fences which represent kernels
 *   and shaders submitted by userspace, which could run forever, must be backed
 *   up by timeout and gpu hang recovery code. Minimally that code must prevent
 *   further command submission and force complete all in-flight fences, e.g.
 *   when the driver or hardware do not support gpu reset, or if the gpu reset
 *   failed for some reason. Ideally the driver supports gpu recovery which only
 *   affects the offending userspace context, and no other userspace
 *   submissions.
 *
 * * Drivers may have different ideas of what completion within a reasonable
 *   time means. Some hang recovery code uses a fixed timeout, others a mix
 *   between observing forward progress and increasingly strict timeouts.
 *   Drivers should not try to second-guess the timeout handling of fences from
 *   other drivers.
 *
 * * To ensure there are no deadlocks of dma_fence_wait() against other locks,
 *   drivers should annotate all code required to reach dma_fence_signal(),
 *   which completes the fences, with dma_fence_begin_signalling() and
 *   dma_fence_end_signalling().
 *
 * * Drivers are allowed to call dma_fence_wait() while holding
 *   dma_resv_lock(), as shown in the sketch after this list. This means any
 *   code required for fence completion cannot acquire a &dma_resv lock. Note
 *   that this also pulls in the entire established locking hierarchy around
 *   dma_resv_lock() and dma_resv_unlock().
 *
 * * Drivers are allowed to call dma_fence_wait() from their &shrinker
 *   callbacks. This means any code required for fence completion cannot
 *   allocate memory with GFP_KERNEL.
 *
 * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
 *   respectively &mmu_interval_notifier callbacks. This means any code required
 *   for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
 *   Only GFP_ATOMIC is permissible, which might fail.
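 *
 * A legal nesting under these rules might look like the following sketch
 * (the obj pointer is an illustrative assumption)::
 *
 *     dma_resv_lock(obj->resv, NULL);
 *     dma_fence_wait(fence, true);    // allowed while holding the resv lock
 *     dma_resv_unlock(obj->resv);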
 *
 * Note that only GPU drivers have a reasonable excuse for both requiring
 * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
 * track asynchronous compute work using &dma_fence. No driver outside of
 * drivers/gpu should ever call dma_fence_wait() in such contexts.
 */

static const char *dma_fence_stub_get_name(struct dma_fence *fence)
{
        return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
        .get_driver_name = dma_fence_stub_get_name,
        .get_timeline_name = dma_fence_stub_get_name,
};

static int __init dma_fence_init_stub(void)
{
        dma_fence_init(&dma_fence_stub, &dma_fence_stub_ops,
                       &dma_fence_stub_lock, 0, 0);

        set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                &dma_fence_stub.flags);

        dma_fence_signal(&dma_fence_stub);
        return 0;
}
subsys_initcall(dma_fence_init_stub);

/**
 * dma_fence_get_stub - return a signaled fence
 *
 * Return a stub fence which is already signaled. The fence's timestamp
 * corresponds to the initialisation time of the linux kernel.
 */
struct dma_fence *dma_fence_get_stub(void)
{
        return dma_fence_get(&dma_fence_stub);
}
EXPORT_SYMBOL(dma_fence_get_stub);

/**
 * dma_fence_allocate_private_stub - return a private, signaled fence
 * @timestamp: timestamp when the fence was signaled
 *
 * Return a newly allocated and signaled stub fence.
 */
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
        struct dma_fence *fence;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (fence == NULL)
                return NULL;

        dma_fence_init(fence,
                       &dma_fence_stub_ops,
                       &dma_fence_stub_lock,
                       0, 0);

        set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                &fence->flags);

        dma_fence_signal_timestamp(fence, timestamp);

        return fence;
}
EXPORT_SYMBOL(dma_fence_allocate_private_stub);

/**
 * dma_fence_context_alloc - allocate an array of fence contexts
 * @num: amount of contexts to allocate
 *
 * This function will return the first index of the number of fence contexts
 * allocated. The fence context is used for setting &dma_fence.context to a
 * unique number by passing the context to dma_fence_init().
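 *
 * For example, a driver with a single engine might do the following (a
 * sketch; my_ops, my_lock and the seqno counter are illustrative
 * assumptions)::
 *
 *     ctx = dma_fence_context_alloc(1);
 *     ...
 *     dma_fence_init(fence, &my_ops, &my_lock, ctx, ++seqno);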
 */
u64 dma_fence_context_alloc(unsigned num)
{
        WARN_ON(!num);
        return atomic64_fetch_add(num, &dma_fence_context_counter);
}
EXPORT_SYMBOL(dma_fence_context_alloc);

/**
 * DOC: fence signalling annotation
 *
 * Proving correctness of all the kernel code around &dma_fence through code
 * review and testing is tricky for a few reasons:
 *
 * * It is a cross-driver contract, and therefore all drivers must follow the
 *   same rules for lock nesting order, calling contexts for various functions
 *   and anything else significant for in-kernel interfaces. But it is also
 *   impossible to test all drivers in a single machine, hence brute-force N vs.
 *   N testing of all combinations is impossible. Even just limiting to the
 *   possible combinations is infeasible.
 *
 * * There is an enormous amount of driver code involved. For render drivers
 *   there's the tail of command submission, after fences are published,
 *   scheduler code, interrupt and workers to process job completion,
 *   and timeout, gpu reset and gpu hang recovery code. Plus for integration
 *   with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
 *   and &shrinker. For modesetting drivers there's the commit tail functions
 *   between when fences for an atomic modeset are published, and when the
 *   corresponding vblank completes, including any interrupt processing and
 *   related workers. Auditing all that code, across all drivers, is not
 *   feasible.
 *
 * * Due to how many other subsystems are involved and the locking hierarchies
 *   this pulls in there is extremely thin wiggle-room for driver-specific
 *   differences. &dma_fence interacts with almost all of the core memory
 *   handling through page fault handlers via &dma_resv, dma_resv_lock() and
 *   dma_resv_unlock(). On the other side it also interacts through all
 *   allocation sites through &mmu_notifier and &shrinker.
 *
 * Furthermore lockdep does not handle cross-release dependencies, which means
 * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
 * at runtime with some quick testing. The simplest example is one thread
 * waiting on a &dma_fence while holding a lock::
 *
 *     lock(A);
 *     dma_fence_wait(B);
 *     unlock(A);
 *
 * while the other thread is stuck trying to acquire the same lock, which
 * prevents it from signalling the fence the previous thread is stuck waiting
 * on::
 *
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *
 * By manually annotating all code relevant to signalling a &dma_fence we can
 * teach lockdep about these dependencies, which also helps with the validation
 * headache since now lockdep can check all the rules for us::
 *
 *     cookie = dma_fence_begin_signalling();
 *     lock(A);
 *     unlock(A);
 *     dma_fence_signal(B);
 *     dma_fence_end_signalling(cookie);
 *
 * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
 * annotate critical sections the following rules need to be observed:
 *
 * * All code necessary to complete a &dma_fence must be annotated, from the
 *   point where a fence is accessible to other threads, to the point where
 *   dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
 *   and due to the very strict rules and many corner cases it is infeasible to
 *   catch these just with review or normal stress testing.
 *
 * * &struct dma_resv deserves a special note, since the readers are only
 *   protected by rcu. This means the signalling critical section starts as soon
 *   as the new fences are installed, even before dma_resv_unlock() is called.
 *
 * * The only exceptions are fast paths and opportunistic signalling code, which
 *   calls dma_fence_signal() purely as an optimization, but is not required to
 *   guarantee completion of a &dma_fence. The usual example is a wait IOCTL
 *   which calls dma_fence_signal(), while the mandatory completion path goes
 *   through a hardware interrupt and possible job completion worker.
 *
 * * To aid composability of code, the annotations can be freely nested, as long
 *   as the overall locking hierarchy is consistent. The annotations also work
 *   both in interrupt and process context. Due to implementation details this
 *   requires that callers pass an opaque cookie from
 *   dma_fence_begin_signalling() to dma_fence_end_signalling().
 *
 * * Validation against the cross driver contract is implemented by priming
 *   lockdep with the relevant hierarchy at boot-up. This means even just
 *   testing with a single device is enough to validate a driver, at least as
 *   far as deadlocks with dma_fence_wait() against dma_fence_signal() are
 *   concerned.
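 *
 * A job-completion worker annotated under these rules could look like this
 * sketch (struct my_job and its members are illustrative assumptions)::
 *
 *     static void my_job_done_worker(struct work_struct *work)
 *     {
 *             struct my_job *job = container_of(work, struct my_job, work);
 *             bool cookie = dma_fence_begin_signalling();
 *
 *             // no dma_resv locks and no GFP_KERNEL allocations in here
 *             dma_fence_signal(job->done_fence);
 *             dma_fence_end_signalling(cookie);
 *     }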
 */
#ifdef CONFIG_LOCKDEP
static struct lockdep_map dma_fence_lockdep_map = {
        .name = "dma_fence_map"
};

/**
 * dma_fence_begin_signalling - begin a critical DMA fence signalling section
 *
 * Drivers should use this to annotate the beginning of any code section
 * required to eventually complete &dma_fence by calling dma_fence_signal().
 *
 * The end of these critical sections is annotated with
 * dma_fence_end_signalling().
 *
 * Returns:
 *
 * Opaque cookie needed by the implementation, which needs to be passed to
 * dma_fence_end_signalling().
 */
bool dma_fence_begin_signalling(void)
{
        /* explicitly nesting ... */
        if (lock_is_held_type(&dma_fence_lockdep_map, 1))
                return true;

        /* rely on might_sleep check for soft/hardirq locks */
        if (in_atomic())
                return true;

        /* ... and non-recursive successful read_trylock */
        lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _RET_IP_);

        return false;
}
EXPORT_SYMBOL(dma_fence_begin_signalling);

/**
 * dma_fence_end_signalling - end a critical DMA fence signalling section
 * @cookie: opaque cookie from dma_fence_begin_signalling()
 *
 * Closes a critical section annotation opened by dma_fence_begin_signalling().
 */
void dma_fence_end_signalling(bool cookie)
{
        if (cookie)
                return;

        lock_release(&dma_fence_lockdep_map, _RET_IP_);
}
EXPORT_SYMBOL(dma_fence_end_signalling);

void __dma_fence_might_wait(void)
{
        bool tmp;

        tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
        if (tmp)
                lock_release(&dma_fence_lockdep_map, _THIS_IP_);
        lock_map_acquire(&dma_fence_lockdep_map);
        lock_map_release(&dma_fence_lockdep_map);
        if (tmp)
                lock_acquire(&dma_fence_lockdep_map, 0, 1, 1, 1, NULL, _THIS_IP_);
}
#endif


/**
 * dma_fence_signal_timestamp_locked - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Unlike dma_fence_signal_timestamp(), this function must be called with
 * &dma_fence.lock held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
                                      ktime_t timestamp)
{
        struct dma_fence_cb *cur, *tmp;
        struct list_head cb_list;

        lockdep_assert_held(fence->lock);

        if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
                                      &fence->flags)))
                return -EINVAL;

        /* Stash the cb_list before replacing it with the timestamp */
        list_replace(&fence->cb_list, &cb_list);

        fence->timestamp = timestamp;
        set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
        trace_dma_fence_signaled(fence);

        list_for_each_entry_safe(cur, tmp, &cb_list, node) {
                INIT_LIST_HEAD(&cur->node);
                cur->func(fence, cur);
        }

        return 0;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);

/**
 * dma_fence_signal_timestamp - signal completion of a fence
 * @fence: the fence to signal
 * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time. Set the timestamp provided as the fence
 * signal timestamp.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
{
        unsigned long flags;
        int ret;

        if (WARN_ON(!fence))
                return -EINVAL;

        spin_lock_irqsave(fence->lock, flags);
        ret = dma_fence_signal_timestamp_locked(fence, timestamp);
        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_fence_signal_timestamp);

/**
 * dma_fence_signal_locked - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
 * held.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal_locked(struct dma_fence *fence)
{
        return dma_fence_signal_timestamp_locked(fence, ktime_get());
}
EXPORT_SYMBOL(dma_fence_signal_locked);

/**
 * dma_fence_signal - signal completion of a fence
 * @fence: the fence to signal
 *
 * Signal completion for software callbacks on a fence; this will unblock
 * dma_fence_wait() calls and run all the callbacks added with
 * dma_fence_add_callback(). Can be called multiple times, but since a fence
 * can only go from the unsignaled to the signaled state and not back, it will
 * only be effective the first time.
 *
 * Returns 0 on success and a negative error value when @fence has been
 * signalled already.
 */
int dma_fence_signal(struct dma_fence *fence)
{
        unsigned long flags;
        int ret;
        bool tmp;

        if (WARN_ON(!fence))
                return -EINVAL;

        tmp = dma_fence_begin_signalling();

        spin_lock_irqsave(fence->lock, flags);
        ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
        spin_unlock_irqrestore(fence->lock, flags);

        dma_fence_end_signalling(tmp);

        return ret;
}
EXPORT_SYMBOL(dma_fence_signal);

/**
 * dma_fence_wait_timeout - sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. Other error values may be
 * returned on custom implementations.
 *
 * Performs a synchronous wait on this fence. It is assumed the caller
 * directly or indirectly (buf-mgr between reservation and committing)
 * holds a reference to the fence, otherwise the fence might be
 * freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_any_timeout().
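 *
 * A sketch of typical return-value handling (the 100 ms timeout is an
 * illustrative choice)::
 *
 *     ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(100));
 *     if (ret == 0)
 *             return -ETIMEDOUT;      // wait timed out
 *     else if (ret < 0)
 *             return ret;             // interrupted or driver-specific error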
 */
signed long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
{
        signed long ret;

        if (WARN_ON(timeout < 0))
                return -EINVAL;

        might_sleep();

        __dma_fence_might_wait();

        dma_fence_enable_sw_signaling(fence);

        if (trace_dma_fence_wait_start_enabled()) {
                rcu_read_lock();
                trace_dma_fence_wait_start(fence);
                rcu_read_unlock();
        }
        if (fence->ops->wait)
                ret = fence->ops->wait(fence, intr, timeout);
        else
                ret = dma_fence_default_wait(fence, intr, timeout);
        if (trace_dma_fence_wait_end_enabled()) {
                rcu_read_lock();
                trace_dma_fence_wait_end(fence);
                rcu_read_unlock();
        }
        return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);

/**
 * dma_fence_release - default release function for fences
 * @kref: &dma_fence.refcount
 *
 * This is the default release function for &dma_fence. Drivers shouldn't call
 * this directly, but instead call dma_fence_put().
 */
void dma_fence_release(struct kref *kref)
{
        struct dma_fence *fence =
                container_of(kref, struct dma_fence, refcount);

        rcu_read_lock();
        trace_dma_fence_destroy(fence);

        if (!list_empty(&fence->cb_list) &&
            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                const char __rcu *timeline;
                const char __rcu *driver;
                unsigned long flags;

                driver = dma_fence_driver_name(fence);
                timeline = dma_fence_timeline_name(fence);

                WARN(1,
                     "Fence %s:%s:%llx:%llx released with pending signals!\n",
                     rcu_dereference(driver), rcu_dereference(timeline),
                     fence->context, fence->seqno);

                /*
                 * Failed to signal before release, likely a refcounting issue.
                 *
                 * This should never happen, but if it does make sure that we
                 * don't leave chains dangling. We set the error flag first
                 * so that the callbacks know this signal is due to an error.
                 */
                spin_lock_irqsave(fence->lock, flags);
                fence->error = -EDEADLK;
                dma_fence_signal_locked(fence);
                spin_unlock_irqrestore(fence->lock, flags);
        }

        rcu_read_unlock();

        if (fence->ops->release)
                fence->ops->release(fence);
        else
                dma_fence_free(fence);
}
EXPORT_SYMBOL(dma_fence_release);

/**
 * dma_fence_free - default release function for &dma_fence.
 * @fence: fence to release
 *
 * This is the default implementation for &dma_fence_ops.release. It calls
 * kfree_rcu() on @fence.
 */
void dma_fence_free(struct dma_fence *fence)
{
        kfree_rcu(fence, rcu);
}
EXPORT_SYMBOL(dma_fence_free);

static bool __dma_fence_enable_signaling(struct dma_fence *fence)
{
        bool was_set;

        lockdep_assert_held(fence->lock);

        was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
                                   &fence->flags);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return false;

        if (!was_set && fence->ops->enable_signaling) {
                trace_dma_fence_enable_signal(fence);

                if (!fence->ops->enable_signaling(fence)) {
                        dma_fence_signal_locked(fence);
                        return false;
                }
        }

        return true;
}

/**
 * dma_fence_enable_sw_signaling - enable signaling on fence
 * @fence: the fence to enable
 *
 * This will request sw signaling to be enabled, to make the fence
 * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
 * internally.
 */
void dma_fence_enable_sw_signaling(struct dma_fence *fence)
{
        unsigned long flags;

        spin_lock_irqsave(fence->lock, flags);
        __dma_fence_enable_signaling(fence);
        spin_unlock_irqrestore(fence->lock, flags);
}
EXPORT_SYMBOL(dma_fence_enable_sw_signaling);

/**
 * dma_fence_add_callback - add a callback to be called when the fence
 * is signaled
 * @fence: the fence to wait on
 * @cb: the callback to register
 * @func: the function to call
 *
 * Add a software callback to the fence. The caller should keep a reference to
 * the fence.
 *
 * @cb will be initialized by dma_fence_add_callback(); no initialization
 * by the caller is required. Any number of callbacks can be registered
 * to a fence, but a callback can only be registered to one fence at a time.
 *
 * If fence is already signaled, this function will return -ENOENT (and
 * *not* call the callback).
 *
 * Note that the callback can be called from an atomic context or irq context.
 *
 * Returns 0 in case of success, -ENOENT if the fence is already signaled
 * and -EINVAL in case of error.
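 *
 * Typical usage embeds the &dma_fence_cb in a larger structure, as in this
 * sketch (struct my_waiter and its members are illustrative assumptions)::
 *
 *     static void my_cb(struct dma_fence *f, struct dma_fence_cb *cb)
 *     {
 *             struct my_waiter *w = container_of(cb, struct my_waiter, cb);
 *
 *             complete(&w->done);
 *     }
 *
 *     ...
 *     if (dma_fence_add_callback(fence, &w->cb, my_cb) == -ENOENT)
 *             complete(&w->done);     // already signaled, my_cb won't run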
 */
int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
                           dma_fence_func_t func)
{
        unsigned long flags;
        int ret = 0;

        if (WARN_ON(!fence || !func))
                return -EINVAL;

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                INIT_LIST_HEAD(&cb->node);
                return -ENOENT;
        }

        spin_lock_irqsave(fence->lock, flags);

        if (__dma_fence_enable_signaling(fence)) {
                cb->func = func;
                list_add_tail(&cb->node, &fence->cb_list);
        } else {
                INIT_LIST_HEAD(&cb->node);
                ret = -ENOENT;
        }

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_fence_add_callback);

/**
 * dma_fence_get_status - returns the status upon completion
 * @fence: the dma_fence to query
 *
 * This wraps dma_fence_get_status_locked() to return the error status
 * condition on a signaled fence. See dma_fence_get_status_locked() for more
 * details.
 *
 * Returns 0 if the fence has not yet been signaled, 1 if the fence has
 * been signaled without an error condition, or a negative error code
 * if the fence has been completed in error.
 */
int dma_fence_get_status(struct dma_fence *fence)
{
        unsigned long flags;
        int status;

        spin_lock_irqsave(fence->lock, flags);
        status = dma_fence_get_status_locked(fence);
        spin_unlock_irqrestore(fence->lock, flags);

        return status;
}
EXPORT_SYMBOL(dma_fence_get_status);

/**
 * dma_fence_remove_callback - remove a callback from the signaling list
 * @fence: the fence to wait on
 * @cb: the callback to remove
 *
 * Remove a previously queued callback from the fence. This function returns
 * true if the callback is successfully removed, or false if the fence has
 * already been signaled.
 *
 * *WARNING*:
 * Cancelling a callback should only be done if you really know what you're
 * doing, since deadlocks and race conditions could occur all too easily. For
 * this reason, it should only ever be done on hardware lockup recovery,
 * with a reference held to the fence.
 *
 * Behaviour is undefined if @cb has not been added to @fence using
 * dma_fence_add_callback() beforehand.
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(fence->lock, flags);

        ret = !list_empty(&cb->node);
        if (ret)
                list_del_init(&cb->node);

        spin_unlock_irqrestore(fence->lock, flags);

        return ret;
}
EXPORT_SYMBOL(dma_fence_remove_callback);

struct default_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct default_wait_cb *wait =
                container_of(cb, struct default_wait_cb, base);

        wake_up_state(wait->task, TASK_NORMAL);
}

/**
 * dma_fence_default_wait - default sleep until the fence gets signaled
 * or until timeout elapses
 * @fence: the fence to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
 * remaining timeout in jiffies on success. If timeout is zero the value one is
 * returned if the fence is already signaled, for consistency with other
 * functions taking a jiffies timeout.
 */
signed long
dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
{
        struct default_wait_cb cb;
        unsigned long flags;
        signed long ret = timeout ? timeout : 1;

        spin_lock_irqsave(fence->lock, flags);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                goto out;

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        if (!timeout) {
                ret = 0;
                goto out;
        }

        cb.base.func = dma_fence_default_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &fence->cb_list);

        while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(fence->lock, flags);

                ret = schedule_timeout(ret);

                spin_lock_irqsave(fence->lock, flags);
                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);
        __set_current_state(TASK_RUNNING);

out:
        spin_unlock_irqrestore(fence->lock, flags);
        return ret;
}
EXPORT_SYMBOL(dma_fence_default_wait);

static bool
dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
                            uint32_t *idx)
{
        int i;

        for (i = 0; i < count; ++i) {
                struct dma_fence *fence = fences[i];

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
                        if (idx)
                                *idx = i;
                        return true;
                }
        }
        return false;
}

/**
 * dma_fence_wait_any_timeout - sleep until any fence gets signaled
 * or until timeout elapses
 * @fences: array of fences to wait on
 * @count: number of fences to wait on
 * @intr: if true, do an interruptible wait
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 * @idx: used to store the first signaled fence index, meaningful only on
 *       positive return
 *
 * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
 * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
 * on success.
 *
 * Synchronously waits for the first fence in the array to be signaled. The
 * caller needs to hold a reference to all fences in the array, otherwise a
 * fence might be freed before return, resulting in undefined behavior.
 *
 * See also dma_fence_wait() and dma_fence_wait_timeout().
 */
signed long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
                           bool intr, signed long timeout, uint32_t *idx)
{
        struct default_wait_cb *cb;
        signed long ret = timeout;
        unsigned i;

        if (WARN_ON(!fences || !count || timeout < 0))
                return -EINVAL;

        if (timeout == 0) {
                for (i = 0; i < count; ++i)
                        if (dma_fence_is_signaled(fences[i])) {
                                if (idx)
                                        *idx = i;
                                return 1;
                        }

                return 0;
        }

        cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
        if (cb == NULL) {
                ret = -ENOMEM;
                goto err_free_cb;
        }

        for (i = 0; i < count; ++i) {
                struct dma_fence *fence = fences[i];

                cb[i].task = current;
                if (dma_fence_add_callback(fence, &cb[i].base,
                                           dma_fence_default_wait_cb)) {
                        /* This fence is already signaled */
                        if (idx)
                                *idx = i;
                        goto fence_rm_cb;
                }
        }

        while (ret > 0) {
                if (intr)
                        set_current_state(TASK_INTERRUPTIBLE);
                else
                        set_current_state(TASK_UNINTERRUPTIBLE);

                if (dma_fence_test_signaled_any(fences, count, idx))
                        break;

                ret = schedule_timeout(ret);

                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

fence_rm_cb:
        while (i-- > 0)
                dma_fence_remove_callback(fences[i], &cb[i].base);

err_free_cb:
        kfree(cb);

        return ret;
}
EXPORT_SYMBOL(dma_fence_wait_any_timeout);

/**
 * DOC: deadline hints
 *
 * In an ideal world, it would be possible to pipeline a workload sufficiently
 * that a utilization based device frequency governor could arrive at a minimum
 * frequency that meets the requirements of the use-case, in order to minimize
 * power consumption. But in the real world there are many workloads which
 * defy this ideal. For example, but not limited to:
 *
 * * Workloads that ping-pong between device and CPU, with alternating periods
 *   of CPU waiting for device, and device waiting on CPU. This can result in
 *   devfreq and cpufreq seeing idle time in their respective domains and as a
 *   result reduce frequency.
 *
 * * Workloads that interact with a periodic time based deadline, such as double
 *   buffered GPU rendering vs vblank sync'd page flipping. In this scenario,
 *   missing a vblank deadline results in an *increase* in idle time on the GPU
 *   (since it has to wait an additional vblank period), sending a signal to
 *   the GPU's devfreq to reduce frequency, when in fact the opposite is what is
 *   needed.
 *
 * To this end, deadline hint(s) can be set on a &dma_fence via
 * dma_fence_set_deadline() (or indirectly via userspace facing ioctls like
 * &sync_set_deadline). The deadline hint provides a way for the waiting
 * driver, or userspace, to convey an appropriate sense of urgency to the
 * signaling driver.
 *
 * A deadline hint is given in absolute ktime (CLOCK_MONOTONIC for userspace
 * facing APIs). The time could either be some point in the future (such as
 * the vblank based deadline for page-flipping, or the start of a compositor's
 * composition cycle), or the current time to indicate an immediate deadline
 * hint (i.e. forward progress cannot be made until this fence is signaled).
 *
 * Multiple deadlines may be set on a given fence, even in parallel. See the
 * documentation for &dma_fence_ops.set_deadline.
 *
 * The deadline hint is just that, a hint. The driver that created the fence
 * may react by increasing frequency, making different scheduling choices, etc.
 * Or doing nothing at all.
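 *
 * For example, a page-flip helper could hint at the next vblank, as in this
 * sketch (next_vblank is an illustrative assumption)::
 *
 *     // ask the signaler to try to signal before the upcoming vblank
 *     dma_fence_set_deadline(fence, next_vblank);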
 */

/**
 * dma_fence_set_deadline - set desired fence-wait deadline hint
 * @fence: the fence that is to be waited on
 * @deadline: the time by which the waiter hopes for the fence to be
 *            signaled
 *
 * Give the fence signaler a hint about an upcoming deadline, such as
 * vblank, by which point the waiter would prefer the fence to be
 * signaled. This is intended to give feedback to the fence signaler
 * to aid in power management decisions, such as boosting GPU frequency
 * if a periodic vblank deadline is approaching but the fence is not
 * yet signaled.
 */
void dma_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
        if (fence->ops->set_deadline && !dma_fence_is_signaled(fence))
                fence->ops->set_deadline(fence, deadline);
}
EXPORT_SYMBOL(dma_fence_set_deadline);

/**
 * dma_fence_describe - Dump fence description into seq_file
 * @fence: the fence to describe
 * @seq: the seq_file to put the textual description into
 *
 * Dump a textual description of the fence and its state into the seq_file.
 */
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
        const char __rcu *timeline = "";
        const char __rcu *driver = "";
        const char *signaled = "";

        rcu_read_lock();

        if (!dma_fence_is_signaled(fence)) {
                timeline = dma_fence_timeline_name(fence);
                driver = dma_fence_driver_name(fence);
                signaled = "un";
        }

        seq_printf(seq, "%llu:%llu %s %s %ssignalled\n",
                   fence->context, fence->seqno, timeline, driver,
                   signaled);

        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_describe);

static void
__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
                 spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
{
        BUG_ON(!lock);
        BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);

        kref_init(&fence->refcount);
        fence->ops = ops;
        INIT_LIST_HEAD(&fence->cb_list);
        fence->lock = lock;
        fence->context = context;
        fence->seqno = seqno;
        fence->flags = flags;
        fence->error = 0;

        trace_dma_fence_init(fence);
}

/**
 * dma_fence_init - Initialize a custom fence.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
               spinlock_t *lock, u64 context, u64 seqno)
{
        __dma_fence_init(fence, ops, lock, context, seqno, 0UL);
}
EXPORT_SYMBOL(dma_fence_init);

/**
 * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
 * @fence: the fence to initialize
 * @ops: the dma_fence_ops for operations on this fence
 * @lock: the irqsafe spinlock to use for locking this fence
 * @context: the execution context this fence is run on
 * @seqno: a linearly increasing sequence number for this context
 *
 * Initializes an allocated fence. The caller doesn't have to keep its
 * refcount after committing with this fence, but it will need to hold a
 * refcount again if &dma_fence_ops.enable_signaling gets called.
 *
 * Context and seqno are used for easy comparison between fences, allowing
 * to check which fence is later by simply using dma_fence_later().
 */
void
dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
                 spinlock_t *lock, u64 context, u64 seqno)
{
        __dma_fence_init(fence, ops, lock, context, seqno,
                         BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
}
EXPORT_SYMBOL(dma_fence_init64);

/**
 * dma_fence_driver_name - Access the driver name
 * @fence: the fence to query
 *
 * Returns a driver name backing the dma-fence implementation.
 *
 * IMPORTANT CONSIDERATION:
 * Dma-fence contract stipulates that access to driver provided data (data not
 * directly embedded into the object itself), such as the &dma_fence.lock and
 * memory potentially accessed by the &dma_fence.ops functions, is forbidden
 * after the fence has been signalled. Drivers are allowed to free that data,
 * and some do.
 *
 * To allow safe access, drivers are mandated to guarantee an RCU grace period
 * between signalling the fence and freeing said data.
 *
 * As such, access to the driver name is only valid inside an RCU locked
 * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
 * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
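 *
 * A sketch of safe usage, mirroring how this file itself consumes the
 * returned pointer::
 *
 *     const char __rcu *name;
 *
 *     rcu_read_lock();
 *     name = dma_fence_driver_name(fence);
 *     pr_info("driver: %s\n", rcu_dereference(name));
 *     rcu_read_unlock();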
 */
const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "RCU protection is required for safe access to returned string");

        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return fence->ops->get_driver_name(fence);
        else
                return "detached-driver";
}
EXPORT_SYMBOL(dma_fence_driver_name);

/**
 * dma_fence_timeline_name - Access the timeline name
 * @fence: the fence to query
 *
 * Returns a timeline name provided by the dma-fence implementation.
 *
 * IMPORTANT CONSIDERATION:
 * Dma-fence contract stipulates that access to driver provided data (data not
 * directly embedded into the object itself), such as the &dma_fence.lock and
 * memory potentially accessed by the &dma_fence.ops functions, is forbidden
 * after the fence has been signalled. Drivers are allowed to free that data,
 * and some do.
 *
 * To allow safe access, drivers are mandated to guarantee an RCU grace period
 * between signalling the fence and freeing said data.
 *
 * As such, access to the timeline name is only valid inside an RCU locked
 * section. The pointer MUST be both queried and USED ONLY WITHIN a SINGLE
 * block guarded by the &rcu_read_lock and &rcu_read_unlock pair.
 */
const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
                         "RCU protection is required for safe access to returned string");

        if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return fence->ops->get_timeline_name(fence);
        else
                return "signaled-timeline";
}
EXPORT_SYMBOL(dma_fence_timeline_name);