/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

/**
 * struct dma_resv_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: number of fences in the table
 * @shared_max: allocated capacity of the table, for growing it
 * @shared: shared fence table
 */
struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
};

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * There are multiple uses for this, with sometimes slightly different rules in
 * how the fence slots are used.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
	/**
	 * @lock:
	 *
	 * Update side lock. Don't use directly, instead use the wrapper
	 * functions like dma_resv_lock() and dma_resv_unlock().
	 *
	 * Drivers which use the reservation object to manage memory
	 * dynamically also use this lock to protect buffer object state such
	 * as placement and allocation policies, and hold it throughout
	 * command submission.
	 */
	struct ww_mutex lock;

	/**
	 * @seq:
	 *
	 * Sequence count for managing RCU read-side synchronization, allows
	 * read-only access to @fence_excl and @fence while ensuring we take a
	 * consistent snapshot.
	 */
	seqcount_ww_mutex_t seq;

	/**
	 * @fence_excl:
	 *
	 * The exclusive fence, if there is one currently.
	 *
	 * There are two ways to update this fence:
	 *
	 * - First by calling dma_resv_add_excl_fence(), which replaces all
	 *   fences attached to the reservation object. To guarantee that no
	 *   fences are lost, this new fence must signal only after all previous
	 *   fences, both shared and exclusive, have signalled. In some cases it
	 *   is convenient to achieve that by attaching a struct dma_fence_array
	 *   with all the new and old fences.
	 *
	 * - Alternatively the fence can be set directly, which leaves the
	 *   shared fences unchanged. To guarantee that no fences are lost, this
	 *   new fence must signal only after the previous exclusive fence has
	 *   signalled. Since the shared fences are staying intact, it is not
	 *   necessary to maintain any ordering against those. If semantically
	 *   only a new access is added without actually treating the previous
	 *   one as a dependency the exclusive fences can be strung together
	 *   using struct dma_fence_chain.
	 *
	 * Note that the actual semantics of what an exclusive or shared fence
	 * means are defined by the user; for reservation objects shared across
	 * drivers see &dma_buf.resv.
	 */
	struct dma_fence __rcu *fence_excl;

	/**
	 * @fence:
	 *
	 * List of current shared fences.
	 *
	 * There are no ordering constraints of shared fences against the
	 * exclusive fence slot. If a waiter needs to wait for all access, it
	 * has to wait for both sets of fences to signal.
	 *
	 * A new fence is added by calling dma_resv_add_shared_fence(). Since
	 * this often needs to be done past the point of no return in command
	 * submission it cannot fail, and therefore sufficient slots need to be
	 * reserved by calling dma_resv_reserve_shared().
	 *
	 * Note that the actual semantics of what an exclusive or shared fence
	 * means are defined by the user; for reservation objects shared across
	 * drivers see &dma_buf.resv. A usage sketch for attaching fences
	 * follows the struct definition.
	 */
	struct dma_resv_list __rcu *fence;
};
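
/*
 * A minimal usage sketch for attaching fences, assuming a hypothetical
 * driver where submit_job() starts the work and job->done is its completion
 * fence. Shared slots must be reserved up front because
 * dma_resv_add_shared_fence() itself cannot fail:
 *
 *	dma_resv_assert_held(obj);
 *
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (ret)
 *		return ret;	// still before the point of no return
 *
 *	submit_job(job);	// cannot be unwound from here on
 *	dma_resv_add_shared_fence(obj, job->done);
 */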

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_shared_max(struct dma_resv *obj);
#else
static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a deadlock situation is indicated by returning -EDEADLK, all locks
 * held by @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}
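
/*
 * A minimal usage sketch for the single-object case: with a NULL context
 * dma_resv_lock() behaves like a plain mutex lock and cannot return
 * -EDEADLK. update_fences() stands in for whatever the (hypothetical)
 * driver does while holding the lock:
 *
 *	dma_resv_lock(obj, NULL);
 *	update_fences(obj);
 *	dma_resv_unlock(obj);
 */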

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a deadlock situation is indicated by returning -EDEADLK, all locks
 * held by @ctx must be unlocked and then dma_resv_lock_slow_interruptible()
 * called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object on the slowpath after a deadlock was
 * indicated by -EDEADLK. This function will sleep until the lock becomes
 * available. See dma_resv_lock() as well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly on the slowpath after a
 * deadlock was indicated by -EDEADLK. This function will sleep until the
 * lock becomes available. See dma_resv_lock_interruptible() as well. A
 * sketch of the whole backoff dance follows this function.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}
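
/*
 * A sketch of the full acquire dance for two reservation objects, following
 * the ww_mutex backoff pattern; the function itself is hypothetical. On
 * return both objects are locked, and the caller eventually unlocks both
 * and calls ww_acquire_fini() on @ctx:
 *
 *	static void example_lock_pair(struct dma_resv *first,
 *				      struct dma_resv *second,
 *				      struct ww_acquire_ctx *ctx)
 *	{
 *		ww_acquire_init(ctx, &reservation_ww_class);
 *
 *		if (dma_resv_lock(first, ctx) == -EDEADLK)
 *			dma_resv_lock_slow(first, ctx);
 *
 *		while (dma_resv_lock(second, ctx) == -EDEADLK) {
 *			// back off: drop the held lock, sleep-wait on the
 *			// contended one, then retry with the roles swapped
 *			dma_resv_unlock(first);
 *			dma_resv_lock_slow(second, ctx);
 *			swap(first, second);
 *		}
 *
 *		ww_acquire_done(ctx);
 *	}
 */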

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock);
}
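
/*
 * A sketch of opportunistic locking, e.g. from an LRU walk where sleeping
 * on the lock would be wrong; evict_buffer() is a hypothetical driver
 * helper:
 *
 *	if (dma_resv_trylock(obj)) {
 *		evict_buffer(obj);
 *		dma_resv_unlock(obj);
 *	}
 */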

/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}
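
/*
 * A sketch of the "do we already hold this?" check, assuming the caller was
 * handed a ww_acquire_ctx from further up the callchain:
 *
 *	if (dma_resv_locking_ctx(obj) != ctx) {
 *		ret = dma_resv_lock(obj, ctx);
 *		if (ret)
 *			return ret;
 *	}
 */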

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
	dma_resv_reset_shared_max(obj);
	ww_mutex_unlock(&obj->lock);
}

/**
 * dma_resv_excl_fence - return the object's exclusive fence
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any). The caller must either hold the
 * object's lock through dma_resv_lock(), or the RCU read side lock through
 * rcu_read_lock(), or one of the variants of each.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct dma_fence *
dma_resv_excl_fence(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}

/**
 * dma_resv_get_excl_unlocked - get the reservation object's
 * exclusive fence, without lock held.
 * @obj: the reservation object
 *
 * If there is an exclusive fence, this atomically increments its
 * reference count and returns it.
 *
 * RETURNS
 * The exclusive fence or NULL if none
 */
static inline struct dma_fence *
dma_resv_get_excl_unlocked(struct dma_resv *obj)
{
	struct dma_fence *fence;

	if (!rcu_access_pointer(obj->fence_excl))
		return NULL;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&obj->fence_excl);
	rcu_read_unlock();

	return fence;
}
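
/*
 * A minimal sketch of waiting on just the exclusive fence without taking
 * the reservation lock; the returned reference must be dropped with
 * dma_fence_put():
 *
 *	struct dma_fence *fence = dma_resv_get_excl_unlocked(obj);
 *
 *	if (fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */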

/**
 * dma_resv_shared_list - get the reservation object's shared fence list
 * @obj: the reservation object
 *
 * Returns the shared fence list. The caller must either hold the object's
 * lock through dma_resv_lock(), or the RCU read side lock through
 * rcu_read_lock(), or one of the variants of each.
 */
static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
}
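
/*
 * A sketch of walking the shared fence table while holding the lock, which
 * makes rcu_dereference_protected() legal on each slot:
 *
 *	struct dma_resv_list *list;
 *	unsigned int i;
 *
 *	dma_resv_assert_held(obj);
 *	list = dma_resv_shared_list(obj);
 *	for (i = 0; list && i < list->shared_count; ++i) {
 *		struct dma_fence *fence =
 *			rcu_dereference_protected(list->shared[i],
 *						  dma_resv_held(obj));
 *
 *		// inspect or take a reference on @fence here
 *	}
 */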

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
			unsigned *pshared_count, struct dma_fence ***pshared);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
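
/*
 * A sketch of blocking on all fences with a 100 ms cap; the timeout is in
 * jiffies and the return convention follows dma_fence_wait_timeout()
 * (negative error code, zero on timeout, remaining jiffies on success):
 *
 *	long ret = dma_resv_wait_timeout(obj, true, true,
 *					 msecs_to_jiffies(100));
 *
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */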

#endif /* _LINUX_RESERVATION_H */