/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

/**
 * struct dma_resv_list - a list of shared fences
 * @rcu: for internal use
 * @shared_count: number of fences in the table
 * @shared_max: allocated size of the table, for growing it
 * @shared: shared fence table
 */
struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
};

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * There are multiple uses for this, with sometimes slightly different rules in
 * how the fence slots are used.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
	/**
	 * @lock:
	 *
	 * Update side lock. Don't use directly, instead use the wrapper
	 * functions like dma_resv_lock() and dma_resv_unlock().
	 *
	 * Drivers which use the reservation object to manage memory
	 * dynamically also use this lock to protect buffer object state like
	 * placement and allocation policies, as well as throughout command
	 * submission.
	 */
	struct ww_mutex lock;

	/**
	 * @seq:
	 *
	 * Sequence count for managing RCU read-side synchronization, allows
	 * read-only access to @fence_excl and @fence while ensuring we take a
	 * consistent snapshot.
	 */
	seqcount_ww_mutex_t seq;

	/**
	 * @fence_excl:
	 *
	 * The exclusive fence, if there is one currently.
	 *
	 * There are two ways to update this fence:
	 *
	 * - First by calling dma_resv_add_excl_fence(), which replaces all
	 *   fences attached to the reservation object. To guarantee that no
	 *   fences are lost, this new fence must signal only after all
	 *   previous fences, both shared and exclusive, have signalled. In
	 *   some cases it is convenient to achieve that by attaching a
	 *   struct dma_fence_array with all the new and old fences.
	 *
	 * - Alternatively the fence can be set directly, which leaves the
	 *   shared fences unchanged. To guarantee that no fences are lost,
	 *   this new fence must signal only after the previous exclusive
	 *   fence has signalled. Since the shared fences are staying intact,
	 *   it is not necessary to maintain any ordering against those. If
	 *   semantically only a new access is added without actually treating
	 *   the previous one as a dependency, the exclusive fences can be
	 *   strung together using struct dma_fence_chain.
	 *
	 * Note that the actual semantics of what an exclusive or shared fence
	 * means are defined by the user; for reservation objects shared
	 * across drivers see &dma_buf.resv.
	 */
	struct dma_fence __rcu *fence_excl;

	/**
	 * @fence:
	 *
	 * List of current shared fences.
	 *
	 * There are no ordering constraints of shared fences against the
	 * exclusive fence slot. If a waiter needs to wait for all access, it
	 * has to wait for both sets of fences to signal.
	 *
	 * A new fence is added by calling dma_resv_add_shared_fence(). Since
	 * this often needs to be done past the point of no return in command
	 * submission it cannot fail, and therefore sufficient slots need to
	 * be reserved beforehand by calling dma_resv_reserve_shared().
	 *
	 * Note that the actual semantics of what an exclusive or shared fence
	 * means are defined by the user; for reservation objects shared
	 * across drivers see &dma_buf.resv.
	 */
	struct dma_resv_list __rcu *fence;
};
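
/*
 * Illustrative sketch, not part of this header's API: the usual pattern for
 * attaching a shared fence is to reserve a slot while failure can still be
 * handled, and to add the fence only past the point of no return. The names
 * prepare_job(), obj and job_fence are placeholders for this example.
 *
 *	static int prepare_job(struct dma_resv *obj,
 *			       struct dma_fence *job_fence)
 *	{
 *		int ret;
 *
 *		dma_resv_lock(obj, NULL);	// NULL ctx: plain mutex lock
 *
 *		ret = dma_resv_reserve_shared(obj, 1);	// may fail (-ENOMEM)
 *		if (ret) {
 *			dma_resv_unlock(obj);
 *			return ret;
 *		}
 *
 *		// ... submit the job; from here on failure is not an option
 *		dma_resv_add_shared_fence(obj, job_fence);	// cannot fail
 *		dma_resv_unlock(obj);
 *		return 0;
 *	}
 */
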
/**
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver; use the accessor functions
 * instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_next_unlocked() or
 * dma_resv_for_each_fence_unlocked() beware that the iterator can be
 * restarted. Code which accumulates statistics or similar needs to check for
 * this with dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
	/** @obj: The dma_resv object we iterate over */
	struct dma_resv *obj;

	/** @all_fences: If all fences should be returned */
	bool all_fences;

	/** @fence: the currently handled fence */
	struct dma_fence *fence;

	/** @seq: sequence number to check for modifications */
	unsigned int seq;

	/** @index: index into the shared fences */
	unsigned int index;

	/** @fences: the shared fences; private, *MUST* not dereference */
	struct dma_resv_list *fences;

	/** @shared_count: number of shared fences */
	unsigned int shared_count;

	/** @is_restarted: true if this is the first returned fence */
	bool is_restarted;
};

struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);

/**
 * dma_resv_iter_begin - initialize a dma_resv_iter object
 * @cursor: The dma_resv_iter object to initialize
 * @obj: The dma_resv object which we want to iterate over
 * @all_fences: If all fences should be returned or just the exclusive one
 */
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
				       struct dma_resv *obj,
				       bool all_fences)
{
	cursor->obj = obj;
	cursor->all_fences = all_fences;
	cursor->fence = NULL;
}

/**
 * dma_resv_iter_end - cleanup a dma_resv_iter object
 * @cursor: the dma_resv_iter object which should be cleaned up
 *
 * Make sure that the reference to the fence in the cursor is properly
 * dropped.
 */
static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
{
	dma_fence_put(cursor->fence);
}

/**
 * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
 * @cursor: the cursor of the current position
 *
 * Returns true if the currently returned fence is the exclusive one.
 */
static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
{
	return cursor->index == 0;
}

/**
 * dma_resv_iter_is_restarted - test if this is the first fence after a restart
 * @cursor: the cursor with the current position
 *
 * Returns true if this is the first fence in an iteration after a restart.
 */
static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
	return cursor->is_restarted;
}

/**
 * dma_resv_for_each_fence_unlocked - unlocked fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object without holding the
 * &dma_resv.lock, using RCU instead. The cursor needs to be initialized with
 * dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside the
 * iterator a reference to the dma_fence is held and the RCU lock dropped.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). For this reason prefer
 * the locked iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)			\
	for (fence = dma_resv_iter_first_unlocked(cursor);		\
	     fence; fence = dma_resv_iter_next_unlocked(cursor))
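
/*
 * Illustrative sketch, not part of this header's API: counting signalled
 * fences without taking the lock. Because a concurrent modification restarts
 * the iteration, any accumulated state must be thrown away when
 * dma_resv_iter_is_restarted() reports a restart. The signalled counter is a
 * placeholder for this example.
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int signalled = 0;
 *
 *	dma_resv_iter_begin(&cursor, obj, true);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			signalled = 0;	// discard stale statistics
 *
 *		if (dma_fence_is_signaled(fence))
 *			signalled++;
 *	}
 *	dma_resv_iter_end(&cursor);
 */
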

/**
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
 * @all_fences: true if all fences should be returned
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
 * &dma_resv.lock. @all_fences controls whether the shared fences are returned
 * as well. The cursor initialisation is part of the iterator, and the fence
 * stays valid as long as the lock is held, so no extra reference to the fence
 * is taken.
 */
#define dma_resv_for_each_fence(cursor, obj, all_fences, fence)	\
	for (dma_resv_iter_begin(cursor, obj, all_fences),		\
	     fence = dma_resv_iter_first(cursor); fence;		\
	     fence = dma_resv_iter_next(cursor))

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_shared_max(struct dma_resv *obj);
#else
static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers; readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may also be locked on its own by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}
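
/*
 * Illustrative sketch, not part of this header's API: locking an array of
 * reservation objects with deadlock backoff, roughly mirroring the pattern
 * used by helpers like drm_gem_lock_reservations(). On -EDEADLK everything
 * already held is dropped and the contended object is acquired on the
 * slowpath before retrying. objs and count are placeholders for this example.
 *
 *	struct ww_acquire_ctx ctx;
 *	int i, j, contended = -1;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 * retry:
 *	if (contended != -1)
 *		dma_resv_lock_slow(objs[contended], &ctx);
 *
 *	for (i = 0; i < count; i++) {
 *		if (i == contended)
 *			continue;	// already locked on the slowpath
 *
 *		if (dma_resv_lock(objs[i], &ctx) == -EDEADLK) {
 *			for (j = 0; j < i; j++)
 *				dma_resv_unlock(objs[j]);
 *			if (contended > i)
 *				dma_resv_unlock(objs[contended]);
 *			contended = i;
 *			goto retry;
 *		}
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... all objects locked; unlock each and ww_acquire_fini(&ctx) when done
 */
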

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers; readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may also be locked on its own by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
 * @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case. This function
 * will sleep until the lock becomes available. See dma_resv_lock() as
 * well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers; readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock, NULL);
}

/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
	dma_resv_reset_shared_max(obj);
	ww_mutex_unlock(&obj->lock);
}

/**
 * dma_resv_excl_fence - return the object's exclusive fence
 * @obj: the reservation object
 *
 * Returns the exclusive fence (if any). The caller must either hold the
 * object through dma_resv_lock() or the RCU read side lock through
 * rcu_read_lock(), or one of the variants of each.
 *
 * RETURNS
 * The exclusive fence or NULL
 */
static inline struct dma_fence *
dma_resv_excl_fence(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}

/**
 * dma_resv_shared_list - get the reservation object's shared fence list
 * @obj: the reservation object
 *
 * Returns the shared fence list. The caller must either hold the object
 * through dma_resv_lock() or the RCU read side lock through rcu_read_lock(),
 * or one of the variants of each.
 */
static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
}
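
/*
 * Illustrative sketch, not part of this header's API: inspecting the fences
 * while holding the reservation lock, so no RCU grace period handling or
 * extra fence references are needed. Holding the lock satisfies the
 * dma_resv_held() lockdep check in the accessors above, and
 * rcu_dereference_protected() is the matching way to read the individual
 * shared slots in that case. What is done with the fences is left open here.
 *
 *	struct dma_resv_list *list;
 *	struct dma_fence *excl;
 *	unsigned int i;
 *
 *	dma_resv_lock(obj, NULL);
 *	excl = dma_resv_excl_fence(obj);
 *	list = dma_resv_shared_list(obj);
 *	for (i = 0; list && i < list->shared_count; ++i) {
 *		struct dma_fence *f =
 *			rcu_dereference_protected(list->shared[i],
 *						  dma_resv_held(obj));
 *
 *		// ... inspect f ...
 *	}
 *	dma_resv_unlock(obj);
 */
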
struct seq_file;

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
int dma_resv_get_fences(struct dma_resv *obj, bool write,
			unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);

#endif /* _LINUX_RESERVATION_H */