// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/seq_file.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK      0x3

struct dma_resv_list {
        struct rcu_head rcu;
        u32 num_fences, max_fences;
        struct dma_fence __rcu *table[];
};

/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
                                struct dma_resv *resv, struct dma_fence **fence,
                                enum dma_resv_usage *usage)
{
        long tmp;

        tmp = (long)rcu_dereference_check(list->table[index],
                                          resv ? dma_resv_held(resv) : true);
        *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
        if (usage)
                *usage = tmp & DMA_RESV_LIST_MASK;
}
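
/*
 * Illustrative sketch of the packing scheme above (hypothetical values, not
 * part of this file): dma_fence allocations are at least 4-byte aligned, so
 * the two low bits of the pointer are free to carry the usage. A fence at
 * 0xffff888012345640 added with DMA_RESV_USAGE_WRITE (1) would be stored as
 *
 *      tmp = (long)fence | DMA_RESV_USAGE_WRITE;       which is 0xffff888012345641
 *
 * and decoded again with
 *
 *      fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
 *      usage = tmp & DMA_RESV_LIST_MASK;
 */
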
/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
                              unsigned int index,
                              struct dma_fence *fence,
                              enum dma_resv_usage usage)
{
        long tmp = ((long)fence) | usage;

        RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}

/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
        struct dma_resv_list *list;

        list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
        if (!list)
                return NULL;

        list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
                sizeof(*list->table);

        return list;
}

/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
        unsigned int i;

        if (!list)
                return;

        for (i = 0; i < list->num_fences; ++i) {
                struct dma_fence *fence;

                dma_resv_list_entry(list, i, NULL, &fence, NULL);
                dma_fence_put(fence);
        }
        kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
        ww_mutex_init(&obj->lock, &reservation_ww_class);

        RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
        /*
         * This object should be dead and all references must have
         * been released to it, so no need to be protected with rcu.
         */
        dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
        ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
        return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_fence(). Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
{
        struct dma_resv_list *old, *new;
        unsigned int i, j, k, max;

        dma_resv_assert_held(obj);

        old = dma_resv_fences_list(obj);
        if (old && old->max_fences) {
                if ((old->num_fences + num_fences) <= old->max_fences)
                        return 0;
                max = max(old->num_fences + num_fences, old->max_fences * 2);
        } else {
                max = max(4ul, roundup_pow_of_two(num_fences));
        }

        new = dma_resv_list_alloc(max);
        if (!new)
                return -ENOMEM;

        /*
         * no need to bump fence refcounts, rcu_read access
         * requires the use of kref_get_unless_zero, and the
         * references from the old struct are carried over to
         * the new.
         */
        for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
                enum dma_resv_usage usage;
                struct dma_fence *fence;

                dma_resv_list_entry(old, i, obj, &fence, &usage);
                if (dma_fence_is_signaled(fence))
                        RCU_INIT_POINTER(new->table[--k], fence);
                else
                        dma_resv_list_set(new, j++, fence, usage);
        }
        new->num_fences = j;

        /*
         * We are not changing the effective set of fences here so can
         * merely update the pointer to the new array; both existing
         * readers and new readers will see exactly the same set of
         * active (unsignaled) fences. Individual fences and the
         * old array are protected by RCU and so will not vanish under
         * the gaze of the rcu_read_lock() readers.
         */
        rcu_assign_pointer(obj->fences, new);

        if (!old)
                return 0;

        /* Drop the references to the signaled fences */
        for (i = k; i < max; ++i) {
                struct dma_fence *fence;

                fence = rcu_dereference_protected(new->table[i],
                                                  dma_resv_held(obj));
                dma_fence_put(fence);
        }
        kfree_rcu(old, rcu);

        return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_fences);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
        struct dma_resv_list *fences = dma_resv_fences_list(obj);

        dma_resv_assert_held(obj);

        /* Test fence slot reservation */
        if (fences)
                fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() must have been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
                        enum dma_resv_usage usage)
{
        struct dma_resv_list *fobj;
        struct dma_fence *old;
        unsigned int i, count;

        dma_fence_get(fence);

        dma_resv_assert_held(obj);

        /* Drivers should not add containers here, instead add each fence
         * individually.
         */
        WARN_ON(dma_fence_is_container(fence));

        fobj = dma_resv_fences_list(obj);
        count = fobj->num_fences;

        for (i = 0; i < count; ++i) {
                enum dma_resv_usage old_usage;

                dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
                if ((old->context == fence->context && old_usage >= usage &&
                     dma_fence_is_later(fence, old)) ||
                    dma_fence_is_signaled(old)) {
                        dma_resv_list_set(fobj, i, fence, usage);
                        dma_fence_put(old);
                        return;
                }
        }

        BUG_ON(fobj->num_fences >= fobj->max_fences);
        count++;

        dma_resv_list_set(fobj, i, fence, usage);
        /* pointer update must be visible before we extend the num_fences */
        smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_fence);
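
/*
 * Illustrative sketch (hypothetical driver code, not part of this file): the
 * usual write side pattern is to reserve a slot and add the fence while
 * holding the reservation lock. obj, fence and ret are assumed to be
 * provided by the caller, and the usage flag depends on what the fenced
 * operation does with the resource:
 *
 *      dma_resv_lock(obj, NULL);
 *      ret = dma_resv_reserve_fences(obj, 1);
 *      if (!ret)
 *              dma_resv_add_fence(obj, fence, DMA_RESV_USAGE_WRITE);
 *      dma_resv_unlock(obj);
 */
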
/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the
 * resources represented by the dma_resv object when the new fence completes.
 *
 * An example of using this is replacing a preemption fence with a page table
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
                             struct dma_fence *replacement,
                             enum dma_resv_usage usage)
{
        struct dma_resv_list *list;
        unsigned int i;

        dma_resv_assert_held(obj);

        list = dma_resv_fences_list(obj);
        for (i = 0; list && i < list->num_fences; ++i) {
                struct dma_fence *old;

                dma_resv_list_entry(list, i, obj, &old, NULL);
                if (old->context != context)
                        continue;

                dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
                dma_fence_put(old);
        }
}
EXPORT_SYMBOL(dma_resv_replace_fences);
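
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * replacing all preemption fences of one fence context with a page table
 * update fence that cuts off access, as described above. obj, preempt_ctx,
 * pt_update_fence and the usage flag are assumptions made for the example:
 *
 *      dma_resv_lock(obj, NULL);
 *      dma_resv_replace_fences(obj, preempt_ctx, pt_update_fence,
 *                              DMA_RESV_USAGE_BOOKKEEP);
 *      dma_resv_unlock(obj);
 */
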
/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
        cursor->index = 0;
        cursor->num_fences = 0;
        cursor->fences = dma_resv_fences_list(cursor->obj);
        if (cursor->fences)
                cursor->num_fences = cursor->fences->num_fences;
        cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
        if (!cursor->fences)
                return;

        do {
                /* Drop the reference from the previous round */
                dma_fence_put(cursor->fence);

                if (cursor->index >= cursor->num_fences) {
                        cursor->fence = NULL;
                        break;
                }

                dma_resv_list_entry(cursor->fences, cursor->index++,
                                    cursor->obj, &cursor->fence,
                                    &cursor->fence_usage);
                cursor->fence = dma_fence_get_rcu(cursor->fence);
                if (!cursor->fence) {
                        dma_resv_iter_restart_unlocked(cursor);
                        continue;
                }

                if (!dma_fence_is_signaled(cursor->fence) &&
                    cursor->usage >= cursor->fence_usage)
                        break;
        } while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_first() whenever possible.
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
        rcu_read_lock();
        do {
                dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
        } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Beware that the iterator can be restarted. Code which accumulates statistics
 * or similar needs to check for this with dma_resv_iter_is_restarted(). For
 * this reason prefer the locked dma_resv_iter_next() whenever possible.
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
        bool restart;

        rcu_read_lock();
        cursor->is_restarted = false;
        restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
        do {
                if (restart)
                        dma_resv_iter_restart_unlocked(cursor);
                dma_resv_iter_walk_unlocked(cursor);
                restart = true;
        } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
        rcu_read_unlock();

        return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Subsequent fences are iterated with dma_resv_iter_next().
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
        struct dma_fence *fence;

        dma_resv_assert_held(cursor->obj);

        cursor->index = 0;
        cursor->fences = dma_resv_fences_list(cursor->obj);

        fence = dma_resv_iter_next(cursor);
        cursor->is_restarted = true;
        return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
        struct dma_fence *fence;

        dma_resv_assert_held(cursor->obj);

        cursor->is_restarted = false;

        do {
                if (!cursor->fences ||
                    cursor->index >= cursor->fences->num_fences)
                        return NULL;

                dma_resv_list_entry(cursor->fences, cursor->index++,
                                    cursor->obj, &fence, &cursor->fence_usage);
        } while (cursor->fence_usage > cursor->usage);

        return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
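
/*
 * Illustrative sketch (hypothetical caller, not part of this file): walking
 * the fences of obj without holding the reservation lock. Because the
 * unlocked iterator can restart, any state accumulated so far has to be
 * thrown away when dma_resv_iter_is_restarted() reports a restart:
 *
 *      struct dma_resv_iter cursor;
 *      struct dma_fence *fence;
 *      unsigned int count = 0;
 *
 *      dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_READ);
 *      dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *              if (dma_resv_iter_is_restarted(&cursor))
 *                      count = 0;
 *              ++count;
 *      }
 *      dma_resv_iter_end(&cursor);
 *
 * With the reservation lock held, dma_resv_for_each_fence() can be used
 * instead and no restart handling is needed.
 */
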
/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from @src to @dst. The @dst lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
        struct dma_resv_iter cursor;
        struct dma_resv_list *list;
        struct dma_fence *f;

        dma_resv_assert_held(dst);

        list = NULL;

        dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
        dma_resv_for_each_fence_unlocked(&cursor, f) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        dma_resv_list_free(list);

                        list = dma_resv_list_alloc(cursor.num_fences);
                        if (!list) {
                                dma_resv_iter_end(&cursor);
                                return -ENOMEM;
                        }
                        list->num_fences = 0;
                }

                dma_fence_get(f);
                dma_resv_list_set(list, list->num_fences++, f,
                                  dma_resv_iter_usage(&cursor));
        }
        dma_resv_iter_end(&cursor);

        list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
        dma_resv_list_free(list);
        return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's fences without the update side lock held
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
                        unsigned int *num_fences, struct dma_fence ***fences)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        *num_fences = 0;
        *fences = NULL;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                if (dma_resv_iter_is_restarted(&cursor)) {
                        unsigned int count;

                        while (*num_fences)
                                dma_fence_put((*fences)[--(*num_fences)]);

                        count = cursor.num_fences + 1;

                        /* Eventually re-allocate the array */
                        *fences = krealloc_array(*fences, count,
                                                 sizeof(void *),
                                                 GFP_KERNEL);
                        if (count && !*fences) {
                                dma_resv_iter_end(&cursor);
                                return -ENOMEM;
                        }
                }

                (*fences)[(*num_fences)++] = dma_fence_get(fence);
        }
        dma_resv_iter_end(&cursor);

        return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
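
/*
 * Illustrative sketch (hypothetical caller, not part of this file): the
 * array returned by dma_resv_get_fences() holds a reference on every fence
 * and must be freed again by the caller:
 *
 *      struct dma_fence **fences;
 *      unsigned int i, num_fences;
 *      int ret;
 *
 *      ret = dma_resv_get_fences(obj, DMA_RESV_USAGE_WRITE, &num_fences,
 *                                &fences);
 *      if (ret)
 *              return ret;
 *
 *      for (i = 0; i < num_fences; ++i)
 *              dma_fence_put(fences[i]);
 *      kfree(fences);
 */
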
/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.
 *
 * Warning: This can't be used like this when adding the fence back to the resv
 * object since that can lead to stack corruption when finalizing the
 * dma_fence_array.
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
                           struct dma_fence **fence)
{
        struct dma_fence_array *array;
        struct dma_fence **fences;
        unsigned count;
        int r;

        r = dma_resv_get_fences(obj, usage, &count, &fences);
        if (r)
                return r;

        if (count == 0) {
                *fence = NULL;
                return 0;
        }

        if (count == 1) {
                *fence = fences[0];
                kfree(fences);
                return 0;
        }

        array = dma_fence_array_create(count, fences,
                                       dma_fence_context_alloc(1),
                                       1, false);
        if (!array) {
                while (count--)
                        dma_fence_put(fences[count]);
                kfree(fences);
                return -ENOMEM;
        }

        *fence = &array->base;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);

/**
 * dma_resv_wait_timeout - Wait on a reservation object's fences
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
                           bool intr, unsigned long timeout)
{
        long ret = timeout ? timeout : 1;
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {

                ret = dma_fence_wait_timeout(fence, intr, ret);
                if (ret <= 0) {
                        dma_resv_iter_end(&cursor);
                        return ret;
                }
        }
        dma_resv_iter_end(&cursor);

        return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_iter_begin(&cursor, obj, usage);
        dma_resv_for_each_fence_unlocked(&cursor, fence) {
                dma_resv_iter_end(&cursor);
                return false;
        }
        dma_resv_iter_end(&cursor);
        return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
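
/*
 * Illustrative sketch (hypothetical caller, not part of this file): waiting
 * up to 100ms for all writers to finish; obj and the 100ms value are
 * assumptions made for the example:
 *
 *      long ret;
 *
 *      ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_WRITE, true,
 *                                  msecs_to_jiffies(100));
 *      if (ret < 0)
 *              return ret;
 *      if (ret == 0)
 *              return -ETIMEDOUT;
 *
 * A non-blocking check of the same condition would instead use
 * dma_resv_test_signaled(obj, DMA_RESV_USAGE_WRITE).
 */
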
/**
 * dma_resv_describe - Dump description of the resv object into seq_file
 * @obj: the reservation object
 * @seq: the seq_file to dump the description into
 *
 * Dump a textual description of the fences inside a dma_resv object into the
 * seq_file.
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
        static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
        struct dma_resv_iter cursor;
        struct dma_fence *fence;

        dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
                seq_printf(seq, "\t%s fence:",
                           usage[dma_resv_iter_usage(&cursor)]);
                dma_fence_describe(fence, seq);
        }
}
EXPORT_SYMBOL_GPL(dma_resv_describe);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
        struct mm_struct *mm = mm_alloc();
        struct ww_acquire_ctx ctx;
        struct dma_resv obj;
        struct address_space mapping;
        int ret;

        if (!mm)
                return -ENOMEM;

        dma_resv_init(&obj);
        address_space_init_once(&mapping);

        mmap_read_lock(mm);
        ww_acquire_init(&ctx, &reservation_ww_class);
        ret = dma_resv_lock(&obj, &ctx);
        if (ret == -EDEADLK)
                dma_resv_lock_slow(&obj, &ctx);
        fs_reclaim_acquire(GFP_KERNEL);
        /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
        i_mmap_lock_write(&mapping);
        i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
        lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
        __dma_fence_might_wait();
        lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
        __dma_fence_might_wait();
#endif
        fs_reclaim_release(GFP_KERNEL);
        ww_mutex_unlock(&obj.lock);
        ww_acquire_fini(&ctx);
        mmap_read_unlock(mm);

        mmput(mm);

        return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif