/* SPDX-License-Identifier: GPL-2.0 OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_exec.h>

struct drm_gpuvm;
struct drm_gpuvm_bo;
struct drm_gpuvm_ops;

/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @vm: the &drm_gpuvm this object is associated with
	 */
	struct drm_gpuvm *vm;

	/**
	 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
	 * &drm_gem_object
	 */
	struct drm_gpuvm_bo *vm_bo;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @addr: the start address
		 */
		u64 addr;

		/**
		 * @range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @entry: the &list_head to attach this object to a
		 * &drm_gpuvm_bo
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree.
		 * This is useful to keep iterating &drm_gpuvas from a start
		 * node found through the rb-tree while doing modifications on
		 * the rb-tree itself.
		 */
		struct list_head entry;

		/**
		 * @__subtree_last: needed by the interval tree, holding
		 * last-in-subtree
		 */
		u64 __subtree_last;
	} rb;
};

int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);

static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
				  struct drm_gem_object *obj, u64 offset)
{
	va->va.addr = addr;
	va->va.range = range;
	va->gem.obj = obj;
	va->gem.offset = offset;
}
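
/*
 * Example: embedding a &drm_gpuva in a driver structure and inserting it.
 * A minimal sketch of the intended calling sequence; the my_driver_* names
 * are hypothetical.
 *
 *	struct my_driver_va {
 *		struct drm_gpuva base;
 *	};
 *
 *	int my_driver_map(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
 *			  struct drm_gem_object *obj, u64 offset)
 *	{
 *		struct my_driver_va *va = kzalloc(sizeof(*va), GFP_KERNEL);
 *		int ret;
 *
 *		if (!va)
 *			return -ENOMEM;
 *
 *		drm_gpuva_init(&va->base, addr, range, obj, offset);
 *		ret = drm_gpuva_insert(gpuvm, &va->base);
 *		if (ret)
 *			kfree(va);
 *
 *		return ret;
 *	}
 */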

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing GEM of this
 * &drm_gpuva is invalidated
 * @va: the &drm_gpuva to check
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}

/**
 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
 */
enum drm_gpuvm_flags {
	/**
	 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
	 * GPUVM's &dma_resv lock
	 */
	DRM_GPUVM_RESV_PROTECTED = BIT(0),

	/**
	 * @DRM_GPUVM_USERBITS: user defined bits
	 */
	DRM_GPUVM_USERBITS = BIT(1),
};

/**
 * struct drm_gpuvm - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space using
 * an interval tree (rb-tree) of &drm_gpuva structures. Typically, this
 * structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuvm {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @flags: the &drm_gpuvm_flags of this GPUVM
	 */
	enum drm_gpuvm_flags flags;

	/**
	 * @drm: the &drm_device this VM lives in
	 */
	struct drm_device *drm;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kref: reference count of this object
	 */
	struct kref kref;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuvm_ops *ops;

	/**
	 * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv.
	 */
	struct drm_gem_object *r_obj;

	/**
	 * @extobj: structure holding the extobj list
	 */
	struct {
		/**
		 * @list: &list_head storing &drm_gpuvm_bos serving as
		 * external object
		 */
		struct list_head list;

		/**
		 * @local_list: pointer to the local list temporarily storing
		 * entries from the external object list
		 */
		struct list_head *local_list;

		/**
		 * @lock: spinlock to protect the extobj list
		 */
		spinlock_t lock;
	} extobj;

	/**
	 * @evict: structure holding the evict list and evict list lock
	 */
	struct {
		/**
		 * @list: &list_head storing &drm_gpuvm_bos currently being
		 * evicted
		 */
		struct list_head list;

		/**
		 * @local_list: pointer to the local list temporarily storing
		 * entries from the evicted object list
		 */
		struct list_head *local_list;

		/**
		 * @lock: spinlock to protect the evict list
		 */
		spinlock_t lock;
	} evict;
};

void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    enum drm_gpuvm_flags flags,
		    struct drm_device *drm,
		    struct drm_gem_object *r_obj,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);
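
/*
 * Example: initializing a GPUVM. A minimal sketch, assuming a hypothetical
 * driver VM structure embedding &drm_gpuvm and a previously defined
 * &drm_gpuvm_ops (only vm_free is mandatory); the VA space size and the
 * reserved kernel cutout are driver policy, not requirements.
 *
 *	struct my_driver_vm {
 *		struct drm_gpuvm base;
 *	};
 *
 *	static const struct drm_gpuvm_ops my_driver_gpuvm_ops = {
 *		.vm_free = my_driver_vm_free,
 *	};
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	drm_gpuvm_init(&vm->base, "my-vm", 0, drm, r_obj,
 *		       0, 1ULL << 48,
 *		       0, SZ_4K,
 *		       &my_driver_gpuvm_ops);
 *	drm_gem_object_put(r_obj);
 */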

/**
 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to acquire the reference of
 *
 * This function acquires an additional reference to @gpuvm. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline struct drm_gpuvm *
drm_gpuvm_get(struct drm_gpuvm *gpuvm)
{
	kref_get(&gpuvm->kref);

	return gpuvm;
}

void drm_gpuvm_put(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);

struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);

/**
 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
 * set
 * @gpuvm: the &drm_gpuvm
 *
 * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
 */
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}

/**
 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
 */
#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)

/**
 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
 * &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
 * &dma_resv
 */
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)

#define drm_gpuvm_resv_held(gpuvm__) \
	dma_resv_held(drm_gpuvm_resv(gpuvm__))

#define drm_gpuvm_resv_assert_held(gpuvm__) \
	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))

/**
 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
 * external object
 * @gpuvm: the &drm_gpuvm to check
 * @obj: the &drm_gem_object to check
 *
 * Returns: true if the &drm_gem_object's &dma_resv differs from the
 * &drm_gpuvm's &dma_resv, false otherwise
 */
static inline bool
drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj)
{
	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
}
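
/*
 * Example: locking with external objects in mind. A minimal sketch; private
 * BOs share the GPUVM's &dma_resv, so locking drm_gpuvm_resv_obj() covers
 * them, while external BOs carry their own &dma_resv and must be locked
 * individually (here via a hypothetical &drm_exec instance).
 *
 *	ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(gpuvm));
 *	if (ret)
 *		return ret;
 *
 *	if (drm_gpuvm_is_extobj(gpuvm, obj))
 *		ret = drm_exec_lock_obj(exec, obj);
 */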

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}

/**
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each(), but uses the &drm_gpuvm's internal interval tree to
 * accelerate the search for the starting &drm_gpuva, and hence isn't safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but uses the &drm_gpuvm's internal interval tree
 * to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
 */
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

/**
 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
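
/*
 * Example: walking a VA range. A minimal sketch; addr and range are assumed
 * to be in the same unit the VM was initialized with.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, addr, addr + range)
 *		pr_info("mapping: 0x%llx-0x%llx\n", va->va.addr,
 *			va->va.addr + va->va.range);
 */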

/**
 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
 *
 * This structure should be created on the stack as &drm_exec should be.
 *
 * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
 */
struct drm_gpuvm_exec {
	/**
	 * @exec: the &drm_exec structure
	 */
	struct drm_exec exec;

	/**
	 * @flags: the flags for the struct drm_exec
	 */
	uint32_t flags;

	/**
	 * @vm: the &drm_gpuvm whose DMA reservations are to be locked
	 */
	struct drm_gpuvm *vm;

	/**
	 * @num_fences: the number of fences to reserve for the &dma_resv of
	 * the locked &drm_gem_objects
	 */
	unsigned int num_fences;

	/**
	 * @extra: Callback and corresponding private data for the driver to
	 * lock arbitrary additional &drm_gem_objects.
	 */
	struct {
		/**
		 * @fn: The driver callback to lock additional
		 * &drm_gem_objects.
		 */
		int (*fn)(struct drm_gpuvm_exec *vm_exec);

		/**
		 * @priv: driver private data for the @fn callback
		 */
		void *priv;
	} extra;
};

int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
			 struct drm_exec *exec,
			 unsigned int num_fences);

int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      unsigned int num_fences);

int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
			    struct drm_exec *exec,
			    u64 addr, u64 range,
			    unsigned int num_fences);

int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);

int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
			      struct drm_gem_object **objs,
			      unsigned int num_objs);

int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
			      u64 addr, u64 range);

/**
 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
 * through drm_gpuvm_exec_lock() or its variants.
 */
static inline void
drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
{
	drm_exec_fini(&vm_exec->exec);
}

int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage);

/**
 * drm_gpuvm_exec_resv_add_fence() - add a fence to the private and all extobj
 * &dma_resv
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * @fence: fence to add
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage
 *
 * See drm_gpuvm_resv_add_fence().
 */
static inline void
drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
				 private_usage, extobj_usage);
}

/**
 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * See drm_gpuvm_validate().
 */
static inline int
drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}
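
/*
 * Example: locking a VM for a submission. A minimal sketch of the intended
 * drm_gpuvm_exec flow; my_driver_submit_job() is hypothetical and the
 * dma-resv usages are merely plausible defaults.
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_exec_validate(&vm_exec);
 *	if (ret)
 *		goto out_unlock;
 *
 *	fence = my_driver_submit_job(job);
 *	drm_gpuvm_exec_resv_add_fence(&vm_exec, fence,
 *				      DMA_RESV_USAGE_BOOKKEEP,
 *				      DMA_RESV_USAGE_BOOKKEEP);
 *
 * out_unlock:
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	return ret;
 */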

/**
 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
 * &drm_gem_object combination
 *
 * This structure is an abstraction representing a &drm_gpuvm and
 * &drm_gem_object combination. It serves as an indirection to accelerate
 * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
 * &drm_gem_object.
 *
 * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
 * accelerate validation.
 *
 * Typically, drivers want to create an instance of a struct drm_gpuvm_bo the
 * first time a GEM object is mapped in a GPU-VM and release the instance once
 * the last mapping of the GEM object in this GPU-VM is unmapped.
 */
struct drm_gpuvm_bo {
	/**
	 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
	 * counted pointer.
	 */
	struct drm_gpuvm *vm;

	/**
	 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
	 * counted pointer.
	 */
	struct drm_gem_object *obj;

	/**
	 * @evicted: Indicates whether the &drm_gem_object is evicted; field
	 * protected by the &drm_gem_object's dma-resv lock.
	 */
	bool evicted;

	/**
	 * @kref: The reference count for this &drm_gpuvm_bo.
	 */
	struct kref kref;

	/**
	 * @list: Structure containing all &list_heads.
	 */
	struct {
		/**
		 * @gpuva: The list of linked &drm_gpuvas.
		 *
		 * It is safe to access entries from this list as long as the
		 * GEM's gpuva lock is held. See also struct drm_gem_object.
		 */
		struct list_head gpuva;

		/**
		 * @entry: Structure containing all &list_heads serving as
		 * entry.
		 */
		struct {
			/**
			 * @gem: List entry to attach to the
			 * &drm_gem_object's gpuva list.
			 */
			struct list_head gem;

			/**
			 * @extobj: List entry to attach to the &drm_gpuvm's
			 * extobj list.
			 */
			struct list_head extobj;

			/**
			 * @evict: List entry to attach to the &drm_gpuvm's
			 * evict list.
			 */
			struct list_head evict;
		} entry;
	} list;
};

struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);

struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
 * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
 *
 * This function acquires an additional reference to @vm_bo. It is illegal to
 * call this without already holding a reference. No locks required.
 */
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
	kref_get(&vm_bo->kref);
	return vm_bo;
}

bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);

struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
		  struct drm_gem_object *obj);

void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);

/**
 * drm_gpuvm_bo_gem_evict() - set the evict state of all &drm_gpuvm_bos of a
 * GEM object
 * @obj: the &drm_gem_object
 * @evict: indicates whether @obj is evicted
 *
 * See drm_gpuvm_bo_evict().
 */
static inline void
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_gpuva_assert_lock_held(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, evict);
}

void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
 * &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
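
/*
 * Example: linking a new mapping to its &drm_gpuvm_bo. A minimal sketch of
 * the intended calling sequence; acquiring the GEM's gpuva lock and error
 * handling are omitted.
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	drm_gpuva_link(va, vm_bo);
 *	drm_gpuvm_bo_put(vm_bo);
 *
 * The final put is safe because drm_gpuva_link() takes its own &drm_gpuvm_bo
 * reference, which keeps the instance alive as long as the mapping is linked.
 */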

/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, adding the missing page table entries
	 * only and update the &drm_gpuvm accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existing
 * mapping(s), hence it consists of a maximum of two map and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If either a new mapping's start address is aligned with the start address
 * of the old mapping or the new mapping's end address is aligned with the
 * end address of the old mapping, either @prev or @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting driver
 * specific data for creating the new mappings from the unmap operation's
 * &drm_gpuva structure which typically is embedded in larger driver specific
 * structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};
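
/*
 * Example: remap splits, worked through. Unmapping the middle page of an
 * existing mapping [0x0000, 0x3000) produces a remap op whose @unmap covers
 * the old mapping, with @prev = [0x0000, 0x1000) and @next = [0x2000, 0x3000).
 * Unmapping its first page instead yields @prev == NULL and
 * @next = [0x1000, 0x3000).
 */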

/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};
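
/*
 * Example: dispatching on the operation type. A minimal sketch of a driver
 * op handler; the my_driver_* callbacks are hypothetical.
 *
 *	static int my_driver_op_exec(struct drm_gpuva_op *op, void *priv)
 *	{
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			return my_driver_map(&op->map, priv);
 *		case DRM_GPUVA_OP_REMAP:
 *			return my_driver_remap(&op->remap, priv);
 *		case DRM_GPUVA_OP_UNMAP:
 *			return my_driver_unmap(&op->unmap, priv);
 *		case DRM_GPUVA_OP_PREFETCH:
 *			return my_driver_prefetch(&op->prefetch, priv);
 *		}
 *
 *		return -EINVAL;
 *	}
 */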

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: the next &drm_gpuva_op to use as temporary storage
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_entry_safe(), and hence safe against the
 * removal of elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)

struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    u64 addr, u64 range,
			    struct drm_gem_object *obj, u64 offset);
struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);

static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	drm_gpuva_init(va, op->va.addr, op->va.range,
		       op->gem.obj, op->gem.offset);
}
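
/*
 * Example: pre-computing and consuming split/merge ops. A minimal sketch;
 * my_driver_op_exec() is the hypothetical dispatcher from the example above.
 *
 *	struct drm_gpuva_ops *ops;
 *	struct drm_gpuva_op *op;
 *	int ret = 0;
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		ret = my_driver_op_exec(op, priv);
 *		if (ret)
 *			break;
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);
 *	return ret;
 */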

/**
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuvm_ops {
	/**
	 * @vm_free: called when the last reference of a struct drm_gpuvm is
	 * dropped
	 *
	 * This callback is mandatory.
	 */
	void (*vm_free)(struct drm_gpuvm *gpuvm);

	/**
	 * @op_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @vm_bo_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuvm_bo *(*vm_bo_alloc)(void);

	/**
	 * @vm_bo_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);

	/**
	 * @vm_bo_validate: called from drm_gpuvm_validate()
	 *
	 * Drivers receive this callback for every evicted &drm_gem_object being
	 * mapped in the corresponding &drm_gpuvm.
	 *
	 * Typically, drivers would call their driver specific variant of
	 * ttm_bo_validate() from within this callback.
	 */
	int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
			      struct drm_exec *exec);

	/**
	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuvm_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to split up an existing mapping
	 *
	 * This callback is called when an existing mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existing mapping or a partial unmap of an
	 * existing mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to unmap an existing mapping
	 *
	 * This callback is called when an existing mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existing mapping or an unmap of an existing mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};

int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     u64 addr, u64 range,
		     struct drm_gem_object *obj, u64 offset);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);

void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
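
/*
 * Example: a driver's split/merge callbacks. A minimal sketch, assuming the
 * driver drives &drm_gpuvm_sm_map and &drm_gpuvm_sm_unmap directly; the
 * my_driver_* helpers are hypothetical.
 *
 *	static const struct drm_gpuvm_ops my_driver_gpuvm_ops = {
 *		.vm_free = my_driver_vm_free,
 *		.sm_step_map = my_driver_gpuva_map,
 *		.sm_step_remap = my_driver_gpuva_remap,
 *		.sm_step_unmap = my_driver_gpuva_unmap,
 *	};
 *
 *	ret = drm_gpuvm_sm_map(gpuvm, priv, addr, range, obj, offset);
 */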

/**
 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
 * the unmap stage of a remap op.
 * @op: Remap op.
 * @start_addr: Output pointer for the start of the required unmap.
 * @range: Output pointer for the length of the required unmap.
 *
 * The given start address and range will be set such that they represent the
 * range of the address space that was previously covered by the mapping being
 * re-mapped, but is now empty.
 */
static inline void
drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
				  u64 *start_addr, u64 *range)
{
	const u64 va_start = op->prev ?
			     op->prev->va.addr + op->prev->va.range :
			     op->unmap->va->va.addr;
	const u64 va_end = op->next ?
			   op->next->va.addr :
			   op->unmap->va->va.addr + op->unmap->va->va.range;

	if (start_addr)
		*start_addr = va_start;
	if (range)
		*range = va_end - va_start;
}

#endif /* __DRM_GPUVM_H__ */