/* SPDX-License-Identifier: GPL-2.0-only OR MIT */

#ifndef __DRM_GPUVM_H__
#define __DRM_GPUVM_H__

/*
 * Copyright (c) 2022 Red Hat.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/types.h>

#include <drm/drm_device.h>
#include <drm/drm_gem.h>
#include <drm/drm_exec.h>

struct drm_gpuvm;
struct drm_gpuvm_bo;
struct drm_gpuvm_ops;

/**
 * enum drm_gpuva_flags - flags for struct drm_gpuva
 */
enum drm_gpuva_flags {
	/**
	 * @DRM_GPUVA_INVALIDATED:
	 *
	 * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
	 */
	DRM_GPUVA_INVALIDATED = (1 << 0),

	/**
	 * @DRM_GPUVA_SPARSE:
	 *
	 * Flag indicating that the &drm_gpuva is a sparse mapping.
	 */
	DRM_GPUVA_SPARSE = (1 << 1),

	/**
	 * @DRM_GPUVA_USERBITS: user defined bits
	 */
	DRM_GPUVA_USERBITS = (1 << 2),
};

/**
 * struct drm_gpuva - structure to track a GPU VA mapping
 *
 * This structure represents a GPU VA mapping and is associated with a
 * &drm_gpuvm.
 *
 * Typically, this structure is embedded in bigger driver structures.
 */
struct drm_gpuva {
	/**
	 * @vm: the &drm_gpuvm this object is associated with
	 */
	struct drm_gpuvm *vm;

	/**
	 * @vm_bo: the &drm_gpuvm_bo abstraction for the mapped
	 * &drm_gem_object
	 */
	struct drm_gpuvm_bo *vm_bo;

	/**
	 * @flags: the &drm_gpuva_flags for this mapping
	 */
	enum drm_gpuva_flags flags;

	/**
	 * @va: structure containing the address and range of the &drm_gpuva
	 */
	struct {
		/**
		 * @va.addr: the start address
		 */
		u64 addr;

		/**
		 * @va.range: the range
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the mapped &drm_gem_object
		 */
		struct drm_gem_object *obj;

		/**
		 * @gem.entry: the &list_head to attach this object to a
		 * &drm_gpuvm_bo
		 */
		struct list_head entry;
	} gem;

	/**
	 * @rb: structure containing data to store &drm_gpuvas in a rb-tree
	 */
	struct {
		/**
		 * @rb.node: the rb-tree node
		 */
		struct rb_node node;

		/**
		 * @rb.entry: The &list_head to additionally connect &drm_gpuvas
		 * in the same order they appear in the interval tree. This is
		 * useful to keep iterating &drm_gpuvas from a start node found
		 * through the rb-tree while doing modifications on the rb-tree
		 * itself.
		 */
		struct list_head entry;

		/**
		 * @rb.__subtree_last: needed by the interval tree, holding the
		 * last address in its subtree
		 */
		u64 __subtree_last;
	} rb;
};
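/*
 * Example: a minimal sketch of embedding &drm_gpuva in a driver structure, as
 * the comment above suggests; struct my_driver_mapping and its flags field are
 * hypothetical, not part of this API.
 *
 *	struct my_driver_mapping {
 *		struct drm_gpuva base;		// tracked by the GPUVM
 *		u32 my_driver_vma_flags;	// driver private data
 *	};
 */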
int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
void drm_gpuva_remove(struct drm_gpuva *va);

void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
void drm_gpuva_unlink(struct drm_gpuva *va);

struct drm_gpuva *drm_gpuva_find(struct drm_gpuvm *gpuvm,
				 u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
				       u64 addr, u64 range);
struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start);
struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end);

/**
 * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
 * invalidated
 * @va: the &drm_gpuva to set the invalidate flag for
 * @invalidate: indicates whether the &drm_gpuva is invalidated
 */
static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
{
	if (invalidate)
		va->flags |= DRM_GPUVA_INVALIDATED;
	else
		va->flags &= ~DRM_GPUVA_INVALIDATED;
}

/**
 * drm_gpuva_invalidated() - indicates whether the backing BO of this
 * &drm_gpuva is invalidated
 * @va: the &drm_gpuva to check
 *
 * Returns: %true if the GPU VA is invalidated, %false otherwise
 */
static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
{
	return va->flags & DRM_GPUVA_INVALIDATED;
}

/**
 * enum drm_gpuvm_flags - flags for struct drm_gpuvm
 */
enum drm_gpuvm_flags {
	/**
	 * @DRM_GPUVM_RESV_PROTECTED: GPUVM is protected externally by the
	 * GPUVM's &dma_resv lock
	 */
	DRM_GPUVM_RESV_PROTECTED = BIT(0),

	/**
	 * @DRM_GPUVM_USERBITS: user defined bits
	 */
	DRM_GPUVM_USERBITS = BIT(1),
};
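/*
 * Example: a minimal sketch of looking up a mapping and marking its backing
 * GEM invalidated; the address and range are made up for illustration.
 *
 *	struct drm_gpuva *va;
 *
 *	va = drm_gpuva_find(gpuvm, 0x100000, 0x1000);
 *	if (va)
 *		drm_gpuva_invalidate(va, true);
 */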
/**
 * struct drm_gpuvm - DRM GPU VA Manager
 *
 * The DRM GPU VA Manager keeps track of a GPU's virtual address space by
 * using an interval tree built on an augmented rb-tree. Typically, this
 * structure is embedded in bigger driver structures.
 *
 * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
 * pages.
 *
 * There should be one manager instance per GPU virtual address space.
 */
struct drm_gpuvm {
	/**
	 * @name: the name of the DRM GPU VA space
	 */
	const char *name;

	/**
	 * @flags: the &drm_gpuvm_flags of this GPUVM
	 */
	enum drm_gpuvm_flags flags;

	/**
	 * @drm: the &drm_device this VM lives in
	 */
	struct drm_device *drm;

	/**
	 * @mm_start: start of the VA space
	 */
	u64 mm_start;

	/**
	 * @mm_range: length of the VA space
	 */
	u64 mm_range;

	/**
	 * @rb: structures to track &drm_gpuva entries
	 */
	struct {
		/**
		 * @rb.tree: the rb-tree to track GPU VA mappings
		 */
		struct rb_root_cached tree;

		/**
		 * @rb.list: the &list_head to track GPU VA mappings
		 */
		struct list_head list;
	} rb;

	/**
	 * @kref: reference count of this object
	 */
	struct kref kref;

	/**
	 * @kernel_alloc_node:
	 *
	 * &drm_gpuva representing the address space cutout reserved for
	 * the kernel
	 */
	struct drm_gpuva kernel_alloc_node;

	/**
	 * @ops: &drm_gpuvm_ops providing the split/merge steps to drivers
	 */
	const struct drm_gpuvm_ops *ops;

	/**
	 * @r_obj: Resv GEM object; representing the GPUVM's common &dma_resv.
	 */
	struct drm_gem_object *r_obj;

	/**
	 * @extobj: structure holding the extobj list
	 */
	struct {
		/**
		 * @extobj.list: &list_head storing &drm_gpuvm_bos serving as
		 * external object
		 */
		struct list_head list;

		/**
		 * @extobj.local_list: pointer to the local list temporarily
		 * storing entries from the external object list
		 */
		struct list_head *local_list;

		/**
		 * @extobj.lock: spinlock to protect the extobj list
		 */
		spinlock_t lock;
	} extobj;

	/**
	 * @evict: structure holding the evict list and evict list lock
	 */
	struct {
		/**
		 * @evict.list: &list_head storing &drm_gpuvm_bos currently
		 * being evicted
		 */
		struct list_head list;

		/**
		 * @evict.local_list: pointer to the local list temporarily
		 * storing entries from the evicted object list
		 */
		struct list_head *local_list;

		/**
		 * @evict.lock: spinlock to protect the evict list
		 */
		spinlock_t lock;
	} evict;
};

void drm_gpuvm_init(struct drm_gpuvm *gpuvm, const char *name,
		    enum drm_gpuvm_flags flags,
		    struct drm_device *drm,
		    struct drm_gem_object *r_obj,
		    u64 start_offset, u64 range,
		    u64 reserve_offset, u64 reserve_range,
		    const struct drm_gpuvm_ops *ops);
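/*
 * Example: a minimal sketch of initializing a GPUVM for a 48-bit VA space with
 * a 4K kernel reserve at address 0; my_vm, my_vm_ops and the chosen offsets
 * are hypothetical.
 *
 *	struct drm_gem_object *r_obj = drm_gpuvm_resv_object_alloc(drm);
 *
 *	drm_gpuvm_init(&my_vm->base, "my-vm", DRM_GPUVM_RESV_PROTECTED, drm,
 *		       r_obj, 0, 1ull << 48, 0, SZ_4K, &my_vm_ops);
 */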
/**
 * drm_gpuvm_get() - acquire a struct drm_gpuvm reference
 * @gpuvm: the &drm_gpuvm to acquire the reference of
 *
 * This function acquires an additional reference to @gpuvm. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm pointer
 */
static inline struct drm_gpuvm *
drm_gpuvm_get(struct drm_gpuvm *gpuvm)
{
	kref_get(&gpuvm->kref);

	return gpuvm;
}

void drm_gpuvm_put(struct drm_gpuvm *gpuvm);

bool drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, u64 addr, u64 range);
bool drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range);

struct drm_gem_object *
drm_gpuvm_resv_object_alloc(struct drm_device *drm);

/**
 * drm_gpuvm_resv_protected() - indicates whether &DRM_GPUVM_RESV_PROTECTED is
 * set
 * @gpuvm: the &drm_gpuvm
 *
 * Returns: true if &DRM_GPUVM_RESV_PROTECTED is set, false otherwise.
 */
static inline bool
drm_gpuvm_resv_protected(struct drm_gpuvm *gpuvm)
{
	return gpuvm->flags & DRM_GPUVM_RESV_PROTECTED;
}

/**
 * drm_gpuvm_resv() - returns the &drm_gpuvm's &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gpuvm's shared &dma_resv
 */
#define drm_gpuvm_resv(gpuvm__) ((gpuvm__)->r_obj->resv)

/**
 * drm_gpuvm_resv_obj() - returns the &drm_gem_object holding the &drm_gpuvm's
 * &dma_resv
 * @gpuvm__: the &drm_gpuvm
 *
 * Returns: a pointer to the &drm_gem_object holding the &drm_gpuvm's shared
 * &dma_resv
 */
#define drm_gpuvm_resv_obj(gpuvm__) ((gpuvm__)->r_obj)

#define drm_gpuvm_resv_held(gpuvm__) \
	dma_resv_held(drm_gpuvm_resv(gpuvm__))

#define drm_gpuvm_resv_assert_held(gpuvm__) \
	dma_resv_assert_held(drm_gpuvm_resv(gpuvm__))

/**
 * drm_gpuvm_is_extobj() - indicates whether the given &drm_gem_object is an
 * external object
 * @gpuvm: the &drm_gpuvm to check
 * @obj: the &drm_gem_object to check
 *
 * Returns: true if the &drm_gem_object's &dma_resv differs from the
 * &drm_gpuvm's &dma_resv, false otherwise
 */
static inline bool
drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj)
{
	return obj && obj->resv != drm_gpuvm_resv(gpuvm);
}

static inline struct drm_gpuva *
__drm_gpuva_next(struct drm_gpuva *va)
{
	if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
		return list_next_entry(va, rb.entry);

	return NULL;
}
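/*
 * Example: a minimal sketch of telling external BOs (own &dma_resv) apart from
 * BOs sharing the VM's common &dma_resv, e.g. to pick the lock to take; this
 * simplifies the real locking rules and is purely illustrative.
 *
 *	if (drm_gpuvm_is_extobj(gpuvm, obj))
 *		dma_resv_lock(obj->resv, NULL);			// BO's own resv
 *	else
 *		dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL);	// shared resv
 */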
/**
 * drm_gpuvm_for_each_va_range() - iterate over a range of &drm_gpuvas
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to list_for_each(),
 * but is using the &drm_gpuvm's internal interval tree to accelerate
 * the search for the starting &drm_gpuva, and hence isn't safe against removal
 * of elements. It assumes that @end__ is within (or is the upper limit of) the
 * &drm_gpuvm. This iterator does not skip over the &drm_gpuvm's
 * @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range(va__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va_range_safe() - safely iterate over a range of
 * &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 * @start__: starting offset, the first gpuva will overlap this
 * @end__: ending offset, the last gpuva will start before this (but may
 * overlap)
 *
 * This iterator walks over all &drm_gpuvas in the &drm_gpuvm that lie
 * between @start__ and @end__. It is implemented similarly to
 * list_for_each_safe(), but is using the &drm_gpuvm's internal interval
 * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
 * against removal of elements. It assumes that @end__ is within (or is the
 * upper limit of) the &drm_gpuvm. This iterator does not skip over the
 * &drm_gpuvm's @kernel_alloc_node.
 */
#define drm_gpuvm_for_each_va_range_safe(va__, next__, gpuvm__, start__, end__) \
	for (va__ = drm_gpuva_find_first((gpuvm__), (start__), (end__) - (start__)), \
	     next__ = __drm_gpuva_next(va__); \
	     va__ && (va__->va.addr < (end__)); \
	     va__ = next__, next__ = __drm_gpuva_next(va__))

/**
 * drm_gpuvm_for_each_va() - iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm.
 */
#define drm_gpuvm_for_each_va(va__, gpuvm__) \
	list_for_each_entry(va__, &(gpuvm__)->rb.list, rb.entry)

/**
 * drm_gpuvm_for_each_va_safe() - safely iterate over all &drm_gpuvas
 * @va__: &drm_gpuva to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @gpuvm__: &drm_gpuvm to walk over
 *
 * This iterator walks over all &drm_gpuva structures associated with the given
 * &drm_gpuvm. It is implemented with list_for_each_entry_safe(), and
 * hence safe against the removal of elements.
 */
#define drm_gpuvm_for_each_va_safe(va__, next__, gpuvm__) \
	list_for_each_entry_safe(va__, next__, &(gpuvm__)->rb.list, rb.entry)
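/*
 * Example: a minimal sketch of walking all mappings overlapping a VA range;
 * the range boundaries are made up for illustration.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gpuvm_for_each_va_range(va, gpuvm, 0x100000, 0x200000)
 *		pr_debug("mapping at 0x%llx, range 0x%llx\n",
 *			 va->va.addr, va->va.range);
 */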
/**
 * struct drm_gpuvm_exec - &drm_gpuvm abstraction of &drm_exec
 *
 * This structure should be created on the stack, as &drm_exec should be.
 *
 * Optionally, @extra can be set in order to lock additional &drm_gem_objects.
 */
struct drm_gpuvm_exec {
	/**
	 * @exec: the &drm_exec structure
	 */
	struct drm_exec exec;

	/**
	 * @flags: the flags for the struct drm_exec
	 */
	u32 flags;

	/**
	 * @vm: the &drm_gpuvm whose DMA reservations are locked
	 */
	struct drm_gpuvm *vm;

	/**
	 * @num_fences: the number of fences to reserve for the &dma_resv of
	 * the locked &drm_gem_objects
	 */
	unsigned int num_fences;

	/**
	 * @extra: Callback and corresponding private data for the driver to
	 * lock arbitrary additional &drm_gem_objects.
	 */
	struct {
		/**
		 * @extra.fn: The driver callback to lock additional
		 * &drm_gem_objects.
		 */
		int (*fn)(struct drm_gpuvm_exec *vm_exec);

		/**
		 * @extra.priv: driver private data for the @fn callback
		 */
		void *priv;
	} extra;
};

int drm_gpuvm_prepare_vm(struct drm_gpuvm *gpuvm,
			 struct drm_exec *exec,
			 unsigned int num_fences);

int drm_gpuvm_prepare_objects(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      unsigned int num_fences);

int drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm,
			    struct drm_exec *exec,
			    u64 addr, u64 range,
			    unsigned int num_fences);

int drm_gpuvm_exec_lock(struct drm_gpuvm_exec *vm_exec);

int drm_gpuvm_exec_lock_array(struct drm_gpuvm_exec *vm_exec,
			      struct drm_gem_object **objs,
			      unsigned int num_objs);

int drm_gpuvm_exec_lock_range(struct drm_gpuvm_exec *vm_exec,
			      u64 addr, u64 range);

/**
 * drm_gpuvm_exec_unlock() - unlock all dma-resv of all associated BOs
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * Releases all dma-resv locks of all &drm_gem_objects previously acquired
 * through drm_gpuvm_exec_lock() or its variants.
 */
static inline void
drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
{
	drm_exec_fini(&vm_exec->exec);
}

int drm_gpuvm_validate(struct drm_gpuvm *gpuvm, struct drm_exec *exec);
void drm_gpuvm_resv_add_fence(struct drm_gpuvm *gpuvm,
			      struct drm_exec *exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage);

/**
 * drm_gpuvm_exec_resv_add_fence() - add fence to private and all extobj
 * @vm_exec: the &drm_gpuvm_exec wrapper
 * @fence: fence to add
 * @private_usage: private dma-resv usage
 * @extobj_usage: extobj dma-resv usage
 *
 * See drm_gpuvm_resv_add_fence().
 */
static inline void
drm_gpuvm_exec_resv_add_fence(struct drm_gpuvm_exec *vm_exec,
			      struct dma_fence *fence,
			      enum dma_resv_usage private_usage,
			      enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
				 private_usage, extobj_usage);
}

/**
 * drm_gpuvm_exec_validate() - validate all BOs marked as evicted
 * @vm_exec: the &drm_gpuvm_exec wrapper
 *
 * See drm_gpuvm_validate().
 *
 * Returns: 0 on success, negative error code on failure.
 */
static inline int
drm_gpuvm_exec_validate(struct drm_gpuvm_exec *vm_exec)
{
	return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
}
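/*
 * Example: a minimal sketch of a submission path that locks the VM's
 * reservations, validates evicted BOs and attaches a job fence; job_fence and
 * the surrounding error handling are hypothetical.
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	int ret;
 *
 *	ret = drm_gpuvm_exec_lock(&vm_exec);
 *	if (ret)
 *		return ret;
 *
 *	ret = drm_gpuvm_exec_validate(&vm_exec);
 *	if (!ret)
 *		drm_gpuvm_exec_resv_add_fence(&vm_exec, job_fence,
 *					      DMA_RESV_USAGE_BOOKKEEP,
 *					      DMA_RESV_USAGE_BOOKKEEP);
 *
 *	drm_gpuvm_exec_unlock(&vm_exec);
 */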
/**
 * struct drm_gpuvm_bo - structure representing a &drm_gpuvm and
 * &drm_gem_object combination
 *
 * This structure is an abstraction representing a &drm_gpuvm and
 * &drm_gem_object combination. It serves as an indirection to accelerate
 * iterating all &drm_gpuvas within a &drm_gpuvm backed by the same
 * &drm_gem_object.
 *
 * Furthermore it is used to cache evicted GEM objects for a certain GPU-VM to
 * accelerate validation.
 *
 * Typically, drivers want to create an instance of a struct drm_gpuvm_bo once
 * a GEM object is first mapped in a GPU-VM and release the instance once the
 * last mapping of the GEM object in this GPU-VM is unmapped.
 */
struct drm_gpuvm_bo {
	/**
	 * @vm: The &drm_gpuvm the @obj is mapped in. This is a reference
	 * counted pointer.
	 */
	struct drm_gpuvm *vm;

	/**
	 * @obj: The &drm_gem_object being mapped in @vm. This is a reference
	 * counted pointer.
	 */
	struct drm_gem_object *obj;

	/**
	 * @evicted: Indicates whether the &drm_gem_object is evicted; field
	 * protected by the &drm_gem_object's dma-resv lock.
	 */
	bool evicted;

	/**
	 * @kref: The reference count for this &drm_gpuvm_bo.
	 */
	struct kref kref;

	/**
	 * @list: Structure containing all &list_heads.
	 */
	struct {
		/**
		 * @list.gpuva: The list of linked &drm_gpuvas.
		 *
		 * It is safe to access entries from this list as long as the
		 * GEM's gpuva lock is held. See also struct drm_gem_object.
		 */
		struct list_head gpuva;

		/**
		 * @list.entry: Structure containing all &list_heads serving as
		 * entry.
		 */
		struct {
			/**
			 * @list.entry.gem: List entry to attach to the
			 * &drm_gem_object's gpuva list.
			 */
			struct list_head gem;

			/**
			 * @list.entry.extobj: List entry to attach to the
			 * &drm_gpuvm's extobj list.
			 */
			struct list_head extobj;

			/**
			 * @list.entry.evict: List entry to attach to the
			 * &drm_gpuvm's evict list.
			 */
			struct list_head evict;
		} entry;
	} list;
};

struct drm_gpuvm_bo *
drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);

struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
		    struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);

/**
 * drm_gpuvm_bo_get() - acquire a struct drm_gpuvm_bo reference
 * @vm_bo: the &drm_gpuvm_bo to acquire the reference of
 *
 * This function acquires an additional reference to @vm_bo. It is illegal to
 * call this without already holding a reference. No locks required.
 *
 * Returns: the &struct drm_gpuvm_bo pointer
 */
static inline struct drm_gpuvm_bo *
drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
{
	kref_get(&vm_bo->kref);
	return vm_bo;
}

bool drm_gpuvm_bo_put(struct drm_gpuvm_bo *vm_bo);

struct drm_gpuvm_bo *
drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm,
		  struct drm_gem_object *obj);

void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);

/**
 * drm_gpuvm_bo_gem_evict() - add/remove all &drm_gpuvm_bo's in the list
 * to/from the &drm_gpuvm's evict list
 * @obj: the &drm_gem_object
 * @evict: indicates whether @obj is evicted
 *
 * See drm_gpuvm_bo_evict().
 */
static inline void
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
{
	struct drm_gpuvm_bo *vm_bo;

	drm_gem_gpuva_assert_lock_held(obj);
	drm_gem_for_each_gpuvm_bo(vm_bo, obj)
		drm_gpuvm_bo_evict(vm_bo, evict);
}

void drm_gpuvm_bo_extobj_add(struct drm_gpuvm_bo *vm_bo);
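/*
 * Example: a minimal sketch of the typical &drm_gpuvm_bo lifecycle when
 * creating a mapping; my_va is a hypothetical, already initialized &drm_gpuva.
 *
 *	struct drm_gpuvm_bo *vm_bo;
 *
 *	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);	// creates or re-uses
 *	if (IS_ERR(vm_bo))
 *		return PTR_ERR(vm_bo);
 *
 *	drm_gpuva_link(my_va, vm_bo);	// takes its own vm_bo reference
 *	drm_gpuvm_bo_put(vm_bo);	// drop the obtain reference
 */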
/**
 * drm_gpuvm_bo_for_each_va() - iterator to walk over a list of &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va(va__, vm_bo__) \
	list_for_each_entry(va__, &(vm_bo__)->list.gpuva, gem.entry)

/**
 * drm_gpuvm_bo_for_each_va_safe() - iterator to safely walk over a list of
 * &drm_gpuva
 * @va__: &drm_gpuva structure to assign to in each iteration step
 * @next__: another &drm_gpuva to use as temporary storage
 * @vm_bo__: the &drm_gpuvm_bo the &drm_gpuva to walk are associated with
 *
 * This iterator walks over all &drm_gpuva structures associated with the
 * &drm_gpuvm_bo. It is implemented with list_for_each_entry_safe(), hence
 * it is safe against removal of elements.
 *
 * The caller must hold the GEM's gpuva lock.
 */
#define drm_gpuvm_bo_for_each_va_safe(va__, next__, vm_bo__) \
	list_for_each_entry_safe(va__, next__, &(vm_bo__)->list.gpuva, gem.entry)
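/*
 * Example: a minimal sketch of marking all mappings of a BO in this VM as
 * invalidated, e.g. from a move-notify path; purely illustrative.
 *
 *	struct drm_gpuva *va;
 *
 *	drm_gem_gpuva_assert_lock_held(obj);
 *	drm_gpuvm_bo_for_each_va(va, vm_bo)
 *		drm_gpuva_invalidate(va, true);
 */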
/**
 * enum drm_gpuva_op_type - GPU VA operation type
 *
 * Operations to alter the GPU VA mappings tracked by the &drm_gpuvm.
 */
enum drm_gpuva_op_type {
	/**
	 * @DRM_GPUVA_OP_MAP: the map op type
	 */
	DRM_GPUVA_OP_MAP,

	/**
	 * @DRM_GPUVA_OP_REMAP: the remap op type
	 */
	DRM_GPUVA_OP_REMAP,

	/**
	 * @DRM_GPUVA_OP_UNMAP: the unmap op type
	 */
	DRM_GPUVA_OP_UNMAP,

	/**
	 * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
	 */
	DRM_GPUVA_OP_PREFETCH,

	/**
	 * @DRM_GPUVA_OP_DRIVER: the driver defined op type
	 */
	DRM_GPUVA_OP_DRIVER,
};

/**
 * struct drm_gpuva_op_map - GPU VA map operation
 *
 * This structure represents a single map operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_map {
	/**
	 * @va: structure containing address and range of a map
	 * operation
	 */
	struct {
		/**
		 * @va.addr: the base address of the new mapping
		 */
		u64 addr;

		/**
		 * @va.range: the range of the new mapping
		 */
		u64 range;
	} va;

	/**
	 * @gem: structure containing the &drm_gem_object and its offset
	 */
	struct {
		/**
		 * @gem.offset: the offset within the &drm_gem_object
		 */
		u64 offset;

		/**
		 * @gem.obj: the &drm_gem_object to map
		 */
		struct drm_gem_object *obj;
	} gem;
};

/**
 * struct drm_gpuva_op_unmap - GPU VA unmap operation
 *
 * This structure represents a single unmap operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_unmap {
	/**
	 * @va: the &drm_gpuva to unmap
	 */
	struct drm_gpuva *va;

	/**
	 * @keep:
	 *
	 * Indicates whether this &drm_gpuva is physically contiguous with the
	 * original mapping request.
	 *
	 * Optionally, if &keep is set, drivers may keep the actual page table
	 * mappings for this &drm_gpuva, adding only the missing page table
	 * entries and updating the &drm_gpuvm accordingly.
	 */
	bool keep;
};

/**
 * struct drm_gpuva_op_remap - GPU VA remap operation
 *
 * This represents a single remap operation generated by the DRM GPU VA
 * manager.
 *
 * A remap operation is generated when an existing GPU VA mapping is split up
 * by inserting a new GPU VA mapping or by partially unmapping existent
 * mapping(s), hence it consists of a maximum of two map and one unmap
 * operation.
 *
 * The @unmap operation takes care of removing the original existing mapping.
 * @prev is used to remap the preceding part, @next the subsequent part.
 *
 * If either a new mapping's start address is aligned with the start address
 * of the old mapping or the new mapping's end address is aligned with the
 * end address of the old mapping, either @prev or @next is NULL.
 *
 * Note, the reason for a dedicated remap operation, rather than arbitrary
 * unmap and map operations, is to give drivers the chance of extracting
 * driver specific data for creating the new mappings from the unmap
 * operation's &drm_gpuva structure which typically is embedded in larger
 * driver specific structures.
 */
struct drm_gpuva_op_remap {
	/**
	 * @prev: the preceding part of a split mapping
	 */
	struct drm_gpuva_op_map *prev;

	/**
	 * @next: the subsequent part of a split mapping
	 */
	struct drm_gpuva_op_map *next;

	/**
	 * @unmap: the unmap operation for the original existing mapping
	 */
	struct drm_gpuva_op_unmap *unmap;
};
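/*
 * Example: a minimal sketch of a driver's remap handling, tearing down only
 * the hole left between @prev and @next with the help of
 * drm_gpuva_op_remap_to_unmap_range() declared at the end of this header;
 * my_driver_unmap_pages() is hypothetical.
 *
 *	u64 addr, range;
 *
 *	drm_gpuva_op_remap_to_unmap_range(&op->remap, &addr, &range);
 *	my_driver_unmap_pages(addr, range);
 */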
/**
 * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
 *
 * This structure represents a single prefetch operation generated by the
 * DRM GPU VA manager.
 */
struct drm_gpuva_op_prefetch {
	/**
	 * @va: the &drm_gpuva to prefetch
	 */
	struct drm_gpuva *va;
};

/**
 * struct drm_gpuva_op - GPU VA operation
 *
 * This structure represents a single generic operation.
 *
 * The particular type of the operation is defined by @op.
 */
struct drm_gpuva_op {
	/**
	 * @entry:
	 *
	 * The &list_head used to distribute instances of this struct within
	 * &drm_gpuva_ops.
	 */
	struct list_head entry;

	/**
	 * @op: the type of the operation
	 */
	enum drm_gpuva_op_type op;

	union {
		/**
		 * @map: the map operation
		 */
		struct drm_gpuva_op_map map;

		/**
		 * @remap: the remap operation
		 */
		struct drm_gpuva_op_remap remap;

		/**
		 * @unmap: the unmap operation
		 */
		struct drm_gpuva_op_unmap unmap;

		/**
		 * @prefetch: the prefetch operation
		 */
		struct drm_gpuva_op_prefetch prefetch;
	};
};

/**
 * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
 */
struct drm_gpuva_ops {
	/**
	 * @list: the &list_head
	 */
	struct list_head list;
};

/**
 * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations.
 */
#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
 * @op: &drm_gpuva_op to assign in each iteration step
 * @next: another &drm_gpuva_op to use as temporary storage
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations. It is
 * implemented with list_for_each_entry_safe(), and hence safe against the
 * removal of elements.
 */
#define drm_gpuva_for_each_op_safe(op, next, ops) \
	list_for_each_entry_safe(op, next, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations beginning
 * from the given operation in reverse order.
 */
#define drm_gpuva_for_each_op_from_reverse(op, ops) \
	list_for_each_entry_from_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_for_each_op_reverse() - iterator to walk over &drm_gpuva_ops in
 * reverse
 * @op: &drm_gpuva_op to assign in each iteration step
 * @ops: &drm_gpuva_ops to walk
 *
 * This iterator walks over all ops within a given list of operations in
 * reverse order.
 */
#define drm_gpuva_for_each_op_reverse(op, ops) \
	list_for_each_entry_reverse(op, &(ops)->list, entry)

/**
 * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
 */
#define drm_gpuva_first_op(ops) \
	list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
 * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
 */
#define drm_gpuva_last_op(ops) \
	list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)

/**
 * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)

/**
 * drm_gpuva_next_op() - next &drm_gpuva_op in the list
 * @op: the current &drm_gpuva_op
 */
#define drm_gpuva_next_op(op) list_next_entry(op, entry)
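/*
 * Example: a minimal sketch of processing a previously created list of
 * operations; the my_driver_*() handlers are hypothetical.
 *
 *	struct drm_gpuva_op *op;
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		switch (op->op) {
 *		case DRM_GPUVA_OP_MAP:
 *			my_driver_map(&op->map);
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			my_driver_remap(&op->remap);
 *			break;
 *		case DRM_GPUVA_OP_UNMAP:
 *			my_driver_unmap(&op->unmap);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */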
/**
 * struct drm_gpuvm_map_req - arguments passed to drm_gpuvm_sm_map[_ops_create]()
 */
struct drm_gpuvm_map_req {
	/**
	 * @map: the &drm_gpuva_op_map describing the requested mapping
	 */
	struct drm_gpuva_op_map map;
};

struct drm_gpuva_ops *
drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
			    const struct drm_gpuvm_map_req *req);
struct drm_gpuva_ops *
drm_gpuvm_madvise_ops_create(struct drm_gpuvm *gpuvm,
			     const struct drm_gpuvm_map_req *req);

struct drm_gpuva_ops *
drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
			      u64 addr, u64 range);

struct drm_gpuva_ops *
drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo);

void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
			struct drm_gpuva_ops *ops);

static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
					  struct drm_gpuva_op_map *op)
{
	va->va.addr = op->va.addr;
	va->va.range = op->va.range;
	va->gem.obj = op->gem.obj;
	va->gem.offset = op->gem.offset;
}

/**
 * struct drm_gpuvm_ops - callbacks for split/merge steps
 *
 * This structure defines the callbacks used by &drm_gpuvm_sm_map and
 * &drm_gpuvm_sm_unmap to provide the split/merge steps for map and unmap
 * operations to drivers.
 */
struct drm_gpuvm_ops {
	/**
	 * @vm_free: called when the last reference of a struct drm_gpuvm is
	 * dropped
	 *
	 * This callback is mandatory.
	 */
	void (*vm_free)(struct drm_gpuvm *gpuvm);

	/**
	 * @op_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuva_op *(*op_alloc)(void);

	/**
	 * @op_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuva_op
	 *
	 * Some drivers may want to embed struct drm_gpuva_op into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*op_free)(struct drm_gpuva_op *op);

	/**
	 * @vm_bo_alloc: called when the &drm_gpuvm allocates
	 * a struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * allocate memory accordingly.
	 *
	 * This callback is optional.
	 */
	struct drm_gpuvm_bo *(*vm_bo_alloc)(void);

	/**
	 * @vm_bo_free: called when the &drm_gpuvm frees a
	 * struct drm_gpuvm_bo
	 *
	 * Some drivers may want to embed struct drm_gpuvm_bo into driver
	 * specific structures. By implementing this callback drivers can
	 * free the previously allocated memory accordingly.
	 *
	 * This callback is optional.
	 */
	void (*vm_bo_free)(struct drm_gpuvm_bo *vm_bo);

	/**
	 * @vm_bo_validate: called from drm_gpuvm_validate()
	 *
	 * Drivers receive this callback for every evicted &drm_gem_object
	 * being mapped in the corresponding &drm_gpuvm.
	 *
	 * Typically, drivers would call their driver specific variant of
	 * ttm_bo_validate() from within this callback.
	 */
	int (*vm_bo_validate)(struct drm_gpuvm_bo *vm_bo,
			      struct drm_exec *exec);

	/**
	 * @sm_step_map: called from &drm_gpuvm_sm_map to finally insert the
	 * mapping once all previous steps were completed
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if &drm_gpuvm_sm_map is not used.
	 */
	int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_remap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to split up an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be split
	 * up. This is the case when either a newly requested mapping overlaps
	 * or is enclosed by an existent mapping or a partial unmap of an
	 * existent mapping is requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);

	/**
	 * @sm_step_unmap: called from &drm_gpuvm_sm_map and
	 * &drm_gpuvm_sm_unmap to unmap an existent mapping
	 *
	 * This callback is called when an existent mapping needs to be
	 * unmapped. This is the case when either a newly requested mapping
	 * encloses an existent mapping or an unmap of an existent mapping is
	 * requested.
	 *
	 * The &priv pointer matches the one the driver passed to
	 * &drm_gpuvm_sm_map or &drm_gpuvm_sm_unmap, respectively.
	 *
	 * Can be NULL if neither &drm_gpuvm_sm_map nor &drm_gpuvm_sm_unmap is
	 * used.
	 */
	int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
};
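/*
 * Example: a minimal sketch of wiring up the callbacks and driving the
 * split/merge logic for a map request; the my_driver_*() functions are
 * hypothetical.
 *
 *	static const struct drm_gpuvm_ops my_driver_gpuvm_ops = {
 *		.vm_free = my_driver_vm_free,
 *		.sm_step_map = my_driver_step_map,
 *		.sm_step_remap = my_driver_step_remap,
 *		.sm_step_unmap = my_driver_step_unmap,
 *	};
 *
 *	// later, for a map request:
 *	struct drm_gpuvm_map_req req = {
 *		.map.va.addr = addr,
 *		.map.va.range = range,
 *		.map.gem.obj = obj,
 *		.map.gem.offset = offset,
 *	};
 *
 *	ret = drm_gpuvm_sm_map(gpuvm, driver_ctx, &req);
 */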
int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
		     const struct drm_gpuvm_map_req *req);

int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
		       u64 addr, u64 range);

int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
			       struct drm_exec *exec, unsigned int num_fences,
			       struct drm_gpuvm_map_req *req);

int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
				 u64 req_addr, u64 req_range);

void drm_gpuva_map(struct drm_gpuvm *gpuvm,
		   struct drm_gpuva *va,
		   struct drm_gpuva_op_map *op);

void drm_gpuva_remap(struct drm_gpuva *prev,
		     struct drm_gpuva *next,
		     struct drm_gpuva_op_remap *op);

void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);

/**
 * drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
 * the unmap stage of a remap op.
 * @op: Remap op.
 * @start_addr: Output pointer for the start of the required unmap.
 * @range: Output pointer for the length of the required unmap.
 *
 * The given start address and range will be set such that they represent the
 * range of the address space that was previously covered by the mapping being
 * re-mapped, but is now empty.
 */
static inline void
drm_gpuva_op_remap_to_unmap_range(const struct drm_gpuva_op_remap *op,
				  u64 *start_addr, u64 *range)
{
	const u64 va_start = op->prev ?
			     op->prev->va.addr + op->prev->va.range :
			     op->unmap->va->va.addr;
	const u64 va_end = op->next ?
			   op->next->va.addr :
			   op->unmap->va->va.addr + op->unmap->va->va.range;

	if (start_addr)
		*start_addr = va_start;
	if (range)
		*range = va_end - va_start;
}

#endif /* __DRM_GPUVM_H__ */