/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drm_gem.h>

#include <linux/kref.h>
#include <linux/list.h>

#include "ttm_device.h"

/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16

struct iosys_map;

struct ttm_global;
struct ttm_device;
struct ttm_placement;
struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from a dmabuf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 * @bulk_move: The bulk move object.
 * @priority: Priority for LRU, BOs with lower priority are evicted first.
 * @pin_count: Pin count.
 *
 * Base class for TTM buffer objects, dealing with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member and the API visibility of this object make it possible
 * to derive driver-specific types.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/*
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/*
	 * Members not needing protection.
	 */
	struct kref kref;

	/*
	 * Members protected by the bo::resv::reserved lock.
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;
	struct ttm_lru_bulk_move *bulk_move;
	unsigned priority;
	unsigned pin_count;

	/**
	 * @delayed_delete: Work item used when we can't delete the BO
	 * immediately
	 */
	struct work_struct delayed_delete;

	/**
	 * @sg: external source of pages and DMA addresses, protected by the
	 * reservation lock.
	 */
	struct sg_table *sg;
};
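
/*
 * Example: deriving a driver-specific buffer object type, as the comment
 * above describes. This is an illustrative sketch only; my_driver_bo and
 * my_driver_bo_destroy are hypothetical names, not part of the TTM API.
 * A driver embeds struct ttm_buffer_object, recovers its own type with
 * container_of() and frees it from the @destroy callback:
 *
 *	struct my_driver_bo {
 *		struct ttm_buffer_object base;
 *		u64 gpu_addr;
 *	};
 *
 *	static void my_driver_bo_destroy(struct ttm_buffer_object *bo)
 *	{
 *		struct my_driver_bo *dbo =
 *			container_of(bo, struct my_driver_bo, base);
 *
 *		kfree(dbo);
 *	}
 */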
#define TTM_BO_MAP_IOMEM_MASK 0x80

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM BO.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap = 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap = 2,
		ttm_bo_map_kmap = 3,
		ttm_bo_map_premapped = 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * struct ttm_operation_ctx
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
 */
struct ttm_operation_ctx {
	/** @interruptible: Sleep interruptible if sleeping. */
	bool interruptible;
	/** @no_wait_gpu: Return immediately if the GPU is busy. */
	bool no_wait_gpu;
	/**
	 * @gfp_retry_mayfail: Use __GFP_RETRY_MAYFAIL | __GFP_NOWARN
	 * when allocating pages. This avoids invoking the OOM killer
	 * when populating a buffer object, so that the allocation error
	 * can be forwarded and dealt with.
	 */
	bool gfp_retry_mayfail;
	/**
	 * @allow_res_evict: Allow eviction of reserved BOs. Can be used
	 * when multiple BOs share the same reservation object @resv.
	 */
	bool allow_res_evict;
	/**
	 * @resv: Reservation object to be used together with
	 * @allow_res_evict.
	 */
	struct dma_resv *resv;
	/**
	 * @bytes_moved: Statistics on how many bytes have been moved.
	 */
	uint64_t bytes_moved;
};

struct ttm_lru_walk;

/** struct ttm_lru_walk_ops - Operations for a LRU walk. */
struct ttm_lru_walk_ops {
	/**
	 * process_bo - Process this bo.
	 * @walk: struct ttm_lru_walk describing the walk.
	 * @bo: A locked and referenced buffer object.
	 *
	 * Return: Negative error code on error, user-defined positive value
	 * (typically, but not always, the size of the processed bo) on
	 * success. On success, the returned values are summed by the walk
	 * and the walk exits when its target is met.
	 * 0 also indicates success, -EBUSY means this bo was skipped.
	 */
	s64 (*process_bo)(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo);
};
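
/*
 * Example: a minimal struct ttm_lru_walk_ops implementation. This is an
 * illustrative sketch; my_walk_process_bo and my_walk_ops are hypothetical
 * names. The callback is invoked with @bo locked and referenced, and here
 * simply reports the bo size so the walk (see ttm_lru_walk_for_evict()
 * below) can sum it against its target:
 *
 *	static s64 my_walk_process_bo(struct ttm_lru_walk *walk,
 *				      struct ttm_buffer_object *bo)
 *	{
 *		if (bo->pin_count)
 *			return -EBUSY;	// Skip pinned bos.
 *
 *		// ... driver-specific processing of @bo here ...
 *
 *		return bo->base.size;
 *	}
 *
 *	static const struct ttm_lru_walk_ops my_walk_ops = {
 *		.process_bo = my_walk_process_bo,
 *	};
 */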
/**
 * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk.
 */
struct ttm_lru_walk_arg {
	/** @ctx: Pointer to the struct ttm_operation_ctx. */
	struct ttm_operation_ctx *ctx;
	/** @ticket: The struct ww_acquire_ctx if any. */
	struct ww_acquire_ctx *ticket;
	/** @trylock_only: Only use trylock for locking. */
	bool trylock_only;
};

/**
 * struct ttm_lru_walk - Structure describing a LRU walk.
 */
struct ttm_lru_walk {
	/** @ops: Pointer to the ops structure. */
	const struct ttm_lru_walk_ops *ops;
	/** @arg: Common bo LRU walk arguments. */
	struct ttm_lru_walk_arg arg;
};

s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
			   struct ttm_resource_manager *man, s64 target);

/**
 * struct ttm_bo_shrink_flags - flags to govern the bo shrinking behaviour
 * @purge: Purge the content rather than backing it up.
 * @writeback: Attempt to immediately write content to swap space.
 * @allow_move: Allow moving to system before shrinking. This is typically
 * not desired for zombie or ghost objects (a zombie object being an
 * object with a zero gem object refcount).
 */
struct ttm_bo_shrink_flags {
	u32 purge : 1;
	u32 writeback : 1;
	u32 allow_move : 1;
};

long ttm_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
		   const struct ttm_bo_shrink_flags flags);

bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx);

bool ttm_bo_shrink_avoid_wait(void);

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}
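
/*
 * Example: simple single-bo reservation without a ww_acquire_ctx ticket.
 * An illustrative sketch; my_driver_touch_bo is a hypothetical helper.
 * Interruptible blocking reservation, CPU-side access under the lock,
 * then unreserve (ttm_bo_unreserve() is declared further below):
 *
 *	static int my_driver_touch_bo(struct ttm_buffer_object *bo)
 *	{
 *		int ret;
 *
 *		ret = ttm_bo_reserve(bo, true, false, NULL);
 *		if (ret)	// -ERESTARTSYS if interrupted by a signal.
 *			return ret;
 *
 *		// ... access bo state protected by the reservation ...
 *
 *		ttm_bo_unreserve(bo);
 *		return 0;
 *	}
 */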
/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}

void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Assign the memory from new_mem to the memory of the buffer object bo.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}
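
/*
 * Example: reserving two bos with deadlock backoff, following the
 * protocol documented above. An illustrative sketch; my_driver_reserve_pair
 * is a hypothetical helper and the caller is assumed to have set up
 * @ticket with ww_acquire_init(ticket, &reservation_ww_class). On
 * -EDEADLK the held reservation is dropped and the contended bo is
 * re-acquired via ttm_bo_reserve_slowpath() before retrying:
 *
 *	static int my_driver_reserve_pair(struct ttm_buffer_object *a,
 *					  struct ttm_buffer_object *b,
 *					  struct ww_acquire_ctx *ticket)
 *	{
 *		int ret;
 *
 *		ret = ttm_bo_reserve(a, true, false, ticket);
 *		if (ret)
 *			return ret;
 *	retry:
 *		ret = ttm_bo_reserve(b, true, false, ticket);
 *		if (ret == -EDEADLK) {
 *			ttm_bo_unreserve(a);
 *			ret = ttm_bo_reserve_slowpath(b, true, ticket);
 *			if (ret)
 *				return ret;
 *			swap(a, b);	// The contended bo is now held as 'a'.
 *			goto retry;
 *		}
 *		if (ret)
 *			ttm_bo_unreserve(a);
 *		return ret;
 *	}
 */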
/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return indicates whether the
 * virtual map is io memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should strictly be accessed by the iowriteXX() and similar
 * functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}
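
/*
 * Example: mapping the first page of a bo for CPU access with
 * ttm_bo_kmap() (declared below). An illustrative sketch;
 * my_driver_clear_first_page is a hypothetical helper. The bo must be
 * reserved by the caller, and io memory is accessed through the
 * io accessors as required by the comment above:
 *
 *	static int my_driver_clear_first_page(struct ttm_buffer_object *bo)
 *	{
 *		struct ttm_bo_kmap_obj map;
 *		bool is_iomem;
 *		void *virtual;
 *		int ret;
 *
 *		ret = ttm_bo_kmap(bo, 0, 1, &map);
 *		if (ret)
 *			return ret;
 *
 *		virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		if (is_iomem)
 *			memset_io((void __iomem *)virtual, 0, PAGE_SIZE);
 *		else
 *			memset(virtual, 0, PAGE_SIZE);
 *
 *		ttm_bo_kunmap(&map);
 *		return 0;
 *	}
 */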
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx);
void ttm_bo_fini(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
		   struct ttm_resource_manager *man, gfp_t gfp_flags,
		   s64 target);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_bo_evict_first(struct ttm_device *bdev,
		       struct ttm_resource_manager *man,
		       struct ttm_operation_ctx *ctx);
int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
		  void *buf, int len, int write);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write);
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);
void ttm_move_memcpy(bool clear, u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
int ttm_bo_populate(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_setup_export(struct ttm_buffer_object *bo,
			struct ttm_operation_ctx *ctx);

/* Driver LRU walk helpers initially targeted for shrinking. */

/**
 * struct ttm_bo_lru_cursor - Iterator cursor for TTM LRU list looping
 */
struct ttm_bo_lru_cursor {
	/** @res_curs: Embedded struct ttm_resource_cursor. */
	struct ttm_resource_cursor res_curs;
	/**
	 * @bo: Buffer object pointer if a buffer object is refcounted,
	 * NULL otherwise.
	 */
	struct ttm_buffer_object *bo;
	/**
	 * @needs_unlock: Valid iff @bo != NULL. The bo resv needs
	 * unlock before the next iteration or after loop exit.
	 */
	bool needs_unlock;
	/** @arg: Pointer to common BO LRU walk arguments. */
	struct ttm_lru_walk_arg *arg;
};

void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);

struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
		       struct ttm_resource_manager *man,
		       struct ttm_lru_walk_arg *arg);

struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);

struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs);

/*
 * Defines needed to use autocleanup (linux/cleanup.h) with struct ttm_bo_lru_cursor.
 */
DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
	     if (_T) { ttm_bo_lru_cursor_fini(_T); },
	     ttm_bo_lru_cursor_init(curs, man, arg),
	     struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
	     struct ttm_lru_walk_arg *arg);
static inline void *
class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
{ return *_T; }
#define class_ttm_bo_lru_cursor_is_conditional false

/**
 * ttm_bo_lru_for_each_reserved_guarded() - Iterate over buffer objects owning
 * resources on LRU lists.
 * @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
 * @_man: The resource manager whose LRU lists to iterate over.
 * @_arg: The struct ttm_lru_walk_arg to govern the LRU walk.
 * @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
 * for the current iteration.
 *
 * Iterate over all resources of @_man and for each resource, attempt to
 * reference and lock (using the locking mode detailed in @_arg) the buffer
 * object it points to. If successful, assign @_bo to the address of the
 * buffer object and update @_cursor. The iteration is guarded in the
 * sense that @_cursor will be initialized before looping starts and cleaned
 * up at looping termination, even if terminated prematurely by, for
 * example, a return or break statement. Exiting the loop will also unlock
 * (if needed) and unreference @_bo.
 *
 * Return: If locking of a bo returns an error, then iteration is terminated
 * and @_bo is set to a corresponding error pointer. It's illegal to
 * dereference @_bo after loop exit.
 */
#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _arg, _bo)	\
	scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _arg)		\
	for ((_bo) = ttm_bo_lru_cursor_first(_cursor);			\
	     !IS_ERR_OR_NULL(_bo);					\
	     (_bo) = ttm_bo_lru_cursor_next(_cursor))
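
/*
 * Example: a trylock-only LRU walk over a manager's buffer objects using
 * the guarded iterator above. An illustrative sketch;
 * my_driver_count_lru_bytes is a hypothetical helper. Each successfully
 * locked bo is visited with its reservation held, and the guard unlocks
 * and unreferences it automatically, including on early loop exit:
 *
 *	static s64 my_driver_count_lru_bytes(struct ttm_resource_manager *man,
 *					     struct ttm_operation_ctx *ctx)
 *	{
 *		struct ttm_lru_walk_arg arg = {
 *			.ctx = ctx,
 *			.trylock_only = true,
 *		};
 *		struct ttm_bo_lru_cursor cursor;
 *		struct ttm_buffer_object *bo;
 *		s64 bytes = 0;
 *
 *		ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &arg, bo)
 *			bytes += bo->base.size;
 *
 *		// @bo may be an error pointer here, but must not be
 *		// dereferenced.
 *		return IS_ERR(bo) ? PTR_ERR(bo) : bytes;
 *	}
 */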

#endif