/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#ifndef _TTM_BO_API_H_
#define _TTM_BO_API_H_

#include <drm/drm_gem.h>

#include <linux/kref.h>
#include <linux/list.h>

#include "ttm_device.h"

/* Default number of pre-faulted pages in the TTM fault handler */
#define TTM_BO_VM_NUM_PREFAULT 16

struct iosys_map;

struct ttm_global;
struct ttm_device;
struct ttm_placement;
struct ttm_place;
struct ttm_resource;
struct ttm_resource_manager;
struct ttm_tt;

/**
 * enum ttm_bo_type
 *
 * @ttm_bo_type_device: These are 'normal' buffers that can
 * be mmapped by user space. Each of these bos occupies a slot in the
 * device address space that can be used for normal vm operations.
 *
 * @ttm_bo_type_kernel: These buffers are like ttm_bo_type_device buffers,
 * but they cannot be accessed from user-space. For kernel-only use.
 *
 * @ttm_bo_type_sg: Buffer made from a dmabuf sg table shared with another
 * driver.
 */
enum ttm_bo_type {
	ttm_bo_type_device,
	ttm_bo_type_kernel,
	ttm_bo_type_sg
};

/**
 * struct ttm_buffer_object
 *
 * @base: drm_gem_object superclass data.
 * @bdev: Pointer to the buffer object device structure.
 * @type: The bo type.
 * @page_alignment: Page alignment.
 * @destroy: Destruction function. If NULL, kfree is used.
 * @kref: Reference count of this buffer object. When this refcount reaches
 * zero, the object is destroyed or put on the delayed delete list.
 * @resource: structure describing current placement.
 * @ttm: TTM structure holding system pages.
 * @deleted: True if the object is only a zombie and already deleted.
 * @bulk_move: The bulk move object.
 * @priority: Priority for LRU, BOs with lower priority are evicted first.
 * @pin_count: Pin count.
 *
 * Base class for TTM buffer objects, dealing with data placement and CPU
 * mappings. GPU mappings are really up to the driver, but for simpler GPUs
 * the driver can usually use the placement offset directly as the
 * GPU virtual address. For drivers implementing multiple
 * GPU memory manager contexts, the driver should manage the address space
 * in these contexts separately and use these objects to get the correct
 * placement and caching for these GPU maps. This makes it possible to use
 * these objects for even quite elaborate memory management schemes.
 * The destroy member, together with the API visibility of this object,
 * makes it possible to derive driver-specific types.
 */
struct ttm_buffer_object {
	struct drm_gem_object base;

	/*
	 * Members constant at init.
	 */
	struct ttm_device *bdev;
	enum ttm_bo_type type;
	uint32_t page_alignment;
	void (*destroy) (struct ttm_buffer_object *);

	/*
	 * Members not needing protection.
	 */
	struct kref kref;

	/*
	 * Members protected by the bo::resv::reserved lock.
	 */
	struct ttm_resource *resource;
	struct ttm_tt *ttm;
	bool deleted;
	struct ttm_lru_bulk_move *bulk_move;
	unsigned priority;
	unsigned pin_count;

	/**
	 * @delayed_delete: Work item used when we can't delete the BO
	 * immediately
	 */
	struct work_struct delayed_delete;

	/**
	 * @sg: external source of pages and DMA addresses, protected by the
	 * reservation lock.
	 */
	struct sg_table *sg;
};

#define TTM_BO_MAP_IOMEM_MASK 0x80

/**
 * struct ttm_bo_kmap_obj
 *
 * @virtual: The current kernel virtual address.
 * @page: The page when kmap'ing a single page.
 * @bo_kmap_type: Type of bo_kmap.
 * @bo: The TTM BO.
 *
 * Object describing a kernel mapping. Since a TTM bo may be located
 * in various memory types with various caching policies, the
 * mapping can either be an ioremap, a vmap, a kmap or part of a
 * premapped region.
 */
struct ttm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		ttm_bo_map_iomap	= 1 | TTM_BO_MAP_IOMEM_MASK,
		ttm_bo_map_vmap		= 2,
		ttm_bo_map_kmap		= 3,
		ttm_bo_map_premapped	= 4 | TTM_BO_MAP_IOMEM_MASK,
	} bo_kmap_type;
	struct ttm_buffer_object *bo;
};

/**
 * struct ttm_operation_ctx
 *
 * @interruptible: Sleep interruptible if sleeping.
 * @no_wait_gpu: Return immediately if the GPU is busy.
 * @gfp_retry_mayfail: Set __GFP_RETRY_MAYFAIL when allocating pages.
 * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
 * BOs share the same reservation object.
 * @force_alloc: Don't check the memory account during suspend or CPU page
 * faults. Should only be used by TTM internally.
 * @resv: Reservation object to allow reserved evictions with.
 * @bytes_moved: Statistics on how many bytes have been moved.
 *
 * Context for TTM operations like changing buffer placement or general memory
 * allocation.
 */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	bool gfp_retry_mayfail;
	bool allow_res_evict;
	bool force_alloc;
	struct dma_resv *resv;
	uint64_t bytes_moved;
};
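
/*
 * Illustrative sketch (not part of the API): a driver typically fills in a
 * struct ttm_operation_ctx on the stack and passes it to placement-changing
 * functions such as ttm_bo_validate() below. Here "bo" and "placement" are
 * assumed to be provided by the caller.
 *
 *	struct ttm_operation_ctx ctx = {
 *		.interruptible = true,
 *		.no_wait_gpu = false,
 *	};
 *	int ret;
 *
 *	ret = ttm_bo_reserve(bo, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_validate(bo, placement, &ctx);
 *	ttm_bo_unreserve(bo);
 *	return ret;
 */
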
/**
 * ttm_bo_get - reference a struct ttm_buffer_object
 *
 * @bo: The buffer object.
 */
static inline void ttm_bo_get(struct ttm_buffer_object *bo)
{
	kref_get(&bo->kref);
}

/**
 * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
 * its refcount has already reached zero.
 * @bo: The buffer object.
 *
 * Used to reference a TTM buffer object in lookups where the object is removed
 * from the lookup structure during the destructor, and for RCU lookups.
 *
 * Returns: @bo if the referencing was successful, NULL otherwise.
 */
static inline __must_check struct ttm_buffer_object *
ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
{
	if (!kref_get_unless_zero(&bo->kref))
		return NULL;
	return bo;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve; rather, return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation), while taking a number of measures to prevent
 * deadlocks.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true.
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
				 bool interruptible, bool no_wait,
				 struct ww_acquire_ctx *ticket)
{
	int ret = 0;

	if (no_wait) {
		bool success;

		if (WARN_ON(ticket))
			return -EBUSY;

		success = dma_resv_trylock(bo->base.resv);
		return success ? 0 : -EBUSY;
	}

	if (interruptible)
		ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
	else
		ret = dma_resv_lock(bo->base.resv, ticket);
	if (ret == -EINTR)
		return -ERESTARTSYS;
	return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket used to acquire the ww_mutex.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function can no longer deadlock.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
					  bool interruptible,
					  struct ww_acquire_ctx *ticket)
{
	if (interruptible) {
		int ret = dma_resv_lock_slow_interruptible(bo->base.resv,
							   ticket);
		if (ret == -EINTR)
			ret = -ERESTARTSYS;
		return ret;
	}
	dma_resv_lock_slow(bo->base.resv, ticket);
	return 0;
}
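
/*
 * Illustrative sketch (not part of the API): ticketed reservation with
 * deadlock backoff. In a real multi-bo loop the -EDEADLK case must first
 * drop every other reservation held under the ticket before calling the
 * slowpath; only a single contended "bo" is shown here, and "placement"
 * is assumed to come from the caller.
 *
 *	struct ttm_operation_ctx ctx = { .interruptible = true };
 *	struct ww_acquire_ctx ticket;
 *	int ret;
 *
 *	ww_acquire_init(&ticket, &reservation_ww_class);
 *	ret = ttm_bo_reserve(bo, true, false, &ticket);
 *	if (ret == -EDEADLK)
 *		ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
 *	ww_acquire_done(&ticket);
 *	if (!ret) {
 *		ret = ttm_bo_validate(bo, placement, &ctx);
 *		ttm_bo_unreserve(bo);
 *	}
 *	ww_acquire_fini(&ticket);
 */
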
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);

static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	ttm_bo_move_to_lru_tail(bo);
	spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
				     struct ttm_resource *new_mem)
{
	WARN_ON(bo->resource);
	bo->resource = new_mem;
}

/**
 * ttm_bo_move_null - assign memory for a buffer object.
 * @bo: The bo to assign the memory to
 * @new_mem: The memory to be assigned.
 *
 * Free the resource currently held by the buffer object @bo and assign
 * @new_mem as its new resource.
 */
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
				    struct ttm_resource *new_mem)
{
	ttm_resource_free(bo, &bo->resource);
	ttm_bo_assign_mem(bo, new_mem);
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	ttm_bo_move_to_lru_tail_unlocked(bo);
	dma_resv_unlock(bo->base.resv);
}

/**
 * ttm_kmap_obj_virtual
 *
 * @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
 * @is_iomem: Pointer to a boolean that on return is true if the virtual
 * map is io memory, false if normal memory.
 *
 * Returns the virtual address of a buffer object area mapped by ttm_bo_kmap.
 * If *is_iomem is true on return, the virtual address points to an io memory
 * area that should be accessed strictly through iowriteXX() and similar
 * functions.
 */
static inline void *ttm_kmap_obj_virtual(struct ttm_bo_kmap_obj *map,
					 bool *is_iomem)
{
	*is_iomem = !!(map->bo_kmap_type & TTM_BO_MAP_IOMEM_MASK);
	return map->virtual;
}
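
/*
 * Illustrative sketch (not part of the API): CPU access to the first page
 * of a reserved bo through a kernel mapping set up by ttm_bo_kmap(),
 * declared below. Whether the mapping is io memory decides how it may be
 * accessed.
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret)
 *		return ret;
 *	virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *	if (is_iomem)
 *		iowrite32(0, (void __iomem *)virtual);
 *	else
 *		memset(virtual, 0, PAGE_SIZE);
 *	ttm_bo_kunmap(&map);
 */
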
int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
		    struct ttm_operation_ctx *ctx);
int ttm_bo_validate(struct ttm_buffer_object *bo,
		    struct ttm_placement *placement,
		    struct ttm_operation_ctx *ctx);
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
			  struct ttm_lru_bulk_move *bulk);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
			      const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, struct ttm_operation_ctx *ctx,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
			 enum ttm_bo_type type, struct ttm_placement *placement,
			 uint32_t alignment, bool interruptible,
			 struct sg_table *sg, struct dma_resv *resv,
			 void (*destroy)(struct ttm_buffer_object *));
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
		unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
		   gfp_t gfp_flags);
void ttm_bo_pin(struct ttm_buffer_object *bo);
void ttm_bo_unpin(struct ttm_buffer_object *bo);
int ttm_mem_evict_first(struct ttm_device *bdev,
			struct ttm_resource_manager *man,
			const struct ttm_place *place,
			struct ttm_operation_ctx *ctx,
			struct ww_acquire_ctx *ticket);
vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
			     struct vm_fault *vmf);
vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
				    pgprot_t prot,
				    pgoff_t num_prefault);
vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
void ttm_bo_vm_open(struct vm_area_struct *vma);
void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
		     void *buf, int len, int write);
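
/*
 * Illustrative sketch (hypothetical driver code, not part of the API): the
 * ttm_bo_vm_* helpers above are designed to be plugged into a driver's
 * vm_operations_struct for mmap of ttm_bo_type_device buffers. Drivers
 * that need extra work in the fault path typically supply their own
 * handler wrapping ttm_bo_vm_reserve() and ttm_bo_vm_fault_reserved()
 * instead of using ttm_bo_vm_fault() directly.
 *
 *	static const struct vm_operations_struct my_driver_vm_ops = {
 *		.fault = ttm_bo_vm_fault,
 *		.open = ttm_bo_vm_open,
 *		.close = ttm_bo_vm_close,
 *		.access = ttm_bo_vm_access,
 *	};
 */
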
vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot);

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement,
		     struct ttm_resource **mem,
		     struct ttm_operation_ctx *ctx);

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/*
 * ttm_bo_util.c
 */
int ttm_mem_io_reserve(struct ttm_device *bdev,
		       struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_device *bdev,
		     struct ttm_resource *mem);
void ttm_move_memcpy(bool clear, u32 num_pages,
		     struct ttm_kmap_iter *dst_iter,
		     struct ttm_kmap_iter *src_iter);
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       struct ttm_operation_ctx *ctx,
		       struct ttm_resource *new_mem);
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      struct dma_fence *fence, bool evict,
			      bool pipeline,
			      struct ttm_resource *new_mem);
void ttm_bo_move_sync_cleanup(struct ttm_buffer_object *bo,
			      struct ttm_resource *new_mem);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
		     pgprot_t tmp);
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);

#endif