/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

struct amdgpu_bo_param {
        unsigned long size;
        int byte_align;
        u32 bo_ptr_size;
        u32 domain;
        u32 preferred_domain;
        u64 flags;
        enum ttm_bo_type type;
        bool no_wait_gpu;
        struct dma_resv *resv;
        void (*destroy)(struct ttm_buffer_object *bo);
        /* xcp partition number plus 1, 0 means any partition */
        int8_t xcp_id_plus1;
};
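/*
 * Illustrative sketch only (not part of the driver API): one plausible way a
 * caller could fill in struct amdgpu_bo_param before handing it to
 * amdgpu_bo_create(), which is declared further below.  The local names
 * (adev, bp, bo) and the chosen size, domain and flags are hypothetical
 * values picked for the example, not requirements of the API.
 *
 *      struct amdgpu_bo_param bp;
 *      struct amdgpu_bo *bo;
 *      int r;
 *
 *      memset(&bp, 0, sizeof(bp));
 *      bp.size = PAGE_SIZE;
 *      bp.byte_align = PAGE_SIZE;
 *      bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *      bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *      bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 *      bp.type = ttm_bo_type_kernel;
 *      bp.resv = NULL;
 *
 *      r = amdgpu_bo_create(adev, &bp, &bo);
 *      if (r)
 *              return r;
 */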
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
        struct amdgpu_bo_va *bo_va;
        struct list_head list;
        struct rb_node rb;
        uint64_t start;
        uint64_t last;
        uint64_t __subtree_last;
        uint64_t offset;
        uint64_t flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
        struct amdgpu_vm_bo_base base;

        /* protected by bo being reserved */
        unsigned ref_count;

        /* all other members protected by the VM PD being reserved */
        struct dma_fence *last_pt_update;

        /* mappings for this bo_va */
        struct list_head invalids;
        struct list_head valids;

        /* If the mappings are cleared or filled */
        bool cleared;

        bool is_xgmi;

        /*
         * Protected by the VM reservation lock. If non-zero, the mapping
         * cannot be unmapped from the GPU because user queues may still
         * access it.
         */
        unsigned int queue_refcount;
};

struct amdgpu_bo {
        /* Protected by tbo.reserved */
        u32 preferred_domains;
        u32 allowed_domains;
        struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
        struct ttm_placement placement;
        struct ttm_buffer_object tbo;
        struct ttm_bo_kmap_obj kmap;
        u64 flags;
        /* per VM structure for page tables and with virtual addresses */
        struct amdgpu_vm_bo_base *vm_bo;
        /* Constant after initialization */
        struct amdgpu_bo *parent;

#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_interval_notifier notifier;
#endif
        struct kgd_mem *kfd_bo;

        /*
         * For GPUs with spatial partitioning, the xcp partition number; -1
         * means any partition. For ASICs without spatial partitioning this is
         * always 0 and is used for memory accounting.
         */
        int8_t xcp_id;
};

struct amdgpu_bo_user {
        struct amdgpu_bo bo;
        u64 tiling_flags;
        u64 metadata_flags;
        void *metadata;
        u32 metadata_size;
};

struct amdgpu_bo_vm {
        struct amdgpu_bo bo;
        struct amdgpu_bo *shadow;
        struct list_head shadow_list;
        struct amdgpu_vm_bo_base entries[];
};

struct amdgpu_mem_stats {
        /* current VRAM usage, includes visible VRAM */
        uint64_t vram;
        /* current shared VRAM usage, includes visible VRAM */
        uint64_t vram_shared;
        /* current visible VRAM usage */
        uint64_t visible_vram;
        /* current GTT usage */
        uint64_t gtt;
        /* current shared GTT usage */
        uint64_t gtt_shared;
        /* current system memory usage */
        uint64_t cpu;
        /* current shared system memory usage */
        uint64_t cpu_shared;
        /* sum of evicted buffers, includes visible VRAM */
        uint64_t evicted_vram;
        /* sum of evicted buffers due to CPU access */
        uint64_t evicted_visible_vram;
        /* how much VRAM userspace asked for, includes visible VRAM */
        uint64_t requested_vram;
        /* how much visible VRAM userspace asked for */
        uint64_t requested_visible_vram;
        /* how much GTT userspace asked for */
        uint64_t requested_gtt;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
        return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
        switch (mem_type) {
        case TTM_PL_VRAM:
                return AMDGPU_GEM_DOMAIN_VRAM;
        case TTM_PL_TT:
                return AMDGPU_GEM_DOMAIN_GTT;
        case TTM_PL_SYSTEM:
                return AMDGPU_GEM_DOMAIN_CPU;
        case AMDGPU_PL_GDS:
                return AMDGPU_GEM_DOMAIN_GDS;
        case AMDGPU_PL_GWS:
                return AMDGPU_GEM_DOMAIN_GWS;
        case AMDGPU_PL_OA:
                return AMDGPU_GEM_DOMAIN_OA;
        case AMDGPU_PL_DOORBELL:
                return AMDGPU_GEM_DOMAIN_DOORBELL;
        default:
                break;
        }
        return 0;
}
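/*
 * Illustrative sketch only: amdgpu_mem_type_to_domain() above is typically
 * used to translate a BO's current TTM placement into a GEM domain flag.
 * The helper below is hypothetical and assumes bo->tbo.resource is non-NULL,
 * i.e. the BO currently has a backing resource.
 *
 *      static bool example_bo_is_in_vram(struct amdgpu_bo *bo)
 *      {
 *              u32 domain;
 *
 *              domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 *              return domain == AMDGPU_GEM_DOMAIN_VRAM;
 *      }
 */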
/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * 0 for success.
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
        int r;

        r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        dev_err(adev->dev, "%p reserve failed\n", bo);
                return r;
        }
        return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
        ttm_bo_unreserve(&bo->tbo);
}

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
        return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
        return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
        return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
        return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: the buffer object to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
        return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
        return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}
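/*
 * Illustrative sketch only: the usual pattern around the reservation helpers
 * above.  Per-BO state such as placement, pinning or tiling flags may only be
 * changed while the BO is reserved.  "bo" below is a hypothetical,
 * already-created amdgpu_bo.
 *
 *      int r;
 *
 *      r = amdgpu_bo_reserve(bo, false);
 *      if (r)
 *              return r;       // interruptible wait; may be -ERESTARTSYS
 *
 *      // ... touch state protected by the reservation, e.g.
 *      //     amdgpu_bo_pin() or amdgpu_bo_set_tiling_flags() ...
 *
 *      amdgpu_bo_unreserve(bo);
 */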
/**
 * amdgpu_bo_shadowed - check if the BO is shadowed
 *
 * @bo: BO to be tested.
 *
 * Returns:
 * NULL if not shadowed or else return a BO pointer.
 */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
        if (bo->tbo.type == ttm_bo_type_kernel)
                return to_amdgpu_bo_vm(bo)->shadow;

        return NULL;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
                     struct amdgpu_bo_param *bp,
                     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
                              unsigned long size, int align,
                              u32 domain, struct amdgpu_bo **bo_ptr,
                              u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
                            unsigned long size, int align,
                            u32 domain, struct amdgpu_bo **bo_ptr,
                            u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
                               uint64_t offset, uint64_t size,
                               struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
                          struct amdgpu_bo_param *bp,
                          struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
                        struct amdgpu_bo_param *bp,
                        struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                             u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
                           uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
                           bool evict,
                           struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
                             enum amdgpu_sync_mode sync_mode, void *owner,
                             bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
                          struct amdgpu_mem_stats *stats);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
                             struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                        uint32_t domain);
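/*
 * Illustrative sketch only: a typical pairing of amdgpu_bo_create_kernel()
 * and amdgpu_bo_free_kernel() for a small, CPU-visible scratch buffer.  The
 * local names (adev, bo, gpu_addr, cpu_ptr) and the size/alignment/domain
 * choices are hypothetical; real callers pick values to suit their use case.
 *
 *      struct amdgpu_bo *bo = NULL;
 *      u64 gpu_addr;
 *      void *cpu_ptr;
 *      int r;
 *
 *      r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *                                  AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *                                  &gpu_addr, &cpu_ptr);
 *      if (r)
 *              return r;
 *
 *      // ... use cpu_ptr for CPU access and gpu_addr in command buffers ...
 *
 *      amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */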
/*
 * sub allocation
 */

static inline struct amdgpu_sa_manager *
to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
{
        return container_of(manager, struct amdgpu_sa_manager, base);
}

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
        return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
               drm_suballoc_soffset(sa_bo);
}

static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
        return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
               drm_suballoc_soffset(sa_bo);
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
                              struct amdgpu_sa_manager *sa_manager,
                              unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
                               struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
                     struct drm_suballoc **sa_bo,
                     unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
                       struct drm_suballoc **sa_bo,
                       struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
                                  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif