/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flags to indicate special KFD BOs (userptr and SVM) */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)
#define AMDGPU_AMDKFD_CREATE_SVM_BO	(1ULL << 62)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};
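/*
 * Example (illustrative sketch only, not part of the driver): a typical way
 * to fill in struct amdgpu_bo_param before calling amdgpu_bo_create(),
 * declared below. The concrete size, alignment and domain values are
 * placeholders.
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;			// total size in bytes
 *	bp.byte_align = PAGE_SIZE;		// CPU-side alignment
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;	// initial placement
 *	bp.type = ttm_bo_type_kernel;		// no user-space handle
 *	bp.resv = NULL;				// create a private reservation
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */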
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	struct amdgpu_bo		*shadow;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

struct amdgpu_bo_user {
	struct amdgpu_bo	bo;
	u64			tiling_flags;
	u64			metadata_flags;
	void			*metadata;
	u32			metadata_size;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns the corresponding domain for the given ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
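/*
 * Example (illustrative sketch only): most amdgpu_bo state is protected by
 * the BO's reservation, so callers typically bracket accesses with
 * amdgpu_bo_reserve()/amdgpu_bo_unreserve(). Passing no_intr == false asks
 * for an interruptible wait, so the caller must handle -ERESTARTSYS.
 *
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;	// may be -ERESTARTSYS on a pending signal
 *
 *	// ... operate on the reserved BO, e.g. amdgpu_bo_kmap() ...
 *
 *	amdgpu_bo_unreserve(bo);
 */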
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo: BO to check
 *
 * Returns true if at least part of the BO resides in CPU-visible VRAM,
 * false otherwise.
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_res_cursor cursor;

	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
	while (cursor.remaining) {
		if (cursor.start < adev->gmc.visible_vram_size)
			return true;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: BO to check
 *
 * Returns true if the BO was created with explicit synchronization enabled,
 * false otherwise.
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
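/*
 * Example (illustrative sketch only): amdgpu_bo_create_kernel() allocates,
 * pins and maps a kernel-owned BO in one call; amdgpu_bo_free_kernel()
 * undoes all three. The size, alignment and domain below are placeholders.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM,
 *				    &bo, &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	// ... use gpu_addr for GPU access and cpu_ptr for CPU access ...
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */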
int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
			    unsigned long size,
			    struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif /* __AMDGPU_OBJECT_H__ */