/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flags marking KFD userptr BOs and SVM BOs */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)
#define AMDGPU_AMDKFD_CREATE_SVM_BO	(1ULL << 62)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
};

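/*
 * Example (informal sketch, not part of the API): a caller typically fills an
 * amdgpu_bo_param on the stack and passes it to amdgpu_bo_create(), declared
 * further below.  The concrete values here (a 64 KiB, page-aligned,
 * CPU-accessible VRAM allocation) are purely illustrative; "adev" is the usual
 * struct amdgpu_device pointer of the calling context:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = 64 * 1024;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */
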
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif

	struct list_head		shadow_list;

	struct kgd_mem			*kfd_bo;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_bo		*shadow;
	struct amdgpu_vm_bo_base	entries[];
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

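/*
 * Example (sketch): the BO types above are recovered from their embedded
 * members via container_of().  Assuming a TTM callback hands us a
 * ttm_buffer_object that is known to belong to an amdgpu_bo_user, i.e. it was
 * created through amdgpu_bo_create_user():
 *
 *	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
 *	struct amdgpu_bo_user *ubo = to_amdgpu_bo_user(abo);
 *
 * The downcast is only valid when bo_ptr_size was large enough for the
 * derived type at creation time.
 */
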
/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}

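/*
 * Example (sketch): most of the helpers below expect the BO to be reserved.
 * Given an already created struct amdgpu_bo *bo, the usual pattern is an
 * interruptible reserve, the operation itself, and an unconditional
 * unreserve:
 *
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	amdgpu_bo_unreserve(bo);
 */
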
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

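/*
 * Example (hypothetical sketch): a GEM or dumb-buffer style ioctl handler
 * would report this fake offset to user space, which then passes it to
 * mmap() on the DRM file descriptor.  "args" is a made-up ioctl argument
 * struct used only for illustration:
 *
 *	args->offset = amdgpu_bo_mmap_offset(bo);
 */
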
/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 * @bo: BO to check
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_res_cursor cursor;

	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
	while (cursor.remaining) {
		if (cursor.start < adev->gmc.visible_vram_size)
			return true;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: BO to check
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

/**
 * amdgpu_bo_shadowed - check if the BO is shadowed
 *
 * @bo: BO to be tested.
 *
 * Returns:
 * The shadow BO if @bo is shadowed, NULL otherwise.
 */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
	if (bo->tbo.type == ttm_bo_type_kernel)
		return to_amdgpu_bo_vm(bo)->shadow;

	return NULL;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size, uint32_t domain,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);

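/*
 * Example (sketch): amdgpu_bo_create_kernel() allocates, pins and kmaps a
 * kernel-owned BO in one call, and amdgpu_bo_free_kernel() undoes all three.
 * The GTT domain and PAGE_SIZE values are illustrative:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_GTT, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *	memset(cpu_ptr, 0, PAGE_SIZE);
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */
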
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
			  uint64_t *gtt_mem, uint64_t *cpu_mem);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
					    uint32_t domain);

/*
 * sub allocation
 */

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->gpu_addr + sa_bo->soffset;
}

static inline void *amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
{
	return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence);

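/*
 * Example (sketch): a typical sub-allocation from an already initialized
 * amdgpu_sa_manager.  The 256-byte alignment and the fence used to defer the
 * actual release are illustrative only:
 *
 *	struct amdgpu_sa_bo *sa_bo;
 *	uint64_t gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, size, 256);
 *	if (r)
 *		return r;
 *	cpu_ptr = amdgpu_sa_bo_cpu_addr(sa_bo);
 *	gpu_addr = amdgpu_sa_bo_gpu_addr(sa_bo);
 *	...
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
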
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif