/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
	void				(*destroy)(struct ttm_buffer_object *bo);
	/* xcp partition number plus 1, 0 means any partition */
	int8_t				xcp_id_plus1;
};
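
/*
 * Illustrative sketch only (not part of the driver): a typical way to fill
 * in struct amdgpu_bo_param before calling amdgpu_bo_create(), which is
 * declared further down in this header.  The concrete values are just an
 * example; a struct amdgpu_device *adev and struct amdgpu_bo *bo are
 * assumed to be provided by the caller, and xcp_id_plus1 == 0 means "any
 * partition" as noted above.
 *
 *	struct amdgpu_bo_param bp;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 *	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 *	bp.type = ttm_bo_type_kernel;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 */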

/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
	struct kgd_mem			*kfd_bo;

	/*
	 * For GPUs with spatial partitioning, xcp partition number, -1 means
	 * any partition. For other ASICs without spatial partition, always 0
	 * for memory accounting.
	 */
	int8_t				xcp_id;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_bo		*shadow;
	struct list_head		shadow_list;
	struct amdgpu_vm_bo_base	entries[];
};

struct amdgpu_mem_stats {
	/* current VRAM usage, includes visible VRAM */
	uint64_t vram;
	/* current shared VRAM usage, includes visible VRAM */
	uint64_t vram_shared;
	/* current visible VRAM usage */
	uint64_t visible_vram;
	/* current GTT usage */
	uint64_t gtt;
	/* current shared GTT usage */
	uint64_t gtt_shared;
	/* current system memory usage */
	uint64_t cpu;
	/* current shared system memory usage */
	uint64_t cpu_shared;
	/* sum of evicted buffers, includes visible VRAM */
	uint64_t evicted_vram;
	/* sum of evicted buffers due to CPU access */
	uint64_t evicted_visible_vram;
	/* how much userspace asked for, includes vis.VRAM */
	uint64_t requested_vram;
	/* how much userspace asked for */
	uint64_t requested_visible_vram;
	/* how much userspace asked for */
	uint64_t requested_gtt;
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	case AMDGPU_PL_DOORBELL:
		return AMDGPU_GEM_DOMAIN_DOORBELL;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
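
/*
 * Illustrative sketch only (not part of the driver): the usual
 * reserve/kmap/unreserve pattern built from the helpers in this header,
 * assuming a valid struct amdgpu_bo *bo from the caller.  amdgpu_bo_kmap()
 * and amdgpu_bo_kunmap() are declared further down; error handling is
 * reduced to the minimum.
 *
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *
 *	r = amdgpu_bo_kmap(bo, &cpu_ptr);
 *	if (!r) {
 *		... access cpu_ptr ...
 *		amdgpu_bo_kunmap(bo);
 *	}
 *	amdgpu_bo_unreserve(bo);
 */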

static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_res_cursor cursor;

	if (!bo->tbo.resource || bo->tbo.resource->mem_type != TTM_PL_VRAM)
		return false;

	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
	while (cursor.remaining) {
		if (cursor.start < adev->gmc.visible_vram_size)
			return true;

		amdgpu_res_next(&cursor, cursor.size);
	}

	return false;
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

/**
 * amdgpu_bo_shadowed - check if the BO is shadowed
 *
 * @bo: BO to be tested.
 *
 * Returns:
 * NULL if not shadowed or else return a BO pointer.
 */
static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
{
	if (bo->tbo.type == ttm_bo_type_kernel)
		return to_amdgpu_bo_vm(bo)->shadow;

	return NULL;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
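
/*
 * Illustrative sketch only (not part of the driver): lifetime of a small
 * kernel-owned BO using amdgpu_bo_create_kernel() and amdgpu_bo_free_kernel()
 * as declared above, assuming a struct amdgpu_device *adev from the caller.
 * The size, alignment and domain are example values.
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *
 *	... use gpu_addr / cpu_addr ...
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */
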
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
			     u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
			  struct amdgpu_mem_stats *stats);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
			     struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain);

/*
 * sub allocation
 */

static inline struct amdgpu_sa_manager *
to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
{
	return container_of(manager, struct amdgpu_sa_manager, base);
}

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
		drm_suballoc_soffset(sa_bo);
}

static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
		drm_suballoc_soffset(sa_bo);
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct drm_suballoc **sa_bo,
		     unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
		       struct drm_suballoc **sa_bo,
		       struct dma_fence *fence);
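
/*
 * Illustrative sketch only (not part of the driver): sub-allocating from an
 * amdgpu_sa_manager with the helpers above, assuming a struct
 * amdgpu_sa_manager sa_manager that was already set up with
 * amdgpu_sa_bo_manager_init(), a struct amdgpu_device *adev, and a
 * struct dma_fence *fence protecting the last use of the sub-allocation.
 * The allocation size is an example value.
 *
 *	struct drm_suballoc *sa_bo = NULL;
 *	int r;
 *
 *	r = amdgpu_sa_bo_new(&sa_manager, &sa_bo, 256);
 *	if (r)
 *		return r;
 *
 *	... fill amdgpu_sa_bo_cpu_addr(sa_bo),
 *	    emit amdgpu_sa_bo_gpu_addr(sa_bo) to the hardware ...
 *
 *	amdgpu_sa_bo_free(adev, &sa_bo, fence);
 */
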
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);


#endif