/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	atomic_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};
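
/*
 * A kgd_mem carries one kfd_mem_attachment per GPU the buffer is attached
 * to. A minimal sketch of walking that list (illustrative only, not part
 * of this interface; assumes the caller holds a valid kgd_mem pointer):
 *
 *	struct kfd_mem_attachment *entry;
 *
 *	mutex_lock(&mem->lock);
 *	list_for_each_entry(entry, &mem->attachments, list) {
 *		if (entry->is_mapped)
 *			pr_debug("attached to adev %p at VA 0x%llx\n",
 *				 entry->adev, entry->va);
 *	}
 *	mutex_unlock(&mem->lock);
 */
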
/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	uint64_t vram_used;
	bool init_complete;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};
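
/*
 * The engine type selects which firmware version
 * amdgpu_amdkfd_get_fw_version() (declared below) reports.
 * A minimal sketch (illustrative only):
 *
 *	uint32_t mec_fw_ver =
 *		amdgpu_amdkfd_get_fw_version(adev, KGD_ENGINE_MEC1);
 */
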
struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	atomic_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
				enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
				uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				uint16_t pasid, enum TLB_FLUSH_TYPE flush_type);
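
/*
 * TLB flushes come in the flavors of enum TLB_FLUSH_TYPE above. A minimal
 * sketch of a per-process flush after a mapping change (illustrative only;
 * "pasid" is assumed to identify the KFD process on this device):
 *
 *	int r = amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, pasid,
 *						  TLB_FLUSH_HEAVYWEIGHT);
 *	if (r)
 *		pr_err("TLB flush failed for pasid 0x%x\n", pasid);
 */
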
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}
#endif

/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
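
/*
 * A minimal sketch of pairing the two calls above to back an MQD with a
 * page of GTT memory (illustrative only, error handling elided):
 *
 *	void *mem_obj, *cpu_ptr;
 *	uint64_t gpu_addr;
 *
 *	if (!amdgpu_amdkfd_alloc_gtt_mem(adev, PAGE_SIZE, &mem_obj,
 *					 &gpu_addr, &cpu_ptr, false)) {
 *		memset(cpu_ptr, 0, PAGE_SIZE);
 *		amdgpu_amdkfd_free_gtt_mem(adev, mem_obj);
 *	}
 */
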
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev,
			       struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)			\
	({							\
		bool valid = false;				\
		if ((mmptr) && (wptr)) {			\
			pagefault_disable();			\
			if ((mmptr) == current->mm) {		\
				valid = !get_user((dst), (wptr)); \
			} else if (current->flags & PF_KTHREAD) { \
				kthread_use_mm(mmptr);		\
				valid = !get_user((dst), (wptr)); \
				kthread_unuse_mm(mmptr);	\
			}					\
			pagefault_enable();			\
		}						\
		valid;						\
	})
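
/*
 * A minimal usage sketch for read_user_wptr() (illustrative only; "mm"
 * and "wptr" are assumed to come from the queue properties passed to an
 * hqd_load function, and program_hqd_wptr() is a hypothetical helper):
 *
 *	uint32_t wptr_val = 0;
 *
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		program_hqd_wptr(wptr_val);
 */
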
/* GPUVM API */
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					struct file *filp, u32 pasid,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
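
/*
 * A minimal sketch of a buffer's lifecycle through the GPUVM API above
 * (illustrative only, error handling elided; "alloc_flags" stands for
 * the caller's KFD allocation flags):
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset;
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size, drm_priv,
 *						&mem, &offset, alloc_flags,
 *						false);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(adev, mem, drm_priv, NULL);
 */
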
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				  struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
				bool reset);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);

#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif

/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
#endif

#endif	/* AMDGPU_AMDKFD_H_INCLUDED */