/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};
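/*
 * Illustrative sketch, not part of this interface: a kgd_mem (defined
 * below) keeps one kfd_mem_attachment on its attachments list per GPU
 * VM the buffer is attached to. Code that needs per-device mapping
 * state might walk the list roughly as follows, assuming "mem" and
 * "adev" are in scope and the kgd_mem mutex is held:
 *
 *	struct kfd_mem_attachment *entry;
 *
 *	list_for_each_entry(entry, &mem->attachments, list)
 *		if (entry->adev == adev && entry->is_mapped)
 *			return entry->va;
 */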
struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	atomic_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};

/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	uint64_t vram_used;
	bool init_complete;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	atomic_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid,
				      enum TLB_FLUSH_TYPE flush_type);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
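/*
 * Illustrative sketch, not part of this interface: code that inspects
 * fences can use to_amdgpu_amdkfd_fence() to tell KFD eviction fences
 * apart from ordinary fences, assuming "f" is a struct dma_fence in
 * scope:
 *
 *	struct amdgpu_amdkfd_fence *efence = to_amdgpu_amdkfd_fence(f);
 *
 *	if (efence)
 *		... f is a KFD eviction fence and efence->mm identifies
 *		    the process that owns it ...
 */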
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dmabuf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
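/*
 * Illustrative sketch, not part of this interface: a typical pairing of
 * the GTT helpers above when backing a hardware-visible structure (an
 * MQD, for example) with GART-mapped memory; "kgd" and "size" are
 * assumed to be in scope:
 *
 *	void *mem_obj, *cpu_ptr;
 *	uint64_t gpu_addr;
 *
 *	if (!amdgpu_amdkfd_alloc_gtt_mem(kgd, size, &mem_obj, &gpu_addr,
 *					 &cpu_ptr, false)) {
 *		... fill via cpu_ptr, program hardware with gpu_addr ...
 *		amdgpu_amdkfd_free_gtt_mem(kgd, mem_obj);
 *	}
 */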
/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)			\
	({							\
		bool valid = false;				\
		if ((mmptr) && (wptr)) {			\
			pagefault_disable();			\
			if ((mmptr) == current->mm) {		\
				valid = !get_user((dst), (wptr)); \
			} else if (current->flags & PF_KTHREAD) { \
				kthread_use_mm(mmptr);		\
				valid = !get_user((dst), (wptr)); \
				kthread_unuse_mm(mmptr);	\
			}					\
			pagefault_enable();			\
		}						\
		valid;						\
	})

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)				\
	(&((struct amdgpu_fpriv *)				\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)

int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					struct file *filp, u32 pasid,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);
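/*
 * Illustrative sketch, not part of this interface: the usual life cycle
 * of a buffer on the GPUVM API above is allocate, map, unmap, free, all
 * against the same drm_priv; "kgd", "va", "size", "drm_priv" and
 * "flags" are assumed to be in scope:
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset;
 *
 *	if (!amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, drm_priv,
 *						     &mem, &offset, flags)) {
 *		amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv);
 *		...
 *		amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, drm_priv);
 *		amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem, drm_priv,
 *						       NULL);
 *	}
 */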
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				struct tile_config *config);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
#endif
/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */