/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

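/*
 * TLB flush granularity requested by KFD; consumed by
 * amdgpu_amdkfd_flush_gpu_tlb_pasid() below.
 */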
enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};

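/*
 * One per-GPU attachment of a kgd_mem buffer: the bo_va mapping of the
 * buffer in a single device's VM, plus how it was attached (see
 * enum kfd_mem_attachment_type above). A kgd_mem keeps one of these on
 * its attachments list for each GPU the buffer is mapped on.
 */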
struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

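/*
 * KFD's view of a buffer object: wraps the underlying amdgpu_bo together
 * with its per-GPU attachments, eviction/restore bookkeeping and the
 * process it belongs to.
 */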
struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};

/* KFD Memory Eviction */
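/*
 * An eviction fence is attached to the reservation objects of all BOs that
 * belong to a KFD process. When TTM wants to evict one of them, enabling
 * signaling on the fence triggers a KFD queue eviction and a deferred
 * restore of the process (see amdgpu_amdkfd_fence.c).
 */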
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

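/* KFD-related state of one amdgpu device (embedded in struct amdgpu_device) */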
struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	int64_t vram_used;
	uint64_t vram_used_aligned;
	bool init_complete;
	struct work_struct reset_work;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

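/*
 * Per-process state shared by all GPUs used by a KFD process: the VMs and
 * BOs belonging to the process, userptr tracking, and the eviction fence
 * used for memory eviction and restore.
 */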
struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* Lists of valid and invalid userptr BOs */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume_iommu(struct amdgpu_device *adev);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
				enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
				uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				uint16_t pasid, enum TLB_FLUSH_TYPE flush_type,
				uint32_t inst);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
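
/*
 * The fence helpers below are implemented by the KFD backend. When
 * CONFIG_HSA_AMD is disabled, the inline stubs keep the rest of amdgpu
 * building and behaving as if no KFD fences exist.
 */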
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev,
			       struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			pagefault_disable();				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->flags & PF_KTHREAD) {	\
				kthread_use_mm(mmptr);			\
				valid = !get_user((dst), (wptr));	\
				kthread_unuse_mm(mmptr);		\
			}						\
			pagefault_enable();				\
		}							\
		valid;							\
	})
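
/*
 * Illustrative use only (hypothetical caller, not taken from this file):
 * an hqd_load() implementation reads the queue's user-space write pointer
 * and keeps the previously saved value when the read is not possible.
 *
 *	uint32_t data = saved_wptr;
 *
 *	if (read_user_wptr(mm, wptr, data))
 *		... program the HQD write pointer register with "data" ...
 */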

/* GPUVM API */
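/*
 * The "drm_priv" handle passed through the GPUVM API is the struct drm_file *
 * of the DRM render node that the KFD process opened for this GPU; the macro
 * below recovers the amdgpu_vm embedded in its driver-private data.
 */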
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					struct amdgpu_vm *avm,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
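
/*
 * Rough life cycle of a KFD buffer, as a sketch only (error handling and
 * the KFD ioctl plumbing that normally drives these calls are omitted):
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset;
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size, drm_priv,
 *						&mem, &offset, flags, false);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(adev, mem, drm_priv, NULL);
 */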
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);

int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);

int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					      struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dmabuf);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
				bool reset);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag);

#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify() - Notify KFD when a GEM object is released
 * @bo: the buffer object being released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif
/* KGD2KFD callbacks */
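/*
 * These entry points are implemented by the amdkfd driver. When
 * CONFIG_HSA_AMD is disabled, the inline stubs below turn them into
 * no-ops so amdgpu can be built and run without KFD support.
 */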
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume_iommu(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
				const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static int __maybe_unused kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}

static inline int kgd2kfd_check_and_lock_kfd(void)
{
	return 0;
}

static inline void kgd2kfd_unlock_kfd(void)
{
}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */