/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
#include <drm/drm_client.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
#include "amdgpu_xcp.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct list_head validate_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	uint32_t gem_handle;
	bool aql_queue;
	bool is_imported;
};
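
/*
 * Typical struct kgd_mem lifecycle, shown as an illustrative sketch only
 * (not part of this interface): error handling is omitted and the local
 * variable values are hypothetical. An allocation is created, mapped into
 * a process VM, synced, and torn down with the GPUVM API declared further
 * below.
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset, freed;
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size, drm_priv,
 *						&mem, &offset, flags, false);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(adev, mem, drm_priv);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(adev, mem, drm_priv, &freed);
 */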

/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	int64_t vram_used[MAX_XCP];
	uint64_t vram_used_aligned[MAX_XCP];
	bool init_complete;
	struct work_struct reset_work;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;

	/* Client for KFD BO GEM handle allocations */
	struct drm_client_dev client;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};


struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
				enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
				struct mm_struct *mm,
				struct svm_range_bo *svm_bo);

int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}
static inline
int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
					uint32_t domain,
					struct dma_fence *fence)
{
	return 0;
}
#endif
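
/*
 * Illustrative sketch (not part of this interface; names are hypothetical):
 * code that inspects reservation fences is assumed to use
 * to_amdgpu_amdkfd_fence() to recognize KFD eviction fences, and
 * amdkfd_fence_check_mm() to ask whether such a fence belongs to a given
 * process address space.
 *
 *	struct amdgpu_amdkfd_fence *ef = to_amdgpu_amdkfd_fence(f);
 *
 *	if (ef)
 *		... f is a KFD eviction fence; ef->mm identifies the owner ...
 *	if (amdkfd_fence_check_mm(f, current->mm))
 *		... the fence was created on behalf of the current process ...
 */
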
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
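
/*
 * Illustrative pairing of the two GTT helpers above (a sketch only; the
 * size, the mqd_gfx9 flag and the error handling are assumptions, not
 * requirements of the interface):
 *
 *	void *mem_obj, *cpu_ptr;
 *	uint64_t gpu_addr;
 *
 *	if (!amdgpu_amdkfd_alloc_gtt_mem(adev, PAGE_SIZE, &mem_obj,
 *					 &gpu_addr, &cpu_ptr, false)) {
 *		... access the buffer via cpu_ptr, program hardware with gpu_addr ...
 *		amdgpu_amdkfd_free_gtt_mem(adev, mem_obj);
 *	}
 */
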
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
				u32 inst);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			pagefault_disable();				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->flags & PF_KTHREAD) {	\
				kthread_use_mm(mmptr);			\
				valid = !get_user((dst), (wptr));	\
				kthread_unuse_mm(mmptr);		\
			}						\
			pagefault_enable();				\
		}							\
		valid;							\
	})
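
/*
 * Illustrative use of read_user_wptr() in an hqd_load-style caller (a
 * sketch only; the variable names are hypothetical). The macro evaluates
 * to true only if the user-space write pointer could be read without
 * faulting, in which case dst holds the value:
 *
 *	uint32_t wptr_val = 0;
 *
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		... program the HQD with wptr_val ...
 *	else
 *		... keep the previously programmed write pointer ...
 */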

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)					\
	(&((struct amdgpu_fpriv *)					\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)
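
/*
 * drm_priv_to_vm() resolves the opaque drm_priv cookie (a struct drm_file
 * pointer) passed through this interface back to the process's amdgpu_vm,
 * e.g. (illustrative only):
 *
 *	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 */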

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					struct amdgpu_vm *avm,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					uint8_t xcp_id);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);

int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo);

int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence __rcu **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					      struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
					 uint64_t va, void *drm_priv,
					 struct kgd_mem **mem, uint64_t *size,
					 uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dmabuf);
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint32_t reset);

void amdgpu_amdkfd_ras_pasid_poison_consumption_handler(struct amdgpu_device *adev,
			enum amdgpu_ras_block block, uint16_t pasid,
			pasid_notify pasid_fn, void *data, uint32_t reset);

bool amdgpu_amdkfd_is_fed(struct amdgpu_device *adev);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev,
			int hub_inst, int hub_type);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);

u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);

#define KFD_XCP_MEM_ID(adev, xcp_id) \
		((adev)->xcp_mgr && (xcp_id) >= 0 ?\
		(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))

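/*
 * Illustrative use of the XCP helpers above (a sketch only): resolve the
 * memory partition id backing a compute partition and query how much
 * memory that partition owns.
 *
 *	int mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
 *	u64 part_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
 *
 * A negative mem_id means the device has no xcp_mgr (not partitioned) or
 * xcp_id is invalid, as encoded in KFD_XCP_MEM_ID() itself.
 */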

#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
#else
static inline
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	return 0;
}
#endif

/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
				const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}

static inline int kgd2kfd_check_and_lock_kfd(void)
{
	return 0;
}

static inline void kgd2kfd_unlock_kfd(void)
{
}
#endif
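
/*
 * Illustrative bring-up order of the KGD2KFD callbacks declared above (a
 * sketch of how amdgpu is assumed to drive them, not a normative sequence):
 *
 *	kgd2kfd_init();                              driver load
 *	kfd = kgd2kfd_probe(adev, vf);               per-device probe
 *	kgd2kfd_device_init(kfd, &gpu_resources);    hand over queue/doorbell resources
 *	...
 *	kgd2kfd_suspend(kfd, run_pm);                power management or reset
 *	kgd2kfd_resume(kfd, run_pm);
 *	...
 *	kgd2kfd_device_exit(kfd);                    device teardown
 *	kgd2kfd_exit();                              driver unload
 */
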
#endif /* AMDGPU_AMDKFD_H_INCLUDED */