/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dma_buf.h"
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

/* Total memory size in system memory and all GPU VRAM. Used to
 * estimate the worst-case amount of memory to reserve for page tables.
 */
uint64_t amdgpu_amdkfd_total_mem_size;

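/* Module-level init: record the total system memory size for the
 * worst-case page-table estimate above, then hand off to the KFD core
 * (kgd2kfd_init) and set up the KFD GPUVM memory limits. Returns
 * -ENOENT when the driver is built without CONFIG_HSA_AMD.
 */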
int amdgpu_amdkfd_init(void)
{
	struct sysinfo si;
	int ret;

	si_meminfo(&si);
	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init();
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	kgd2kfd_exit();
}

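/* Per-device probe: ask the KFD core for a kfd_dev handle for this
 * adapter and, if one is created, add the device's VRAM to the global
 * memory-size estimate.
 */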
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	bool vf = amdgpu_sriov_vf(adev);

	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
				      adev->pdev, adev->asic_type, vf);

	if (adev->kfd.dev)
		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                set up amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes the doorbells required for its own rings and reports the setup to
 * amdkfd. The amdgpu-reserved doorbells are at the start of the doorbell
 * aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

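/* Per-device init: describe to the KFD core the resources it may use --
 * the compute VMID range, the MEC pipe/queue topology, the GPUVM
 * aperture size, and the portion of the doorbell aperture left over by
 * amdgpu -- then let kgd2kfd_device_init() bring the device up.
 */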
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd.dev) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap =
				((1 << AMDGPU_NUM_VMID) - 1) -
				((1 << adev->vm_manager.first_kfd_vmid) - 1),
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev_to_drm(adev)->render->index,
			.sdma_doorbell_idx = adev->doorbell_index.sdma_engine,
		};

		/* This is going to have a few of the MSBs set that we need to
		 * clear.
		 */
		bitmap_complement(gpu_resources.cp_queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile-time constant.
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.cp_queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		/* Since SOC15, the BIF statically uses the
		 * lower 12 bits of doorbell addresses for routing
		 * based on settings in registers like
		 * SDMA0_DOORBELL_RANGE etc.
		 * In order to route a doorbell to the CP engine, the lower
		 * 12 bits of its address have to be outside the range
		 * set for the SDMA, VCN, and IH blocks.
		 */
		if (adev->asic_type >= CHIP_VEGA10) {
			gpu_resources.non_cp_doorbells_start =
					adev->doorbell_index.first_non_cp;
			gpu_resources.non_cp_doorbells_end =
					adev->doorbell_index.last_non_cp;
		}

		kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd.dev) {
		kgd2kfd_device_exit(adev->kfd.dev);
		adev->kfd.dev = NULL;
	}
}

void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd.dev)
		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}

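/* Suspend/resume hooks called from the amdgpu PM paths. Both simply
 * forward to the KFD core when a KFD device exists; run_pm is passed
 * through unchanged (it flags runtime-PM transitions in the callers).
 */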
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
	if (adev->kfd.dev)
		kgd2kfd_suspend(adev->kfd.dev, run_pm);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_resume(adev->kfd.dev, run_pm);

	return r;
}

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_pre_reset(adev->kfd.dev);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd.dev)
		r = kgd2kfd_post_reset(adev->kfd.dev);

	return r;
}

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

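/* Allocate a kernel-mode GTT BO for KFD (e.g. for MQDs and other
 * per-queue structures): create, reserve, pin, bind to GART and kmap it,
 * returning the BO, its GPU address and a kernel CPU pointer.
 * cp_mqd_gfx9 requests the GFX9 CP-MQD placement flag. Undone by
 * amdgpu_amdkfd_free_gtt_mem().
 */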
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool cp_mqd_gfx9)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;

	if (cp_mqd_gfx9)
		bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
}

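/* GWS (global wave sync) BOs: allocated in the GWS domain with no CPU
 * access and never pinned or mapped, so alloc/free reduce to BO
 * create/unref.
 */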
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
				void **mem_obj)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = 1;
	bp.domain = AMDGPU_GEM_DOMAIN_GWS;
	bp.flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
	bp.type = ttm_bo_type_device;
	bp.resv = NULL;

	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate gws BO for amdkfd (%d)\n", r);
		return r;
	}

	*mem_obj = bo;
	return 0;
}

void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;

	amdgpu_bo_unref(&bo);
}

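/* Report the loaded firmware version for the engine KFD asks about;
 * unknown engine types report 0.
 */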
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	switch (type) {
	case KGD_ENGINE_PFP:
		return adev->gfx.pfp_fw_version;

	case KGD_ENGINE_ME:
		return adev->gfx.me_fw_version;

	case KGD_ENGINE_CE:
		return adev->gfx.ce_fw_version;

	case KGD_ENGINE_MEC1:
		return adev->gfx.mec_fw_version;

	case KGD_ENGINE_MEC2:
		return adev->gfx.mec2_fw_version;

	case KGD_ENGINE_RLC:
		return adev->gfx.rlc_fw_version;

	case KGD_ENGINE_SDMA1:
		return adev->sdma.instance[0].fw_version;

	case KGD_ENGINE_SDMA2:
		return adev->sdma.instance[1].fw_version;

	default:
		return 0;
	}
}

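/* Fill in KFD's view of local (VRAM) memory: the CPU-visible aperture
 * counts as public memory only when it lies entirely within the
 * device's DMA mask; otherwise all of VRAM is reported as private.
 * mem_clk_max ends up in MHz (the raw clocks are in 10 kHz units).
 */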
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->pm.dpm_enabled) {
		if (amdgpu_emu_mode == 1)
			mem_info->mem_clk_max = 0;
		else
			mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	} else
		mem_info->mem_clk_max = 100;
}

uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10 kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->pm.dpm_enabled)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

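/* Interrogate a dma-buf fd on KFD's behalf. Only buffers exported by
 * the same amdgpu driver and backed by VRAM or GTT are accepted; on
 * success the owning device, BO size, metadata and allocation flags
 * (translated to KFD_IOC_ALLOC_MEM_FLAGS_*) are returned through the
 * optional out parameters.
 */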
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dma_buf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	struct amdgpu_bo *bo;
	uint64_t metadata_flags;
	int r = -EINVAL;

	dma_buf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	if (dma_buf->ops != &amdgpu_dmabuf_ops)
		/* Can't handle non-graphics buffers */
		goto out_put;

	obj = dma_buf->priv;
	if (obj->dev->driver != adev_to_drm(adev)->driver)
		/* Can't handle buffers from different drivers */
		goto out_put;

	adev = drm_to_adev(obj->dev);
	bo = gem_to_amdgpu_bo(obj);
	if (!(bo->preferred_domains & (AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT)))
		/* Only VRAM and GTT BOs are supported */
		goto out_put;

	r = 0;
	if (dma_buf_kgd)
		*dma_buf_kgd = (struct kgd_dev *)adev;
	if (bo_size)
		*bo_size = amdgpu_bo_size(bo);
	if (metadata_size)
		*metadata_size = bo->metadata_size;
	if (metadata_buffer)
		r = amdgpu_bo_get_metadata(bo, metadata_buffer, buffer_size,
					   metadata_size, &metadata_flags);
	if (flags) {
		*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
				KFD_IOC_ALLOC_MEM_FLAGS_VRAM
				: KFD_IOC_ALLOC_MEM_FLAGS_GTT;

		if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			*flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
	}

out_put:
	dma_buf_put(dma_buf);
	return r;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->unique_id;
}

uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
	struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
	struct amdgpu_device *adev = (struct amdgpu_device *)dst;
	int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev);

	if (ret < 0) {
		DRM_ERROR("amdgpu: failed to get xgmi hops count between node %d and %d. ret = %d\n",
			adev->gmc.xgmi.physical_node_id,
			peer_adev->gmc.xgmi.physical_node_id, ret);
		ret = 0;
	}
	return (uint8_t)ret;
}

uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rmmio_remap.bus_addr;
}

uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gds.gws_size;
}

uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->rev_id;
}

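/* Submit a raw IB on KFD's behalf on the first compute or SDMA ring and
 * wait for the resulting fence. The VMID is programmed directly into
 * the job, which only works in the no-HWS (hardware scheduler disabled)
 * mode noted below.
 */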
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE,
					!idle);
}

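/* A VMID belongs to KFD if it falls in the range handed to the KFD core
 * in amdgpu_amdkfd_device_init() (first_kfd_vmid and up).
 */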
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd.dev)
		return vmid >= adev->vm_manager.first_kfd_vmid;

	return false;
}

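/* TLB flush helpers for KFD. Vega-family (FAMILY_AI) parts flush every
 * VM hub; everything else only flushes GFXHUB 0. The PASID variant
 * delegates hub selection to amdgpu_gmc_flush_gpu_tlb_pasid().
 */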
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->family == AMDGPU_FAMILY_AI) {
		int i;

		for (i = 0; i < adev->num_vmhubs; i++)
			amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);
	} else {
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, AMDGPU_GFXHUB_0, 0);
	}

	return 0;
}

int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	const uint32_t flush_type = 0;
	bool all_hub = false;

	if (adev->family == AMDGPU_FAMILY_AI)
		all_hub = true;

	return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub);
}

bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->have_atomics_support;
}

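/* Stubs used when KFD support is not built in (CONFIG_HSA_AMD unset):
 * the kgd2kfd entry points collapse to no-ops so the rest of amdgpu can
 * call them unconditionally.
 */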
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}

int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf)
{
	return NULL;
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

void kgd2kfd_exit(void)
{
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
{
}
#endif