/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include <linux/module.h>

const struct kgd2kfd_calls *kgd2kfd;

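/* VMIDs 8-15 (bitmap 0xFF00) are reserved for KFD compute contexts. */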
static const unsigned int compute_vmid_bitmap = 0xFF00;

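/* Register with the kgd2kfd interface and set up the GPUVM memory limits
 * used for KFD allocations. Fails with -ENOENT when the driver is built
 * without CONFIG_HSA_AMD.
 */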
int amdgpu_amdkfd_init(void)
{
	int ret;

#ifdef CONFIG_HSA_AMD
	ret = kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd);
	if (ret)
		kgd2kfd = NULL;
	amdgpu_amdkfd_gpuvm_init_mem_limits();
#else
	kgd2kfd = NULL;
	ret = -ENOENT;
#endif

	return ret;
}

void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd)
		kgd2kfd->exit();
}

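/* Select the ASIC-specific kfd2kgd function table (GFX7/8/9) and ask KFD
 * to probe a device for this adapter. Unsupported ASICs return early and
 * leave adev->kfd unset.
 */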
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
{
	const struct kfd2kgd_calls *kfd2kgd;

	if (!kgd2kfd)
		return;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
	case CHIP_HAWAII:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
#endif
	case CHIP_CARRIZO:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions();
		break;
	default:
		dev_info(adev->dev, "kfd not supported on this ASIC\n");
		return;
	}

	adev->kfd = kgd2kfd->probe((struct kgd_dev *)adev,
				   adev->pdev, kfd2kgd);
}

/**
 * amdgpu_doorbell_get_kfd_info - Report doorbell configuration required to
 *                                setup amdkfd
 *
 * @adev: amdgpu_device pointer
 * @aperture_base: output returning doorbell aperture base physical address
 * @aperture_size: output returning doorbell aperture size in bytes
 * @start_offset: output returning # of doorbell bytes reserved for amdgpu.
 *
 * amdgpu and amdkfd share the doorbell aperture. amdgpu sets it up,
 * takes doorbells required for its own rings and reports the setup to amdkfd.
 * amdgpu reserved doorbells are at the start of the doorbell aperture.
 */
static void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
					 phys_addr_t *aperture_base,
					 size_t *aperture_size,
					 size_t *start_offset)
{
	/*
	 * The first num_doorbells are used by amdgpu.
	 * amdkfd takes whatever's left in the aperture.
	 */
	if (adev->doorbell.size > adev->doorbell.num_doorbells * sizeof(u32)) {
		*aperture_base = adev->doorbell.base;
		*aperture_size = adev->doorbell.size;
		*start_offset = adev->doorbell.num_doorbells * sizeof(u32);
	} else {
		*aperture_base = 0;
		*aperture_size = 0;
		*start_offset = 0;
	}
}

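/* Hand KFD its share of the GPU: the compute VMIDs, the compute queues on
 * the first MEC, the unused part of the doorbell aperture and the GPUVM
 * address range, then let KFD complete device initialization.
 */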
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
{
	int i;
	int last_valid_bit;

	if (adev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = compute_vmid_bitmap,
			.num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec,
			.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
			.gpuvm_size = min(adev->vm_manager.max_pfn
					  << AMDGPU_GPU_PAGE_SHIFT,
					  AMDGPU_GMC_HOLE_START),
			.drm_render_minor = adev->ddev->render->index
		};

		/* this is going to have a few of the MSBs set that we need to
		 * clear
		 */
		bitmap_complement(gpu_resources.queue_bitmap,
				  adev->gfx.mec.queue_bitmap,
				  KGD_MAX_QUEUES);

		/* remove the KIQ bit as well */
		if (adev->gfx.kiq.ring.ready)
			clear_bit(amdgpu_gfx_queue_to_bit(adev,
							  adev->gfx.kiq.ring.me - 1,
							  adev->gfx.kiq.ring.pipe,
							  adev->gfx.kiq.ring.queue),
				  gpu_resources.queue_bitmap);

		/* According to linux/bitmap.h we shouldn't use bitmap_clear if
		 * nbits is not a compile-time constant
		 */
		last_valid_bit = 1 /* only first MEC can have compute queues */
				* adev->gfx.mec.num_pipe_per_mec
				* adev->gfx.mec.num_queue_per_pipe;
		for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
			clear_bit(i, gpu_resources.queue_bitmap);

		amdgpu_doorbell_get_kfd_info(adev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);
		if (adev->asic_type >= CHIP_VEGA10) {
			/* On SOC15 the BIF is involved in routing
			 * doorbells using the low 12 bits of the
			 * address. Communicate the assignments to
			 * KFD. KFD uses two doorbell pages per
			 * process in case of 64-bit doorbells so we
			 * can use each doorbell assignment twice.
			 */
			gpu_resources.sdma_doorbell[0][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0;
			gpu_resources.sdma_doorbell[0][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE0 + 0x200;
			gpu_resources.sdma_doorbell[1][0] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1;
			gpu_resources.sdma_doorbell[1][1] =
				AMDGPU_DOORBELL64_sDMA_ENGINE1 + 0x200;
			/* Doorbells 0x0f0-0ff and 0x2f0-2ff are reserved for
			 * SDMA, IH and VCN. So don't use them for the CP.
			 */
			gpu_resources.reserved_doorbell_mask = 0x1f0;
			gpu_resources.reserved_doorbell_val  = 0x0f0;
		}

		kgd2kfd->device_init(adev->kfd, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev)
{
	if (adev->kfd) {
		kgd2kfd->device_exit(adev->kfd);
		adev->kfd = NULL;
	}
}

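/* Thin wrappers forwarding device events from amdgpu to the KFD instance. */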
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
		const void *ih_ring_entry)
{
	if (adev->kfd)
		kgd2kfd->interrupt(adev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
{
	if (adev->kfd)
		kgd2kfd->suspend(adev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->resume(adev->kfd);

	return r;
}

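/* Notify KFD before a GPU reset begins and again once recovery completes. */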
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->pre_reset(adev->kfd);

	return r;
}

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
{
	int r = 0;

	if (adev->kfd)
		r = kgd2kfd->post_reset(adev->kfd);

	return r;
}

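/* Allow KFD to request a GPU reset through amdgpu's recovery path. */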
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (amdgpu_device_should_recover_gpu(adev))
		amdgpu_device_gpu_recover(adev, NULL);
}

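/* Allocate a kernel GTT buffer for KFD: create a page-aligned BO, pin it,
 * bind it to the GART and kmap it, returning both the GPU address and a
 * CPU pointer. The error paths unwind in reverse order of the setup steps.
 */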
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_bo *bo = NULL;
	struct amdgpu_bo_param bp;
	int r;
	void *cpu_ptr_tmp = NULL;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;
	bp.byte_align = PAGE_SIZE;
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
	bp.type = ttm_bo_type_kernel;
	bp.resv = NULL;
	r = amdgpu_bo_create(adev, &bp, &bo);
	if (r) {
		dev_err(adev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(bo, true);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r) {
		dev_err(adev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}

	r = amdgpu_ttm_alloc_gart(&bo->tbo);
	if (r) {
		dev_err(adev->dev, "%p bind failed\n", bo);
		goto allocate_mem_kmap_bo_failed;
	}

	r = amdgpu_bo_kmap(bo, &cpu_ptr_tmp);
	if (r) {
		dev_err(adev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}

	*mem_obj = bo;
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	*cpu_ptr = cpu_ptr_tmp;

	amdgpu_bo_unreserve(bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin(bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve(bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&bo);

	return r;
}

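/* Undo alloc_gtt_mem(): kunmap, unpin and drop the BO reference. */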
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;

	amdgpu_bo_reserve(bo, true);
	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unpin(bo);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&(bo));
}

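/* Report VRAM sizes and memory clock to KFD. VRAM that the CPU can reach
 * through the aperture counts as public memory, the rest as private.
 */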
void get_local_mem_info(struct kgd_dev *kgd,
			struct kfd_local_mem_info *mem_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	uint64_t address_mask = adev->dev->dma_mask ? ~*adev->dev->dma_mask :
					     ~((1ULL << 32) - 1);
	resource_size_t aper_limit = adev->gmc.aper_base + adev->gmc.aper_size;

	memset(mem_info, 0, sizeof(*mem_info));
	if (!(adev->gmc.aper_base & address_mask || aper_limit & address_mask)) {
		mem_info->local_mem_size_public = adev->gmc.visible_vram_size;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size -
				adev->gmc.visible_vram_size;
	} else {
		mem_info->local_mem_size_public = 0;
		mem_info->local_mem_size_private = adev->gmc.real_vram_size;
	}
	mem_info->vram_width = adev->gmc.vram_width;

	pr_debug("Address base: %pap limit %pap public 0x%llx private 0x%llx\n",
			&adev->gmc.aper_base, &aper_limit,
			mem_info->local_mem_size_public,
			mem_info->local_mem_size_private);

	if (amdgpu_sriov_vf(adev))
		mem_info->mem_clk_max = adev->clock.default_mclk / 100;
	else if (adev->powerplay.pp_funcs)
		mem_info->mem_clk_max = amdgpu_dpm_get_mclk(adev, false) / 100;
	else
		mem_info->mem_clk_max = 100;
}

uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	/* the sclk is in quanta of 10kHz */
	if (amdgpu_sriov_vf(adev))
		return adev->clock.default_sclk / 100;
	else if (adev->powerplay.pp_funcs)
		return amdgpu_dpm_get_sclk(adev, false) / 100;
	else
		return 100;
}

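/* Copy the compute-unit topology (active CUs, SIMDs, LDS size, wavefront
 * size) from adev->gfx.cu_info into KFD's format.
 */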
void get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_cu_info acu_info = adev->gfx.cu_info;

	memset(cu_info, 0, sizeof(*cu_info));
	if (sizeof(cu_info->cu_bitmap) != sizeof(acu_info.bitmap))
		return;

	cu_info->cu_active_number = acu_info.number;
	cu_info->cu_ao_mask = acu_info.ao_cu_mask;
	memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
	       sizeof(acu_info.bitmap));
	cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
	cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
	cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
	cu_info->simd_per_cu = acu_info.simd_per_cu;
	cu_info->max_waves_per_simd = acu_info.max_waves_per_simd;
	cu_info->wave_front_size = acu_info.wave_front_size;
	cu_info->max_scratch_slots_per_cu = acu_info.max_scratch_slots_per_cu;
	cu_info->lds_size = acu_info.lds_size;
}

uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
}

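/* Report the XGMI hive ID this GPU belongs to. */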
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	return adev->gmc.xgmi.hive_id;
}

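/* Submit an IB for KFD on the first compute or SDMA ring of the requested
 * engine and wait synchronously for the fence. Only valid without HWS,
 * since the VMID is written directly into the job.
 */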
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ring *ring;
	struct dma_fence *f = NULL;
	int ret;

	switch (engine) {
	case KGD_ENGINE_MEC1:
		ring = &adev->gfx.compute_ring[0];
		break;
	case KGD_ENGINE_SDMA1:
		ring = &adev->sdma.instance[0].ring;
		break;
	case KGD_ENGINE_SDMA2:
		ring = &adev->sdma.instance[1].ring;
		break;
	default:
		pr_err("Invalid engine in IB submission: %d\n", engine);
		ret = -EINVAL;
		goto err;
	}

	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
	if (ret)
		goto err;

	ib = &job->ibs[0];
	memset(ib, 0, sizeof(struct amdgpu_ib));

	ib->gpu_addr = gpu_addr;
	ib->ptr = ib_cmd;
	ib->length_dw = ib_len;
	/* This works for NO_HWS. TODO: need to handle without knowing VMID */
	job->vmid = vmid;

	ret = amdgpu_ib_schedule(ring, 1, ib, job, &f);
	if (ret) {
		DRM_ERROR("amdgpu: failed to schedule IB.\n");
		goto err_ib_sched;
	}

	ret = dma_fence_wait(f, false);

err_ib_sched:
	dma_fence_put(f);
	amdgpu_job_free(job);
err:
	return ret;
}

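/* Enable the COMPUTE power profile while compute work is active and
 * disable it again when idle.
 */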
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	amdgpu_dpm_switch_power_profile(adev,
					PP_SMC_POWER_PROFILE_COMPUTE, !idle);
}

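/* Check whether a VMID is one of those reserved for KFD. */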
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
	if (adev->kfd) {
		if ((1 << vmid) & compute_vmid_bitmap)
			return true;
	}

	return false;
}

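/* Stubs so amdgpu still links and runs when KFD support (CONFIG_HSA_AMD)
 * is compiled out.
 */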
#ifndef CONFIG_HSA_AMD
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo)
{
}

void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
					struct amdgpu_vm *vm)
{
}

struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
{
	return 0;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return NULL;
}

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
{
	return NULL;
}
#endif