xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c (revision de848da12f752170c2ebe114804a985314fd5a6a)
1d5a114a6SFelix Kuehling /*
2d5a114a6SFelix Kuehling  * Copyright 2014-2018 Advanced Micro Devices, Inc.
3d5a114a6SFelix Kuehling  *
4d5a114a6SFelix Kuehling  * Permission is hereby granted, free of charge, to any person obtaining a
5d5a114a6SFelix Kuehling  * copy of this software and associated documentation files (the "Software"),
6d5a114a6SFelix Kuehling  * to deal in the Software without restriction, including without limitation
7d5a114a6SFelix Kuehling  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8d5a114a6SFelix Kuehling  * and/or sell copies of the Software, and to permit persons to whom the
9d5a114a6SFelix Kuehling  * Software is furnished to do so, subject to the following conditions:
10d5a114a6SFelix Kuehling  *
11d5a114a6SFelix Kuehling  * The above copyright notice and this permission notice shall be included in
12d5a114a6SFelix Kuehling  * all copies or substantial portions of the Software.
13d5a114a6SFelix Kuehling  *
14d5a114a6SFelix Kuehling  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15d5a114a6SFelix Kuehling  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16d5a114a6SFelix Kuehling  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17d5a114a6SFelix Kuehling  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18d5a114a6SFelix Kuehling  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19d5a114a6SFelix Kuehling  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20d5a114a6SFelix Kuehling  * OTHER DEALINGS IN THE SOFTWARE.
21d5a114a6SFelix Kuehling  */
22d5a114a6SFelix Kuehling #include "amdgpu.h"
23d5a114a6SFelix Kuehling #include "amdgpu_amdkfd.h"
24d5a114a6SFelix Kuehling #include "gc/gc_9_0_offset.h"
25d5a114a6SFelix Kuehling #include "gc/gc_9_0_sh_mask.h"
26d5a114a6SFelix Kuehling #include "vega10_enum.h"
27d5a114a6SFelix Kuehling #include "sdma0/sdma0_4_0_offset.h"
28d5a114a6SFelix Kuehling #include "sdma0/sdma0_4_0_sh_mask.h"
29d5a114a6SFelix Kuehling #include "sdma1/sdma1_4_0_offset.h"
30d5a114a6SFelix Kuehling #include "sdma1/sdma1_4_0_sh_mask.h"
31d5a114a6SFelix Kuehling #include "athub/athub_1_0_offset.h"
32d5a114a6SFelix Kuehling #include "athub/athub_1_0_sh_mask.h"
33d5a114a6SFelix Kuehling #include "oss/osssys_4_0_offset.h"
34d5a114a6SFelix Kuehling #include "oss/osssys_4_0_sh_mask.h"
35d5a114a6SFelix Kuehling #include "soc15_common.h"
36d5a114a6SFelix Kuehling #include "v9_structs.h"
37d5a114a6SFelix Kuehling #include "soc15.h"
38d5a114a6SFelix Kuehling #include "soc15d.h"
3943a4bc82SRamesh Errabolu #include "gfx_v9_0.h"
403ac2bc76SRamesh Errabolu #include "amdgpu_amdkfd_gfx_v9.h"
41101827e1SJonathan Kim #include <uapi/linux/kfd_ioctl.h>
42d5a114a6SFelix Kuehling 
43d5a114a6SFelix Kuehling enum hqd_dequeue_request_type {
44d5a114a6SFelix Kuehling 	NO_ACTION = 0,
45d5a114a6SFelix Kuehling 	DRAIN_PIPE,
46b53ef0dfSMukul Joshi 	RESET_WAVES,
47b53ef0dfSMukul Joshi 	SAVE_WAVES
48d5a114a6SFelix Kuehling };
49d5a114a6SFelix Kuehling 
50e2069a7bSMukul Joshi static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
51e2069a7bSMukul Joshi 			uint32_t queue, uint32_t vmid, uint32_t inst)
52d5a114a6SFelix Kuehling {
53d5a114a6SFelix Kuehling 	mutex_lock(&adev->srbm_mutex);
5402ee3b02SMukul Joshi 	soc15_grbm_select(adev, mec, pipe, queue, vmid, GET_INST(GC, inst));
55d5a114a6SFelix Kuehling }
56d5a114a6SFelix Kuehling 
57e2069a7bSMukul Joshi static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst)
58d5a114a6SFelix Kuehling {
5902ee3b02SMukul Joshi 	soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, inst));
60d5a114a6SFelix Kuehling 	mutex_unlock(&adev->srbm_mutex);
61d5a114a6SFelix Kuehling }
62d5a114a6SFelix Kuehling 
63f544afacSAmber Lin void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
64e2069a7bSMukul Joshi 				uint32_t queue_id, uint32_t inst)
65d5a114a6SFelix Kuehling {
66d5a114a6SFelix Kuehling 	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
67d5a114a6SFelix Kuehling 	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
68d5a114a6SFelix Kuehling 
69e2069a7bSMukul Joshi 	kgd_gfx_v9_lock_srbm(adev, mec, pipe, queue_id, 0, inst);
70d5a114a6SFelix Kuehling }
71d5a114a6SFelix Kuehling 
72f544afacSAmber Lin uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev,
73d5a114a6SFelix Kuehling 			       uint32_t pipe_id, uint32_t queue_id)
74d5a114a6SFelix Kuehling {
7535cd89d5SAaron Liu 	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
7635cd89d5SAaron Liu 			queue_id;
77d5a114a6SFelix Kuehling 
7835cd89d5SAaron Liu 	return 1ull << bit;
79d5a114a6SFelix Kuehling }
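
/*
 * Worked example for the mask above (values assumed for illustration only):
 * with adev->gfx.mec.num_queue_per_pipe == 8, pipe_id == 2 and queue_id == 3
 * select bit 2 * 8 + 3 == 19, i.e. a queue mask of 1ull << 19 == 0x80000.
 * This is the value kgd_gfx_v9_hqd_load() later truncates to 32 bits and
 * writes to mmCP_PQ_WPTR_POLL_CNTL1 when a wptr polling address is set up.
 */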
80d5a114a6SFelix Kuehling 
81e2069a7bSMukul Joshi void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst)
82d5a114a6SFelix Kuehling {
83e2069a7bSMukul Joshi 	kgd_gfx_v9_unlock_srbm(adev, inst);
84d5a114a6SFelix Kuehling }
85d5a114a6SFelix Kuehling 
863356c38dSGraham Sider void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
87d5a114a6SFelix Kuehling 					uint32_t sh_mem_config,
88d5a114a6SFelix Kuehling 					uint32_t sh_mem_ape1_base,
89d5a114a6SFelix Kuehling 					uint32_t sh_mem_ape1_limit,
90e2069a7bSMukul Joshi 					uint32_t sh_mem_bases, uint32_t inst)
91d5a114a6SFelix Kuehling {
92e2069a7bSMukul Joshi 	kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
93d5a114a6SFelix Kuehling 
9419726428SVictor Lu 	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG, sh_mem_config);
9519726428SVictor Lu 	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_BASES, sh_mem_bases);
96d5a114a6SFelix Kuehling 	/* APE1 no longer exists on GFX9 */
97d5a114a6SFelix Kuehling 
98e2069a7bSMukul Joshi 	kgd_gfx_v9_unlock_srbm(adev, inst);
99d5a114a6SFelix Kuehling }
100d5a114a6SFelix Kuehling 
1013356c38dSGraham Sider int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
102e2069a7bSMukul Joshi 					unsigned int vmid, uint32_t inst)
103d5a114a6SFelix Kuehling {
104d5a114a6SFelix Kuehling 	/*
105d5a114a6SFelix Kuehling 	 * We have to assume that there is no outstanding mapping.
106d5a114a6SFelix Kuehling 	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
107d5a114a6SFelix Kuehling 	 * a mapping is in progress or because a mapping finished
108d5a114a6SFelix Kuehling 	 * and the SW cleared it.
109d5a114a6SFelix Kuehling 	 * So the protocol is to always wait & clear.
110d5a114a6SFelix Kuehling 	 */
111d5a114a6SFelix Kuehling 	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
112d5a114a6SFelix Kuehling 			ATC_VMID0_PASID_MAPPING__VALID_MASK;
113d5a114a6SFelix Kuehling 
114d5a114a6SFelix Kuehling 	/*
115d5a114a6SFelix Kuehling 	 * We need to do this twice, once for gfx and once for mmhub. For the ATC,
116d5a114a6SFelix Kuehling 	 * add 16 to the VMID to reach the mmhub copy; the IH uses different registers.
117d5a114a6SFelix Kuehling 	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
118d5a114a6SFelix Kuehling 	 */
119d5a114a6SFelix Kuehling 
120d5a114a6SFelix Kuehling 	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
121d5a114a6SFelix Kuehling 	       pasid_mapping);
122d5a114a6SFelix Kuehling 
123d5a114a6SFelix Kuehling 	while (!(RREG32(SOC15_REG_OFFSET(
124d5a114a6SFelix Kuehling 				ATHUB, 0,
125d5a114a6SFelix Kuehling 				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
126d5a114a6SFelix Kuehling 		 (1U << vmid)))
127d5a114a6SFelix Kuehling 		cpu_relax();
128d5a114a6SFelix Kuehling 
129d5a114a6SFelix Kuehling 	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
130d5a114a6SFelix Kuehling 				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
131d5a114a6SFelix Kuehling 	       1U << vmid);
132d5a114a6SFelix Kuehling 
133d5a114a6SFelix Kuehling 	/* Mapping vmid to pasid also for IH block */
134d5a114a6SFelix Kuehling 	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
135d5a114a6SFelix Kuehling 	       pasid_mapping);
136d5a114a6SFelix Kuehling 
137d5a114a6SFelix Kuehling 	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
138d5a114a6SFelix Kuehling 	       pasid_mapping);
139d5a114a6SFelix Kuehling 
140d5a114a6SFelix Kuehling 	while (!(RREG32(SOC15_REG_OFFSET(
141d5a114a6SFelix Kuehling 				ATHUB, 0,
142d5a114a6SFelix Kuehling 				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
143d5a114a6SFelix Kuehling 		 (1U << (vmid + 16))))
144d5a114a6SFelix Kuehling 		cpu_relax();
145d5a114a6SFelix Kuehling 
146d5a114a6SFelix Kuehling 	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
147d5a114a6SFelix Kuehling 				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
148d5a114a6SFelix Kuehling 	       1U << (vmid + 16));
149d5a114a6SFelix Kuehling 
150d5a114a6SFelix Kuehling 	/* Mapping vmid to pasid also for IH block */
151d5a114a6SFelix Kuehling 	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
152d5a114a6SFelix Kuehling 	       pasid_mapping);
153d5a114a6SFelix Kuehling 	return 0;
154d5a114a6SFelix Kuehling }
155d5a114a6SFelix Kuehling 
156d5a114a6SFelix Kuehling /* TODO - RING0 form of field is obsolete, seems to date back to SI
157d5a114a6SFelix Kuehling  * but still works
158d5a114a6SFelix Kuehling  */
159d5a114a6SFelix Kuehling 
160e2069a7bSMukul Joshi int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
161e2069a7bSMukul Joshi 				uint32_t inst)
162d5a114a6SFelix Kuehling {
163d5a114a6SFelix Kuehling 	uint32_t mec;
164d5a114a6SFelix Kuehling 	uint32_t pipe;
165d5a114a6SFelix Kuehling 
166d5a114a6SFelix Kuehling 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
167d5a114a6SFelix Kuehling 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
168d5a114a6SFelix Kuehling 
169e2069a7bSMukul Joshi 	kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst);
170d5a114a6SFelix Kuehling 
17102ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmCPC_INT_CNTL,
172d5a114a6SFelix Kuehling 		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
173d5a114a6SFelix Kuehling 		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
174d5a114a6SFelix Kuehling 
175e2069a7bSMukul Joshi 	kgd_gfx_v9_unlock_srbm(adev, inst);
176d5a114a6SFelix Kuehling 
177d5a114a6SFelix Kuehling 	return 0;
178d5a114a6SFelix Kuehling }
179d5a114a6SFelix Kuehling 
180b55a8b8bSYong Zhao static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
181d5a114a6SFelix Kuehling 				unsigned int engine_id,
182d5a114a6SFelix Kuehling 				unsigned int queue_id)
183d5a114a6SFelix Kuehling {
18434174b89SHuang Rui 	uint32_t sdma_engine_reg_base = 0;
18534174b89SHuang Rui 	uint32_t sdma_rlc_reg_offset;
18634174b89SHuang Rui 
18734174b89SHuang Rui 	switch (engine_id) {
18834174b89SHuang Rui 	default:
18934174b89SHuang Rui 		dev_warn(adev->dev,
19034174b89SHuang Rui 			 "Invalid sdma engine id (%d), using engine id 0\n",
19134174b89SHuang Rui 			 engine_id);
19234174b89SHuang Rui 		fallthrough;
19334174b89SHuang Rui 	case 0:
19434174b89SHuang Rui 		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
19534174b89SHuang Rui 				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
19634174b89SHuang Rui 		break;
19734174b89SHuang Rui 	case 1:
19834174b89SHuang Rui 		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
19934174b89SHuang Rui 				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
20034174b89SHuang Rui 		break;
20134174b89SHuang Rui 	}
20234174b89SHuang Rui 
20334174b89SHuang Rui 	sdma_rlc_reg_offset = sdma_engine_reg_base
204b55a8b8bSYong Zhao 		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);
205d5a114a6SFelix Kuehling 
206b55a8b8bSYong Zhao 	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
20734174b89SHuang Rui 		 queue_id, sdma_rlc_reg_offset);
208d5a114a6SFelix Kuehling 
20934174b89SHuang Rui 	return sdma_rlc_reg_offset;
210d5a114a6SFelix Kuehling }
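
/*
 * Illustrative note on the offset arithmetic above (assumed values, not part
 * of the driver): for engine_id == 1 and queue_id == 2 the result is the
 * SDMA1 RLC0 base plus two strides of
 * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL). Callers then add the
 * RLC0-relative register names (mmSDMA0_RLC0_*) to this offset to address
 * the RLC2 queue registers of engine SDMA1.
 */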
211d5a114a6SFelix Kuehling 
212d5a114a6SFelix Kuehling static inline struct v9_mqd *get_mqd(void *mqd)
213d5a114a6SFelix Kuehling {
214d5a114a6SFelix Kuehling 	return (struct v9_mqd *)mqd;
215d5a114a6SFelix Kuehling }
216d5a114a6SFelix Kuehling 
217d5a114a6SFelix Kuehling static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
218d5a114a6SFelix Kuehling {
219d5a114a6SFelix Kuehling 	return (struct v9_sdma_mqd *)mqd;
220d5a114a6SFelix Kuehling }
221d5a114a6SFelix Kuehling 
222420185fdSGraham Sider int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
223420185fdSGraham Sider 			uint32_t pipe_id, uint32_t queue_id,
224420185fdSGraham Sider 			uint32_t __user *wptr, uint32_t wptr_shift,
225e2069a7bSMukul Joshi 			uint32_t wptr_mask, struct mm_struct *mm,
226e2069a7bSMukul Joshi 			uint32_t inst)
227d5a114a6SFelix Kuehling {
228d5a114a6SFelix Kuehling 	struct v9_mqd *m;
229d5a114a6SFelix Kuehling 	uint32_t *mqd_hqd;
230d5a114a6SFelix Kuehling 	uint32_t reg, hqd_base, data;
231d5a114a6SFelix Kuehling 
232d5a114a6SFelix Kuehling 	m = get_mqd(mqd);
233d5a114a6SFelix Kuehling 
234e2069a7bSMukul Joshi 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
235d5a114a6SFelix Kuehling 
236d5a114a6SFelix Kuehling 	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
237d5a114a6SFelix Kuehling 	mqd_hqd = &m->cp_mqd_base_addr_lo;
23802ee3b02SMukul Joshi 	hqd_base = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
239d5a114a6SFelix Kuehling 
240d5a114a6SFelix Kuehling 	for (reg = hqd_base;
24102ee3b02SMukul Joshi 	     reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
24285150626SVictor Lu 		WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);
243d5a114a6SFelix Kuehling 
244d5a114a6SFelix Kuehling 
245d5a114a6SFelix Kuehling 	/* Activate doorbell logic before triggering WPTR poll. */
246d5a114a6SFelix Kuehling 	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
247d5a114a6SFelix Kuehling 			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
24819726428SVictor Lu 	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL, data);
249d5a114a6SFelix Kuehling 
250d5a114a6SFelix Kuehling 	if (wptr) {
251d5a114a6SFelix Kuehling 		/* Don't read wptr with get_user because the user
252d5a114a6SFelix Kuehling 		 * context may not be accessible (if this function
253d5a114a6SFelix Kuehling 		 * runs in a work queue). Instead trigger a one-shot
254d5a114a6SFelix Kuehling 		 * polling read from memory in the CP. This assumes
255d5a114a6SFelix Kuehling 		 * that wptr is GPU-accessible in the queue's VMID via
256d5a114a6SFelix Kuehling 		 * ATC or SVM. WPTR==RPTR before starting the poll so
257d5a114a6SFelix Kuehling 		 * the CP starts fetching new commands from the right
258d5a114a6SFelix Kuehling 		 * place.
259d5a114a6SFelix Kuehling 		 *
260d5a114a6SFelix Kuehling 		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
261d5a114a6SFelix Kuehling 		 * tricky. Assume that the queue didn't overflow. The
262d5a114a6SFelix Kuehling 		 * number of valid bits in the 32-bit RPTR depends on
263d5a114a6SFelix Kuehling 		 * the queue size. The remaining bits are taken from
264d5a114a6SFelix Kuehling 		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
265d5a114a6SFelix Kuehling 		 * queue size.
266d5a114a6SFelix Kuehling 		 */
267d5a114a6SFelix Kuehling 		uint32_t queue_size =
268d5a114a6SFelix Kuehling 			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
269d5a114a6SFelix Kuehling 					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
270d5a114a6SFelix Kuehling 		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
271d5a114a6SFelix Kuehling 
272d5a114a6SFelix Kuehling 		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
273d5a114a6SFelix Kuehling 			guessed_wptr += queue_size;
274d5a114a6SFelix Kuehling 		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
275d5a114a6SFelix Kuehling 		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
276d5a114a6SFelix Kuehling 
27719726428SVictor Lu 		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO,
278d5a114a6SFelix Kuehling 			lower_32_bits(guessed_wptr));
27919726428SVictor Lu 		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI,
280d5a114a6SFelix Kuehling 			upper_32_bits(guessed_wptr));
28119726428SVictor Lu 		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR,
282ebe1d22bSArnd Bergmann 			lower_32_bits((uintptr_t)wptr));
28319726428SVictor Lu 		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
284ebe1d22bSArnd Bergmann 			upper_32_bits((uintptr_t)wptr));
28519726428SVictor Lu 		WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
286f544afacSAmber Lin 			(uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
287d5a114a6SFelix Kuehling 	}
288d5a114a6SFelix Kuehling 
289d5a114a6SFelix Kuehling 	/* Start the EOP fetcher */
29019726428SVictor Lu 	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR,
29119726428SVictor Lu 	       REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
292d5a114a6SFelix Kuehling 
293d5a114a6SFelix Kuehling 	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
29419726428SVictor Lu 	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE, data);
295d5a114a6SFelix Kuehling 
296e2069a7bSMukul Joshi 	kgd_gfx_v9_release_queue(adev, inst);
297d5a114a6SFelix Kuehling 
298d5a114a6SFelix Kuehling 	return 0;
299d5a114a6SFelix Kuehling }
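
/*
 * Illustrative sketch of the WPTR-guessing arithmetic above, isolated as a
 * pure helper with one assumed worked example (not part of the driver).
 * With queue_size = 0x100, a 32-bit RPTR of 0xa0 and a saved WPTR of
 * lo = 0x1230 / hi = 0: the saved low bits (0x30) are below the RPTR low
 * bits (0xa0), so the WPTR is assumed to have wrapped once and the guess
 * becomes (0x1200 + 0x100) + 0xa0 = 0x13a0.
 */
static inline uint64_t example_guess_wptr(uint32_t queue_size, uint32_t rptr,
					  uint32_t wptr_lo, uint32_t wptr_hi)
{
	uint64_t guessed = rptr & (queue_size - 1);

	if ((wptr_lo & (queue_size - 1)) < guessed)
		guessed += queue_size;
	guessed += wptr_lo & ~(queue_size - 1);
	guessed += (uint64_t)wptr_hi << 32;

	return guessed;
}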
300d5a114a6SFelix Kuehling 
301420185fdSGraham Sider int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd,
30235cd89d5SAaron Liu 			    uint32_t pipe_id, uint32_t queue_id,
303e2069a7bSMukul Joshi 			    uint32_t doorbell_off, uint32_t inst)
30435cd89d5SAaron Liu {
305e2069a7bSMukul Joshi 	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring;
30635cd89d5SAaron Liu 	struct v9_mqd *m;
30735cd89d5SAaron Liu 	uint32_t mec, pipe;
30835cd89d5SAaron Liu 	int r;
30935cd89d5SAaron Liu 
31035cd89d5SAaron Liu 	m = get_mqd(mqd);
31135cd89d5SAaron Liu 
312e2069a7bSMukul Joshi 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
31335cd89d5SAaron Liu 
31435cd89d5SAaron Liu 	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
31535cd89d5SAaron Liu 	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);
31635cd89d5SAaron Liu 
31735cd89d5SAaron Liu 	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
31835cd89d5SAaron Liu 		 mec, pipe, queue_id);
31935cd89d5SAaron Liu 
320e2069a7bSMukul Joshi 	spin_lock(&adev->gfx.kiq[inst].ring_lock);
32135cd89d5SAaron Liu 	r = amdgpu_ring_alloc(kiq_ring, 7);
32235cd89d5SAaron Liu 	if (r) {
32335cd89d5SAaron Liu 		pr_err("Failed to alloc KIQ (%d).\n", r);
32435cd89d5SAaron Liu 		goto out_unlock;
32535cd89d5SAaron Liu 	}
32635cd89d5SAaron Liu 
32735cd89d5SAaron Liu 	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
32835cd89d5SAaron Liu 	amdgpu_ring_write(kiq_ring,
32935cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
33035cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
33135cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
33235cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_PIPE(pipe) |
33335cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
33435cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
33535cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
33635cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
33735cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
33835cd89d5SAaron Liu 	amdgpu_ring_write(kiq_ring,
33935cd89d5SAaron Liu 			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
34035cd89d5SAaron Liu 	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
34135cd89d5SAaron Liu 	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
34235cd89d5SAaron Liu 	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
34335cd89d5SAaron Liu 	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
34435cd89d5SAaron Liu 	amdgpu_ring_commit(kiq_ring);
34535cd89d5SAaron Liu 
34635cd89d5SAaron Liu out_unlock:
347e2069a7bSMukul Joshi 	spin_unlock(&adev->gfx.kiq[inst].ring_lock);
348e2069a7bSMukul Joshi 	kgd_gfx_v9_release_queue(adev, inst);
34935cd89d5SAaron Liu 
35035cd89d5SAaron Liu 	return r;
35135cd89d5SAaron Liu }
35235cd89d5SAaron Liu 
353420185fdSGraham Sider int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev,
354d5a114a6SFelix Kuehling 			uint32_t pipe_id, uint32_t queue_id,
355e2069a7bSMukul Joshi 			uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
356d5a114a6SFelix Kuehling {
357d5a114a6SFelix Kuehling 	uint32_t i = 0, reg;
358d5a114a6SFelix Kuehling #define HQD_N_REGS 56
359d5a114a6SFelix Kuehling #define DUMP_REG(addr) do {				\
360d5a114a6SFelix Kuehling 		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
361d5a114a6SFelix Kuehling 			break;				\
362d5a114a6SFelix Kuehling 		(*dump)[i][0] = (addr) << 2;		\
363d5a114a6SFelix Kuehling 		(*dump)[i++][1] = RREG32(addr);		\
364d5a114a6SFelix Kuehling 	} while (0)
365d5a114a6SFelix Kuehling 
366b5a52d2aSSam James 	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
367d5a114a6SFelix Kuehling 	if (*dump == NULL)
368d5a114a6SFelix Kuehling 		return -ENOMEM;
369d5a114a6SFelix Kuehling 
370e2069a7bSMukul Joshi 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
371d5a114a6SFelix Kuehling 
37202ee3b02SMukul Joshi 	for (reg = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_MQD_BASE_ADDR);
37302ee3b02SMukul Joshi 	     reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
374d5a114a6SFelix Kuehling 		DUMP_REG(reg);
375d5a114a6SFelix Kuehling 
376e2069a7bSMukul Joshi 	kgd_gfx_v9_release_queue(adev, inst);
377d5a114a6SFelix Kuehling 
378d5a114a6SFelix Kuehling 	WARN_ON_ONCE(i != HQD_N_REGS);
379d5a114a6SFelix Kuehling 	*n_regs = i;
380d5a114a6SFelix Kuehling 
381d5a114a6SFelix Kuehling 	return 0;
382d5a114a6SFelix Kuehling }
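
/*
 * Minimal sketch of a consumer for the (*dump)[i][0]/[1] pairs produced
 * above (hypothetical helper, not part of the driver): entry [0] holds the
 * register offset converted to a byte address (addr << 2) and entry [1]
 * holds the value read back with RREG32().
 */
static inline void example_print_hqd_dump(uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i;

	for (i = 0; i < n_regs; i++)
		pr_debug("HQD reg 0x%08x = 0x%08x\n", dump[i][0], dump[i][1]);
}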
383d5a114a6SFelix Kuehling 
384420185fdSGraham Sider static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
385d5a114a6SFelix Kuehling 			     uint32_t __user *wptr, struct mm_struct *mm)
386d5a114a6SFelix Kuehling {
387d5a114a6SFelix Kuehling 	struct v9_sdma_mqd *m;
388b55a8b8bSYong Zhao 	uint32_t sdma_rlc_reg_offset;
389d5a114a6SFelix Kuehling 	unsigned long end_jiffies;
390d5a114a6SFelix Kuehling 	uint32_t data;
391d5a114a6SFelix Kuehling 	uint64_t data64;
392d5a114a6SFelix Kuehling 	uint64_t __user *wptr64 = (uint64_t __user *)wptr;
393d5a114a6SFelix Kuehling 
394d5a114a6SFelix Kuehling 	m = get_sdma_mqd(mqd);
395b55a8b8bSYong Zhao 	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
396d5a114a6SFelix Kuehling 					    m->sdma_queue_id);
397d5a114a6SFelix Kuehling 
398b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
399d5a114a6SFelix Kuehling 		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
400d5a114a6SFelix Kuehling 
401d5a114a6SFelix Kuehling 	end_jiffies = msecs_to_jiffies(2000) + jiffies;
402d5a114a6SFelix Kuehling 	while (true) {
403b55a8b8bSYong Zhao 		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
404d5a114a6SFelix Kuehling 		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
405d5a114a6SFelix Kuehling 			break;
406812330ebSYong Zhao 		if (time_after(jiffies, end_jiffies)) {
407812330ebSYong Zhao 			pr_err("SDMA RLC not idle in %s\n", __func__);
408d5a114a6SFelix Kuehling 			return -ETIME;
409812330ebSYong Zhao 		}
410d5a114a6SFelix Kuehling 		usleep_range(500, 1000);
411d5a114a6SFelix Kuehling 	}
412d5a114a6SFelix Kuehling 
413b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
414d5a114a6SFelix Kuehling 	       m->sdmax_rlcx_doorbell_offset);
415d5a114a6SFelix Kuehling 
416d5a114a6SFelix Kuehling 	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
417d5a114a6SFelix Kuehling 			     ENABLE, 1);
418b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
419b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
420b55a8b8bSYong Zhao 				m->sdmax_rlcx_rb_rptr);
421b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
422d5a114a6SFelix Kuehling 				m->sdmax_rlcx_rb_rptr_hi);
423d5a114a6SFelix Kuehling 
424b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
425d5a114a6SFelix Kuehling 	if (read_user_wptr(mm, wptr64, data64)) {
426b55a8b8bSYong Zhao 		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
427d5a114a6SFelix Kuehling 		       lower_32_bits(data64));
428b55a8b8bSYong Zhao 		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
429d5a114a6SFelix Kuehling 		       upper_32_bits(data64));
430d5a114a6SFelix Kuehling 	} else {
431b55a8b8bSYong Zhao 		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
432d5a114a6SFelix Kuehling 		       m->sdmax_rlcx_rb_rptr);
433b55a8b8bSYong Zhao 		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
434d5a114a6SFelix Kuehling 		       m->sdmax_rlcx_rb_rptr_hi);
435d5a114a6SFelix Kuehling 	}
436b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);
437d5a114a6SFelix Kuehling 
438b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
439b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
440d5a114a6SFelix Kuehling 			m->sdmax_rlcx_rb_base_hi);
441b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
442d5a114a6SFelix Kuehling 			m->sdmax_rlcx_rb_rptr_addr_lo);
443b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
444d5a114a6SFelix Kuehling 			m->sdmax_rlcx_rb_rptr_addr_hi);
445d5a114a6SFelix Kuehling 
446d5a114a6SFelix Kuehling 	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
447d5a114a6SFelix Kuehling 			     RB_ENABLE, 1);
448b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);
449d5a114a6SFelix Kuehling 
450d5a114a6SFelix Kuehling 	return 0;
451d5a114a6SFelix Kuehling }
452d5a114a6SFelix Kuehling 
453420185fdSGraham Sider static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
454d5a114a6SFelix Kuehling 			     uint32_t engine_id, uint32_t queue_id,
455d5a114a6SFelix Kuehling 			     uint32_t (**dump)[2], uint32_t *n_regs)
456d5a114a6SFelix Kuehling {
457b55a8b8bSYong Zhao 	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
458b55a8b8bSYong Zhao 			engine_id, queue_id);
459d5a114a6SFelix Kuehling 	uint32_t i = 0, reg;
460d5a114a6SFelix Kuehling #undef HQD_N_REGS
461d5a114a6SFelix Kuehling #define HQD_N_REGS (19+6+7+10)
462d5a114a6SFelix Kuehling 
463b5a52d2aSSam James 	*dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
464d5a114a6SFelix Kuehling 	if (*dump == NULL)
465d5a114a6SFelix Kuehling 		return -ENOMEM;
466d5a114a6SFelix Kuehling 
467d5a114a6SFelix Kuehling 	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
468b55a8b8bSYong Zhao 		DUMP_REG(sdma_rlc_reg_offset + reg);
469d5a114a6SFelix Kuehling 	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
470b55a8b8bSYong Zhao 		DUMP_REG(sdma_rlc_reg_offset + reg);
471d5a114a6SFelix Kuehling 	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
472d5a114a6SFelix Kuehling 	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
473b55a8b8bSYong Zhao 		DUMP_REG(sdma_rlc_reg_offset + reg);
474d5a114a6SFelix Kuehling 	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
475d5a114a6SFelix Kuehling 	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
476b55a8b8bSYong Zhao 		DUMP_REG(sdma_rlc_reg_offset + reg);
477d5a114a6SFelix Kuehling 
478d5a114a6SFelix Kuehling 	WARN_ON_ONCE(i != HQD_N_REGS);
479d5a114a6SFelix Kuehling 	*n_regs = i;
480d5a114a6SFelix Kuehling 
481d5a114a6SFelix Kuehling 	return 0;
482d5a114a6SFelix Kuehling }
483d5a114a6SFelix Kuehling 
484420185fdSGraham Sider bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev,
485420185fdSGraham Sider 				uint64_t queue_address, uint32_t pipe_id,
486e2069a7bSMukul Joshi 				uint32_t queue_id, uint32_t inst)
487d5a114a6SFelix Kuehling {
488d5a114a6SFelix Kuehling 	uint32_t act;
489d5a114a6SFelix Kuehling 	bool retval = false;
490d5a114a6SFelix Kuehling 	uint32_t low, high;
491d5a114a6SFelix Kuehling 
492e2069a7bSMukul Joshi 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
49302ee3b02SMukul Joshi 	act = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
494d5a114a6SFelix Kuehling 	if (act) {
495d5a114a6SFelix Kuehling 		low = lower_32_bits(queue_address >> 8);
496d5a114a6SFelix Kuehling 		high = upper_32_bits(queue_address >> 8);
497d5a114a6SFelix Kuehling 
49802ee3b02SMukul Joshi 		if (low == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE) &&
49902ee3b02SMukul Joshi 		   high == RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI))
500d5a114a6SFelix Kuehling 			retval = true;
501d5a114a6SFelix Kuehling 	}
502e2069a7bSMukul Joshi 	kgd_gfx_v9_release_queue(adev, inst);
503d5a114a6SFelix Kuehling 	return retval;
504d5a114a6SFelix Kuehling }
505d5a114a6SFelix Kuehling 
506420185fdSGraham Sider static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
507d5a114a6SFelix Kuehling {
508d5a114a6SFelix Kuehling 	struct v9_sdma_mqd *m;
509b55a8b8bSYong Zhao 	uint32_t sdma_rlc_reg_offset;
510d5a114a6SFelix Kuehling 	uint32_t sdma_rlc_rb_cntl;
511d5a114a6SFelix Kuehling 
512d5a114a6SFelix Kuehling 	m = get_sdma_mqd(mqd);
513b55a8b8bSYong Zhao 	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
514d5a114a6SFelix Kuehling 					    m->sdma_queue_id);
515d5a114a6SFelix Kuehling 
516b55a8b8bSYong Zhao 	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
517d5a114a6SFelix Kuehling 
518d5a114a6SFelix Kuehling 	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
519d5a114a6SFelix Kuehling 		return true;
520d5a114a6SFelix Kuehling 
521d5a114a6SFelix Kuehling 	return false;
522d5a114a6SFelix Kuehling }
523d5a114a6SFelix Kuehling 
524420185fdSGraham Sider int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
525d5a114a6SFelix Kuehling 				enum kfd_preempt_type reset_type,
526d5a114a6SFelix Kuehling 				unsigned int utimeout, uint32_t pipe_id,
527e2069a7bSMukul Joshi 				uint32_t queue_id, uint32_t inst)
528d5a114a6SFelix Kuehling {
529d5a114a6SFelix Kuehling 	enum hqd_dequeue_request_type type;
530d5a114a6SFelix Kuehling 	unsigned long end_jiffies;
531d5a114a6SFelix Kuehling 	uint32_t temp;
532d5a114a6SFelix Kuehling 	struct v9_mqd *m = get_mqd(mqd);
533d5a114a6SFelix Kuehling 
53453b3f8f4SDennis Li 	if (amdgpu_in_reset(adev))
5351b0bfcffSShaoyun Liu 		return -EIO;
5361b0bfcffSShaoyun Liu 
537e2069a7bSMukul Joshi 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
538d5a114a6SFelix Kuehling 
539d5a114a6SFelix Kuehling 	if (m->cp_hqd_vmid == 0)
54002ee3b02SMukul Joshi 		WREG32_FIELD15_RLC(GC, GET_INST(GC, inst), RLC_CP_SCHEDULERS, scheduler1, 0);
541d5a114a6SFelix Kuehling 
542d5a114a6SFelix Kuehling 	switch (reset_type) {
543d5a114a6SFelix Kuehling 	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
544d5a114a6SFelix Kuehling 		type = DRAIN_PIPE;
545d5a114a6SFelix Kuehling 		break;
546d5a114a6SFelix Kuehling 	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
547d5a114a6SFelix Kuehling 		type = RESET_WAVES;
548d5a114a6SFelix Kuehling 		break;
549b53ef0dfSMukul Joshi 	case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
550b53ef0dfSMukul Joshi 		type = SAVE_WAVES;
551b53ef0dfSMukul Joshi 		break;
552d5a114a6SFelix Kuehling 	default:
553d5a114a6SFelix Kuehling 		type = DRAIN_PIPE;
554d5a114a6SFelix Kuehling 		break;
555d5a114a6SFelix Kuehling 	}
556d5a114a6SFelix Kuehling 
55719726428SVictor Lu 	WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST, type);
558d5a114a6SFelix Kuehling 
559d5a114a6SFelix Kuehling 	end_jiffies = (utimeout * HZ / 1000) + jiffies;
560d5a114a6SFelix Kuehling 	while (true) {
56102ee3b02SMukul Joshi 		temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
562d5a114a6SFelix Kuehling 		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
563d5a114a6SFelix Kuehling 			break;
564d5a114a6SFelix Kuehling 		if (time_after(jiffies, end_jiffies)) {
565d5a114a6SFelix Kuehling 			pr_err("cp queue preemption time out.\n");
566e2069a7bSMukul Joshi 			kgd_gfx_v9_release_queue(adev, inst);
567d5a114a6SFelix Kuehling 			return -ETIME;
568d5a114a6SFelix Kuehling 		}
569d5a114a6SFelix Kuehling 		usleep_range(500, 1000);
570d5a114a6SFelix Kuehling 	}
571d5a114a6SFelix Kuehling 
572e2069a7bSMukul Joshi 	kgd_gfx_v9_release_queue(adev, inst);
573d5a114a6SFelix Kuehling 	return 0;
574d5a114a6SFelix Kuehling }
575d5a114a6SFelix Kuehling 
576420185fdSGraham Sider static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
577d5a114a6SFelix Kuehling 				unsigned int utimeout)
578d5a114a6SFelix Kuehling {
579d5a114a6SFelix Kuehling 	struct v9_sdma_mqd *m;
580b55a8b8bSYong Zhao 	uint32_t sdma_rlc_reg_offset;
581d5a114a6SFelix Kuehling 	uint32_t temp;
582d5a114a6SFelix Kuehling 	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
583d5a114a6SFelix Kuehling 
584d5a114a6SFelix Kuehling 	m = get_sdma_mqd(mqd);
585b55a8b8bSYong Zhao 	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
586d5a114a6SFelix Kuehling 					    m->sdma_queue_id);
587d5a114a6SFelix Kuehling 
588b55a8b8bSYong Zhao 	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
589d5a114a6SFelix Kuehling 	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
590b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);
591d5a114a6SFelix Kuehling 
592d5a114a6SFelix Kuehling 	while (true) {
593b55a8b8bSYong Zhao 		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
594d5a114a6SFelix Kuehling 		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
595d5a114a6SFelix Kuehling 			break;
596812330ebSYong Zhao 		if (time_after(jiffies, end_jiffies)) {
597812330ebSYong Zhao 			pr_err("SDMA RLC not idle in %s\n", __func__);
598d5a114a6SFelix Kuehling 			return -ETIME;
599812330ebSYong Zhao 		}
600d5a114a6SFelix Kuehling 		usleep_range(500, 1000);
601d5a114a6SFelix Kuehling 	}
602d5a114a6SFelix Kuehling 
603b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
604b55a8b8bSYong Zhao 	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
605b55a8b8bSYong Zhao 		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
606d5a114a6SFelix Kuehling 		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
607d5a114a6SFelix Kuehling 
608b55a8b8bSYong Zhao 	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
609d5a114a6SFelix Kuehling 	m->sdmax_rlcx_rb_rptr_hi =
610b55a8b8bSYong Zhao 		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);
611d5a114a6SFelix Kuehling 
612d5a114a6SFelix Kuehling 	return 0;
613d5a114a6SFelix Kuehling }
614d5a114a6SFelix Kuehling 
6153356c38dSGraham Sider bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
61656fc40abSYong Zhao 					uint8_t vmid, uint16_t *p_pasid)
617d5a114a6SFelix Kuehling {
61856fc40abSYong Zhao 	uint32_t value;
619d5a114a6SFelix Kuehling 
62056fc40abSYong Zhao 	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
621d5a114a6SFelix Kuehling 		     + vmid);
62256fc40abSYong Zhao 	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;
623d5a114a6SFelix Kuehling 
62456fc40abSYong Zhao 	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
625d5a114a6SFelix Kuehling }
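
/*
 * Hypothetical usage sketch (not part of the driver): scan the per-VMID ATC
 * mapping registers via the helper above to find which VMID, if any, is
 * currently mapped to a given pasid. Assumes 16 ATC VMID mapping slots.
 */
static inline int example_find_vmid_for_pasid(struct amdgpu_device *adev,
					      uint16_t pasid)
{
	uint16_t queried;
	uint8_t vmid;

	for (vmid = 0; vmid < 16; vmid++) {
		if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(adev, vmid,
							       &queried) &&
		    queried == pasid)
			return vmid;
	}

	return -1;
}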
626d5a114a6SFelix Kuehling 
6273356c38dSGraham Sider int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev,
628d5a114a6SFelix Kuehling 					uint32_t gfx_index_val,
629e2069a7bSMukul Joshi 					uint32_t sq_cmd, uint32_t inst)
630d5a114a6SFelix Kuehling {
631d5a114a6SFelix Kuehling 	uint32_t data = 0;
632d5a114a6SFelix Kuehling 
633d5a114a6SFelix Kuehling 	mutex_lock(&adev->grbm_idx_mutex);
634d5a114a6SFelix Kuehling 
63502ee3b02SMukul Joshi 	WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, gfx_index_val);
63602ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_CMD, sq_cmd);
637d5a114a6SFelix Kuehling 
638d5a114a6SFelix Kuehling 	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
639d5a114a6SFelix Kuehling 		INSTANCE_BROADCAST_WRITES, 1);
640d5a114a6SFelix Kuehling 	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
641d5a114a6SFelix Kuehling 		SH_BROADCAST_WRITES, 1);
642d5a114a6SFelix Kuehling 	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
643d5a114a6SFelix Kuehling 		SE_BROADCAST_WRITES, 1);
644d5a114a6SFelix Kuehling 
64502ee3b02SMukul Joshi 	WREG32_SOC15_RLC_SHADOW(GC, GET_INST(GC, inst), mmGRBM_GFX_INDEX, data);
646d5a114a6SFelix Kuehling 	mutex_unlock(&adev->grbm_idx_mutex);
647d5a114a6SFelix Kuehling 
648d5a114a6SFelix Kuehling 	return 0;
649d5a114a6SFelix Kuehling }
650d5a114a6SFelix Kuehling 
651cde2e087SJonathan Kim /*
652cde2e087SJonathan Kim  * GFX9 helper for wave launch stall requirements on debug trap setting.
653cde2e087SJonathan Kim  *
654cde2e087SJonathan Kim  * vmid:
655cde2e087SJonathan Kim  *   Target VMID to stall/unstall.
656cde2e087SJonathan Kim  *
657cde2e087SJonathan Kim  * stall:
658cde2e087SJonathan Kim  *   0-unstall wave launch (enable), 1-stall wave launch (disable).
659cde2e087SJonathan Kim  *   After wavefront launch has been stalled, allocated waves must drain from
660cde2e087SJonathan Kim  *   SPI in order for debug trap settings to take effect on those waves.
661cde2e087SJonathan Kim  *   This is roughly a ~96 clock cycle wait on SPI where a read on
662cde2e087SJonathan Kim  *   SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
663cde2e087SJonathan Kim  *   KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads required.
664cde2e087SJonathan Kim  *
665cde2e087SJonathan Kim  *   NOTE: We can afford to clear the entire STALL_VMID field on unstall
666cde2e087SJonathan Kim  *   because GFX9.4.1 cannot support multi-process debugging due to trap
667cde2e087SJonathan Kim  *   configuration and masking being limited to global scope.  Always assume
668cde2e087SJonathan Kim  *   single process conditions.
669cde2e087SJonathan Kim  */
670cde2e087SJonathan Kim #define KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY	3
671cde2e087SJonathan Kim void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
672cde2e087SJonathan Kim 					uint32_t vmid,
673cde2e087SJonathan Kim 					bool stall)
674cde2e087SJonathan Kim {
675cde2e087SJonathan Kim 	int i;
676cde2e087SJonathan Kim 	uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
677cde2e087SJonathan Kim 
6784e8303cfSLijo Lazar 	if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1))
679cde2e087SJonathan Kim 		data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_VMID,
680cde2e087SJonathan Kim 							stall ? 1 << vmid : 0);
681cde2e087SJonathan Kim 	else
682cde2e087SJonathan Kim 		data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA,
683cde2e087SJonathan Kim 							stall ? 1 : 0);
684cde2e087SJonathan Kim 
685cde2e087SJonathan Kim 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);
686cde2e087SJonathan Kim 
687cde2e087SJonathan Kim 	if (!stall)
688cde2e087SJonathan Kim 		return;
689cde2e087SJonathan Kim 
690cde2e087SJonathan Kim 	for (i = 0; i < KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
691cde2e087SJonathan Kim 		RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
692cde2e087SJonathan Kim }
693cde2e087SJonathan Kim 
694cde2e087SJonathan Kim /*
695cde2e087SJonathan Kim  * restore_dbg_registers is ignored here but is a general interface requirement
696cde2e087SJonathan Kim  * for devices that support GFXOFF and where the RLC save/restore list
697cde2e087SJonathan Kim  * does not support hw registers for debugging i.e. the driver has to manually
698cde2e087SJonathan Kim  * initialize the debug mode registers after it has disabled GFX off during the
699cde2e087SJonathan Kim  * debug session.
700cde2e087SJonathan Kim  */
701cde2e087SJonathan Kim uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev,
702cde2e087SJonathan Kim 				bool restore_dbg_registers,
703cde2e087SJonathan Kim 				uint32_t vmid)
704cde2e087SJonathan Kim {
705cde2e087SJonathan Kim 	mutex_lock(&adev->grbm_idx_mutex);
706cde2e087SJonathan Kim 
707cde2e087SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
708cde2e087SJonathan Kim 
709cde2e087SJonathan Kim 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
710cde2e087SJonathan Kim 
711cde2e087SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
712cde2e087SJonathan Kim 
713cde2e087SJonathan Kim 	mutex_unlock(&adev->grbm_idx_mutex);
714cde2e087SJonathan Kim 
715cde2e087SJonathan Kim 	return 0;
716cde2e087SJonathan Kim }
717cde2e087SJonathan Kim 
718cde2e087SJonathan Kim /*
719cde2e087SJonathan Kim  * keep_trap_enabled is ignored here but is a general interface requirement
720cde2e087SJonathan Kim  * for devices that support multi-process debugging where the performance
721cde2e087SJonathan Kim  * overhead from trap temporary setup needs to be bypassed when the debug
722cde2e087SJonathan Kim  * session has ended.
723cde2e087SJonathan Kim  */
724cde2e087SJonathan Kim uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev,
725cde2e087SJonathan Kim 					bool keep_trap_enabled,
726cde2e087SJonathan Kim 					uint32_t vmid)
727cde2e087SJonathan Kim {
728cde2e087SJonathan Kim 	mutex_lock(&adev->grbm_idx_mutex);
729cde2e087SJonathan Kim 
730cde2e087SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
731cde2e087SJonathan Kim 
732cde2e087SJonathan Kim 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);
733cde2e087SJonathan Kim 
734cde2e087SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
735cde2e087SJonathan Kim 
736cde2e087SJonathan Kim 	mutex_unlock(&adev->grbm_idx_mutex);
737cde2e087SJonathan Kim 
738cde2e087SJonathan Kim 	return 0;
739cde2e087SJonathan Kim }
740cde2e087SJonathan Kim 
741101827e1SJonathan Kim int kgd_gfx_v9_validate_trap_override_request(struct amdgpu_device *adev,
742101827e1SJonathan Kim 					uint32_t trap_override,
743101827e1SJonathan Kim 					uint32_t *trap_mask_supported)
744101827e1SJonathan Kim {
745101827e1SJonathan Kim 	*trap_mask_supported &= KFD_DBG_TRAP_MASK_DBG_ADDRESS_WATCH;
746101827e1SJonathan Kim 
747101827e1SJonathan Kim 	/* The SPI_GDBG_TRAP_MASK register is global and affects all
748101827e1SJonathan Kim 	 * processes. Only allow OR-ing the address-watch bit, since
749101827e1SJonathan Kim 	 * this only affects processes under the debugger. Other bits
750101827e1SJonathan Kim 	 * should stay 0 to avoid the debugger interfering with other
751101827e1SJonathan Kim 	 * processes.
752101827e1SJonathan Kim 	 */
753101827e1SJonathan Kim 	if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR)
754101827e1SJonathan Kim 		return -EINVAL;
755101827e1SJonathan Kim 
756101827e1SJonathan Kim 	return 0;
757101827e1SJonathan Kim }
758101827e1SJonathan Kim 
759101827e1SJonathan Kim uint32_t kgd_gfx_v9_set_wave_launch_trap_override(struct amdgpu_device *adev,
760101827e1SJonathan Kim 					     uint32_t vmid,
761101827e1SJonathan Kim 					     uint32_t trap_override,
762101827e1SJonathan Kim 					     uint32_t trap_mask_bits,
763101827e1SJonathan Kim 					     uint32_t trap_mask_request,
764101827e1SJonathan Kim 					     uint32_t *trap_mask_prev,
765101827e1SJonathan Kim 					     uint32_t kfd_dbg_cntl_prev)
766101827e1SJonathan Kim {
767101827e1SJonathan Kim 	uint32_t data, wave_cntl_prev;
768101827e1SJonathan Kim 
769101827e1SJonathan Kim 	mutex_lock(&adev->grbm_idx_mutex);
770101827e1SJonathan Kim 
771101827e1SJonathan Kim 	wave_cntl_prev = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
772101827e1SJonathan Kim 
773101827e1SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
774101827e1SJonathan Kim 
775101827e1SJonathan Kim 	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK));
776101827e1SJonathan Kim 	*trap_mask_prev = REG_GET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN);
777101827e1SJonathan Kim 
778101827e1SJonathan Kim 	trap_mask_bits = (trap_mask_bits & trap_mask_request) |
779101827e1SJonathan Kim 		(*trap_mask_prev & ~trap_mask_request);
780101827e1SJonathan Kim 
781101827e1SJonathan Kim 	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, EXCP_EN, trap_mask_bits);
782101827e1SJonathan Kim 	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK, REPLACE, trap_override);
783101827e1SJonathan Kim 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);
784101827e1SJonathan Kim 
785101827e1SJonathan Kim 	/* We need to preserve wave launch mode stall settings. */
786101827e1SJonathan Kim 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), wave_cntl_prev);
787101827e1SJonathan Kim 
788101827e1SJonathan Kim 	mutex_unlock(&adev->grbm_idx_mutex);
789101827e1SJonathan Kim 
790101827e1SJonathan Kim 	return 0;
791101827e1SJonathan Kim }
792101827e1SJonathan Kim 
793aea1b473SJonathan Kim uint32_t kgd_gfx_v9_set_wave_launch_mode(struct amdgpu_device *adev,
794aea1b473SJonathan Kim 					uint8_t wave_launch_mode,
795aea1b473SJonathan Kim 					uint32_t vmid)
796aea1b473SJonathan Kim {
797aea1b473SJonathan Kim 	uint32_t data = 0;
798aea1b473SJonathan Kim 	bool is_mode_set = !!wave_launch_mode;
799aea1b473SJonathan Kim 
800aea1b473SJonathan Kim 	mutex_lock(&adev->grbm_idx_mutex);
801aea1b473SJonathan Kim 
802aea1b473SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, true);
803aea1b473SJonathan Kim 
804aea1b473SJonathan Kim 	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
805aea1b473SJonathan Kim 		VMID_MASK, is_mode_set ? 1 << vmid : 0);
806aea1b473SJonathan Kim 	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
807aea1b473SJonathan Kim 		MODE, is_mode_set ? wave_launch_mode : 0);
808aea1b473SJonathan Kim 	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);
809aea1b473SJonathan Kim 
810aea1b473SJonathan Kim 	kgd_gfx_v9_set_wave_launch_stall(adev, vmid, false);
811aea1b473SJonathan Kim 
812aea1b473SJonathan Kim 	mutex_unlock(&adev->grbm_idx_mutex);
813aea1b473SJonathan Kim 
814aea1b473SJonathan Kim 	return 0;
815aea1b473SJonathan Kim }
816aea1b473SJonathan Kim 
817e0f85f46SJonathan Kim #define TCP_WATCH_STRIDE (mmTCP_WATCH1_ADDR_H - mmTCP_WATCH0_ADDR_H)
818e0f85f46SJonathan Kim uint32_t kgd_gfx_v9_set_address_watch(struct amdgpu_device *adev,
819e0f85f46SJonathan Kim 					uint64_t watch_address,
820e0f85f46SJonathan Kim 					uint32_t watch_address_mask,
821e0f85f46SJonathan Kim 					uint32_t watch_id,
822e0f85f46SJonathan Kim 					uint32_t watch_mode,
823036e348fSEric Huang 					uint32_t debug_vmid,
824036e348fSEric Huang 					uint32_t inst)
825e0f85f46SJonathan Kim {
826e0f85f46SJonathan Kim 	uint32_t watch_address_high;
827e0f85f46SJonathan Kim 	uint32_t watch_address_low;
828e0f85f46SJonathan Kim 	uint32_t watch_address_cntl;
829e0f85f46SJonathan Kim 
830e0f85f46SJonathan Kim 	watch_address_cntl = 0;
831e0f85f46SJonathan Kim 
832e0f85f46SJonathan Kim 	watch_address_low = lower_32_bits(watch_address);
833e0f85f46SJonathan Kim 	watch_address_high = upper_32_bits(watch_address) & 0xffff;
834e0f85f46SJonathan Kim 
835e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
836e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
837e0f85f46SJonathan Kim 			VMID,
838e0f85f46SJonathan Kim 			debug_vmid);
839e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
840e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
841e0f85f46SJonathan Kim 			MODE,
842e0f85f46SJonathan Kim 			watch_mode);
843e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
844e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
845e0f85f46SJonathan Kim 			MASK,
846e0f85f46SJonathan Kim 			watch_address_mask >> 6);
847e0f85f46SJonathan Kim 
848e0f85f46SJonathan Kim 	/* Turning off this watch point until we set all the registers */
849e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
850e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
851e0f85f46SJonathan Kim 			VALID,
852e0f85f46SJonathan Kim 			0);
853e0f85f46SJonathan Kim 
854e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
855e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
856e0f85f46SJonathan Kim 			watch_address_cntl);
857e0f85f46SJonathan Kim 
858e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_H) +
859e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
860e0f85f46SJonathan Kim 			watch_address_high);
861e0f85f46SJonathan Kim 
862e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_ADDR_L) +
863e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
864e0f85f46SJonathan Kim 			watch_address_low);
865e0f85f46SJonathan Kim 
866e0f85f46SJonathan Kim 	/* Enable the watch point */
867e0f85f46SJonathan Kim 	watch_address_cntl = REG_SET_FIELD(watch_address_cntl,
868e0f85f46SJonathan Kim 			TCP_WATCH0_CNTL,
869e0f85f46SJonathan Kim 			VALID,
870e0f85f46SJonathan Kim 			1);
871e0f85f46SJonathan Kim 
872e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
873e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
874e0f85f46SJonathan Kim 			watch_address_cntl);
875e0f85f46SJonathan Kim 
876e0f85f46SJonathan Kim 	return 0;
877e0f85f46SJonathan Kim }
878e0f85f46SJonathan Kim 
879e0f85f46SJonathan Kim uint32_t kgd_gfx_v9_clear_address_watch(struct amdgpu_device *adev,
880e0f85f46SJonathan Kim 					uint32_t watch_id)
881e0f85f46SJonathan Kim {
882e0f85f46SJonathan Kim 	uint32_t watch_address_cntl;
883e0f85f46SJonathan Kim 
884e0f85f46SJonathan Kim 	watch_address_cntl = 0;
885e0f85f46SJonathan Kim 
886e0f85f46SJonathan Kim 	WREG32_RLC((SOC15_REG_OFFSET(GC, 0, mmTCP_WATCH0_CNTL) +
887e0f85f46SJonathan Kim 			(watch_id * TCP_WATCH_STRIDE)),
888e0f85f46SJonathan Kim 			watch_address_cntl);
889e0f85f46SJonathan Kim 
890e0f85f46SJonathan Kim 	return 0;
891e0f85f46SJonathan Kim }
892e0f85f46SJonathan Kim 
8937cee6a68SJonathan Kim /* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values
8947cee6a68SJonathan Kim  * The values read are:
8957cee6a68SJonathan Kim  *     ib_offload_wait_time     -- Wait Count for Indirect Buffer Offloads.
8967cee6a68SJonathan Kim  *     atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
8977cee6a68SJonathan Kim  *     wrm_offload_wait_time    -- Wait Count for WAIT_REG_MEM Offloads.
8987cee6a68SJonathan Kim  *     gws_wait_time            -- Wait Count for Global Wave Syncs.
8997cee6a68SJonathan Kim  *     que_sleep_wait_time      -- Wait Count for Dequeue Retry.
9007cee6a68SJonathan Kim  *     sch_wave_wait_time       -- Wait Count for Scheduling Wave Message.
9017cee6a68SJonathan Kim  *     sem_rearm_wait_time      -- Wait Count for Semaphore re-arm.
9027cee6a68SJonathan Kim  *     deq_retry_wait_time      -- Wait Count for Global Wave Syncs.
9037cee6a68SJonathan Kim  */
9047cee6a68SJonathan Kim void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
905036e348fSEric Huang 					uint32_t *wait_times,
906036e348fSEric Huang 					uint32_t inst)
9087cee6a68SJonathan Kim {
90919726428SVictor Lu 	*wait_times = RREG32_SOC15_RLC(GC, GET_INST(GC, inst),
91019726428SVictor Lu 			mmCP_IQ_WAIT_TIME2);
9117cee6a68SJonathan Kim }
9127cee6a68SJonathan Kim 
9133356c38dSGraham Sider void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
914ad5901dfSYong Zhao 			uint32_t vmid, uint64_t page_table_base)
915d5a114a6SFelix Kuehling {
916d5a114a6SFelix Kuehling 	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
917d5a114a6SFelix Kuehling 		pr_err("trying to set page table base for wrong VMID %u\n",
918d5a114a6SFelix Kuehling 		       vmid);
919d5a114a6SFelix Kuehling 		return;
920d5a114a6SFelix Kuehling 	}
921d5a114a6SFelix Kuehling 
9229fb1506eSOak Zeng 	adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
923d5a114a6SFelix Kuehling 
9248ffff9b4SOak Zeng 	adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
925d5a114a6SFelix Kuehling }
9263e205a08SOak Zeng 
92743a4bc82SRamesh Errabolu static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
92843a4bc82SRamesh Errabolu {
92943a4bc82SRamesh Errabolu 	mutex_lock(&adev->srbm_mutex);
93043a4bc82SRamesh Errabolu 	mutex_lock(&adev->grbm_idx_mutex);
93143a4bc82SRamesh Errabolu 
93243a4bc82SRamesh Errabolu }
93343a4bc82SRamesh Errabolu 
93443a4bc82SRamesh Errabolu static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
93543a4bc82SRamesh Errabolu {
93643a4bc82SRamesh Errabolu 	mutex_unlock(&adev->grbm_idx_mutex);
93743a4bc82SRamesh Errabolu 	mutex_unlock(&adev->srbm_mutex);
93843a4bc82SRamesh Errabolu }
93943a4bc82SRamesh Errabolu 
94043a4bc82SRamesh Errabolu /**
9411fdbbc12SFabio M. De Francesco  * get_wave_count: Read device registers to get number of waves in flight for
94243a4bc82SRamesh Errabolu  * a particular queue. The method also returns the VMID associated with the
94343a4bc82SRamesh Errabolu  * queue.
94443a4bc82SRamesh Errabolu  *
94543a4bc82SRamesh Errabolu  * @adev: Handle of device whose registers are to be read
94643a4bc82SRamesh Errabolu  * @queue_idx: Index of queue in the queue-map bit-field
94743a4bc82SRamesh Errabolu  * @wave_cnt: Output parameter updated with number of waves in flight
94843a4bc82SRamesh Errabolu  * @vmid: Output parameter updated with VMID of queue whose wave count
94943a4bc82SRamesh Errabolu  *        is being collected
9503eeb0d03SSrinivasan Shanmugam  * @inst: xcc's instance number on a multi-XCC setup
95143a4bc82SRamesh Errabolu  */
95243a4bc82SRamesh Errabolu static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
953e2069a7bSMukul Joshi 		int *wave_cnt, int *vmid, uint32_t inst)
95443a4bc82SRamesh Errabolu {
95543a4bc82SRamesh Errabolu 	int pipe_idx;
95643a4bc82SRamesh Errabolu 	int queue_slot;
95743a4bc82SRamesh Errabolu 	unsigned int reg_val;
95843a4bc82SRamesh Errabolu 
95943a4bc82SRamesh Errabolu 	/*
96043a4bc82SRamesh Errabolu 	 * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
96143a4bc82SRamesh Errabolu 	 * parameters to read out waves in flight. Get VMID if there are
96243a4bc82SRamesh Errabolu 	 * non-zero waves in flight.
96343a4bc82SRamesh Errabolu 	 */
96443a4bc82SRamesh Errabolu 	*vmid = 0xFF;
96543a4bc82SRamesh Errabolu 	*wave_cnt = 0;
96643a4bc82SRamesh Errabolu 	pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
96743a4bc82SRamesh Errabolu 	queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
968e2069a7bSMukul Joshi 	soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst);
969e2069a7bSMukul Joshi 	reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
97043a4bc82SRamesh Errabolu 			 queue_slot);
97143a4bc82SRamesh Errabolu 	*wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
97243a4bc82SRamesh Errabolu 	if (*wave_cnt != 0)
973e2069a7bSMukul Joshi 		*vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) &
97443a4bc82SRamesh Errabolu 			 CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
97543a4bc82SRamesh Errabolu }
97643a4bc82SRamesh Errabolu 
97743a4bc82SRamesh Errabolu /**
9781fdbbc12SFabio M. De Francesco  * kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each
97943a4bc82SRamesh Errabolu  * shader engine and aggregates the number of waves that are in flight for the
98043a4bc82SRamesh Errabolu  * process whose pasid is provided as a parameter. The process could have ZERO
98143a4bc82SRamesh Errabolu  * or more queues running and submitting waves to compute units.
98243a4bc82SRamesh Errabolu  *
98377608faaSRajneesh Bhardwaj  * @adev: Handle of device from which to get number of waves in flight
98443a4bc82SRamesh Errabolu  * @pasid: Identifies the process for which this query call is invoked
9851fdbbc12SFabio M. De Francesco  * @pasid_wave_cnt: Output parameter updated with number of waves in flight that
98643a4bc82SRamesh Errabolu  *                  belong to process with given pasid
98743a4bc82SRamesh Errabolu  * @max_waves_per_cu: Output parameter updated with maximum number of waves
98843a4bc82SRamesh Errabolu  *                    possible per Compute Unit
9893eeb0d03SSrinivasan Shanmugam  * @inst: xcc's instance number on a multi-XCC setup
99043a4bc82SRamesh Errabolu  *
9911fdbbc12SFabio M. De Francesco  * Note: It's possible that the device has too many queues (oversubscription)
99243a4bc82SRamesh Errabolu  * in which case a VMID could be remapped to a different PASID. This could lead
99377608faaSRajneesh Bhardwaj  * to an inaccurate wave count. Following is a high-level sequence:
99443a4bc82SRamesh Errabolu  *    Time T1: vmid = getVmid(); vmid is associated with Pasid P1
99543a4bc82SRamesh Errabolu  *    Time T2: passId = getPasId(vmid); vmid is associated with Pasid P2
 99643a4bc82SRamesh Errabolu  * In the sequence above, the wave count obtained at time T1 will be incorrectly
 99743a4bc82SRamesh Errabolu  * lost from or added to the total wave count.
99843a4bc82SRamesh Errabolu  *
99943a4bc82SRamesh Errabolu  * The registers that provide the waves in flight are:
100043a4bc82SRamesh Errabolu  *
100143a4bc82SRamesh Errabolu  *  SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
100243a4bc82SRamesh Errabolu  *  queue is slotted, OFF if there is no queue. A process could have ZERO or
100343a4bc82SRamesh Errabolu  *  more queues slotted and submitting waves to be run on compute units. Even
100443a4bc82SRamesh Errabolu  *  when there is a queue it is possible there could be zero wave fronts; this
100543a4bc82SRamesh Errabolu  *  can happen when the queue is waiting on top-of-pipe events - e.g. a
100643a4bc82SRamesh Errabolu  *  waitRegMem command
100743a4bc82SRamesh Errabolu  *
100843a4bc82SRamesh Errabolu  *  For each bit that is ON from above:
100943a4bc82SRamesh Errabolu  *
101043a4bc82SRamesh Errabolu  *    Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
101143a4bc82SRamesh Errabolu  *    number of waves that are in flight for the queue at specified index. The
101243a4bc82SRamesh Errabolu  *    index ranges from 0 to 7.
101343a4bc82SRamesh Errabolu  *
101443a4bc82SRamesh Errabolu  *    If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
101543a4bc82SRamesh Errabolu  *    of the wave(s).
101643a4bc82SRamesh Errabolu  *
101743a4bc82SRamesh Errabolu  *    Determine if VMID from above step maps to pasid provided as parameter. If
101843a4bc82SRamesh Errabolu  *    it matches, aggregate the wave count. A VMID that does not match the pasid
101943a4bc82SRamesh Errabolu  *    is a normal condition, i.e. a device is expected to support multiple queues
102043a4bc82SRamesh Errabolu  *    from multiple processes.
102143a4bc82SRamesh Errabolu  *
102243a4bc82SRamesh Errabolu  *  Reading registers referenced above involves programming GRBM appropriately
102343a4bc82SRamesh Errabolu  */
10243356c38dSGraham Sider void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid,
1025e2069a7bSMukul Joshi 		int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst)
102643a4bc82SRamesh Errabolu {
102743a4bc82SRamesh Errabolu 	int qidx;
102843a4bc82SRamesh Errabolu 	int vmid;
102943a4bc82SRamesh Errabolu 	int se_idx;
103043a4bc82SRamesh Errabolu 	int sh_idx;
103143a4bc82SRamesh Errabolu 	int se_cnt;
103243a4bc82SRamesh Errabolu 	int sh_cnt;
103343a4bc82SRamesh Errabolu 	int wave_cnt;
103443a4bc82SRamesh Errabolu 	int queue_map;
103543a4bc82SRamesh Errabolu 	int pasid_tmp;
103643a4bc82SRamesh Errabolu 	int max_queue_cnt;
103743a4bc82SRamesh Errabolu 	int vmid_wave_cnt = 0;
103868fa72a4SMukul Joshi 	DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);
103943a4bc82SRamesh Errabolu 
104043a4bc82SRamesh Errabolu 	lock_spi_csq_mutexes(adev);
1041e2069a7bSMukul Joshi 	soc15_grbm_select(adev, 1, 0, 0, 0, inst);
104243a4bc82SRamesh Errabolu 
104343a4bc82SRamesh Errabolu 	/*
104443a4bc82SRamesh Errabolu 	 * Iterate through the shader engines and arrays of the device
104543a4bc82SRamesh Errabolu 	 * to get number of waves in flight
104643a4bc82SRamesh Errabolu 	 */
1047be697aa3SLe Ma 	bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
104868fa72a4SMukul Joshi 			  AMDGPU_MAX_QUEUES);
104943a4bc82SRamesh Errabolu 	max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
105043a4bc82SRamesh Errabolu 			adev->gfx.mec.num_queue_per_pipe;
105143a4bc82SRamesh Errabolu 	sh_cnt = adev->gfx.config.max_sh_per_se;
105243a4bc82SRamesh Errabolu 	se_cnt = adev->gfx.config.max_shader_engines;
105343a4bc82SRamesh Errabolu 	for (se_idx = 0; se_idx < se_cnt; se_idx++) {
105443a4bc82SRamesh Errabolu 		for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
105543a4bc82SRamesh Errabolu 
1056e2069a7bSMukul Joshi 			amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst);
1057e2069a7bSMukul Joshi 			queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS);
105843a4bc82SRamesh Errabolu 
105943a4bc82SRamesh Errabolu 			/*
106043a4bc82SRamesh Errabolu 			 * Assumption: queue map encodes following schema: four
106143a4bc82SRamesh Errabolu 			 * pipes per micro-engine, with each pipe mapping
106243a4bc82SRamesh Errabolu 			 * eight queues. This schema is true for GFX9 devices
106343a4bc82SRamesh Errabolu 			 * and must be verified for newer device families
106443a4bc82SRamesh Errabolu 			 */
106543a4bc82SRamesh Errabolu 			for (qidx = 0; qidx < max_queue_cnt; qidx++) {
106643a4bc82SRamesh Errabolu 
106743a4bc82SRamesh Errabolu 				/* Skip queues that are not associated with
106843a4bc82SRamesh Errabolu 				 * compute functions
106943a4bc82SRamesh Errabolu 				 */
107043a4bc82SRamesh Errabolu 				if (!test_bit(qidx, cp_queue_bitmap))
107143a4bc82SRamesh Errabolu 					continue;
107243a4bc82SRamesh Errabolu 
107343a4bc82SRamesh Errabolu 				if (!(queue_map & (1 << qidx)))
107443a4bc82SRamesh Errabolu 					continue;
107543a4bc82SRamesh Errabolu 
107643a4bc82SRamesh Errabolu 				/* Get number of waves in flight and aggregate them */
1077e2069a7bSMukul Joshi 				get_wave_count(adev, qidx, &wave_cnt, &vmid,
1078e2069a7bSMukul Joshi 						inst);
107943a4bc82SRamesh Errabolu 				if (wave_cnt != 0) {
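					/* IH_VMID_0_LUT maps a VMID to the pasid that owns it */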
108043a4bc82SRamesh Errabolu 					pasid_tmp =
1081e2069a7bSMukul Joshi 					  RREG32(SOC15_REG_OFFSET(OSSSYS, inst,
108243a4bc82SRamesh Errabolu 						 mmIH_VMID_0_LUT) + vmid);
108343a4bc82SRamesh Errabolu 					if (pasid_tmp == pasid)
108443a4bc82SRamesh Errabolu 						vmid_wave_cnt += wave_cnt;
108543a4bc82SRamesh Errabolu 				}
108643a4bc82SRamesh Errabolu 			}
108743a4bc82SRamesh Errabolu 		}
108843a4bc82SRamesh Errabolu 	}
108943a4bc82SRamesh Errabolu 
1090e2069a7bSMukul Joshi 	amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst);
1091e2069a7bSMukul Joshi 	soc15_grbm_select(adev, 0, 0, 0, 0, inst);
109243a4bc82SRamesh Errabolu 	unlock_spi_csq_mutexes(adev);
109343a4bc82SRamesh Errabolu 
109443a4bc82SRamesh Errabolu 	/* Update the output parameters and return */
109543a4bc82SRamesh Errabolu 	*pasid_wave_cnt = vmid_wave_cnt;
109643a4bc82SRamesh Errabolu 	*max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
109743a4bc82SRamesh Errabolu 				adev->gfx.cu_info.max_waves_per_simd;
1098d5a114a6SFelix Kuehling }
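/*
 * Illustrative sketch only (not part of this file): one way a caller holding a
 * kfd2kgd table could consume kgd_gfx_v9_get_cu_occupancy() above.  The helper
 * name and its parameters are hypothetical; occupancy can then be derived as
 * wave_cnt / (number of enabled CUs * max_waves_per_cu).
 */
#if 0
static int example_pasid_wave_count(struct amdgpu_device *adev,
				    const struct kfd2kgd_calls *f2g,
				    int pasid, uint32_t xcc_inst)
{
	int wave_cnt = 0, max_waves_per_cu = 0;

	/* Aggregates waves in flight for this pasid on the given XCC instance */
	f2g->get_cu_occupancy(adev, pasid, &wave_cnt, &max_waves_per_cu,
			      xcc_inst);

	return wave_cnt;
}
#endif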
10993e205a08SOak Zeng 
11007cee6a68SJonathan Kim void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
11017cee6a68SJonathan Kim 		uint32_t wait_times,
11027cee6a68SJonathan Kim 		uint32_t grace_period,
11037cee6a68SJonathan Kim 		uint32_t *reg_offset,
110456d6daa3SMukul Joshi 		uint32_t *reg_data)
11057cee6a68SJonathan Kim {
11067cee6a68SJonathan Kim 	*reg_data = wait_times;
11077cee6a68SJonathan Kim 
11087cee6a68SJonathan Kim 	/*
11091879e009SMukul Joshi 	 * The CP cannot handle a grace period input of 0; it would result in
11107cee6a68SJonathan Kim 	 * an infinite grace period being set, so clamp it to 1 to prevent this.
11117cee6a68SJonathan Kim 	 */
11127cee6a68SJonathan Kim 	if (grace_period == 0)
11137cee6a68SJonathan Kim 		grace_period = 1;
11147cee6a68SJonathan Kim 
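	/* Encode the grace period into the SCH_WAVE field of CP_IQ_WAIT_TIME2 */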
11157cee6a68SJonathan Kim 	*reg_data = REG_SET_FIELD(*reg_data,
11167cee6a68SJonathan Kim 			CP_IQ_WAIT_TIME2,
11177cee6a68SJonathan Kim 			SCH_WAVE,
11187cee6a68SJonathan Kim 			grace_period);
11197cee6a68SJonathan Kim 
112056d6daa3SMukul Joshi 	*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
11217cee6a68SJonathan Kim }
11227cee6a68SJonathan Kim 
11233356c38dSGraham Sider void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,
1124e2069a7bSMukul Joshi 		uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst)
1125b53ef0dfSMukul Joshi {
1126e2069a7bSMukul Joshi 	kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
1127b53ef0dfSMukul Joshi 
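	/* TBA/TMA are programmed in 256-byte units, hence the 8-bit right shift */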
1128b53ef0dfSMukul Joshi 	/*
1129b53ef0dfSMukul Joshi 	 * Program TBA registers
1130b53ef0dfSMukul Joshi 	 */
113102ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_LO,
1132b53ef0dfSMukul Joshi 			lower_32_bits(tba_addr >> 8));
113302ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TBA_HI,
1134b53ef0dfSMukul Joshi 			upper_32_bits(tba_addr >> 8));
1135b53ef0dfSMukul Joshi 
1136b53ef0dfSMukul Joshi 	/*
1137b53ef0dfSMukul Joshi 	 * Program TMA registers
1138b53ef0dfSMukul Joshi 	 */
113902ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_LO,
1140b53ef0dfSMukul Joshi 			lower_32_bits(tma_addr >> 8));
114102ee3b02SMukul Joshi 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSQ_SHADER_TMA_HI,
1142b53ef0dfSMukul Joshi 			upper_32_bits(tma_addr >> 8));
1143b53ef0dfSMukul Joshi 
1144e2069a7bSMukul Joshi 	kgd_gfx_v9_unlock_srbm(adev, inst);
1145b53ef0dfSMukul Joshi }
1146b53ef0dfSMukul Joshi 
1147ee0a469cSJonathan Kim uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
1148ee0a469cSJonathan Kim 				    uint32_t pipe_id, uint32_t queue_id,
1149ee0a469cSJonathan Kim 				    uint32_t inst)
1150ee0a469cSJonathan Kim {
1151ee0a469cSJonathan Kim 	uint32_t low, high;
1152ee0a469cSJonathan Kim 	uint64_t queue_addr = 0;
1153ee0a469cSJonathan Kim 
1154*7c1a2d8aSAlex Deucher 	if (!adev->debug_exp_resets &&
1155*7c1a2d8aSAlex Deucher 	    !adev->gfx.num_gfx_rings)
1156*7c1a2d8aSAlex Deucher 		return 0;
1157*7c1a2d8aSAlex Deucher 
1158ee0a469cSJonathan Kim 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
1159ee0a469cSJonathan Kim 	amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
1160ee0a469cSJonathan Kim 
1161ee0a469cSJonathan Kim 	if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
1162ee0a469cSJonathan Kim 		goto unlock_out;
1163ee0a469cSJonathan Kim 
1164ee0a469cSJonathan Kim 	low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
1165ee0a469cSJonathan Kim 	high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
1166ee0a469cSJonathan Kim 
1167ee0a469cSJonathan Kim 	/* only concerned with user queues. */
1168ee0a469cSJonathan Kim 	if (!high)
1169ee0a469cSJonathan Kim 		goto unlock_out;
1170ee0a469cSJonathan Kim 
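	/* The PQ base registers hold the address in 256-byte units; rebuild the byte address */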
1171ee0a469cSJonathan Kim 	queue_addr = (((queue_addr | high) << 32) | low) << 8;
1172ee0a469cSJonathan Kim 
1173ee0a469cSJonathan Kim unlock_out:
1174ee0a469cSJonathan Kim 	amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
1175ee0a469cSJonathan Kim 	kgd_gfx_v9_release_queue(adev, inst);
1176ee0a469cSJonathan Kim 
1177ee0a469cSJonathan Kim 	return queue_addr;
1178ee0a469cSJonathan Kim }
1179ee0a469cSJonathan Kim 
1180a85c3db6SJonathan Kim /* assumes the queue has already been acquired */
1181a85c3db6SJonathan Kim static int kgd_gfx_v9_hqd_dequeue_wait(struct amdgpu_device *adev, uint32_t inst,
1182a85c3db6SJonathan Kim 				       unsigned int utimeout)
1183a85c3db6SJonathan Kim {
1184a85c3db6SJonathan Kim 	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;
1185a85c3db6SJonathan Kim 
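	/* Poll CP_HQD_ACTIVE until the queue deactivates or the timeout expires */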
1186a85c3db6SJonathan Kim 	while (true) {
1187a85c3db6SJonathan Kim 		uint32_t temp = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE);
1188a85c3db6SJonathan Kim 
1189a85c3db6SJonathan Kim 		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
1190a85c3db6SJonathan Kim 			return 0;
1191a85c3db6SJonathan Kim 
1192a85c3db6SJonathan Kim 		if (time_after(jiffies, end_jiffies))
1193a85c3db6SJonathan Kim 			return -ETIME;
1194a85c3db6SJonathan Kim 
1195a85c3db6SJonathan Kim 		usleep_range(500, 1000);
1196a85c3db6SJonathan Kim 	}
1197a85c3db6SJonathan Kim }
1198a85c3db6SJonathan Kim 
1199ee0a469cSJonathan Kim uint64_t kgd_gfx_v9_hqd_reset(struct amdgpu_device *adev,
1200ee0a469cSJonathan Kim 			      uint32_t pipe_id, uint32_t queue_id,
1201ee0a469cSJonathan Kim 			      uint32_t inst, unsigned int utimeout)
1202ee0a469cSJonathan Kim {
1203a85c3db6SJonathan Kim 	uint32_t low, high, pipe_reset_data = 0;
1204ee0a469cSJonathan Kim 	uint64_t queue_addr = 0;
1205ee0a469cSJonathan Kim 
1206ee0a469cSJonathan Kim 	kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
1207ee0a469cSJonathan Kim 	amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
1208ee0a469cSJonathan Kim 
1209ee0a469cSJonathan Kim 	if (!RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE))
1210ee0a469cSJonathan Kim 		goto unlock_out;
1211ee0a469cSJonathan Kim 
1212ee0a469cSJonathan Kim 	low = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE);
1213ee0a469cSJonathan Kim 	high = RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_BASE_HI);
1214ee0a469cSJonathan Kim 
1215ee0a469cSJonathan Kim 	/* only concerned with user queues. */
1216ee0a469cSJonathan Kim 	if (!high)
1217ee0a469cSJonathan Kim 		goto unlock_out;
1218ee0a469cSJonathan Kim 
1219ee0a469cSJonathan Kim 	queue_addr = (((queue_addr | high) << 32) | low) << 8;
1220ee0a469cSJonathan Kim 
1221ee0a469cSJonathan Kim 	pr_debug("Attempting queue reset on XCC %i pipe id %i queue id %i\n",
1222ee0a469cSJonathan Kim 		 inst, pipe_id, queue_id);
1223ee0a469cSJonathan Kim 
1224ee0a469cSJonathan Kim 	/* assume a previously issued dequeue request will take effect after reset */
1225ee0a469cSJonathan Kim 	WREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_COMPUTE_QUEUE_RESET, 0x1);
1226ee0a469cSJonathan Kim 
1227a85c3db6SJonathan Kim 	if (!kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
1228a85c3db6SJonathan Kim 		goto unlock_out;
1229ee0a469cSJonathan Kim 
1230a85c3db6SJonathan Kim 	pr_debug("Attempting pipe reset on XCC %i pipe id %i\n", inst, pipe_id);
1231ee0a469cSJonathan Kim 
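	/*
	 * CP_MEC_CNTL has one reset bit per ME1 pipe; shift the PIPE0 reset
	 * field by the pipe id, assert it, then deassert it.
	 */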
1232a85c3db6SJonathan Kim 	pipe_reset_data = REG_SET_FIELD(pipe_reset_data, CP_MEC_CNTL, MEC_ME1_PIPE0_RESET, 1);
1233a85c3db6SJonathan Kim 	pipe_reset_data = pipe_reset_data << pipe_id;
1234a85c3db6SJonathan Kim 
1235a85c3db6SJonathan Kim 	WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, pipe_reset_data);
1236a85c3db6SJonathan Kim 	WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_MEC_CNTL, 0);
1237a85c3db6SJonathan Kim 
1238a85c3db6SJonathan Kim 	if (kgd_gfx_v9_hqd_dequeue_wait(adev, inst, utimeout))
1239ee0a469cSJonathan Kim 		queue_addr = 0;
1240ee0a469cSJonathan Kim 
1241ee0a469cSJonathan Kim unlock_out:
1242a85c3db6SJonathan Kim 	pr_debug("queue reset on XCC %i pipe id %i queue id %i %s\n",
1243a85c3db6SJonathan Kim 		 inst, pipe_id, queue_id, !!queue_addr ? "succeeded!" : "failed!");
1244ee0a469cSJonathan Kim 	amdgpu_gfx_rlc_exit_safe_mode(adev, inst);
1245ee0a469cSJonathan Kim 	kgd_gfx_v9_release_queue(adev, inst);
1246ee0a469cSJonathan Kim 
1247ee0a469cSJonathan Kim 	return queue_addr;
1248ee0a469cSJonathan Kim }
1249ee0a469cSJonathan Kim 
1250e392c887SYong Zhao const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
12513e205a08SOak Zeng 	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
12523e205a08SOak Zeng 	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
12533e205a08SOak Zeng 	.init_interrupts = kgd_gfx_v9_init_interrupts,
12543e205a08SOak Zeng 	.hqd_load = kgd_gfx_v9_hqd_load,
125535cd89d5SAaron Liu 	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
12563e205a08SOak Zeng 	.hqd_sdma_load = kgd_hqd_sdma_load,
12573e205a08SOak Zeng 	.hqd_dump = kgd_gfx_v9_hqd_dump,
12583e205a08SOak Zeng 	.hqd_sdma_dump = kgd_hqd_sdma_dump,
12593e205a08SOak Zeng 	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
12603e205a08SOak Zeng 	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
12613e205a08SOak Zeng 	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
12623e205a08SOak Zeng 	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
12633e205a08SOak Zeng 	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
126456fc40abSYong Zhao 	.get_atc_vmid_pasid_mapping_info =
126556fc40abSYong Zhao 			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
12663e205a08SOak Zeng 	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
1267cde2e087SJonathan Kim 	.enable_debug_trap = kgd_gfx_v9_enable_debug_trap,
1268cde2e087SJonathan Kim 	.disable_debug_trap = kgd_gfx_v9_disable_debug_trap,
1269101827e1SJonathan Kim 	.validate_trap_override_request = kgd_gfx_v9_validate_trap_override_request,
1270101827e1SJonathan Kim 	.set_wave_launch_trap_override = kgd_gfx_v9_set_wave_launch_trap_override,
1271aea1b473SJonathan Kim 	.set_wave_launch_mode = kgd_gfx_v9_set_wave_launch_mode,
1272e0f85f46SJonathan Kim 	.set_address_watch = kgd_gfx_v9_set_address_watch,
1273e0f85f46SJonathan Kim 	.clear_address_watch = kgd_gfx_v9_clear_address_watch,
12747cee6a68SJonathan Kim 	.get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times,
12757cee6a68SJonathan Kim 	.build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info,
127643a4bc82SRamesh Errabolu 	.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
1277b53ef0dfSMukul Joshi 	.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
1278ee0a469cSJonathan Kim 	.hqd_get_pq_addr = kgd_gfx_v9_hqd_get_pq_addr,
1279ee0a469cSJonathan Kim 	.hqd_reset = kgd_gfx_v9_hqd_reset
12803e205a08SOak Zeng };
1281