xref: /linux/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c (revision cfda8617e22a8bf217a613d0b3ba3a38778443ba)
/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_0.h"
#include "gmc_v9_0.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

/* Because REG_GET_FIELD() is used, this function lives in the
 * ASIC-specific file.
 */
int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

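/* lock_srbm()/unlock_srbm() bracket per-queue register access: they take
 * srbm_mutex and select the given MEC/pipe/queue/VMID via soc15_grbm_select(),
 * then restore the default (0, 0, 0, 0) selection and drop the mutex.
 */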
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

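/* Bit position of this queue in the 32-bit mask written to
 * CP_PQ_WPTR_POLL_CNTL1: (pipe * queues-per-pipe + queue), wrapped modulo 32.
 */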
static uint32_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = (pipe_id * adev->gfx.mec.num_queue_per_pipe +
			    queue_id) & 31;

	return ((uint32_t)1) << bit;
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	unlock_srbm(kgd);
}

int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	/*
	 * We need to do this twice, once for GFX and once for MMHUB.
	 * For the ATC, add 16 to the VMID for MMHUB; the IH block uses
	 * different registers. The ATC_VMID0..15 registers are separate
	 * from ATC_VMID16..31.
	 */

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);

	/* Map vmid to pasid for the IH block as well */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(
				ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << (vmid + 16))))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << (vmid + 16));

	/* Map vmid to pasid for the IH block (MM LUT) as well */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
	       pasid_mapping);
	return 0;
}

/* TODO: The RING0 form of the field is obsolete (it seems to date back
 * to SI) but still works.
 */

int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

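/* Compute the register offset of a given SDMA RLC queue: start from the
 * engine's register base (expressed relative to its own RLC0_RB_CNTL) and
 * add queue_id times the per-queue register stride.
 */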
static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
	};
	uint32_t retval = sdma_engine_reg_base[engine_id]
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, retval);

	return retval;
}

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

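/* Load a compute queue: program the HQD registers from the MQD image,
 * optionally arm a one-shot WPTR poll of the user-mode write pointer,
 * and finally set CP_HQD_ACTIVE to start the queue.
 */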
int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uintptr_t)wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uintptr_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

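/* Snapshot the HQD register range into a freshly allocated (offset, value)
 * array; the caller is expected to free *dump.
 */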
int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

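/* Load an SDMA queue: disable the ring buffer, wait for the RLC queue to
 * drain, program doorbell, RPTR/WPTR and ring base from the MQD, then
 * re-enable the ring buffer. Falls back to WPTR==RPTR if the user-mode
 * write pointer cannot be read.
 */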
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

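/* A queue is considered occupied if the HQD is active and its PQ base
 * address matches the queue address we are checking for.
 */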
bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

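/* Preempt or reset the queue: issue the requested dequeue type and poll
 * CP_HQD_ACTIVE until the queue goes inactive or the timeout expires.
 */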
int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v9_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

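/* Stop an SDMA queue: disable the ring buffer, wait for the RLC queue to
 * go idle, tear down the doorbell, and save the final RPTR back into the
 * MQD so a later load can restore it.
 */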
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

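/* Flush TLBs by PASID using the KIQ: emit an INVALIDATE_TLBS packet plus a
 * polling fence on the KIQ ring and wait for the fence to signal.
 */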
static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid,
			uint32_t flush_type)
{
	signed long r;
	uint32_t seq;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

	spin_lock(&adev->gfx.kiq.ring_lock);
	amdgpu_ring_alloc(ring, 12); /* fence + invalidate_tlbs package */
	amdgpu_ring_write(ring, PACKET3(PACKET3_INVALIDATE_TLBS, 0));
	amdgpu_ring_write(ring,
			PACKET3_INVALIDATE_TLBS_DST_SEL(1) |
			PACKET3_INVALIDATE_TLBS_ALL_HUB(1) |
			PACKET3_INVALIDATE_TLBS_PASID(pasid) |
			PACKET3_INVALIDATE_TLBS_FLUSH_TYPE(flush_type));
	amdgpu_fence_emit_polling(ring, &seq);
	amdgpu_ring_commit(ring);
	spin_unlock(&adev->gfx.kiq.ring_lock);

	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
	if (r < 1) {
		DRM_ERROR("wait for kiq fence error: %ld.\n", r);
		return -ETIME;
	}

	return 0;
}

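/* Flush TLBs for a PASID: prefer the KIQ path when the KIQ scheduler is
 * ready; otherwise look up the VMID currently mapped to the PASID and
 * flush every VM hub for that VMID directly.
 */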
int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid, i;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	uint32_t flush_type = 0;

	if (adev->in_gpu_reset)
		return -EIO;
	if (adev->gmc.xgmi.num_physical_nodes &&
		adev->asic_type == CHIP_VEGA20)
		flush_type = 2;

	if (ring->sched.ready)
		return invalidate_tlbs_with_kiq(adev, pasid, flush_type);

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;

		ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			for (i = 0; i < adev->num_vmhubs; i++)
				amdgpu_gmc_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			break;
		}
	}

	return 0;
}

int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int i;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return 0;
	}

	/* Use legacy mode tlb invalidation.
	 *
	 * Currently on Raven the code below is broken for anything but
	 * legacy mode due to a MMHUB power gating problem. A workaround
	 * is for MMHUB to wait until the condition PER_VMID_INVALIDATE_REQ
	 * == PER_VMID_INVALIDATE_ACK instead of simply waiting for the ack
	 * bit.
	 *
	 * TODO 1: agree on the right set of invalidation registers for
	 * KFD use. Use the last one for now. Invalidate both GC and
	 * MMHUB.
	 *
	 * TODO 2: support range-based invalidation, requires kfd2kgd
	 * interface change
	 */
	for (i = 0; i < adev->num_vmhubs; i++)
		amdgpu_gmc_flush_gpu_tlb(adev, vmid, i, 0);

	return 0;
}

int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

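/* Broadcast an SQ command to the selected waves: program GRBM_GFX_INDEX
 * from gfx_index_val, write SQ_CMD, then restore GRBM_GFX_INDEX to
 * broadcast mode for all SEs/SHs/instances.
 */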
int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	/* TODO: take advantage of per-process address space size. For
	 * now, all processes share the same address space size, like
	 * on GFX8 and older.
	 */
	if (adev->asic_type == CHIP_ARCTURUS) {
		/* Two MMHUBs */
		mmhub_v9_4_setup_vm_pt_regs(adev, 0, vmid, page_table_base);
		mmhub_v9_4_setup_vm_pt_regs(adev, 1, vmid, page_table_base);
	} else
		mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);

	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}

const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_gfx_v9_address_watch_disable,
	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
	.get_tile_config = kgd_gfx_v9_get_tile_config,
	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
	.invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs,
	.invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
};