xref: /linux/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c (revision f14aa5ea415b8add245e976bfab96a12986c6843)
1 /*
2  * Copyright 2019 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_ucode.h"
31 #include "amdgpu_trace.h"
32 
33 #include "gc/gc_10_3_0_offset.h"
34 #include "gc/gc_10_3_0_sh_mask.h"
35 #include "ivsrcid/sdma0/irqsrcs_sdma0_5_0.h"
36 #include "ivsrcid/sdma1/irqsrcs_sdma1_5_0.h"
37 #include "ivsrcid/sdma2/irqsrcs_sdma2_5_0.h"
38 #include "ivsrcid/sdma3/irqsrcs_sdma3_5_0.h"
39 
40 #include "soc15_common.h"
41 #include "soc15.h"
42 #include "navi10_sdma_pkt_open.h"
43 #include "nbio_v2_3.h"
44 #include "sdma_common.h"
45 #include "sdma_v5_2.h"
46 
47 MODULE_FIRMWARE("amdgpu/sienna_cichlid_sdma.bin");
48 MODULE_FIRMWARE("amdgpu/navy_flounder_sdma.bin");
49 MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_sdma.bin");
50 MODULE_FIRMWARE("amdgpu/beige_goby_sdma.bin");
51 
52 MODULE_FIRMWARE("amdgpu/vangogh_sdma.bin");
53 MODULE_FIRMWARE("amdgpu/yellow_carp_sdma.bin");
54 MODULE_FIRMWARE("amdgpu/sdma_5_2_6.bin");
55 MODULE_FIRMWARE("amdgpu/sdma_5_2_7.bin");
56 
57 #define SDMA1_REG_OFFSET 0x600
58 #define SDMA3_REG_OFFSET 0x400
59 #define SDMA0_HYP_DEC_REG_START 0x5880
60 #define SDMA0_HYP_DEC_REG_END 0x5893
61 #define SDMA1_HYP_DEC_REG_OFFSET 0x20
62 
63 static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev);
64 static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev);
65 static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev);
66 static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev);
67 
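/* Translate an SDMA instance-relative register offset into an absolute GC
 * register offset: HYP_DEC registers live in their own range with a fixed
 * per-instance stride, while all other registers are addressed from the GC
 * register segment for instances 0/1 or 2/3 plus an instance offset.
 */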
68 static u32 sdma_v5_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
69 {
70 	u32 base;
71 
72 	if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
73 	    internal_offset <= SDMA0_HYP_DEC_REG_END) {
74 		base = adev->reg_offset[GC_HWIP][0][1];
75 		if (instance != 0)
76 			internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
77 	} else {
78 		if (instance < 2) {
79 			base = adev->reg_offset[GC_HWIP][0][0];
80 			if (instance == 1)
81 				internal_offset += SDMA1_REG_OFFSET;
82 		} else {
83 			base = adev->reg_offset[GC_HWIP][0][2];
84 			if (instance == 3)
85 				internal_offset += SDMA3_REG_OFFSET;
86 		}
87 	}
88 
89 	return base + internal_offset;
90 }
91 
92 static unsigned sdma_v5_2_ring_init_cond_exec(struct amdgpu_ring *ring,
93 					      uint64_t addr)
94 {
95 	unsigned ret;
96 
97 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_COND_EXE));
98 	amdgpu_ring_write(ring, lower_32_bits(addr));
99 	amdgpu_ring_write(ring, upper_32_bits(addr));
100 	amdgpu_ring_write(ring, 1);
101 	/* this is the offset we need to patch later */
102 	ret = ring->wptr & ring->buf_mask;
103 	/* insert dummy here and patch it later */
104 	amdgpu_ring_write(ring, 0);
105 
106 	return ret;
107 }
108 
109 /**
110  * sdma_v5_2_ring_get_rptr - get the current read pointer
111  *
112  * @ring: amdgpu ring pointer
113  *
114  * Get the current rptr from the hardware (NAVI10+).
115  */
116 static uint64_t sdma_v5_2_ring_get_rptr(struct amdgpu_ring *ring)
117 {
118 	u64 *rptr;
119 
120 	/* XXX check if swapping is necessary on BE */
121 	rptr = (u64 *)ring->rptr_cpu_addr;
122 
123 	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
124 	return ((*rptr) >> 2);
125 }
126 
127 /**
128  * sdma_v5_2_ring_get_wptr - get the current write pointer
129  *
130  * @ring: amdgpu ring pointer
131  *
132  * Get the current wptr from the hardware (NAVI10+).
133  */
134 static uint64_t sdma_v5_2_ring_get_wptr(struct amdgpu_ring *ring)
135 {
136 	struct amdgpu_device *adev = ring->adev;
137 	u64 wptr;
138 
139 	if (ring->use_doorbell) {
140 		/* XXX check if swapping is necessary on BE */
141 		wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
142 		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
143 	} else {
144 		wptr = RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
145 		wptr = wptr << 32;
146 		wptr |= RREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
147 		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
148 	}
149 
150 	return wptr >> 2;
151 }
152 
153 /**
154  * sdma_v5_2_ring_set_wptr - commit the write pointer
155  *
156  * @ring: amdgpu ring pointer
157  *
158  * Write the wptr back to the hardware (NAVI10+).
159  */
160 static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
161 {
162 	struct amdgpu_device *adev = ring->adev;
163 
164 	DRM_DEBUG("Setting write pointer\n");
165 	if (ring->use_doorbell) {
166 		DRM_DEBUG("Using doorbell -- "
167 				"wptr_offs == 0x%08x "
168 				"lower_32_bits(ring->wptr << 2) == 0x%08x "
169 				"upper_32_bits(ring->wptr << 2) == 0x%08x\n",
170 				ring->wptr_offs,
171 				lower_32_bits(ring->wptr << 2),
172 				upper_32_bits(ring->wptr << 2));
173 		/* XXX check if swapping is necessary on BE */
174 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
175 			     ring->wptr << 2);
176 		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
177 				ring->doorbell_index, ring->wptr << 2);
178 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
179 	} else {
180 		DRM_DEBUG("Not using doorbell -- "
181 				"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
182 				"mmSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
183 				ring->me,
184 				lower_32_bits(ring->wptr << 2),
185 				ring->me,
186 				upper_32_bits(ring->wptr << 2));
187 		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
188 			lower_32_bits(ring->wptr << 2));
189 		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
190 			upper_32_bits(ring->wptr << 2));
191 	}
192 }
193 
194 static void sdma_v5_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
195 {
196 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
197 	int i;
198 
199 	for (i = 0; i < count; i++)
200 		if (sdma && sdma->burst_nop && (i == 0))
201 			amdgpu_ring_write(ring, ring->funcs->nop |
202 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
203 		else
204 			amdgpu_ring_write(ring, ring->funcs->nop);
205 }
206 
207 /**
208  * sdma_v5_2_ring_emit_ib - Schedule an IB on the DMA engine
209  *
210  * @ring: amdgpu ring pointer
211  * @job: job to retrieve vmid from
212  * @ib: IB object to schedule
213  * @flags: unused
214  *
215  * Schedule an IB in the DMA ring.
216  */
217 static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
218 				   struct amdgpu_job *job,
219 				   struct amdgpu_ib *ib,
220 				   uint32_t flags)
221 {
222 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
223 	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
224 
225 	/* An IB packet must end on an 8 DW boundary, i.e. the next dword
226 	 * must be on an 8-dword boundary. Our IB packet below is 6
227 	 * dwords long, so add x NOPs such that, in
228 	 * modular arithmetic,
229 	 * wptr + 6 + x = 8k, k >= 0, which in C is
230 	 * (wptr + 6 + x) % 8 == 0.
231 	 * The expression below is a solution for x.
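	 * For example, if lower_32_bits(ring->wptr) == 5, then
	 * x = (2 - 5) & 7 = 5 and 5 + 6 + 5 = 16, a multiple of 8.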
232 	 */
233 	sdma_v5_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
234 
235 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
236 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
237 	/* base must be 32 byte aligned */
238 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
239 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
240 	amdgpu_ring_write(ring, ib->length_dw);
241 	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
242 	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
243 }
244 
245 /**
246  * sdma_v5_2_ring_emit_mem_sync - flush the IB by graphics cache rinse
247  *
248  * @ring: amdgpu ring pointer
249  *
250  * flush the IB by graphics cache rinse.
251  */
252 static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
253 {
254 	uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
255 			    SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV |
256 			    SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
257 			    SDMA_GCR_GLI_INV(1);
258 
259 	/* flush the entire L0/L1/L2 cache; this could be optimized based on performance requirements */
260 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
261 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
262 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
263 			SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
264 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
265 			SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
266 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
267 			SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
268 }
269 
270 /**
271  * sdma_v5_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
272  *
273  * @ring: amdgpu ring pointer
274  *
275  * Emit an hdp flush packet on the requested DMA ring.
276  */
277 static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
278 {
279 	struct amdgpu_device *adev = ring->adev;
280 	u32 ref_and_mask = 0;
281 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
282 
283 	if (ring->me > 1) {
284 		amdgpu_asic_flush_hdp(adev, ring);
285 	} else {
286 		ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
287 
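		/* instances 0 and 1 flush HDP with a POLL_REGMEM packet against
		 * the NBIO hdp_flush req/done registers, using this instance's
		 * bit as both reference and mask
		 */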
288 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
289 				  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
290 				  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
291 		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
292 		amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
293 		amdgpu_ring_write(ring, ref_and_mask); /* reference */
294 		amdgpu_ring_write(ring, ref_and_mask); /* mask */
295 		amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
296 				  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
297 	}
298 }
299 
300 /**
301  * sdma_v5_2_ring_emit_fence - emit a fence on the DMA ring
302  *
303  * @ring: amdgpu ring pointer
304  * @addr: address to write the fence seq number to
305  * @seq: sequence number
306  * @flags: fence related flags
307  *
308  * Add a DMA fence packet to the ring to write
309  * the fence seq number and DMA trap packet to generate
310  * an interrupt if needed.
311  */
312 static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
313 				      unsigned flags)
314 {
315 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
316 	/* write the fence */
317 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
318 			  SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
319 	/* zero in first two bits */
320 	BUG_ON(addr & 0x3);
321 	amdgpu_ring_write(ring, lower_32_bits(addr));
322 	amdgpu_ring_write(ring, upper_32_bits(addr));
323 	amdgpu_ring_write(ring, lower_32_bits(seq));
324 
325 	/* optionally write high bits as well */
326 	if (write64bit) {
327 		addr += 4;
328 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE) |
329 				  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
330 		/* zero in first two bits */
331 		BUG_ON(addr & 0x3);
332 		amdgpu_ring_write(ring, lower_32_bits(addr));
333 		amdgpu_ring_write(ring, upper_32_bits(addr));
334 		amdgpu_ring_write(ring, upper_32_bits(seq));
335 	}
336 
337 	if ((flags & AMDGPU_FENCE_FLAG_INT)) {
338 		uint32_t ctx = ring->is_mes_queue ?
339 			(ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
340 		/* generate an interrupt */
341 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
342 		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
343 	}
344 }
345 
346 
347 /**
348  * sdma_v5_2_gfx_stop - stop the gfx async dma engines
349  *
350  * @adev: amdgpu_device pointer
351  *
352  * Stop the gfx async dma ring buffers.
353  */
354 static void sdma_v5_2_gfx_stop(struct amdgpu_device *adev)
355 {
356 	u32 rb_cntl, ib_cntl;
357 	int i;
358 
359 	for (i = 0; i < adev->sdma.num_instances; i++) {
360 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
361 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
362 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
363 		ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
364 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
365 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
366 	}
367 }
368 
369 /**
370  * sdma_v5_2_rlc_stop - stop the compute async dma engines
371  *
372  * @adev: amdgpu_device pointer
373  *
374  * Stop the compute async dma queues.
375  */
376 static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
377 {
378 	/* XXX todo */
379 }
380 
381 /**
382  * sdma_v5_2_ctx_switch_enable - enable or disable the async dma engines context switch
383  *
384  * @adev: amdgpu_device pointer
385  * @enable: enable/disable the DMA MEs context switch.
386  *
387  * Enable or disable automatic context switching for the async dma engines.
388  */
389 static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
390 {
391 	u32 f32_cntl, phase_quantum = 0;
392 	int i;
393 
394 	if (amdgpu_sdma_phase_quantum) {
395 		unsigned value = amdgpu_sdma_phase_quantum;
396 		unsigned unit = 0;
397 
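		/* encode the quantum as (value, unit): halve value and bump the
		 * unit exponent until value fits the PHASE*_QUANTUM VALUE field,
		 * clamping both to the field maxima if it still does not fit
		 */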
398 		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
399 				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
400 			value = (value + 1) >> 1;
401 			unit++;
402 		}
403 		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
404 			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
405 			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
406 				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
407 			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
408 				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
409 			WARN_ONCE(1,
410 			"clamping sdma_phase_quantum to %uK clock cycles\n",
411 				  value << unit);
412 		}
413 		phase_quantum =
414 			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
415 			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
416 	}
417 
418 	for (i = 0; i < adev->sdma.num_instances; i++) {
419 		if (enable && amdgpu_sdma_phase_quantum) {
420 			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
421 			       phase_quantum);
422 			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
423 			       phase_quantum);
424 			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
425 			       phase_quantum);
426 		}
427 
428 		if (!amdgpu_sriov_vf(adev)) {
429 			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
430 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
431 					AUTO_CTXSW_ENABLE, enable ? 1 : 0);
432 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
433 		}
434 	}
435 
436 }
437 
438 /**
439  * sdma_v5_2_enable - halt or unhalt the async dma engines
440  *
441  * @adev: amdgpu_device pointer
442  * @enable: enable/disable the DMA MEs.
443  *
444  * Halt or unhalt the async dma engines.
445  */
446 static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable)
447 {
448 	u32 f32_cntl;
449 	int i;
450 
451 	if (!enable) {
452 		sdma_v5_2_gfx_stop(adev);
453 		sdma_v5_2_rlc_stop(adev);
454 	}
455 
456 	if (!amdgpu_sriov_vf(adev)) {
457 		for (i = 0; i < adev->sdma.num_instances; i++) {
458 			f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
459 			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
460 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl);
461 		}
462 	}
463 }
464 
465 /**
466  * sdma_v5_2_gfx_resume - setup and start the async dma engines
467  *
468  * @adev: amdgpu_device pointer
469  *
470  * Set up the gfx DMA ring buffers and enable them.
471  * Returns 0 for success, error for failure.
472  */
473 static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev)
474 {
475 	struct amdgpu_ring *ring;
476 	u32 rb_cntl, ib_cntl;
477 	u32 rb_bufsz;
478 	u32 doorbell;
479 	u32 doorbell_offset;
480 	u32 temp;
481 	u32 wptr_poll_cntl;
482 	u64 wptr_gpu_addr;
483 	int i, r;
484 
485 	for (i = 0; i < adev->sdma.num_instances; i++) {
486 		ring = &adev->sdma.instance[i].ring;
487 
488 		if (!amdgpu_sriov_vf(adev))
489 			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
490 
491 		/* Set ring buffer size in dwords */
492 		rb_bufsz = order_base_2(ring->ring_size / 4);
493 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
494 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
495 #ifdef __BIG_ENDIAN
496 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
497 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
498 					RPTR_WRITEBACK_SWAP_ENABLE, 1);
499 #endif
500 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
501 
502 		/* Initialize the ring buffer's read and write pointers */
503 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
504 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
505 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
506 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
507 
508 		/* setup the wptr shadow polling */
509 		wptr_gpu_addr = ring->wptr_gpu_addr;
510 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
511 		       lower_32_bits(wptr_gpu_addr));
512 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
513 		       upper_32_bits(wptr_gpu_addr));
514 		wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i,
515 							 mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
516 		wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
517 					       SDMA0_GFX_RB_WPTR_POLL_CNTL,
518 					       F32_POLL_ENABLE, 1);
519 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
520 		       wptr_poll_cntl);
521 
522 		/* set the wb address whether it's enabled or not */
523 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
524 		       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
525 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
526 		       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
527 
528 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
529 
530 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
531 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
532 
533 		ring->wptr = 0;
534 
535 		/* before programming wptr to a smaller value, minor_ptr_update must be set first */
536 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
537 
538 		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
539 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr << 2));
540 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr << 2));
541 		}
542 
543 		doorbell = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
544 		doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
545 
546 		if (ring->use_doorbell) {
547 			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
548 			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_GFX_DOORBELL_OFFSET,
549 					OFFSET, ring->doorbell_index);
550 		} else {
551 			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
552 		}
553 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
554 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
555 
556 		adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
557 						      ring->doorbell_index,
558 						      adev->doorbell_index.sdma_doorbell_range);
559 
560 		if (amdgpu_sriov_vf(adev))
561 			sdma_v5_2_ring_set_wptr(ring);
562 
563 		/* set minor_ptr_update to 0 after wptr programmed */
564 
565 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
566 
567 		/* SRIOV VF has no control over any of the registers below */
568 		if (!amdgpu_sriov_vf(adev)) {
569 			/* set utc l1 enable flag always to 1 */
570 			temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL));
571 			temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
572 
573 			/* enable MCBP */
574 			temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
575 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
576 
577 			/* Set up RESP_MODE to non-copy addresses */
578 			temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
579 			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
580 			temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
581 			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
582 
583 			/* program default cache read and write policy */
584 			temp = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
585 			/* clear the read policy and write policy bits */
586 			temp &= 0xFF0FFF;
587 			temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
588 				 (CACHE_WRITE_POLICY_L2__DEFAULT << 14) |
589 				 SDMA0_UTCL1_PAGE__LLC_NOALLOC_MASK);
590 			WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
591 
592 			/* unhalt engine */
593 			temp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
594 			temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
595 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), temp);
596 		}
597 
598 		/* enable DMA RB */
599 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
600 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
601 
602 		ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
603 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
604 #ifdef __BIG_ENDIAN
605 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
606 #endif
607 		/* enable DMA IBs */
608 		WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
609 
610 		if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
611 			sdma_v5_2_ctx_switch_enable(adev, true);
612 			sdma_v5_2_enable(adev, true);
613 		}
614 
615 		r = amdgpu_ring_test_helper(ring);
616 		if (r)
617 			return r;
618 	}
619 
620 	return 0;
621 }
622 
623 /**
624  * sdma_v5_2_rlc_resume - setup and start the async dma engines
625  *
626  * @adev: amdgpu_device pointer
627  *
628  * Set up the compute DMA queues and enable them.
629  * Returns 0 for success, error for failure.
630  */
631 static int sdma_v5_2_rlc_resume(struct amdgpu_device *adev)
632 {
633 	return 0;
634 }
635 
636 /**
637  * sdma_v5_2_load_microcode - load the sDMA ME ucode
638  *
639  * @adev: amdgpu_device pointer
640  *
641  * Loads the sDMA0/1/2/3 ucode.
642  * Returns 0 for success, -EINVAL if the ucode is not available.
643  */
644 static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
645 {
646 	const struct sdma_firmware_header_v1_0 *hdr;
647 	const __le32 *fw_data;
648 	u32 fw_size;
649 	int i, j;
650 
651 	/* halt the MEs */
652 	sdma_v5_2_enable(adev, false);
653 
654 	for (i = 0; i < adev->sdma.num_instances; i++) {
655 		if (!adev->sdma.instance[i].fw)
656 			return -EINVAL;
657 
658 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
659 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
660 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
661 
662 		fw_data = (const __le32 *)
663 			(adev->sdma.instance[i].fw->data +
664 				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
665 
666 		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), 0);
667 
668 		for (j = 0; j < fw_size; j++) {
669 			if (amdgpu_emu_mode == 1 && j % 500 == 0)
670 				msleep(1);
671 			WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_DATA), le32_to_cpup(fw_data++));
672 		}
673 
674 		WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_UCODE_ADDR), adev->sdma.instance[i].fw_version);
675 	}
676 
677 	return 0;
678 }
679 
680 static int sdma_v5_2_soft_reset(void *handle)
681 {
682 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
683 	u32 grbm_soft_reset;
684 	u32 tmp;
685 	int i;
686 
687 	for (i = 0; i < adev->sdma.num_instances; i++) {
688 		grbm_soft_reset = REG_SET_FIELD(0,
689 						GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
690 						1);
691 		grbm_soft_reset <<= i;
692 
693 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
694 		tmp |= grbm_soft_reset;
695 		DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
696 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
697 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
698 
699 		udelay(50);
700 
701 		tmp &= ~grbm_soft_reset;
702 		WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
703 		tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
704 
705 		udelay(50);
706 	}
707 
708 	return 0;
709 }
710 
711 /**
712  * sdma_v5_2_start - setup and start the async dma engines
713  *
714  * @adev: amdgpu_device pointer
715  *
716  * Set up the DMA engines and enable them.
717  * Returns 0 for success, error for failure.
718  */
719 static int sdma_v5_2_start(struct amdgpu_device *adev)
720 {
721 	int r = 0;
722 
723 	if (amdgpu_sriov_vf(adev)) {
724 		sdma_v5_2_ctx_switch_enable(adev, false);
725 		sdma_v5_2_enable(adev, false);
726 
727 		/* set RB registers */
728 		r = sdma_v5_2_gfx_resume(adev);
729 		return r;
730 	}
731 
732 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
733 		r = sdma_v5_2_load_microcode(adev);
734 		if (r)
735 			return r;
736 
737 		/* The value of mmSDMA0_F32_CNTL is invalid the moment after loading fw */
738 		if (amdgpu_emu_mode == 1)
739 			msleep(1000);
740 	}
741 
742 	sdma_v5_2_soft_reset(adev);
743 	/* unhalt the MEs */
744 	sdma_v5_2_enable(adev, true);
745 	/* enable sdma ring preemption */
746 	sdma_v5_2_ctx_switch_enable(adev, true);
747 
748 	/* start the gfx rings and rlc compute queues */
749 	r = sdma_v5_2_gfx_resume(adev);
750 	if (r)
751 		return r;
752 	r = sdma_v5_2_rlc_resume(adev);
753 
754 	return r;
755 }
756 
757 static int sdma_v5_2_mqd_init(struct amdgpu_device *adev, void *mqd,
758 			      struct amdgpu_mqd_prop *prop)
759 {
760 	struct v10_sdma_mqd *m = mqd;
761 	uint64_t wb_gpu_addr;
762 
763 	m->sdmax_rlcx_rb_cntl =
764 		order_base_2(prop->queue_size / 4) << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
765 		1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
766 		6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
767 		1 << SDMA0_RLC0_RB_CNTL__RB_PRIV__SHIFT;
768 
769 	m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
770 	m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
771 
772 	m->sdmax_rlcx_rb_wptr_poll_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
773 						  mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
774 
775 	wb_gpu_addr = prop->wptr_gpu_addr;
776 	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
777 	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
778 
779 	wb_gpu_addr = prop->rptr_gpu_addr;
780 	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
781 	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
782 
783 	m->sdmax_rlcx_ib_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, 0,
784 							mmSDMA0_GFX_IB_CNTL));
785 
786 	m->sdmax_rlcx_doorbell_offset =
787 		prop->doorbell_index << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;
788 
789 	m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_RLC0_DOORBELL, ENABLE, 1);
790 
791 	return 0;
792 }
793 
794 static void sdma_v5_2_set_mqd_funcs(struct amdgpu_device *adev)
795 {
796 	adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v10_sdma_mqd);
797 	adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v5_2_mqd_init;
798 }
799 
800 /**
801  * sdma_v5_2_ring_test_ring - simple async dma engine test
802  *
803  * @ring: amdgpu_ring structure holding ring information
804  *
805  * Test the DMA engine by using it to write a value
806  * to memory.
807  * Returns 0 for success, error for failure.
808  */
809 static int sdma_v5_2_ring_test_ring(struct amdgpu_ring *ring)
810 {
811 	struct amdgpu_device *adev = ring->adev;
812 	unsigned i;
813 	unsigned index;
814 	int r;
815 	u32 tmp;
816 	u64 gpu_addr;
817 	volatile uint32_t *cpu_ptr = NULL;
818 
819 	tmp = 0xCAFEDEAD;
820 
821 	if (ring->is_mes_queue) {
822 		uint32_t offset = 0;
823 		offset = amdgpu_mes_ctx_get_offs(ring,
824 					 AMDGPU_MES_CTX_PADDING_OFFS);
825 		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
826 		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
827 		*cpu_ptr = tmp;
828 	} else {
829 		r = amdgpu_device_wb_get(adev, &index);
830 		if (r) {
831 			dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
832 			return r;
833 		}
834 
835 		gpu_addr = adev->wb.gpu_addr + (index * 4);
836 		adev->wb.wb[index] = cpu_to_le32(tmp);
837 	}
838 
839 	r = amdgpu_ring_alloc(ring, 20);
840 	if (r) {
841 		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
842 		if (!ring->is_mes_queue)
843 			amdgpu_device_wb_free(adev, index);
844 		return r;
845 	}
846 
847 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
848 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
849 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
850 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
851 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
852 	amdgpu_ring_write(ring, 0xDEADBEEF);
853 	amdgpu_ring_commit(ring);
854 
855 	for (i = 0; i < adev->usec_timeout; i++) {
856 		if (ring->is_mes_queue)
857 			tmp = le32_to_cpu(*cpu_ptr);
858 		else
859 			tmp = le32_to_cpu(adev->wb.wb[index]);
860 		if (tmp == 0xDEADBEEF)
861 			break;
862 		if (amdgpu_emu_mode == 1)
863 			msleep(1);
864 		else
865 			udelay(1);
866 	}
867 
868 	if (i >= adev->usec_timeout)
869 		r = -ETIMEDOUT;
870 
871 	if (!ring->is_mes_queue)
872 		amdgpu_device_wb_free(adev, index);
873 
874 	return r;
875 }
876 
877 /**
878  * sdma_v5_2_ring_test_ib - test an IB on the DMA engine
879  *
880  * @ring: amdgpu_ring structure holding ring information
881  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
882  *
883  * Test a simple IB in the DMA ring.
884  * Returns 0 on success, error on failure.
885  */
886 static int sdma_v5_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
887 {
888 	struct amdgpu_device *adev = ring->adev;
889 	struct amdgpu_ib ib;
890 	struct dma_fence *f = NULL;
891 	unsigned index;
892 	long r;
893 	u32 tmp = 0;
894 	u64 gpu_addr;
895 	volatile uint32_t *cpu_ptr = NULL;
896 
897 	tmp = 0xCAFEDEAD;
898 	memset(&ib, 0, sizeof(ib));
899 
900 	if (ring->is_mes_queue) {
901 		uint32_t offset = 0;
902 		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
903 		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
904 		ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
905 
906 		offset = amdgpu_mes_ctx_get_offs(ring,
907 					 AMDGPU_MES_CTX_PADDING_OFFS);
908 		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
909 		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
910 		*cpu_ptr = tmp;
911 	} else {
912 		r = amdgpu_device_wb_get(adev, &index);
913 		if (r) {
914 			dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
915 			return r;
916 		}
917 
918 		gpu_addr = adev->wb.gpu_addr + (index * 4);
919 		adev->wb.wb[index] = cpu_to_le32(tmp);
920 
921 		r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
922 		if (r) {
923 			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
924 			goto err0;
925 		}
926 	}
927 
928 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
929 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
930 	ib.ptr[1] = lower_32_bits(gpu_addr);
931 	ib.ptr[2] = upper_32_bits(gpu_addr);
932 	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
933 	ib.ptr[4] = 0xDEADBEEF;
934 	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
935 	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
936 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
937 	ib.length_dw = 8;
938 
939 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
940 	if (r)
941 		goto err1;
942 
943 	r = dma_fence_wait_timeout(f, false, timeout);
944 	if (r == 0) {
945 		DRM_ERROR("amdgpu: IB test timed out\n");
946 		r = -ETIMEDOUT;
947 		goto err1;
948 	} else if (r < 0) {
949 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
950 		goto err1;
951 	}
952 
953 	if (ring->is_mes_queue)
954 		tmp = le32_to_cpu(*cpu_ptr);
955 	else
956 		tmp = le32_to_cpu(adev->wb.wb[index]);
957 
958 	if (tmp == 0xDEADBEEF)
959 		r = 0;
960 	else
961 		r = -EINVAL;
962 
963 err1:
964 	amdgpu_ib_free(adev, &ib, NULL);
965 	dma_fence_put(f);
966 err0:
967 	if (!ring->is_mes_queue)
968 		amdgpu_device_wb_free(adev, index);
969 	return r;
970 }
971 
972 
973 /**
974  * sdma_v5_2_vm_copy_pte - update PTEs by copying them from the GART
975  *
976  * @ib: indirect buffer to fill with commands
977  * @pe: addr of the page entry
978  * @src: src addr to copy from
979  * @count: number of page entries to update
980  *
981  * Update PTEs by copying them from the GART using sDMA.
982  */
983 static void sdma_v5_2_vm_copy_pte(struct amdgpu_ib *ib,
984 				  uint64_t pe, uint64_t src,
985 				  unsigned count)
986 {
987 	unsigned bytes = count * 8;
988 
989 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
990 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
991 	ib->ptr[ib->length_dw++] = bytes - 1;
992 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
993 	ib->ptr[ib->length_dw++] = lower_32_bits(src);
994 	ib->ptr[ib->length_dw++] = upper_32_bits(src);
995 	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
996 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
997 
998 }
999 
1000 /**
1001  * sdma_v5_2_vm_write_pte - update PTEs by writing them manually
1002  *
1003  * @ib: indirect buffer to fill with commands
1004  * @pe: addr of the page entry
1005  * @value: dst addr to write into pe
1006  * @count: number of page entries to update
1007  * @incr: increase next addr by incr bytes
1008  *
1009  * Update PTEs by writing them manually using sDMA.
1010  */
1011 static void sdma_v5_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1012 				   uint64_t value, unsigned count,
1013 				   uint32_t incr)
1014 {
1015 	unsigned ndw = count * 2;
1016 
1017 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1018 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1019 	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1020 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1021 	ib->ptr[ib->length_dw++] = ndw - 1;
1022 	for (; ndw > 0; ndw -= 2) {
1023 		ib->ptr[ib->length_dw++] = lower_32_bits(value);
1024 		ib->ptr[ib->length_dw++] = upper_32_bits(value);
1025 		value += incr;
1026 	}
1027 }
1028 
1029 /**
1030  * sdma_v5_2_vm_set_pte_pde - update the page tables using sDMA
1031  *
1032  * @ib: indirect buffer to fill with commands
1033  * @pe: addr of the page entry
1034  * @addr: dst addr to write into pe
1035  * @count: number of page entries to update
1036  * @incr: increase next addr by incr bytes
1037  * @flags: access flags
1038  *
1039  * Update the page tables using sDMA.
1040  */
1041 static void sdma_v5_2_vm_set_pte_pde(struct amdgpu_ib *ib,
1042 				     uint64_t pe,
1043 				     uint64_t addr, unsigned count,
1044 				     uint32_t incr, uint64_t flags)
1045 {
1046 	/* for physically contiguous pages (vram) */
1047 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1048 	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1049 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1050 	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1051 	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1052 	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1053 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1054 	ib->ptr[ib->length_dw++] = incr; /* increment size */
1055 	ib->ptr[ib->length_dw++] = 0;
1056 	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1057 }
1058 
1059 /**
1060  * sdma_v5_2_ring_pad_ib - pad the IB
1061  *
1062  * @ib: indirect buffer to fill with padding
1063  * @ring: amdgpu_ring structure holding ring information
1064  *
1065  * Pad the IB with NOPs so its length is a multiple of 8 dwords.
1066  */
1067 static void sdma_v5_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1068 {
1069 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1070 	u32 pad_count;
1071 	int i;
1072 
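	/* e.g. length_dw == 13 gives pad_count == 3, padding the IB to 16 dwords */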
1073 	pad_count = (-ib->length_dw) & 0x7;
1074 	for (i = 0; i < pad_count; i++)
1075 		if (sdma && sdma->burst_nop && (i == 0))
1076 			ib->ptr[ib->length_dw++] =
1077 				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1078 				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1079 		else
1080 			ib->ptr[ib->length_dw++] =
1081 				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1082 }
1083 
1084 
1085 /**
1086  * sdma_v5_2_ring_emit_pipeline_sync - sync the pipeline
1087  *
1088  * @ring: amdgpu_ring pointer
1089  *
1090  * Make sure all previous operations are completed.
1091  */
1092 static void sdma_v5_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1093 {
1094 	uint32_t seq = ring->fence_drv.sync_seq;
1095 	uint64_t addr = ring->fence_drv.gpu_addr;
1096 
1097 	/* wait for idle */
1098 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1099 			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1100 			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1101 			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1102 	amdgpu_ring_write(ring, addr & 0xfffffffc);
1103 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1104 	amdgpu_ring_write(ring, seq); /* reference */
1105 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
1106 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1107 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1108 }
1109 
1110 
1111 /**
1112  * sdma_v5_2_ring_emit_vm_flush - vm flush using sDMA
1113  *
1114  * @ring: amdgpu_ring pointer
1115  * @vmid: vmid number to use
1116  * @pd_addr: page directory address
1117  *
1118  * Update the page table base and flush the VM TLB
1119  * using sDMA.
1120  */
1121 static void sdma_v5_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
1122 					 unsigned vmid, uint64_t pd_addr)
1123 {
1124 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1125 }
1126 
1127 static void sdma_v5_2_ring_emit_wreg(struct amdgpu_ring *ring,
1128 				     uint32_t reg, uint32_t val)
1129 {
1130 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1131 			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1132 	amdgpu_ring_write(ring, reg);
1133 	amdgpu_ring_write(ring, val);
1134 }
1135 
1136 static void sdma_v5_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1137 					 uint32_t val, uint32_t mask)
1138 {
1139 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1140 			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1141 			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1142 	amdgpu_ring_write(ring, reg << 2);
1143 	amdgpu_ring_write(ring, 0);
1144 	amdgpu_ring_write(ring, val); /* reference */
1145 	amdgpu_ring_write(ring, mask); /* mask */
1146 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1147 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1148 }
1149 
1150 static void sdma_v5_2_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1151 						   uint32_t reg0, uint32_t reg1,
1152 						   uint32_t ref, uint32_t mask)
1153 {
1154 	amdgpu_ring_emit_wreg(ring, reg0, ref);
1155 	/* wait for a cycle to reset vm_inv_eng*_ack */
1156 	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1157 	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1158 }
1159 
1160 static int sdma_v5_2_early_init(void *handle)
1161 {
1162 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1163 	int r;
1164 
1165 	r = amdgpu_sdma_init_microcode(adev, 0, true);
1166 	if (r)
1167 		return r;
1168 
1169 	sdma_v5_2_set_ring_funcs(adev);
1170 	sdma_v5_2_set_buffer_funcs(adev);
1171 	sdma_v5_2_set_vm_pte_funcs(adev);
1172 	sdma_v5_2_set_irq_funcs(adev);
1173 	sdma_v5_2_set_mqd_funcs(adev);
1174 
1175 	return 0;
1176 }
1177 
1178 static unsigned sdma_v5_2_seq_to_irq_id(int seq_num)
1179 {
1180 	switch (seq_num) {
1181 	case 0:
1182 		return SOC15_IH_CLIENTID_SDMA0;
1183 	case 1:
1184 		return SOC15_IH_CLIENTID_SDMA1;
1185 	case 2:
1186 		return SOC15_IH_CLIENTID_SDMA2;
1187 	case 3:
1188 		return SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid;
1189 	default:
1190 		break;
1191 	}
1192 	return -EINVAL;
1193 }
1194 
1195 static unsigned sdma_v5_2_seq_to_trap_id(int seq_num)
1196 {
1197 	switch (seq_num) {
1198 	case 0:
1199 		return SDMA0_5_0__SRCID__SDMA_TRAP;
1200 	case 1:
1201 		return SDMA1_5_0__SRCID__SDMA_TRAP;
1202 	case 2:
1203 		return SDMA2_5_0__SRCID__SDMA_TRAP;
1204 	case 3:
1205 		return SDMA3_5_0__SRCID__SDMA_TRAP;
1206 	default:
1207 		break;
1208 	}
1209 	return -EINVAL;
1210 }
1211 
1212 static int sdma_v5_2_sw_init(void *handle)
1213 {
1214 	struct amdgpu_ring *ring;
1215 	int r, i;
1216 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1217 
1218 	/* SDMA trap event */
1219 	for (i = 0; i < adev->sdma.num_instances; i++) {
1220 		r = amdgpu_irq_add_id(adev, sdma_v5_2_seq_to_irq_id(i),
1221 				      sdma_v5_2_seq_to_trap_id(i),
1222 				      &adev->sdma.trap_irq);
1223 		if (r)
1224 			return r;
1225 	}
1226 
1227 	for (i = 0; i < adev->sdma.num_instances; i++) {
1228 		ring = &adev->sdma.instance[i].ring;
1229 		ring->ring_obj = NULL;
1230 		ring->use_doorbell = true;
1231 		ring->me = i;
1232 
1233 		DRM_INFO("use_doorbell being set to: [%s]\n",
1234 				ring->use_doorbell ? "true" : "false");
1235 
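		/* doorbell_index.sdma_engine[] counts 64-bit doorbell slots;
		 * shift left by one to get the 32-bit dword offset used here
		 */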
1236 		ring->doorbell_index =
1237 			(adev->doorbell_index.sdma_engine[i] << 1); //get DWORD offset
1238 
1239 		ring->vm_hub = AMDGPU_GFXHUB(0);
1240 		sprintf(ring->name, "sdma%d", i);
1241 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1242 				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1243 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
1244 		if (r)
1245 			return r;
1246 	}
1247 
1248 	return r;
1249 }
1250 
1251 static int sdma_v5_2_sw_fini(void *handle)
1252 {
1253 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1254 	int i;
1255 
1256 	for (i = 0; i < adev->sdma.num_instances; i++)
1257 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1258 
1259 	amdgpu_sdma_destroy_inst_ctx(adev, true);
1260 
1261 	return 0;
1262 }
1263 
1264 static int sdma_v5_2_hw_init(void *handle)
1265 {
1266 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1267 
1268 	return sdma_v5_2_start(adev);
1269 }
1270 
1271 static int sdma_v5_2_hw_fini(void *handle)
1272 {
1273 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1274 
1275 	if (amdgpu_sriov_vf(adev))
1276 		return 0;
1277 
1278 	sdma_v5_2_ctx_switch_enable(adev, false);
1279 	sdma_v5_2_enable(adev, false);
1280 
1281 	return 0;
1282 }
1283 
1284 static int sdma_v5_2_suspend(void *handle)
1285 {
1286 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1287 
1288 	return sdma_v5_2_hw_fini(adev);
1289 }
1290 
1291 static int sdma_v5_2_resume(void *handle)
1292 {
1293 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1294 
1295 	return sdma_v5_2_hw_init(adev);
1296 }
1297 
1298 static bool sdma_v5_2_is_idle(void *handle)
1299 {
1300 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1301 	u32 i;
1302 
1303 	for (i = 0; i < adev->sdma.num_instances; i++) {
1304 		u32 tmp = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_STATUS_REG));
1305 
1306 		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1307 			return false;
1308 	}
1309 
1310 	return true;
1311 }
1312 
1313 static int sdma_v5_2_wait_for_idle(void *handle)
1314 {
1315 	unsigned i;
1316 	u32 sdma0, sdma1, sdma2, sdma3;
1317 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1318 
1319 	for (i = 0; i < adev->usec_timeout; i++) {
1320 		sdma0 = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_STATUS_REG));
1321 		sdma1 = RREG32(sdma_v5_2_get_reg_offset(adev, 1, mmSDMA0_STATUS_REG));
1322 		sdma2 = RREG32(sdma_v5_2_get_reg_offset(adev, 2, mmSDMA0_STATUS_REG));
1323 		sdma3 = RREG32(sdma_v5_2_get_reg_offset(adev, 3, mmSDMA0_STATUS_REG));
1324 
1325 		if (sdma0 & sdma1 & sdma2 & sdma3 & SDMA0_STATUS_REG__IDLE_MASK)
1326 			return 0;
1327 		udelay(1);
1328 	}
1329 	return -ETIMEDOUT;
1330 }
1331 
1332 static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
1333 {
1334 	int i, r = 0;
1335 	struct amdgpu_device *adev = ring->adev;
1336 	u32 index = 0;
1337 	u64 sdma_gfx_preempt;
1338 
1339 	amdgpu_sdma_get_index_from_ring(ring, &index);
1340 	sdma_gfx_preempt =
1341 		sdma_v5_2_get_reg_offset(adev, index, mmSDMA0_GFX_PREEMPT);
1342 
1343 	/* assert preemption condition */
1344 	amdgpu_ring_set_preempt_cond_exec(ring, false);
1345 
1346 	/* emit the trailing fence */
1347 	ring->trail_seq += 1;
1348 	amdgpu_ring_alloc(ring, 10);
1349 	sdma_v5_2_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1350 				  ring->trail_seq, 0);
1351 	amdgpu_ring_commit(ring);
1352 
1353 	/* assert IB preemption */
1354 	WREG32(sdma_gfx_preempt, 1);
1355 
1356 	/* poll the trailing fence */
1357 	for (i = 0; i < adev->usec_timeout; i++) {
1358 		if (ring->trail_seq ==
1359 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1360 			break;
1361 		udelay(1);
1362 	}
1363 
1364 	if (i >= adev->usec_timeout) {
1365 		r = -EINVAL;
1366 		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1367 	}
1368 
1369 	/* deassert IB preemption */
1370 	WREG32(sdma_gfx_preempt, 0);
1371 
1372 	/* deassert the preemption condition */
1373 	amdgpu_ring_set_preempt_cond_exec(ring, true);
1374 	return r;
1375 }
1376 
1377 static int sdma_v5_2_set_trap_irq_state(struct amdgpu_device *adev,
1378 					struct amdgpu_irq_src *source,
1379 					unsigned type,
1380 					enum amdgpu_interrupt_state state)
1381 {
1382 	u32 sdma_cntl;
1383 	u32 reg_offset = sdma_v5_2_get_reg_offset(adev, type, mmSDMA0_CNTL);
1384 
1385 	if (!amdgpu_sriov_vf(adev)) {
1386 		sdma_cntl = RREG32(reg_offset);
1387 		sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1388 			       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1389 		WREG32(reg_offset, sdma_cntl);
1390 	}
1391 
1392 	return 0;
1393 }
1394 
1395 static int sdma_v5_2_process_trap_irq(struct amdgpu_device *adev,
1396 				      struct amdgpu_irq_src *source,
1397 				      struct amdgpu_iv_entry *entry)
1398 {
1399 	uint32_t mes_queue_id = entry->src_data[0];
1400 
1401 	DRM_DEBUG("IH: SDMA trap\n");
1402 
1403 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
1404 		struct amdgpu_mes_queue *queue;
1405 
1406 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
1407 
1408 		spin_lock(&adev->mes.queue_id_lock);
1409 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
1410 		if (queue) {
1411 			DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
1412 			amdgpu_fence_process(queue->ring);
1413 		}
1414 		spin_unlock(&adev->mes.queue_id_lock);
1415 		return 0;
1416 	}
1417 
1418 	switch (entry->client_id) {
1419 	case SOC15_IH_CLIENTID_SDMA0:
1420 		switch (entry->ring_id) {
1421 		case 0:
1422 			amdgpu_fence_process(&adev->sdma.instance[0].ring);
1423 			break;
1424 		case 1:
1425 			/* XXX compute */
1426 			break;
1427 		case 2:
1428 			/* XXX compute */
1429 			break;
1430 		case 3:
1431 			/* XXX page queue*/
1432 			break;
1433 		}
1434 		break;
1435 	case SOC15_IH_CLIENTID_SDMA1:
1436 		switch (entry->ring_id) {
1437 		case 0:
1438 			amdgpu_fence_process(&adev->sdma.instance[1].ring);
1439 			break;
1440 		case 1:
1441 			/* XXX compute */
1442 			break;
1443 		case 2:
1444 			/* XXX compute */
1445 			break;
1446 		case 3:
1447 			/* XXX page queue*/
1448 			break;
1449 		}
1450 		break;
1451 	case SOC15_IH_CLIENTID_SDMA2:
1452 		switch (entry->ring_id) {
1453 		case 0:
1454 			amdgpu_fence_process(&adev->sdma.instance[2].ring);
1455 			break;
1456 		case 1:
1457 			/* XXX compute */
1458 			break;
1459 		case 2:
1460 			/* XXX compute */
1461 			break;
1462 		case 3:
1463 			/* XXX page queue*/
1464 			break;
1465 		}
1466 		break;
1467 	case SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid:
1468 		switch (entry->ring_id) {
1469 		case 0:
1470 			amdgpu_fence_process(&adev->sdma.instance[3].ring);
1471 			break;
1472 		case 1:
1473 			/* XXX compute */
1474 			break;
1475 		case 2:
1476 			/* XXX compute */
1477 			break;
1478 		case 3:
1479 			/* XXX page queue*/
1480 			break;
1481 		}
1482 		break;
1483 	}
1484 	return 0;
1485 }
1486 
1487 static int sdma_v5_2_process_illegal_inst_irq(struct amdgpu_device *adev,
1488 					      struct amdgpu_irq_src *source,
1489 					      struct amdgpu_iv_entry *entry)
1490 {
1491 	return 0;
1492 }
1493 
1494 static bool sdma_v5_2_firmware_mgcg_support(struct amdgpu_device *adev,
1495 						     int i)
1496 {
1497 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1498 	case IP_VERSION(5, 2, 1):
1499 		if (adev->sdma.instance[i].fw_version < 70)
1500 			return false;
1501 		break;
1502 	case IP_VERSION(5, 2, 3):
1503 		if (adev->sdma.instance[i].fw_version < 47)
1504 			return false;
1505 		break;
1506 	case IP_VERSION(5, 2, 7):
1507 		if (adev->sdma.instance[i].fw_version < 9)
1508 			return false;
1509 		break;
1510 	default:
1511 		return true;
1512 	}
1513 
1514 	return true;
1515 
1516 }
1517 
1518 static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
1519 						       bool enable)
1520 {
1521 	uint32_t data, def;
1522 	int i;
1523 
1524 	for (i = 0; i < adev->sdma.num_instances; i++) {
1525 
1526 		if (!sdma_v5_2_firmware_mgcg_support(adev, i))
1527 			adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG;
1528 
1529 		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
1530 			/* Enable sdma clock gating */
1531 			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1532 			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1533 				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1534 				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1535 				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1536 				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
1537 				  SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
1538 			if (def != data)
1539 				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1540 		} else {
1541 			/* Disable sdma clock gating */
1542 			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL));
1543 			data |= (SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1544 				 SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1545 				 SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1546 				 SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1547 				 SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK |
1548 				 SDMA0_CLK_CTRL__SOFT_OVERRIDER_REG_MASK);
1549 			if (def != data)
1550 				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CLK_CTRL), data);
1551 		}
1552 	}
1553 }
1554 
1555 static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev,
1556 						      bool enable)
1557 {
1558 	uint32_t data, def;
1559 	int i;
1560 
1561 	for (i = 0; i < adev->sdma.num_instances; i++) {
1562 		if (adev->sdma.instance[i].fw_version < 70 &&
1563 		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
1564 			    IP_VERSION(5, 2, 1))
1565 			adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;
1566 
1567 		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
1568 			/* Enable sdma mem light sleep */
1569 			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1570 			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1571 			if (def != data)
1572 				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1573 
1574 		} else {
1575 			/* Disable sdma mem light sleep */
1576 			def = data = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL));
1577 			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1578 			if (def != data)
1579 				WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_POWER_CNTL), data);
1580 
1581 		}
1582 	}
1583 }
1584 
1585 static int sdma_v5_2_set_clockgating_state(void *handle,
1586 					   enum amd_clockgating_state state)
1587 {
1588 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1589 
1590 	if (amdgpu_sriov_vf(adev))
1591 		return 0;
1592 
1593 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1594 	case IP_VERSION(5, 2, 0):
1595 	case IP_VERSION(5, 2, 2):
1596 	case IP_VERSION(5, 2, 1):
1597 	case IP_VERSION(5, 2, 4):
1598 	case IP_VERSION(5, 2, 5):
1599 	case IP_VERSION(5, 2, 6):
1600 	case IP_VERSION(5, 2, 3):
1601 	case IP_VERSION(5, 2, 7):
1602 		sdma_v5_2_update_medium_grain_clock_gating(adev,
1603 				state == AMD_CG_STATE_GATE);
1604 		sdma_v5_2_update_medium_grain_light_sleep(adev,
1605 				state == AMD_CG_STATE_GATE);
1606 		break;
1607 	default:
1608 		break;
1609 	}
1610 
1611 	return 0;
1612 }
1613 
1614 static int sdma_v5_2_set_powergating_state(void *handle,
1615 					  enum amd_powergating_state state)
1616 {
1617 	return 0;
1618 }
1619 
1620 static void sdma_v5_2_get_clockgating_state(void *handle, u64 *flags)
1621 {
1622 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1623 	int data;
1624 
1625 	if (amdgpu_sriov_vf(adev))
1626 		*flags = 0;
1627 
1628 	/* AMD_CG_SUPPORT_SDMA_MGCG */
1629 	data = RREG32(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_CLK_CTRL));
1630 	if (!(data & SDMA0_CLK_CTRL__CGCG_EN_OVERRIDE_MASK))
1631 		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
1632 
1633 	/* AMD_CG_SUPPORT_SDMA_LS */
1634 	data = RREG32_KIQ(sdma_v5_2_get_reg_offset(adev, 0, mmSDMA0_POWER_CNTL));
1635 	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
1636 		*flags |= AMD_CG_SUPPORT_SDMA_LS;
1637 }
1638 
1639 static void sdma_v5_2_ring_begin_use(struct amdgpu_ring *ring)
1640 {
1641 	struct amdgpu_device *adev = ring->adev;
1642 
1643 	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
1644 	 * disallow GFXOFF in some cases leading to
1645 	 * hangs in SDMA.  Disallow GFXOFF while SDMA is active.
1646 	 * We could probably limit this to 5.2.3,
1647 	 * but it shouldn't hurt for other parts since
1648 	 * GFXOFF will be disallowed anyway when SDMA is
1649 	 * active; this just makes it explicit.
1650 	 */
1651 	amdgpu_gfx_off_ctrl(adev, false);
1652 }
1653 
1654 static void sdma_v5_2_ring_end_use(struct amdgpu_ring *ring)
1655 {
1656 	struct amdgpu_device *adev = ring->adev;
1657 
1658 	/* SDMA 5.2.3 (RMB) FW doesn't seem to properly
1659 	 * disallow GFXOFF in some cases leading to
1660 	 * hangs in SDMA.  Allow GFXOFF when SDMA is complete.
1661 	 */
1662 	amdgpu_gfx_off_ctrl(adev, true);
1663 }
1664 
1665 const struct amd_ip_funcs sdma_v5_2_ip_funcs = {
1666 	.name = "sdma_v5_2",
1667 	.early_init = sdma_v5_2_early_init,
1668 	.late_init = NULL,
1669 	.sw_init = sdma_v5_2_sw_init,
1670 	.sw_fini = sdma_v5_2_sw_fini,
1671 	.hw_init = sdma_v5_2_hw_init,
1672 	.hw_fini = sdma_v5_2_hw_fini,
1673 	.suspend = sdma_v5_2_suspend,
1674 	.resume = sdma_v5_2_resume,
1675 	.is_idle = sdma_v5_2_is_idle,
1676 	.wait_for_idle = sdma_v5_2_wait_for_idle,
1677 	.soft_reset = sdma_v5_2_soft_reset,
1678 	.set_clockgating_state = sdma_v5_2_set_clockgating_state,
1679 	.set_powergating_state = sdma_v5_2_set_powergating_state,
1680 	.get_clockgating_state = sdma_v5_2_get_clockgating_state,
1681 };
1682 
1683 static const struct amdgpu_ring_funcs sdma_v5_2_ring_funcs = {
1684 	.type = AMDGPU_RING_TYPE_SDMA,
1685 	.align_mask = 0xf,
1686 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1687 	.support_64bit_ptrs = true,
1688 	.secure_submission_supported = true,
1689 	.get_rptr = sdma_v5_2_ring_get_rptr,
1690 	.get_wptr = sdma_v5_2_ring_get_wptr,
1691 	.set_wptr = sdma_v5_2_ring_set_wptr,
1692 	.emit_frame_size =
1693 		5 + /* sdma_v5_2_ring_init_cond_exec */
1694 		6 + /* sdma_v5_2_ring_emit_hdp_flush */
1695 		3 + /* hdp_invalidate */
1696 		6 + /* sdma_v5_2_ring_emit_pipeline_sync */
1697 		/* sdma_v5_2_ring_emit_vm_flush */
1698 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1699 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1700 		10 + 10 + 10, /* sdma_v5_2_ring_emit_fence x3 for user fence, vm fence */
1701 	.emit_ib_size = 7 + 6, /* sdma_v5_2_ring_emit_ib */
1702 	.emit_ib = sdma_v5_2_ring_emit_ib,
1703 	.emit_mem_sync = sdma_v5_2_ring_emit_mem_sync,
1704 	.emit_fence = sdma_v5_2_ring_emit_fence,
1705 	.emit_pipeline_sync = sdma_v5_2_ring_emit_pipeline_sync,
1706 	.emit_vm_flush = sdma_v5_2_ring_emit_vm_flush,
1707 	.emit_hdp_flush = sdma_v5_2_ring_emit_hdp_flush,
1708 	.test_ring = sdma_v5_2_ring_test_ring,
1709 	.test_ib = sdma_v5_2_ring_test_ib,
1710 	.insert_nop = sdma_v5_2_ring_insert_nop,
1711 	.pad_ib = sdma_v5_2_ring_pad_ib,
1712 	.begin_use = sdma_v5_2_ring_begin_use,
1713 	.end_use = sdma_v5_2_ring_end_use,
1714 	.emit_wreg = sdma_v5_2_ring_emit_wreg,
1715 	.emit_reg_wait = sdma_v5_2_ring_emit_reg_wait,
1716 	.emit_reg_write_reg_wait = sdma_v5_2_ring_emit_reg_write_reg_wait,
1717 	.init_cond_exec = sdma_v5_2_ring_init_cond_exec,
1718 	.preempt_ib = sdma_v5_2_ring_preempt_ib,
1719 };
1720 
1721 static void sdma_v5_2_set_ring_funcs(struct amdgpu_device *adev)
1722 {
1723 	int i;
1724 
1725 	for (i = 0; i < adev->sdma.num_instances; i++) {
1726 		adev->sdma.instance[i].ring.funcs = &sdma_v5_2_ring_funcs;
1727 		adev->sdma.instance[i].ring.me = i;
1728 	}
1729 }
1730 
1731 static const struct amdgpu_irq_src_funcs sdma_v5_2_trap_irq_funcs = {
1732 	.set = sdma_v5_2_set_trap_irq_state,
1733 	.process = sdma_v5_2_process_trap_irq,
1734 };
1735 
1736 static const struct amdgpu_irq_src_funcs sdma_v5_2_illegal_inst_irq_funcs = {
1737 	.process = sdma_v5_2_process_illegal_inst_irq,
1738 };
1739 
1740 static void sdma_v5_2_set_irq_funcs(struct amdgpu_device *adev)
1741 {
1742 	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
1743 					adev->sdma.num_instances;
1744 	adev->sdma.trap_irq.funcs = &sdma_v5_2_trap_irq_funcs;
1745 	adev->sdma.illegal_inst_irq.funcs = &sdma_v5_2_illegal_inst_irq_funcs;
1746 }
1747 
1748 /**
1749  * sdma_v5_2_emit_copy_buffer - copy buffer using the sDMA engine
1750  *
1751  * @ib: indirect buffer to copy to
1752  * @src_offset: src GPU address
1753  * @dst_offset: dst GPU address
1754  * @byte_count: number of bytes to xfer
1755  * @copy_flags: copy flags for the buffers
1756  *
1757  * Copy GPU buffers using the DMA engine.
1758  * Used by the amdgpu ttm implementation to move pages if
1759  * registered as the asic copy callback.
1760  */
1761 static void sdma_v5_2_emit_copy_buffer(struct amdgpu_ib *ib,
1762 				       uint64_t src_offset,
1763 				       uint64_t dst_offset,
1764 				       uint32_t byte_count,
1765 				       uint32_t copy_flags)
1766 {
1767 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1768 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
1769 		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
1770 	ib->ptr[ib->length_dw++] = byte_count - 1;
1771 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1772 	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1773 	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1774 	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1775 	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1776 }
1777 
1778 /**
1779  * sdma_v5_2_emit_fill_buffer - fill buffer using the sDMA engine
1780  *
1781  * @ib: indirect buffer to fill
1782  * @src_data: value to write to buffer
1783  * @dst_offset: dst GPU address
1784  * @byte_count: number of bytes to xfer
1785  *
1786  * Fill GPU buffers using the DMA engine.
1787  */
1788 static void sdma_v5_2_emit_fill_buffer(struct amdgpu_ib *ib,
1789 				       uint32_t src_data,
1790 				       uint64_t dst_offset,
1791 				       uint32_t byte_count)
1792 {
1793 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
1794 	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1795 	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1796 	ib->ptr[ib->length_dw++] = src_data;
1797 	ib->ptr[ib->length_dw++] = byte_count - 1;
1798 }
1799 
1800 static const struct amdgpu_buffer_funcs sdma_v5_2_buffer_funcs = {
1801 	.copy_max_bytes = 0x400000,
1802 	.copy_num_dw = 7,
1803 	.emit_copy_buffer = sdma_v5_2_emit_copy_buffer,
1804 
1805 	.fill_max_bytes = 0x400000,
1806 	.fill_num_dw = 5,
1807 	.emit_fill_buffer = sdma_v5_2_emit_fill_buffer,
1808 };
1809 
1810 static void sdma_v5_2_set_buffer_funcs(struct amdgpu_device *adev)
1811 {
1812 	if (adev->mman.buffer_funcs == NULL) {
1813 		adev->mman.buffer_funcs = &sdma_v5_2_buffer_funcs;
1814 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1815 	}
1816 }
1817 
1818 static const struct amdgpu_vm_pte_funcs sdma_v5_2_vm_pte_funcs = {
1819 	.copy_pte_num_dw = 7,
1820 	.copy_pte = sdma_v5_2_vm_copy_pte,
1821 	.write_pte = sdma_v5_2_vm_write_pte,
1822 	.set_pte_pde = sdma_v5_2_vm_set_pte_pde,
1823 };
1824 
1825 static void sdma_v5_2_set_vm_pte_funcs(struct amdgpu_device *adev)
1826 {
1827 	unsigned i;
1828 
1829 	if (adev->vm_manager.vm_pte_funcs == NULL) {
1830 		adev->vm_manager.vm_pte_funcs = &sdma_v5_2_vm_pte_funcs;
1831 		for (i = 0; i < adev->sdma.num_instances; i++) {
1832 			adev->vm_manager.vm_pte_scheds[i] =
1833 				&adev->sdma.instance[i].ring.sched;
1834 		}
1835 		adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
1836 	}
1837 }
1838 
1839 const struct amdgpu_ip_block_version sdma_v5_2_ip_block = {
1840 	.type = AMD_IP_BLOCK_TYPE_SDMA,
1841 	.major = 5,
1842 	.minor = 2,
1843 	.rev = 0,
1844 	.funcs = &sdma_v5_2_ip_funcs,
1845 };
1846