xref: /linux/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c (revision fa73ec95c969c7af292caf622ef499e7af7cb062)
1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_ucode.h"
31 #include "amdgpu_trace.h"
32 
33 #include "gc/gc_12_0_0_offset.h"
34 #include "gc/gc_12_0_0_sh_mask.h"
35 #include "hdp/hdp_6_0_0_offset.h"
36 #include "ivsrcid/gfx/irqsrcs_gfx_11_0_0.h"
37 
38 #include "soc15_common.h"
39 #include "soc15.h"
40 #include "sdma_v6_0_0_pkt_open.h"
41 #include "nbio_v4_3.h"
42 #include "sdma_common.h"
43 #include "sdma_v7_0.h"
44 #include "v12_structs.h"
45 
46 MODULE_FIRMWARE("amdgpu/sdma_7_0_0.bin");
47 MODULE_FIRMWARE("amdgpu/sdma_7_0_1.bin");
48 
49 #define SDMA1_REG_OFFSET 0x600
50 #define SDMA0_HYP_DEC_REG_START 0x5880
51 #define SDMA0_HYP_DEC_REG_END 0x589a
52 #define SDMA1_HYP_DEC_REG_OFFSET 0x20
53 
54 static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev);
55 static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev);
56 static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev);
57 static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev);
58 static int sdma_v7_0_start(struct amdgpu_device *adev);
59 
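/* Registers inside the HYP_DEC aperture (SDMA0_HYP_DEC_REG_START..END) sit in
 * a separate GC register base and are SDMA1_HYP_DEC_REG_OFFSET apart per
 * instance; all other SDMA registers are SDMA1_REG_OFFSET apart between
 * instance 0 and instance 1.
 */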
60 static u32 sdma_v7_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 internal_offset)
61 {
62 	u32 base;
63 
64 	if (internal_offset >= SDMA0_HYP_DEC_REG_START &&
65 	    internal_offset <= SDMA0_HYP_DEC_REG_END) {
66 		base = adev->reg_offset[GC_HWIP][0][1];
67 		if (instance != 0)
68 			internal_offset += SDMA1_HYP_DEC_REG_OFFSET * instance;
69 	} else {
70 		base = adev->reg_offset[GC_HWIP][0][0];
71 		if (instance == 1)
72 			internal_offset += SDMA1_REG_OFFSET;
73 	}
74 
75 	return base + internal_offset;
76 }
77 
78 static unsigned sdma_v7_0_ring_init_cond_exec(struct amdgpu_ring *ring,
79 					      uint64_t addr)
80 {
81 	unsigned ret;
82 
83 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COND_EXE));
84 	amdgpu_ring_write(ring, lower_32_bits(addr));
85 	amdgpu_ring_write(ring, upper_32_bits(addr));
86 	amdgpu_ring_write(ring, 1);
87 	/* this is the offset we need to patch later */
88 	ret = ring->wptr & ring->buf_mask;
89 	/* insert dummy here and patch it later */
90 	amdgpu_ring_write(ring, 0);
91 
92 	return ret;
93 }
94 
95 /**
96  * sdma_v7_0_ring_get_rptr - get the current read pointer
97  *
98  * @ring: amdgpu ring pointer
99  *
100  * Get the current rptr from the hardware.
101  */
102 static uint64_t sdma_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
103 {
104 	u64 *rptr;
105 
106 	/* XXX check if swapping is necessary on BE */
107 	rptr = (u64 *)ring->rptr_cpu_addr;
108 
109 	DRM_DEBUG("rptr before shift == 0x%016llx\n", *rptr);
110 	return ((*rptr) >> 2);
111 }
112 
113 /**
114  * sdma_v7_0_ring_get_wptr - get the current write pointer
115  *
116  * @ring: amdgpu ring pointer
117  *
118  * Get the current wptr from the hardware.
119  */
120 static uint64_t sdma_v7_0_ring_get_wptr(struct amdgpu_ring *ring)
121 {
122 	u64 wptr = 0;
123 
124 	if (ring->use_doorbell) {
125 		/* XXX check if swapping is necessary on BE */
126 		wptr = READ_ONCE(*((u64 *)ring->wptr_cpu_addr));
127 		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
128 	}
129 
130 	return wptr >> 2;
131 }
132 
133 /**
134  * sdma_v7_0_ring_set_wptr - commit the write pointer
135  *
136  * @ring: amdgpu ring pointer
137  *
138  * Write the wptr back to the hardware.
139  */
140 static void sdma_v7_0_ring_set_wptr(struct amdgpu_ring *ring)
141 {
142 	struct amdgpu_device *adev = ring->adev;
143 	uint32_t *wptr_saved;
144 	uint32_t *is_queue_unmap;
145 	uint64_t aggregated_db_index;
146 	uint32_t mqd_size = adev->mqds[AMDGPU_HW_IP_DMA].mqd_size;
147 
148 	DRM_DEBUG("Setting write pointer\n");
149 
150 	if (ring->is_mes_queue) {
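		/* For MES-managed queues the wptr shadow and the queue-unmap flag are
		 * stored immediately after the MQD in the same buffer; mirror the wptr
		 * there before ringing the doorbell.
		 */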
151 		wptr_saved = (uint32_t *)(ring->mqd_ptr + mqd_size);
152 		is_queue_unmap = (uint32_t *)(ring->mqd_ptr + mqd_size +
153 					      sizeof(uint32_t));
154 		aggregated_db_index =
155 			amdgpu_mes_get_aggregated_doorbell_index(adev,
156 							 ring->hw_prio);
157 
158 		atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
159 			     ring->wptr << 2);
160 		*wptr_saved = ring->wptr << 2;
161 		if (*is_queue_unmap) {
162 			WDOORBELL64(aggregated_db_index, ring->wptr << 2);
163 			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
164 					ring->doorbell_index, ring->wptr << 2);
165 			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
166 		} else {
167 			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
168 					ring->doorbell_index, ring->wptr << 2);
169 			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
170 
171 			if (*is_queue_unmap)
172 				WDOORBELL64(aggregated_db_index,
173 					    ring->wptr << 2);
174 		}
175 	} else {
176 		if (ring->use_doorbell) {
177 			DRM_DEBUG("Using doorbell -- "
178 				  "wptr_offs == 0x%08x "
179 				  "lower_32_bits(ring->wptr) << 2 == 0x%08x "
180 				  "upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
181 				  ring->wptr_offs,
182 				  lower_32_bits(ring->wptr << 2),
183 				  upper_32_bits(ring->wptr << 2));
184 			/* XXX check if swapping is necessary on BE */
185 			atomic64_set((atomic64_t *)ring->wptr_cpu_addr,
186 				     ring->wptr << 2);
187 			DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
188 				  ring->doorbell_index, ring->wptr << 2);
189 			WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
190 		} else {
191 			DRM_DEBUG("Not using doorbell -- "
192 				  "regSDMA%i_GFX_RB_WPTR == 0x%08x "
193 				  "regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
194 				  ring->me,
195 				  lower_32_bits(ring->wptr << 2),
196 				  ring->me,
197 				  upper_32_bits(ring->wptr << 2));
198 			WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
199 								     ring->me,
200 								     regSDMA0_QUEUE0_RB_WPTR),
201 					lower_32_bits(ring->wptr << 2));
202 			WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev,
203 								     ring->me,
204 								     regSDMA0_QUEUE0_RB_WPTR_HI),
205 					upper_32_bits(ring->wptr << 2));
206 		}
207 	}
208 }
209 
210 static void sdma_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
211 {
212 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
213 	int i;
214 
215 	for (i = 0; i < count; i++)
216 		if (sdma && sdma->burst_nop && (i == 0))
217 			amdgpu_ring_write(ring, ring->funcs->nop |
218 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
219 		else
220 			amdgpu_ring_write(ring, ring->funcs->nop);
221 }
222 
223 /**
224  * sdma_v7_0_ring_emit_ib - Schedule an IB on the DMA engine
225  *
226  * @ring: amdgpu ring pointer
     * @job: job to retrieve vmid from
227  * @ib: IB object to schedule
     * @flags: unused
228  *
229  * Schedule an IB in the DMA ring.
230  */
231 static void sdma_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
232 				   struct amdgpu_job *job,
233 				   struct amdgpu_ib *ib,
234 				   uint32_t flags)
235 {
236 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
237 	uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
238 
239 	/* An IB packet must end on an 8-DW boundary--the next dword
240 	 * must be on an 8-dword boundary. Our IB packet below is 6
241 	 * dwords long, so add x NOPs such that, in
242 	 * modular arithmetic,
243 	 * wptr + 6 + x = 8k, k >= 0, which in C is
244 	 * (wptr + 6 + x) % 8 == 0.
245 	 * The expression below is a solution for x.
246 	 */
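	/* Worked example (hypothetical wptr value): if wptr % 8 == 5, then
	 * x = (2 - 5) & 7 = 5 NOPs are inserted and 5 + 6 + 5 = 16, which is
	 * a multiple of 8.
	 */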
247 	sdma_v7_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
248 
249 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_INDIRECT) |
250 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
251 	/* base must be 32 byte aligned */
252 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
253 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
254 	amdgpu_ring_write(ring, ib->length_dw);
255 	amdgpu_ring_write(ring, lower_32_bits(csa_mc_addr));
256 	amdgpu_ring_write(ring, upper_32_bits(csa_mc_addr));
257 }
258 
259 /**
260  * sdma_v7_0_ring_emit_mem_sync - flush the IB by graphics cache rinse
261  *
262  * @ring: amdgpu ring pointer
265  *
266  * flush the IB by graphics cache rinse.
267  */
268 static void sdma_v7_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
269 {
270 	uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
271 		SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
272 		SDMA_GCR_GLI_INV(1);
273 
274 	/* flush the entire L0/L1/L2 cache; this can be tuned based on performance requirements */
275 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_GCR_REQ));
276 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
277 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
278 			  SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
279 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
280 			  SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
281 	amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
282 			  SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
283 }
284 
285 
286 /**
287  * sdma_v7_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
288  *
289  * @ring: amdgpu ring pointer
290  *
291  * Emit an hdp flush packet on the requested DMA ring.
292  */
293 static void sdma_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
294 {
295 	struct amdgpu_device *adev = ring->adev;
296 	u32 ref_and_mask = 0;
297 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
298 
299 	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
300 
301 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
302 			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
303 			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
304 	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
305 	amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
306 	amdgpu_ring_write(ring, ref_and_mask); /* reference */
307 	amdgpu_ring_write(ring, ref_and_mask); /* mask */
308 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
309 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
310 }
311 
312 /**
313  * sdma_v7_0_ring_emit_fence - emit a fence on the DMA ring
314  *
315  * @ring: amdgpu ring pointer
316  * @fence: amdgpu fence object
317  *
318  * Add a DMA fence packet to the ring to write
319  * the fence seq number and DMA trap packet to generate
320  * an interrupt if needed.
321  */
322 static void sdma_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
323 				      unsigned flags)
324 {
325 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
326 	/* write the fence */
327 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
328 			  SDMA_PKT_FENCE_HEADER_MTYPE(0x3)); /* Ucached(UC) */
329 	/* zero in first two bits */
330 	BUG_ON(addr & 0x3);
331 	amdgpu_ring_write(ring, lower_32_bits(addr));
332 	amdgpu_ring_write(ring, upper_32_bits(addr));
333 	amdgpu_ring_write(ring, lower_32_bits(seq));
334 
335 	/* optionally write high bits as well */
336 	if (write64bit) {
337 		addr += 4;
338 		amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_FENCE) |
339 				  SDMA_PKT_FENCE_HEADER_MTYPE(0x3));
340 		/* zero in first two bits */
341 		BUG_ON(addr & 0x3);
342 		amdgpu_ring_write(ring, lower_32_bits(addr));
343 		amdgpu_ring_write(ring, upper_32_bits(addr));
344 		amdgpu_ring_write(ring, upper_32_bits(seq));
345 	}
346 
347 	if (flags & AMDGPU_FENCE_FLAG_INT) {
348 		uint32_t ctx = ring->is_mes_queue ?
349 			(ring->hw_queue_id | AMDGPU_FENCE_MES_QUEUE_FLAG) : 0;
350 		/* generate an interrupt */
351 		amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_TRAP));
352 		amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(ctx));
353 	}
354 }
355 
356 /**
357  * sdma_v7_0_gfx_stop - stop the gfx async dma engines
358  *
359  * @adev: amdgpu_device pointer
360  *
361  * Stop the gfx async dma ring buffers.
362  */
363 static void sdma_v7_0_gfx_stop(struct amdgpu_device *adev)
364 {
365 	u32 rb_cntl, ib_cntl;
366 	int i;
367 
368 	for (i = 0; i < adev->sdma.num_instances; i++) {
369 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
370 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 0);
371 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
372 		ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
373 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 0);
374 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
375 	}
376 }
377 
378 /**
379  * sdma_v7_0_rlc_stop - stop the compute async dma engines
380  *
381  * @adev: amdgpu_device pointer
382  *
383  * Stop the compute async dma queues.
384  */
385 static void sdma_v7_0_rlc_stop(struct amdgpu_device *adev)
386 {
387 	/* XXX todo */
388 }
389 
390 /**
391  * sdma_v7_0_ctx_switch_enable - enable/disable the async dma engines context switch
392  *
393  * @adev: amdgpu_device pointer
394  * @enable: enable/disable the DMA MEs context switch.
395  *
396  * Halt or unhalt the async dma engines context switch.
397  */
398 static void sdma_v7_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
399 {
400 }
401 
402 /**
403  * sdma_v7_0_enable - enable/disable the async dma engines
404  *
405  * @adev: amdgpu_device pointer
406  * @enable: enable/disable the DMA MEs.
407  *
408  * Halt or unhalt the async dma engines.
409  */
410 static void sdma_v7_0_enable(struct amdgpu_device *adev, bool enable)
411 {
412 	u32 mcu_cntl;
413 	int i;
414 
415 	if (!enable) {
416 		sdma_v7_0_gfx_stop(adev);
417 		sdma_v7_0_rlc_stop(adev);
418 	}
419 
420 	if (amdgpu_sriov_vf(adev))
421 		return;
422 
423 	for (i = 0; i < adev->sdma.num_instances; i++) {
424 		mcu_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
425 		mcu_cntl = REG_SET_FIELD(mcu_cntl, SDMA0_MCU_CNTL, HALT, enable ? 0 : 1);
426 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), mcu_cntl);
427 	}
428 }
429 
430 /**
431  * sdma_v7_0_gfx_resume - setup and start the async dma engines
432  *
433  * @adev: amdgpu_device pointer
434  *
435  * Set up the gfx DMA ring buffers and enable them.
436  * Returns 0 for success, error for failure.
437  */
438 static int sdma_v7_0_gfx_resume(struct amdgpu_device *adev)
439 {
440 	struct amdgpu_ring *ring;
441 	u32 rb_cntl, ib_cntl;
442 	u32 rb_bufsz;
443 	u32 doorbell;
444 	u32 doorbell_offset;
445 	u32 tmp;
446 	u64 wptr_gpu_addr;
447 	int i, r;
448 
449 	for (i = 0; i < adev->sdma.num_instances; i++) {
450 		ring = &adev->sdma.instance[i].ring;
451 
452 		//if (!amdgpu_sriov_vf(adev))
453 		//	WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
454 
455 		/* Set ring buffer size in dwords */
456 		rb_bufsz = order_base_2(ring->ring_size / 4);
457 		rb_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL));
458 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SIZE, rb_bufsz);
459 #ifdef __BIG_ENDIAN
460 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_SWAP_ENABLE, 1);
461 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL,
462 					RPTR_WRITEBACK_SWAP_ENABLE, 1);
463 #endif
464 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_PRIV, 1);
465 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
466 
467 		/* Initialize the ring buffer's read and write pointers */
468 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR), 0);
469 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_HI), 0);
470 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), 0);
471 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), 0);
472 
473 		/* setup the wptr shadow polling */
474 		wptr_gpu_addr = ring->wptr_gpu_addr;
475 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_LO),
476 		       lower_32_bits(wptr_gpu_addr));
477 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_POLL_ADDR_HI),
478 		       upper_32_bits(wptr_gpu_addr));
479 
480 		/* set the wb address whether it's enabled or not */
481 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_HI),
482 		       upper_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFF);
483 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_RPTR_ADDR_LO),
484 		       lower_32_bits(ring->rptr_gpu_addr) & 0xFFFFFFFC);
485 
486 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
487 		if (amdgpu_sriov_vf(adev))
488 			rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 1);
489 		else
490 			rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, WPTR_POLL_ENABLE, 0);
491 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, MCU_WPTR_POLL_ENABLE, 1);
492 
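		/* the ring base is 256-byte aligned: RB_BASE holds the base >> 8 and
		 * RB_BASE_HI the bits above bit 39 (base >> 40)
		 */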
493 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE), ring->gpu_addr >> 8);
494 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_BASE_HI), ring->gpu_addr >> 40);
495 
496 		ring->wptr = 0;
497 
498 		/* before programming wptr to a smaller value, minor_ptr_update needs to be set first */
499 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 1);
500 
501 		if (!amdgpu_sriov_vf(adev)) { /* only bare-metal use register write for wptr */
502 			WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR), lower_32_bits(ring->wptr) << 2);
503 			WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
504 		}
505 
506 		doorbell = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL));
507 		doorbell_offset = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET));
508 
509 		if (ring->use_doorbell) {
510 			doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
511 			doorbell_offset = REG_SET_FIELD(doorbell_offset, SDMA0_QUEUE0_DOORBELL_OFFSET,
512 					OFFSET, ring->doorbell_index);
513 		} else {
514 			doorbell = REG_SET_FIELD(doorbell, SDMA0_QUEUE0_DOORBELL, ENABLE, 0);
515 		}
516 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL), doorbell);
517 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_DOORBELL_OFFSET), doorbell_offset);
518 
519 		if (i == 0)
520 			adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
521 						      ring->doorbell_index,
522 						      adev->doorbell_index.sdma_doorbell_range * adev->sdma.num_instances);
523 
524 		if (amdgpu_sriov_vf(adev))
525 			sdma_v7_0_ring_set_wptr(ring);
526 
527 		/* set minor_ptr_update to 0 after wptr is programmed */
528 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_MINOR_PTR_UPDATE), 0);
529 
530 		/* Set up sdma hang watchdog */
531 		tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL));
532 		/* 100ms per unit */
533 		tmp = REG_SET_FIELD(tmp, SDMA0_WATCHDOG_CNTL, QUEUE_HANG_COUNT,
534 				    max(adev->usec_timeout/100000, 1));
535 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_WATCHDOG_CNTL), tmp);
536 
537 		/* Set up RESP_MODE to non-copy addresses */
538 		tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL));
539 		tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
540 		tmp = REG_SET_FIELD(tmp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
541 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_CNTL), tmp);
542 
543 		/* program default cache read and write policy */
544 		tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE));
545 		/* clean read policy and write policy bits */
546 		tmp &= 0xFF0FFF;
547 		tmp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) |
548 			 (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
549 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_UTCL1_PAGE), tmp);
550 
551 		if (!amdgpu_sriov_vf(adev)) {
552 			/* unhalt engine */
553 			tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
554 			tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, HALT, 0);
555 			tmp = REG_SET_FIELD(tmp, SDMA0_MCU_CNTL, RESET, 0);
556 			WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), tmp);
557 		}
558 
559 		/* enable DMA RB */
560 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_QUEUE0_RB_CNTL, RB_ENABLE, 1);
561 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_RB_CNTL), rb_cntl);
562 
563 		ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL));
564 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_ENABLE, 1);
565 #ifdef __BIG_ENDIAN
566 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_QUEUE0_IB_CNTL, IB_SWAP_ENABLE, 1);
567 #endif
568 		/* enable DMA IBs */
569 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_IB_CNTL), ib_cntl);
570 
571 		ring->sched.ready = true;
572 
573 		if (amdgpu_sriov_vf(adev)) { /* the bare-metal sequence doesn't need the two lines below */
574 			sdma_v7_0_ctx_switch_enable(adev, true);
575 			sdma_v7_0_enable(adev, true);
576 		}
577 
578 		r = amdgpu_ring_test_helper(ring);
579 		if (r) {
580 			ring->sched.ready = false;
581 			return r;
582 		}
583 
584 	}
585 
586 	return 0;
587 }
588 
589 /**
590  * sdma_v7_0_rlc_resume - setup and start the async dma engines
591  *
592  * @adev: amdgpu_device pointer
593  *
594  * Set up the compute DMA queues and enable them.
595  * Returns 0 for success, error for failure.
596  */
597 static int sdma_v7_0_rlc_resume(struct amdgpu_device *adev)
598 {
599 	return 0;
600 }
601 
602 static void sdma_v12_0_free_ucode_buffer(struct amdgpu_device *adev)
603 {
604 	int i;
605 
606 	for (i = 0; i < adev->sdma.num_instances; i++) {
607 		amdgpu_bo_free_kernel(&adev->sdma.instance[i].sdma_fw_obj,
608 				      &adev->sdma.instance[i].sdma_fw_gpu_addr,
609 				      (void **)&adev->sdma.instance[i].sdma_fw_ptr);
610 	}
611 }
612 
613 /**
614  * sdma_v7_0_load_microcode - load the sDMA ME ucode
615  *
616  * @adev: amdgpu_device pointer
617  *
618  * Loads the sDMA0/1 ucode.
619  * Returns 0 for success, -EINVAL if the ucode is not available.
620  */
621 static int sdma_v7_0_load_microcode(struct amdgpu_device *adev)
622 {
623 	const struct sdma_firmware_header_v3_0 *hdr;
624 	const __le32 *fw_data;
625 	u32 fw_size;
626 	uint32_t tmp, sdma_status, ic_op_cntl;
627 	int i, r, j;
628 
629 	/* halt the MEs */
630 	sdma_v7_0_enable(adev, false);
631 
632 	if (!adev->sdma.instance[0].fw)
633 		return -EINVAL;
634 
635 	hdr = (const struct sdma_firmware_header_v3_0 *)
636 		adev->sdma.instance[0].fw->data;
637 	amdgpu_ucode_print_sdma_hdr(&hdr->header);
638 
639 	fw_data = (const __le32 *)(adev->sdma.instance[0].fw->data +
640 			le32_to_cpu(hdr->ucode_offset_bytes));
641 	fw_size = le32_to_cpu(hdr->ucode_size_bytes);
642 
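	/* For each instance: copy the ucode image into a per-instance VRAM buffer,
	 * point the instruction cache base registers at it, prime the icache and
	 * then wait for ICACHE_PRIMED and UCODE_INIT_DONE.
	 */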
643 	for (i = 0; i < adev->sdma.num_instances; i++) {
644 		r = amdgpu_bo_create_reserved(adev, fw_size,
645 					      PAGE_SIZE,
646 					      AMDGPU_GEM_DOMAIN_VRAM,
647 					      &adev->sdma.instance[i].sdma_fw_obj,
648 					      &adev->sdma.instance[i].sdma_fw_gpu_addr,
649 					      (void **)&adev->sdma.instance[i].sdma_fw_ptr);
650 		if (r) {
651 			dev_err(adev->dev, "(%d) failed to create sdma ucode bo\n", r);
652 			return r;
653 		}
654 
655 		memcpy(adev->sdma.instance[i].sdma_fw_ptr, fw_data, fw_size);
656 
657 		amdgpu_bo_kunmap(adev->sdma.instance[i].sdma_fw_obj);
658 		amdgpu_bo_unreserve(adev->sdma.instance[i].sdma_fw_obj);
659 
660 		tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_IC_CNTL));
661 		tmp = REG_SET_FIELD(tmp, SDMA0_IC_CNTL, GPA, 0);
662 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_IC_CNTL), tmp);
663 
664 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_IC_BASE_LO),
665 			lower_32_bits(adev->sdma.instance[i].sdma_fw_gpu_addr));
666 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_IC_BASE_HI),
667 			upper_32_bits(adev->sdma.instance[i].sdma_fw_gpu_addr));
668 
669 		tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_IC_OP_CNTL));
670 		tmp = REG_SET_FIELD(tmp, SDMA0_IC_OP_CNTL, PRIME_ICACHE, 1);
671 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_IC_OP_CNTL), tmp);
672 
673 		/* Wait for sdma ucode init complete */
674 		for (j = 0; j < adev->usec_timeout; j++) {
675 			ic_op_cntl = RREG32_SOC15_IP(GC,
676 					sdma_v7_0_get_reg_offset(adev, i, regSDMA0_IC_OP_CNTL));
677 			sdma_status = RREG32_SOC15_IP(GC,
678 					sdma_v7_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG));
679 			if ((REG_GET_FIELD(ic_op_cntl, SDMA0_IC_OP_CNTL, ICACHE_PRIMED) == 1) &&
680 			    (REG_GET_FIELD(sdma_status, SDMA0_STATUS_REG, UCODE_INIT_DONE) == 1))
681 				break;
682 			udelay(1);
683 		}
684 
685 		if (j >= adev->usec_timeout) {
686 			dev_err(adev->dev, "failed to init sdma ucode\n");
687 			return -EINVAL;
688 		}
689 	}
690 
691 	return 0;
692 }
693 
694 static int sdma_v7_0_soft_reset(void *handle)
695 {
696 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
697 	u32 tmp;
698 	int i;
699 
700 	sdma_v7_0_gfx_stop(adev);
701 
702 	for (i = 0; i < adev->sdma.num_instances; i++) {
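		/* halt and reset the SDMA microcontroller, clear any pending queue
		 * preemption, then pulse the per-instance GRBM soft-reset bit
		 */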
703 		//tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_FREEZE));
704 		//tmp |= SDMA0_FREEZE__FREEZE_MASK;
705 		//WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_FREEZE), tmp);
706 		tmp = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL));
707 		tmp |= SDMA0_MCU_CNTL__HALT_MASK;
708 		tmp |= SDMA0_MCU_CNTL__RESET_MASK;
709 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_MCU_CNTL), tmp);
710 
711 		WREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, i, regSDMA0_QUEUE0_PREEMPT), 0);
712 
713 		udelay(100);
714 
715 		tmp = GRBM_SOFT_RESET__SOFT_RESET_SDMA0_MASK << i;
716 		WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, tmp);
717 		tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
718 
719 		udelay(100);
720 
721 		WREG32_SOC15(GC, 0, regGRBM_SOFT_RESET, 0);
722 		tmp = RREG32_SOC15(GC, 0, regGRBM_SOFT_RESET);
723 
724 		udelay(100);
725 	}
726 
727 	return sdma_v7_0_start(adev);
728 }
729 
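/* Run a quick IB test on every SDMA ring; any failure indicates that a soft
 * reset is needed.
 */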
730 static bool sdma_v7_0_check_soft_reset(void *handle)
731 {
732 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
733 	struct amdgpu_ring *ring;
734 	int i, r;
735 	long tmo = msecs_to_jiffies(1000);
736 
737 	for (i = 0; i < adev->sdma.num_instances; i++) {
738 		ring = &adev->sdma.instance[i].ring;
739 		r = amdgpu_ring_test_ib(ring, tmo);
740 		if (r)
741 			return true;
742 	}
743 
744 	return false;
745 }
746 
747 /**
748  * sdma_v7_0_start - setup and start the async dma engines
749  *
750  * @adev: amdgpu_device pointer
751  *
752  * Set up the DMA engines and enable them.
753  * Returns 0 for success, error for failure.
754  */
755 static int sdma_v7_0_start(struct amdgpu_device *adev)
756 {
757 	int r = 0;
758 
759 	if (amdgpu_sriov_vf(adev)) {
760 		sdma_v7_0_ctx_switch_enable(adev, false);
761 		sdma_v7_0_enable(adev, false);
762 
763 		/* set RB registers */
764 		r = sdma_v7_0_gfx_resume(adev);
765 		return r;
766 	}
767 
768 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) {
769 		r = sdma_v7_0_load_microcode(adev);
770 		if (r) {
771 			sdma_v12_0_free_ucode_buffer(adev);
772 			return r;
773 		}
774 
775 		if (amdgpu_emu_mode == 1)
776 			msleep(1000);
777 	}
778 
779 	/* unhalt the MEs */
780 	sdma_v7_0_enable(adev, true);
781 	/* enable sdma ring preemption */
782 	sdma_v7_0_ctx_switch_enable(adev, true);
783 
784 	/* start the gfx rings and rlc compute queues */
785 	r = sdma_v7_0_gfx_resume(adev);
786 	if (r)
787 		return r;
788 	r = sdma_v7_0_rlc_resume(adev);
789 
790 	return r;
791 }
792 
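/* Fill the SDMA MQD (memory queue descriptor). The values mirror what
 * sdma_v7_0_gfx_resume() programs directly through registers; the MQD is
 * consumed instead when a queue is set up indirectly (e.g. by the MES).
 */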
793 static int sdma_v7_0_mqd_init(struct amdgpu_device *adev, void *mqd,
794 			      struct amdgpu_mqd_prop *prop)
795 {
796 	struct v12_sdma_mqd *m = mqd;
797 	uint64_t wb_gpu_addr;
798 
799 	m->sdmax_rlcx_rb_cntl =
800 		order_base_2(prop->queue_size / 4) << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
801 		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
802 		4 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
803 		1 << SDMA0_QUEUE0_RB_CNTL__MCU_WPTR_POLL_ENABLE__SHIFT;
804 
805 	m->sdmax_rlcx_rb_base = lower_32_bits(prop->hqd_base_gpu_addr >> 8);
806 	m->sdmax_rlcx_rb_base_hi = upper_32_bits(prop->hqd_base_gpu_addr >> 8);
807 
808 	wb_gpu_addr = prop->wptr_gpu_addr;
809 	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits(wb_gpu_addr);
810 	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr);
811 
812 	wb_gpu_addr = prop->rptr_gpu_addr;
813 	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits(wb_gpu_addr);
814 	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits(wb_gpu_addr);
815 
816 	m->sdmax_rlcx_ib_cntl = RREG32_SOC15_IP(GC, sdma_v7_0_get_reg_offset(adev, 0,
817 							regSDMA0_QUEUE0_IB_CNTL));
818 
819 	m->sdmax_rlcx_doorbell_offset =
820 		prop->doorbell_index << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
821 
822 	m->sdmax_rlcx_doorbell = REG_SET_FIELD(0, SDMA0_QUEUE0_DOORBELL, ENABLE, 1);
823 
824 	m->sdmax_rlcx_doorbell_log = 0;
825 	m->sdmax_rlcx_rb_aql_cntl = 0x4000;	//regSDMA0_QUEUE0_RB_AQL_CNTL_DEFAULT;
826 	m->sdmax_rlcx_dummy_reg = 0xf;	//regSDMA0_QUEUE0_DUMMY_REG_DEFAULT;
827 
828 	return 0;
829 }
830 
831 static void sdma_v7_0_set_mqd_funcs(struct amdgpu_device *adev)
832 {
833 	adev->mqds[AMDGPU_HW_IP_DMA].mqd_size = sizeof(struct v12_sdma_mqd);
834 	adev->mqds[AMDGPU_HW_IP_DMA].init_mqd = sdma_v7_0_mqd_init;
835 }
836 
837 /**
838  * sdma_v7_0_ring_test_ring - simple async dma engine test
839  *
840  * @ring: amdgpu_ring structure holding ring information
841  *
842  * Test the DMA engine by using it to write a
843  * value to memory.
844  * Returns 0 for success, error for failure.
845  */
846 static int sdma_v7_0_ring_test_ring(struct amdgpu_ring *ring)
847 {
848 	struct amdgpu_device *adev = ring->adev;
849 	unsigned i;
850 	unsigned index;
851 	int r;
852 	u32 tmp;
853 	u64 gpu_addr;
854 	volatile uint32_t *cpu_ptr = NULL;
855 
856 	tmp = 0xCAFEDEAD;
857 
858 	if (ring->is_mes_queue) {
859 		uint32_t offset = 0;
860 		offset = amdgpu_mes_ctx_get_offs(ring,
861 					 AMDGPU_MES_CTX_PADDING_OFFS);
862 		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
863 		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
864 		*cpu_ptr = tmp;
865 	} else {
866 		r = amdgpu_device_wb_get(adev, &index);
867 		if (r) {
868 			dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
869 			return r;
870 		}
871 
872 		gpu_addr = adev->wb.gpu_addr + (index * 4);
873 		adev->wb.wb[index] = cpu_to_le32(tmp);
874 	}
875 
876 	r = amdgpu_ring_alloc(ring, 5);
877 	if (r) {
878 		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
879 		if (!ring->is_mes_queue)
880 			amdgpu_device_wb_free(adev, index);
881 		return r;
882 	}
883 
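	/* emit a single WRITE_LINEAR packet: header, destination address lo/hi,
	 * dword count minus one (0, i.e. one dword) and the 0xDEADBEEF payload
	 */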
884 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
885 			  SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
886 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
887 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
888 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
889 	amdgpu_ring_write(ring, 0xDEADBEEF);
890 	amdgpu_ring_commit(ring);
891 
892 	for (i = 0; i < adev->usec_timeout; i++) {
893 		if (ring->is_mes_queue)
894 			tmp = le32_to_cpu(*cpu_ptr);
895 		else
896 			tmp = le32_to_cpu(adev->wb.wb[index]);
897 		if (tmp == 0xDEADBEEF)
898 			break;
899 		if (amdgpu_emu_mode == 1)
900 			msleep(1);
901 		else
902 			udelay(1);
903 	}
904 
905 	if (i >= adev->usec_timeout)
906 		r = -ETIMEDOUT;
907 
908 	if (!ring->is_mes_queue)
909 		amdgpu_device_wb_free(adev, index);
910 
911 	return r;
912 }
913 
914 /**
915  * sdma_v7_0_ring_test_ib - test an IB on the DMA engine
916  *
917  * @ring: amdgpu_ring structure holding ring information
     * @timeout: timeout value in jiffies
918  *
919  * Test a simple IB in the DMA ring.
920  * Returns 0 on success, error on failure.
921  */
922 static int sdma_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
923 {
924 	struct amdgpu_device *adev = ring->adev;
925 	struct amdgpu_ib ib;
926 	struct dma_fence *f = NULL;
927 	unsigned index;
928 	long r;
929 	u32 tmp = 0;
930 	u64 gpu_addr;
931 	volatile uint32_t *cpu_ptr = NULL;
932 
933 	tmp = 0xCAFEDEAD;
934 	memset(&ib, 0, sizeof(ib));
935 
936 	if (ring->is_mes_queue) {
937 		uint32_t offset = 0;
938 		offset = amdgpu_mes_ctx_get_offs(ring, AMDGPU_MES_CTX_IB_OFFS);
939 		ib.gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
940 		ib.ptr = (void *)amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
941 
942 		offset = amdgpu_mes_ctx_get_offs(ring,
943 					 AMDGPU_MES_CTX_PADDING_OFFS);
944 		gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
945 		cpu_ptr = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
946 		*cpu_ptr = tmp;
947 	} else {
948 		r = amdgpu_device_wb_get(adev, &index);
949 		if (r) {
950 			dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
951 			return r;
952 		}
953 
954 		gpu_addr = adev->wb.gpu_addr + (index * 4);
955 		adev->wb.wb[index] = cpu_to_le32(tmp);
956 
957 		r = amdgpu_ib_get(adev, NULL, 256, AMDGPU_IB_POOL_DIRECT, &ib);
958 		if (r) {
959 			DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
960 			goto err0;
961 		}
962 	}
963 
964 	ib.ptr[0] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
965 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
966 	ib.ptr[1] = lower_32_bits(gpu_addr);
967 	ib.ptr[2] = upper_32_bits(gpu_addr);
968 	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
969 	ib.ptr[4] = 0xDEADBEEF;
970 	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
971 	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
972 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
973 	ib.length_dw = 8;
974 
975 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
976 	if (r)
977 		goto err1;
978 
979 	r = dma_fence_wait_timeout(f, false, timeout);
980 	if (r == 0) {
981 		DRM_ERROR("amdgpu: IB test timed out\n");
982 		r = -ETIMEDOUT;
983 		goto err1;
984 	} else if (r < 0) {
985 		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
986 		goto err1;
987 	}
988 
989 	if (ring->is_mes_queue)
990 		tmp = le32_to_cpu(*cpu_ptr);
991 	else
992 		tmp = le32_to_cpu(adev->wb.wb[index]);
993 
994 	if (tmp == 0xDEADBEEF)
995 		r = 0;
996 	else
997 		r = -EINVAL;
998 
999 err1:
1000 	amdgpu_ib_free(adev, &ib, NULL);
1001 	dma_fence_put(f);
1002 err0:
1003 	if (!ring->is_mes_queue)
1004 		amdgpu_device_wb_free(adev, index);
1005 	return r;
1006 }
1007 
1008 
1009 /**
1010  * sdma_v7_0_vm_copy_pte - update PTEs by copying them from the GART
1011  *
1012  * @ib: indirect buffer to fill with commands
1013  * @pe: addr of the page entry
1014  * @src: src addr to copy from
1015  * @count: number of page entries to update
1016  *
1017  * Update PTEs by copying them from the GART using sDMA.
1018  */
1019 static void sdma_v7_0_vm_copy_pte(struct amdgpu_ib *ib,
1020 				  uint64_t pe, uint64_t src,
1021 				  unsigned count)
1022 {
1023 	unsigned bytes = count * 8;
1024 
1025 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
1026 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1027 	ib->ptr[ib->length_dw++] = bytes - 1;
1028 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1029 	ib->ptr[ib->length_dw++] = lower_32_bits(src);
1030 	ib->ptr[ib->length_dw++] = upper_32_bits(src);
1031 	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1032 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1033 
1034 }
1035 
1036 /**
1037  * sdma_v7_0_vm_write_pte - update PTEs by writing them manually
1038  *
1039  * @ib: indirect buffer to fill with commands
1040  * @pe: addr of the page entry
1041  * @value: value to write into the page table entries
1042  * @count: number of page entries to update
1043  * @incr: increase next addr by incr bytes
1045  *
1046  * Update PTEs by writing them manually using sDMA.
1047  */
1048 static void sdma_v7_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1049 				   uint64_t value, unsigned count,
1050 				   uint32_t incr)
1051 {
1052 	unsigned ndw = count * 2;
1053 
1054 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_WRITE) |
1055 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1056 	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1057 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1058 	ib->ptr[ib->length_dw++] = ndw - 1;
1059 	for (; ndw > 0; ndw -= 2) {
1060 		ib->ptr[ib->length_dw++] = lower_32_bits(value);
1061 		ib->ptr[ib->length_dw++] = upper_32_bits(value);
1062 		value += incr;
1063 	}
1064 }
1065 
1066 /**
1067  * sdma_v7_0_vm_set_pte_pde - update the page tables using sDMA
1068  *
1069  * @ib: indirect buffer to fill with commands
1070  * @pe: addr of the page entry
1071  * @addr: dst addr to write into pe
1072  * @count: number of page entries to update
1073  * @incr: increase next addr by incr bytes
1074  * @flags: access flags
1075  *
1076  * Update the page tables using sDMA.
1077  */
1078 static void sdma_v7_0_vm_set_pte_pde(struct amdgpu_ib *ib,
1079 				     uint64_t pe,
1080 				     uint64_t addr, unsigned count,
1081 				     uint32_t incr, uint64_t flags)
1082 {
1083 	/* for physically contiguous pages (vram) */
1084 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_PTEPDE);
1085 	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1086 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1087 	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1088 	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1089 	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1090 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1091 	ib->ptr[ib->length_dw++] = incr; /* increment size */
1092 	ib->ptr[ib->length_dw++] = 0;
1093 	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1094 }
1095 
1096 /**
1097  * sdma_v7_0_ring_pad_ib - pad the IB
      * @ring: amdgpu ring pointer
1098  * @ib: indirect buffer to fill with padding
1099  *
1100  * Pad the IB with NOPs so that its length is a multiple of 8 dwords.
1101  */
1102 static void sdma_v7_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1103 {
1104 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1105 	u32 pad_count;
1106 	int i;
1107 
1108 	pad_count = (-ib->length_dw) & 0x7;
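	/* e.g. a hypothetical length_dw of 11 gives pad_count = (-11) & 0x7 = 5,
	 * for a final length of 16 dwords
	 */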
1109 	for (i = 0; i < pad_count; i++)
1110 		if (sdma && sdma->burst_nop && (i == 0))
1111 			ib->ptr[ib->length_dw++] =
1112 				SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP) |
1113 				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1114 		else
1115 			ib->ptr[ib->length_dw++] =
1116 				SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_NOP);
1117 }
1118 
1119 /**
1120  * sdma_v7_0_ring_emit_pipeline_sync - sync the pipeline
1121  *
1122  * @ring: amdgpu_ring pointer
1123  *
1124  * Make sure all previously submitted operations have completed.
1125  */
1126 static void sdma_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1127 {
1128 	uint32_t seq = ring->fence_drv.sync_seq;
1129 	uint64_t addr = ring->fence_drv.gpu_addr;
1130 
1131 	/* wait for idle */
1132 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1133 			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1134 			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
1135 			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
1136 	amdgpu_ring_write(ring, addr & 0xfffffffc);
1137 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1138 	amdgpu_ring_write(ring, seq); /* reference */
1139 	amdgpu_ring_write(ring, 0xffffffff); /* mask */
1140 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1141 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1142 }
1143 
1144 /**
1145  * sdma_v7_0_ring_emit_vm_flush - vm flush using sDMA
1146  *
1147  * @ring: amdgpu_ring pointer
1148  * @vmid: vmid number to use
      * @pd_addr: address of the page directory
1149  *
1150  * Update the page table base and flush the VM TLB
1151  * using sDMA.
1152  */
1153 static void sdma_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1154 					 unsigned vmid, uint64_t pd_addr)
1155 {
1156 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1157 }
1158 
1159 static void sdma_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
1160 				     uint32_t reg, uint32_t val)
1161 {
1162 	/* The SRBM WRITE command is not supported on SDMA v7.
1163 	 * Use the Register WRITE command instead; its opcode is the same as SRBM WRITE.
1164 	 */
1165 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_SRBM_WRITE));
1166 	amdgpu_ring_write(ring, reg << 2);
1167 	amdgpu_ring_write(ring, val);
1168 }
1169 
1170 static void sdma_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1171 					 uint32_t val, uint32_t mask)
1172 {
1173 	amdgpu_ring_write(ring, SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_POLL_REGMEM) |
1174 			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
1175 			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* equal */
1176 	amdgpu_ring_write(ring, reg << 2);
1177 	amdgpu_ring_write(ring, 0);
1178 	amdgpu_ring_write(ring, val); /* reference */
1179 	amdgpu_ring_write(ring, mask); /* mask */
1180 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1181 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10));
1182 }
1183 
1184 static void sdma_v7_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
1185 						   uint32_t reg0, uint32_t reg1,
1186 						   uint32_t ref, uint32_t mask)
1187 {
1188 	amdgpu_ring_emit_wreg(ring, reg0, ref);
1189 	/* wait for a cycle to reset vm_inv_eng*_ack */
1190 	amdgpu_ring_emit_reg_wait(ring, reg0, 0, 0);
1191 	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
1192 }
1193 
1194 static int sdma_v7_0_early_init(void *handle)
1195 {
1196 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1197 	int r;
1198 
1199 	r = amdgpu_sdma_init_microcode(adev, 0, true);
1200 	if (r) {
1201 		DRM_ERROR("Failed to init sdma firmware!\n");
1202 		return r;
1203 	}
1204 
1205 	sdma_v7_0_set_ring_funcs(adev);
1206 	sdma_v7_0_set_buffer_funcs(adev);
1207 	sdma_v7_0_set_vm_pte_funcs(adev);
1208 	sdma_v7_0_set_irq_funcs(adev);
1209 	sdma_v7_0_set_mqd_funcs(adev);
1210 
1211 	return 0;
1212 }
1213 
1214 static int sdma_v7_0_sw_init(void *handle)
1215 {
1216 	struct amdgpu_ring *ring;
1217 	int r, i;
1218 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1219 
1220 	/* SDMA trap event */
1221 	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_GFX,
1222 			      GFX_11_0_0__SRCID__SDMA_TRAP,
1223 			      &adev->sdma.trap_irq);
1224 	if (r)
1225 		return r;
1226 
1227 	for (i = 0; i < adev->sdma.num_instances; i++) {
1228 		ring = &adev->sdma.instance[i].ring;
1229 		ring->ring_obj = NULL;
1230 		ring->use_doorbell = true;
1231 		ring->me = i;
1232 
1233 		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1234 				ring->use_doorbell?"true":"false");
1235 
1236 		ring->doorbell_index =
1237 			(adev->doorbell_index.sdma_engine[i] << 1); // get DWORD offset
1238 
1239 		ring->vm_hub = AMDGPU_GFXHUB(0);
1240 		sprintf(ring->name, "sdma%d", i);
1241 		r = amdgpu_ring_init(adev, ring, 1024,
1242 				     &adev->sdma.trap_irq,
1243 				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1244 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
1245 		if (r)
1246 			return r;
1247 	}
1248 
1249 	return r;
1250 }
1251 
1252 static int sdma_v7_0_sw_fini(void *handle)
1253 {
1254 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1255 	int i;
1256 
1257 	for (i = 0; i < adev->sdma.num_instances; i++)
1258 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1259 
1260 	amdgpu_sdma_destroy_inst_ctx(adev, true);
1261 
1262 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)
1263 		sdma_v12_0_free_ucode_buffer(adev);
1264 
1265 	return 0;
1266 }
1267 
1268 static int sdma_v7_0_hw_init(void *handle)
1269 {
1270 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1271 
1272 	return sdma_v7_0_start(adev);
1273 }
1274 
1275 static int sdma_v7_0_hw_fini(void *handle)
1276 {
1277 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1278 
1279 	if (amdgpu_sriov_vf(adev))
1280 		return 0;
1281 
1282 	sdma_v7_0_ctx_switch_enable(adev, false);
1283 	sdma_v7_0_enable(adev, false);
1284 
1285 	return 0;
1286 }
1287 
1288 static int sdma_v7_0_suspend(void *handle)
1289 {
1290 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1291 
1292 	return sdma_v7_0_hw_fini(adev);
1293 }
1294 
1295 static int sdma_v7_0_resume(void *handle)
1296 {
1297 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1298 
1299 	return sdma_v7_0_hw_init(adev);
1300 }
1301 
1302 static bool sdma_v7_0_is_idle(void *handle)
1303 {
1304 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1305 	u32 i;
1306 
1307 	for (i = 0; i < adev->sdma.num_instances; i++) {
1308 		u32 tmp = RREG32(sdma_v7_0_get_reg_offset(adev, i, regSDMA0_STATUS_REG));
1309 
1310 		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
1311 			return false;
1312 	}
1313 
1314 	return true;
1315 }
1316 
1317 static int sdma_v7_0_wait_for_idle(void *handle)
1318 {
1319 	unsigned i;
1320 	u32 sdma0, sdma1;
1321 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1322 
1323 	for (i = 0; i < adev->usec_timeout; i++) {
1324 		sdma0 = RREG32(sdma_v7_0_get_reg_offset(adev, 0, regSDMA0_STATUS_REG));
1325 		sdma1 = RREG32(sdma_v7_0_get_reg_offset(adev, 1, regSDMA0_STATUS_REG));
1326 
1327 		if (sdma0 & sdma1 & SDMA0_STATUS_REG__IDLE_MASK)
1328 			return 0;
1329 		udelay(1);
1330 	}
1331 	return -ETIMEDOUT;
1332 }
1333 
1334 static int sdma_v7_0_ring_preempt_ib(struct amdgpu_ring *ring)
1335 {
1336 	int i, r = 0;
1337 	struct amdgpu_device *adev = ring->adev;
1338 	u32 index = 0;
1339 	u64 sdma_gfx_preempt;
1340 
1341 	amdgpu_sdma_get_index_from_ring(ring, &index);
1342 	sdma_gfx_preempt =
1343 		sdma_v7_0_get_reg_offset(adev, index, regSDMA0_QUEUE0_PREEMPT);
1344 
1345 	/* assert preemption condition */
1346 	amdgpu_ring_set_preempt_cond_exec(ring, false);
1347 
1348 	/* emit the trailing fence */
1349 	ring->trail_seq += 1;
1350 	amdgpu_ring_alloc(ring, 10);
1351 	sdma_v7_0_ring_emit_fence(ring, ring->trail_fence_gpu_addr,
1352 				  ring->trail_seq, 0);
1353 	amdgpu_ring_commit(ring);
1354 
1355 	/* assert IB preemption */
1356 	WREG32(sdma_gfx_preempt, 1);
1357 
1358 	/* poll the trailing fence */
1359 	for (i = 0; i < adev->usec_timeout; i++) {
1360 		if (ring->trail_seq ==
1361 		    le32_to_cpu(*(ring->trail_fence_cpu_addr)))
1362 			break;
1363 		udelay(1);
1364 	}
1365 
1366 	if (i >= adev->usec_timeout) {
1367 		r = -EINVAL;
1368 		DRM_ERROR("ring %d failed to be preempted\n", ring->idx);
1369 	}
1370 
1371 	/* deassert IB preemption */
1372 	WREG32(sdma_gfx_preempt, 0);
1373 
1374 	/* deassert the preemption condition */
1375 	amdgpu_ring_set_preempt_cond_exec(ring, true);
1376 	return r;
1377 }
1378 
1379 static int sdma_v7_0_set_trap_irq_state(struct amdgpu_device *adev,
1380 					struct amdgpu_irq_src *source,
1381 					unsigned type,
1382 					enum amdgpu_interrupt_state state)
1383 {
1384 	u32 sdma_cntl;
1385 
1386 	u32 reg_offset = sdma_v7_0_get_reg_offset(adev, type, regSDMA0_CNTL);
1387 
1388 	sdma_cntl = RREG32(reg_offset);
1389 	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
1390 		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1391 	WREG32(reg_offset, sdma_cntl);
1392 
1393 	return 0;
1394 }
1395 
1396 static int sdma_v7_0_process_trap_irq(struct amdgpu_device *adev,
1397 				      struct amdgpu_irq_src *source,
1398 				      struct amdgpu_iv_entry *entry)
1399 {
1400 	int instances, queue;
1401 	uint32_t mes_queue_id = entry->src_data[0];
1402 
1403 	DRM_DEBUG("IH: SDMA trap\n");
1404 
1405 	if (adev->enable_mes && (mes_queue_id & AMDGPU_FENCE_MES_QUEUE_FLAG)) {
1406 		struct amdgpu_mes_queue *queue;
1407 
1408 		mes_queue_id &= AMDGPU_FENCE_MES_QUEUE_ID_MASK;
1409 
1410 		spin_lock(&adev->mes.queue_id_lock);
1411 		queue = idr_find(&adev->mes.queue_id_idr, mes_queue_id);
1412 		if (queue) {
1413 			DRM_DEBUG("process sdma queue id = %d\n", mes_queue_id);
1414 			amdgpu_fence_process(queue->ring);
1415 		}
1416 		spin_unlock(&adev->mes.queue_id_lock);
1417 		return 0;
1418 	}
1419 
1420 	queue = entry->ring_id & 0xf;
1421 	instances = (entry->ring_id & 0xf0) >> 4;
1422 	if (instances > 1) {
1423 		DRM_ERROR("IH: wrong ring_ID detected, as wrong sdma instance\n");
1424 		return -EINVAL;
1425 	}
1426 
1427 	switch (entry->client_id) {
1428 	case SOC21_IH_CLIENTID_GFX:
1429 		switch (queue) {
1430 		case 0:
1431 			amdgpu_fence_process(&adev->sdma.instance[instances].ring);
1432 			break;
1433 		default:
1434 			break;
1435 		}
1436 		break;
1437 	}
1438 	return 0;
1439 }
1440 
1441 static int sdma_v7_0_process_illegal_inst_irq(struct amdgpu_device *adev,
1442 					      struct amdgpu_irq_src *source,
1443 					      struct amdgpu_iv_entry *entry)
1444 {
1445 	return 0;
1446 }
1447 
1448 static int sdma_v7_0_set_clockgating_state(void *handle,
1449 					   enum amd_clockgating_state state)
1450 {
1451 	return 0;
1452 }
1453 
1454 static int sdma_v7_0_set_powergating_state(void *handle,
1455 					  enum amd_powergating_state state)
1456 {
1457 	return 0;
1458 }
1459 
1460 static void sdma_v7_0_get_clockgating_state(void *handle, u64 *flags)
1461 {
1462 }
1463 
1464 const struct amd_ip_funcs sdma_v7_0_ip_funcs = {
1465 	.name = "sdma_v7_0",
1466 	.early_init = sdma_v7_0_early_init,
1467 	.late_init = NULL,
1468 	.sw_init = sdma_v7_0_sw_init,
1469 	.sw_fini = sdma_v7_0_sw_fini,
1470 	.hw_init = sdma_v7_0_hw_init,
1471 	.hw_fini = sdma_v7_0_hw_fini,
1472 	.suspend = sdma_v7_0_suspend,
1473 	.resume = sdma_v7_0_resume,
1474 	.is_idle = sdma_v7_0_is_idle,
1475 	.wait_for_idle = sdma_v7_0_wait_for_idle,
1476 	.soft_reset = sdma_v7_0_soft_reset,
1477 	.check_soft_reset = sdma_v7_0_check_soft_reset,
1478 	.set_clockgating_state = sdma_v7_0_set_clockgating_state,
1479 	.set_powergating_state = sdma_v7_0_set_powergating_state,
1480 	.get_clockgating_state = sdma_v7_0_get_clockgating_state,
1481 };
1482 
1483 static const struct amdgpu_ring_funcs sdma_v7_0_ring_funcs = {
1484 	.type = AMDGPU_RING_TYPE_SDMA,
1485 	.align_mask = 0xf,
1486 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
1487 	.support_64bit_ptrs = true,
1488 	.secure_submission_supported = true,
1489 	.get_rptr = sdma_v7_0_ring_get_rptr,
1490 	.get_wptr = sdma_v7_0_ring_get_wptr,
1491 	.set_wptr = sdma_v7_0_ring_set_wptr,
1492 	.emit_frame_size =
1493 		5 + /* sdma_v7_0_ring_init_cond_exec */
1494 		6 + /* sdma_v7_0_ring_emit_hdp_flush */
1495 		6 + /* sdma_v7_0_ring_emit_pipeline_sync */
1496 		/* sdma_v7_0_ring_emit_vm_flush */
1497 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
1498 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
1499 		10 + 10 + 10, /* sdma_v7_0_ring_emit_fence x3 for user fence, vm fence */
1500 	.emit_ib_size = 5 + 7 + 6, /* sdma_v7_0_ring_emit_ib */
1501 	.emit_ib = sdma_v7_0_ring_emit_ib,
1502 	.emit_mem_sync = sdma_v7_0_ring_emit_mem_sync,
1503 	.emit_fence = sdma_v7_0_ring_emit_fence,
1504 	.emit_pipeline_sync = sdma_v7_0_ring_emit_pipeline_sync,
1505 	.emit_vm_flush = sdma_v7_0_ring_emit_vm_flush,
1506 	.emit_hdp_flush = sdma_v7_0_ring_emit_hdp_flush,
1507 	.test_ring = sdma_v7_0_ring_test_ring,
1508 	.test_ib = sdma_v7_0_ring_test_ib,
1509 	.insert_nop = sdma_v7_0_ring_insert_nop,
1510 	.pad_ib = sdma_v7_0_ring_pad_ib,
1511 	.emit_wreg = sdma_v7_0_ring_emit_wreg,
1512 	.emit_reg_wait = sdma_v7_0_ring_emit_reg_wait,
1513 	.emit_reg_write_reg_wait = sdma_v7_0_ring_emit_reg_write_reg_wait,
1514 	.init_cond_exec = sdma_v7_0_ring_init_cond_exec,
1515 	.preempt_ib = sdma_v7_0_ring_preempt_ib,
1516 };
1517 
1518 static void sdma_v7_0_set_ring_funcs(struct amdgpu_device *adev)
1519 {
1520 	int i;
1521 
1522 	for (i = 0; i < adev->sdma.num_instances; i++) {
1523 		adev->sdma.instance[i].ring.funcs = &sdma_v7_0_ring_funcs;
1524 		adev->sdma.instance[i].ring.me = i;
1525 	}
1526 }
1527 
1528 static const struct amdgpu_irq_src_funcs sdma_v7_0_trap_irq_funcs = {
1529 	.set = sdma_v7_0_set_trap_irq_state,
1530 	.process = sdma_v7_0_process_trap_irq,
1531 };
1532 
1533 static const struct amdgpu_irq_src_funcs sdma_v7_0_illegal_inst_irq_funcs = {
1534 	.process = sdma_v7_0_process_illegal_inst_irq,
1535 };
1536 
1537 static void sdma_v7_0_set_irq_funcs(struct amdgpu_device *adev)
1538 {
1539 	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE0 +
1540 					adev->sdma.num_instances;
1541 	adev->sdma.trap_irq.funcs = &sdma_v7_0_trap_irq_funcs;
1542 	adev->sdma.illegal_inst_irq.funcs = &sdma_v7_0_illegal_inst_irq_funcs;
1543 }
1544 
1545 /**
1546  * sdma_v7_0_emit_copy_buffer - copy buffer using the sDMA engine
1547  *
1548  * @ib: indirect buffer to fill with commands
1549  * @src_offset: src GPU address
1550  * @dst_offset: dst GPU address
1551  * @byte_count: number of bytes to xfer
1552  * @copy_flags: flags for the copy
1553  *
1554  * Copy GPU buffers using the DMA engine.
1555  * Used by the amdgpu ttm implementation to move pages if
1556  * registered as the asic copy callback.
1557  */
1558 static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
1559 				       uint64_t src_offset,
1560 				       uint64_t dst_offset,
1561 				       uint32_t byte_count,
1562 				       uint32_t copy_flags)
1563 {
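	/* COPY_LINEAR packet: header, byte count minus one, the src/dst
	 * endian-swap dword, then the source and destination addresses --
	 * 7 dwords in total, matching copy_num_dw below.
	 */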
1564 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
1565 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
1566 		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
1567 	ib->ptr[ib->length_dw++] = byte_count - 1;
1568 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1569 	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
1570 	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
1571 	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1572 	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1573 }
1574 
1575 /**
1576  * sdma_v7_0_emit_fill_buffer - fill buffer using the sDMA engine
1577  *
1578  * @ib: indirect buffer to fill with commands
1579  * @src_data: value to write to buffer
1580  * @dst_offset: dst GPU address
1581  * @byte_count: number of bytes to xfer
1582  *
1583  * Fill GPU buffers using the DMA engine.
1584  */
1585 static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
1586 				       uint32_t src_data,
1587 				       uint64_t dst_offset,
1588 				       uint32_t byte_count)
1589 {
1590 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_CONST_FILL);
1591 	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
1592 	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
1593 	ib->ptr[ib->length_dw++] = src_data;
1594 	ib->ptr[ib->length_dw++] = byte_count - 1;
1595 }
1596 
1597 static const struct amdgpu_buffer_funcs sdma_v7_0_buffer_funcs = {
1598 	.copy_max_bytes = 0x400000,
1599 	.copy_num_dw = 7,
1600 	.emit_copy_buffer = sdma_v7_0_emit_copy_buffer,
1601 
1602 	.fill_max_bytes = 0x400000,
1603 	.fill_num_dw = 5,
1604 	.emit_fill_buffer = sdma_v7_0_emit_fill_buffer,
1605 };
1606 
1607 static void sdma_v7_0_set_buffer_funcs(struct amdgpu_device *adev)
1608 {
1609 	adev->mman.buffer_funcs = &sdma_v7_0_buffer_funcs;
1610 	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
1611 }
1612 
1613 static const struct amdgpu_vm_pte_funcs sdma_v7_0_vm_pte_funcs = {
1614 	.copy_pte_num_dw = 7,
1615 	.copy_pte = sdma_v7_0_vm_copy_pte,
1616 	.write_pte = sdma_v7_0_vm_write_pte,
1617 	.set_pte_pde = sdma_v7_0_vm_set_pte_pde,
1618 };
1619 
1620 static void sdma_v7_0_set_vm_pte_funcs(struct amdgpu_device *adev)
1621 {
1622 	unsigned i;
1623 
1624 	adev->vm_manager.vm_pte_funcs = &sdma_v7_0_vm_pte_funcs;
1625 	for (i = 0; i < adev->sdma.num_instances; i++) {
1626 		adev->vm_manager.vm_pte_scheds[i] =
1627 			&adev->sdma.instance[i].ring.sched;
1628 	}
1629 	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
1630 }
1631 
1632 const struct amdgpu_ip_block_version sdma_v7_0_ip_block = {
1633 	.type = AMD_IP_BLOCK_TYPE_SDMA,
1634 	.major = 7,
1635 	.minor = 0,
1636 	.rev = 0,
1637 	.funcs = &sdma_v7_0_ip_funcs,
1638 };
1639