xref: /linux/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c (revision fcab107abe1ab5be9dbe874baa722372da8f4f73)
1 /*
2  * Copyright 2022 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include <linux/delay.h>
25 #include <linux/firmware.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 
29 #include "amdgpu.h"
30 #include "amdgpu_xcp.h"
31 #include "amdgpu_ucode.h"
32 #include "amdgpu_trace.h"
33 #include "amdgpu_reset.h"
34 
35 #include "sdma/sdma_4_4_2_offset.h"
36 #include "sdma/sdma_4_4_2_sh_mask.h"
37 
38 #include "soc15_common.h"
39 #include "soc15.h"
40 #include "vega10_sdma_pkt_open.h"
41 
42 #include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
43 #include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
44 
45 #include "amdgpu_ras.h"
46 
47 MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin");
48 MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin");
49 
50 static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = {
51 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS_REG),
52 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS1_REG),
53 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS2_REG),
54 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_STATUS3_REG),
55 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UCODE_CHECKSUM),
56 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH_HI),
57 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RB_RPTR_FETCH),
58 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_STATUS),
59 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_STATUS),
60 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK0),
61 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_RD_XNACK1),
62 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK0),
63 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_UTCL1_WR_XNACK1),
64 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_CNTL),
65 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR),
66 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_RPTR_HI),
67 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR),
68 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_RB_WPTR_HI),
69 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_OFFSET),
70 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_LO),
71 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_BASE_HI),
72 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_CNTL),
73 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_RPTR),
74 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_IB_SUB_REMAIN),
75 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_GFX_DUMMY_REG),
76 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_CNTL),
77 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR),
78 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_RPTR_HI),
79 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR),
80 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_RB_WPTR_HI),
81 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_OFFSET),
82 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_LO),
83 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_IB_BASE_HI),
84 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_PAGE_DUMMY_REG),
85 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_CNTL),
86 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR),
87 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_RPTR_HI),
88 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR),
89 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_RB_WPTR_HI),
90 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_OFFSET),
91 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_LO),
92 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_IB_BASE_HI),
93 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_RLC0_DUMMY_REG),
94 	SOC15_REG_ENTRY_STR(GC, 0, regSDMA_VM_CNTL)
95 };
96 
97 #define mmSMNAID_AID0_MCA_SMU 0x03b30400
98 
99 #define WREG32_SDMA(instance, offset, value) \
100 	WREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)), value)
101 #define RREG32_SDMA(instance, offset) \
102 	RREG32(sdma_v4_4_2_get_reg_offset(adev, (instance), (offset)))
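/*
 * Usage sketch (assumes a local "adev" pointer is in scope, since both
 * macros reference it implicitly):
 *   u32 status = RREG32_SDMA(0, regSDMA_STATUS_REG);
 *   WREG32_SDMA(0, regSDMA_CNTL, status);
 */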
103 
104 static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev);
105 static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev);
106 static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev);
107 static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev);
108 static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev);
109 static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev);
110 static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring);
111 static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring);
112 
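/**
 * sdma_v4_4_2_get_reg_offset - compute the register offset for an instance
 *
 * @adev: amdgpu_device pointer
 * @instance: logical SDMA instance number
 * @offset: register offset within the instance register space
 *
 * Translate the logical instance to the physical one and return the
 * absolute offset of @offset for that instance.
 */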
113 static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev,
114 		u32 instance, u32 offset)
115 {
116 	u32 dev_inst = GET_INST(SDMA0, instance);
117 
118 	return (adev->reg_offset[SDMA0_HWIP][dev_inst][0] + offset);
119 }
120 
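/**
 * sdma_v4_4_2_seq_to_irq_id - map an SDMA sequence number to an IH client id
 *
 * @seq_num: SDMA instance sequence number
 *
 * Returns the SOC15 IH client id for the instance, or -EINVAL if the
 * sequence number is out of range.
 */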
121 static unsigned sdma_v4_4_2_seq_to_irq_id(int seq_num)
122 {
123 	switch (seq_num) {
124 	case 0:
125 		return SOC15_IH_CLIENTID_SDMA0;
126 	case 1:
127 		return SOC15_IH_CLIENTID_SDMA1;
128 	case 2:
129 		return SOC15_IH_CLIENTID_SDMA2;
130 	case 3:
131 		return SOC15_IH_CLIENTID_SDMA3;
132 	default:
133 		return -EINVAL;
134 	}
135 }
136 
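/**
 * sdma_v4_4_2_irq_id_to_seq - map an IH client id back to an SDMA instance
 *
 * @adev: amdgpu_device pointer
 * @client_id: SOC15 IH client id from the interrupt entry
 *
 * Returns the SDMA instance sequence number, or -EINVAL for an unknown
 * client id. Under SRIOV with a single XCC, the SDMA2/SDMA3 client ids
 * are remapped to instances 0/1.
 */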
137 static int sdma_v4_4_2_irq_id_to_seq(struct amdgpu_device *adev, unsigned client_id)
138 {
139 	switch (client_id) {
140 	case SOC15_IH_CLIENTID_SDMA0:
141 		return 0;
142 	case SOC15_IH_CLIENTID_SDMA1:
143 		return 1;
144 	case SOC15_IH_CLIENTID_SDMA2:
145 		if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1))
146 			return 0;
147 		else
148 			return 2;
149 	case SOC15_IH_CLIENTID_SDMA3:
150 		if (amdgpu_sriov_vf(adev) && (adev->gfx.xcc_mask == 0x1))
151 			return 1;
152 		else
153 			return 3;
154 	default:
155 		return -EINVAL;
156 	}
157 }
158 
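/**
 * sdma_v4_4_2_inst_init_golden_registers - program golden register settings
 *
 * @adev: amdgpu_device pointer
 * @inst_mask: mask of dma engine instances to be initialized
 *
 * Program the GB_ADDR_CONFIG registers of the SDMA instances with the
 * expected NUM_BANKS and PIPE_INTERLEAVE_SIZE settings.
 */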
159 static void sdma_v4_4_2_inst_init_golden_registers(struct amdgpu_device *adev,
160 						   uint32_t inst_mask)
161 {
162 	u32 val;
163 	int i;
164 
165 	for (i = 0; i < adev->sdma.num_instances; i++) {
166 		val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG);
167 		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG, NUM_BANKS, 4);
168 		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG,
169 				    PIPE_INTERLEAVE_SIZE, 0);
170 		WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG, val);
171 
172 		val = RREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ);
173 		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ, NUM_BANKS,
174 				    4);
175 		val = REG_SET_FIELD(val, SDMA_GB_ADDR_CONFIG_READ,
176 				    PIPE_INTERLEAVE_SIZE, 0);
177 		WREG32_SDMA(i, regSDMA_GB_ADDR_CONFIG_READ, val);
178 	}
179 }
180 
181 /**
182  * sdma_v4_4_2_init_microcode - load ucode images from disk
183  *
184  * @adev: amdgpu_device pointer
185  *
186  * Use the firmware interface to load the ucode images into
187  * the driver (not loaded into hw).
188  * Returns 0 on success, error on failure.
189  */
190 static int sdma_v4_4_2_init_microcode(struct amdgpu_device *adev)
191 {
192 	int ret, i;
193 
194 	for (i = 0; i < adev->sdma.num_instances; i++) {
195 		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
196 		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
197 		    amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
198 			ret = amdgpu_sdma_init_microcode(adev, 0, true);
199 			break;
200 		} else {
201 			ret = amdgpu_sdma_init_microcode(adev, i, false);
202 			if (ret)
203 				return ret;
204 		}
205 	}
206 
207 	return ret;
208 }
209 
210 /**
211  * sdma_v4_4_2_ring_get_rptr - get the current read pointer
212  *
213  * @ring: amdgpu ring pointer
214  *
215  * Get the current rptr from the hardware.
216  */
217 static uint64_t sdma_v4_4_2_ring_get_rptr(struct amdgpu_ring *ring)
218 {
219 	u64 rptr;
220 
221 	/* XXX check if swapping is necessary on BE */
222 	rptr = READ_ONCE(*((u64 *)&ring->adev->wb.wb[ring->rptr_offs]));
223 
224 	DRM_DEBUG("rptr before shift == 0x%016llx\n", rptr);
225 	return rptr >> 2;
226 }
227 
228 /**
229  * sdma_v4_4_2_ring_get_wptr - get the current write pointer
230  *
231  * @ring: amdgpu ring pointer
232  *
233  * Get the current wptr from the hardware.
234  */
235 static uint64_t sdma_v4_4_2_ring_get_wptr(struct amdgpu_ring *ring)
236 {
237 	struct amdgpu_device *adev = ring->adev;
238 	u64 wptr;
239 
240 	if (ring->use_doorbell) {
241 		/* XXX check if swapping is necessary on BE */
242 		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
243 		DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
244 	} else {
245 		wptr = RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI);
246 		wptr = wptr << 32;
247 		wptr |= RREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR);
248 		DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n",
249 				ring->me, wptr);
250 	}
251 
252 	return wptr >> 2;
253 }
254 
255 /**
256  * sdma_v4_4_2_ring_set_wptr - commit the write pointer
257  *
258  * @ring: amdgpu ring pointer
259  *
260  * Write the wptr back to the hardware.
261  */
262 static void sdma_v4_4_2_ring_set_wptr(struct amdgpu_ring *ring)
263 {
264 	struct amdgpu_device *adev = ring->adev;
265 
266 	DRM_DEBUG("Setting write pointer\n");
267 	if (ring->use_doorbell) {
268 		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
269 
270 		DRM_DEBUG("Using doorbell -- "
271 				"wptr_offs == 0x%08x "
272 				"lower_32_bits(ring->wptr) << 2 == 0x%08x "
273 				"upper_32_bits(ring->wptr) << 2 == 0x%08x\n",
274 				ring->wptr_offs,
275 				lower_32_bits(ring->wptr << 2),
276 				upper_32_bits(ring->wptr << 2));
277 		/* XXX check if swapping is necessary on BE */
278 		WRITE_ONCE(*wb, (ring->wptr << 2));
279 		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
280 				ring->doorbell_index, ring->wptr << 2);
281 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
282 	} else {
283 		DRM_DEBUG("Not using doorbell -- "
284 				"regSDMA%i_GFX_RB_WPTR == 0x%08x "
285 				"regSDMA%i_GFX_RB_WPTR_HI == 0x%08x\n",
286 				ring->me,
287 				lower_32_bits(ring->wptr << 2),
288 				ring->me,
289 				upper_32_bits(ring->wptr << 2));
290 		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR,
291 			    lower_32_bits(ring->wptr << 2));
292 		WREG32_SDMA(ring->me, regSDMA_GFX_RB_WPTR_HI,
293 			    upper_32_bits(ring->wptr << 2));
294 	}
295 }
296 
297 /**
298  * sdma_v4_4_2_page_ring_get_wptr - get the current write pointer
299  *
300  * @ring: amdgpu ring pointer
301  *
302  * Get the current wptr from the hardware.
303  */
304 static uint64_t sdma_v4_4_2_page_ring_get_wptr(struct amdgpu_ring *ring)
305 {
306 	struct amdgpu_device *adev = ring->adev;
307 	u64 wptr;
308 
309 	if (ring->use_doorbell) {
310 		/* XXX check if swapping is necessary on BE */
311 		wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
312 	} else {
313 		wptr = RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI);
314 		wptr = wptr << 32;
315 		wptr |= RREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR);
316 	}
317 
318 	return wptr >> 2;
319 }
320 
321 /**
322  * sdma_v4_4_2_page_ring_set_wptr - commit the write pointer
323  *
324  * @ring: amdgpu ring pointer
325  *
326  * Write the wptr back to the hardware.
327  */
328 static void sdma_v4_4_2_page_ring_set_wptr(struct amdgpu_ring *ring)
329 {
330 	struct amdgpu_device *adev = ring->adev;
331 
332 	if (ring->use_doorbell) {
333 		u64 *wb = (u64 *)&adev->wb.wb[ring->wptr_offs];
334 
335 		/* XXX check if swapping is necessary on BE */
336 		WRITE_ONCE(*wb, (ring->wptr << 2));
337 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
338 	} else {
339 		uint64_t wptr = ring->wptr << 2;
340 
341 		WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR,
342 			    lower_32_bits(wptr));
343 		WREG32_SDMA(ring->me, regSDMA_PAGE_RB_WPTR_HI,
344 			    upper_32_bits(wptr));
345 	}
346 }
347 
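/**
 * sdma_v4_4_2_ring_insert_nop - insert NOP packets into the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad the ring with NOPs. When the firmware supports burst NOPs, the first
 * dword carries a count so the remaining padding is consumed as one packet.
 */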
348 static void sdma_v4_4_2_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
349 {
350 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
351 	int i;
352 
353 	for (i = 0; i < count; i++)
354 		if (sdma && sdma->burst_nop && (i == 0))
355 			amdgpu_ring_write(ring, ring->funcs->nop |
356 				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
357 		else
358 			amdgpu_ring_write(ring, ring->funcs->nop);
359 }
360 
361 /**
362  * sdma_v4_4_2_ring_emit_ib - Schedule an IB on the DMA engine
363  *
364  * @ring: amdgpu ring pointer
365  * @job: job to retrieve vmid from
366  * @ib: IB object to schedule
367  * @flags: unused
368  *
369  * Schedule an IB in the DMA ring.
370  */
371 static void sdma_v4_4_2_ring_emit_ib(struct amdgpu_ring *ring,
372 				   struct amdgpu_job *job,
373 				   struct amdgpu_ib *ib,
374 				   uint32_t flags)
375 {
376 	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
377 
378 	/* IB packet must end on an 8 DW boundary */
379 	sdma_v4_4_2_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);
380 
381 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
382 			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
383 	/* base must be 32 byte aligned */
384 	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
385 	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
386 	amdgpu_ring_write(ring, ib->length_dw);
387 	amdgpu_ring_write(ring, 0);
388 	amdgpu_ring_write(ring, 0);
389 
390 }
391 
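/**
 * sdma_v4_4_2_wait_reg_mem - emit a POLL_REGMEM packet
 *
 * @ring: amdgpu ring pointer
 * @mem_space: 1 to poll a memory location, 0 to poll a register
 * @hdp: 1 if the poll is part of an HDP flush
 * @addr0: lower 32 bits of the memory address, or the register to poll
 * @addr1: upper 32 bits of the memory address, or a second register
 * @ref: reference value to compare against
 * @mask: mask applied to the polled value before the comparison
 * @inv: poll interval
 *
 * Emit a packet that polls a register or memory location until
 * (value & @mask) == @ref.
 */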
392 static void sdma_v4_4_2_wait_reg_mem(struct amdgpu_ring *ring,
393 				   int mem_space, int hdp,
394 				   uint32_t addr0, uint32_t addr1,
395 				   uint32_t ref, uint32_t mask,
396 				   uint32_t inv)
397 {
398 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
399 			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(hdp) |
400 			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(mem_space) |
401 			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
402 	if (mem_space) {
403 		/* memory */
404 		amdgpu_ring_write(ring, addr0);
405 		amdgpu_ring_write(ring, addr1);
406 	} else {
407 		/* registers */
408 		amdgpu_ring_write(ring, addr0 << 2);
409 		amdgpu_ring_write(ring, addr1 << 2);
410 	}
411 	amdgpu_ring_write(ring, ref); /* reference */
412 	amdgpu_ring_write(ring, mask); /* mask */
413 	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
414 			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(inv)); /* retry count, poll interval */
415 }
416 
417 /**
418  * sdma_v4_4_2_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
419  *
420  * @ring: amdgpu ring pointer
421  *
422  * Emit an hdp flush packet on the requested DMA ring.
423  */
424 static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
425 {
426 	struct amdgpu_device *adev = ring->adev;
427 	u32 ref_and_mask = 0;
428 	const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
429 
430 	ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
431 		       << (ring->me % adev->sdma.num_inst_per_aid);
432 
433 	sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
434 			       adev->nbio.funcs->get_hdp_flush_done_offset(adev),
435 			       adev->nbio.funcs->get_hdp_flush_req_offset(adev),
436 			       ref_and_mask, ref_and_mask, 10);
437 }
438 
439 /**
440  * sdma_v4_4_2_ring_emit_fence - emit a fence on the DMA ring
441  *
442  * @ring: amdgpu ring pointer
443  * @addr: address
444  * @seq: sequence number
445  * @flags: fence related flags
446  *
447  * Add a DMA fence packet to the ring to write
448  * the fence seq number and DMA trap packet to generate
449  * an interrupt if needed.
450  */
451 static void sdma_v4_4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
452 				      unsigned flags)
453 {
454 	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
455 	/* write the fence */
456 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
457 	/* zero in first two bits */
458 	BUG_ON(addr & 0x3);
459 	amdgpu_ring_write(ring, lower_32_bits(addr));
460 	amdgpu_ring_write(ring, upper_32_bits(addr));
461 	amdgpu_ring_write(ring, lower_32_bits(seq));
462 
463 	/* optionally write high bits as well */
464 	if (write64bit) {
465 		addr += 4;
466 		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
467 		/* zero in first two bits */
468 		BUG_ON(addr & 0x3);
469 		amdgpu_ring_write(ring, lower_32_bits(addr));
470 		amdgpu_ring_write(ring, upper_32_bits(addr));
471 		amdgpu_ring_write(ring, upper_32_bits(seq));
472 	}
473 
474 	/* generate an interrupt */
475 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
476 	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
477 }
478 
479 
480 /**
481  * sdma_v4_4_2_inst_gfx_stop - stop the gfx async dma engines
482  *
483  * @adev: amdgpu_device pointer
484  * @inst_mask: mask of dma engine instances to be disabled
485  *
486  * Stop the gfx async dma ring buffers.
487  */
488 static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
489 				      uint32_t inst_mask)
490 {
491 	struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
492 	u32 doorbell_offset, doorbell;
493 	u32 rb_cntl, ib_cntl;
494 	int i;
495 
496 	for_each_inst(i, inst_mask) {
497 		sdma[i] = &adev->sdma.instance[i].ring;
498 
499 		rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
500 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
501 		WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
502 		ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
503 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
504 		WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
505 
506 		if (sdma[i]->use_doorbell) {
507 			doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
508 			doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
509 
510 			doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, 0);
511 			doorbell_offset = REG_SET_FIELD(doorbell_offset,
512 					SDMA_GFX_DOORBELL_OFFSET,
513 					OFFSET, 0);
514 			WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
515 			WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
516 		}
517 	}
518 }
519 
520 /**
521  * sdma_v4_4_2_inst_rlc_stop - stop the compute async dma engines
522  *
523  * @adev: amdgpu_device pointer
524  * @inst_mask: mask of dma engine instances to be disabled
525  *
526  * Stop the compute async dma queues.
527  */
528 static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
529 				      uint32_t inst_mask)
530 {
531 	/* XXX todo */
532 }
533 
534 /**
535  * sdma_v4_4_2_inst_page_stop - stop the page async dma engines
536  *
537  * @adev: amdgpu_device pointer
538  * @inst_mask: mask of dma engine instances to be disabled
539  *
540  * Stop the page async dma ring buffers.
541  */
542 static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
543 				       uint32_t inst_mask)
544 {
545 	u32 rb_cntl, ib_cntl;
546 	int i;
547 
548 	for_each_inst(i, inst_mask) {
549 		rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
550 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
551 					RB_ENABLE, 0);
552 		WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
553 		ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
554 		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL,
555 					IB_ENABLE, 0);
556 		WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
557 	}
558 }
559 
560 /**
561  * sdma_v4_4_2_inst_ctx_switch_enable - enable/disable the async dma engines context switch
562  *
563  * @adev: amdgpu_device pointer
564  * @enable: enable/disable the DMA MEs context switch.
565  * @inst_mask: mask of dma engine instances to be enabled
566  *
567  * Halt or unhalt the async dma engines context switch.
568  */
569 static void sdma_v4_4_2_inst_ctx_switch_enable(struct amdgpu_device *adev,
570 					       bool enable, uint32_t inst_mask)
571 {
572 	u32 f32_cntl, phase_quantum = 0;
573 	int i;
574 
575 	if (amdgpu_sdma_phase_quantum) {
576 		unsigned value = amdgpu_sdma_phase_quantum;
577 		unsigned unit = 0;
578 
579 		while (value > (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
580 				SDMA_PHASE0_QUANTUM__VALUE__SHIFT)) {
581 			value = (value + 1) >> 1;
582 			unit++;
583 		}
584 		if (unit > (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
585 			    SDMA_PHASE0_QUANTUM__UNIT__SHIFT)) {
586 			value = (SDMA_PHASE0_QUANTUM__VALUE_MASK >>
587 				 SDMA_PHASE0_QUANTUM__VALUE__SHIFT);
588 			unit = (SDMA_PHASE0_QUANTUM__UNIT_MASK >>
589 				SDMA_PHASE0_QUANTUM__UNIT__SHIFT);
590 			WARN_ONCE(1,
591 			"clamping sdma_phase_quantum to %uK clock cycles\n",
592 				  value << unit);
593 		}
594 		phase_quantum =
595 			value << SDMA_PHASE0_QUANTUM__VALUE__SHIFT |
596 			unit  << SDMA_PHASE0_QUANTUM__UNIT__SHIFT;
597 	}
598 
599 	for_each_inst(i, inst_mask) {
600 		f32_cntl = RREG32_SDMA(i, regSDMA_CNTL);
601 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_CNTL,
602 				AUTO_CTXSW_ENABLE, enable ? 1 : 0);
603 		if (enable && amdgpu_sdma_phase_quantum) {
604 			WREG32_SDMA(i, regSDMA_PHASE0_QUANTUM, phase_quantum);
605 			WREG32_SDMA(i, regSDMA_PHASE1_QUANTUM, phase_quantum);
606 			WREG32_SDMA(i, regSDMA_PHASE2_QUANTUM, phase_quantum);
607 		}
608 		WREG32_SDMA(i, regSDMA_CNTL, f32_cntl);
609 
610 		/* Extend page fault timeout to avoid interrupt storm */
611 		WREG32_SDMA(i, regSDMA_UTCL1_TIMEOUT, 0x00800080);
612 	}
613 }
614 
615 /**
616  * sdma_v4_4_2_inst_enable - enable/disable the async dma engines
617  *
618  * @adev: amdgpu_device pointer
619  * @enable: enable/disable the DMA MEs.
620  * @inst_mask: mask of dma engine instances to be enabled
621  *
622  * Halt or unhalt the async dma engines.
623  */
624 static void sdma_v4_4_2_inst_enable(struct amdgpu_device *adev, bool enable,
625 				    uint32_t inst_mask)
626 {
627 	u32 f32_cntl;
628 	int i;
629 
630 	if (!enable) {
631 		sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
632 		sdma_v4_4_2_inst_rlc_stop(adev, inst_mask);
633 		if (adev->sdma.has_page_queue)
634 			sdma_v4_4_2_inst_page_stop(adev, inst_mask);
635 
636 		/* SDMA FW needs to respond to FREEZE requests during reset.
637 		 * Keep it running during reset */
638 		if (!amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
639 			return;
640 	}
641 
642 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
643 		return;
644 
645 	for_each_inst(i, inst_mask) {
646 		f32_cntl = RREG32_SDMA(i, regSDMA_F32_CNTL);
647 		f32_cntl = REG_SET_FIELD(f32_cntl, SDMA_F32_CNTL, HALT, enable ? 0 : 1);
648 		WREG32_SDMA(i, regSDMA_F32_CNTL, f32_cntl);
649 	}
650 }
651 
652 /*
653  * sdma_v4_4_2_rb_cntl - get parameters for rb_cntl
654  */
655 static uint32_t sdma_v4_4_2_rb_cntl(struct amdgpu_ring *ring, uint32_t rb_cntl)
656 {
657 	/* Set ring buffer size in dwords */
658 	uint32_t rb_bufsz = order_base_2(ring->ring_size / 4);
659 
660 	barrier(); /* work around https://llvm.org/pr42576 */
661 	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
662 #ifdef __BIG_ENDIAN
663 	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
664 	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
665 				RPTR_WRITEBACK_SWAP_ENABLE, 1);
666 #endif
667 	return rb_cntl;
668 }
669 
670 /**
671  * sdma_v4_4_2_gfx_resume - setup and start the async dma engines
672  *
673  * @adev: amdgpu_device pointer
674  * @i: instance to resume
675  * @restore: whether to restore the cached rptr/wptr after a queue reset
676  *
677  * Set up the gfx DMA ring buffers and enable them.
679  */
680 static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
681 {
682 	struct amdgpu_ring *ring = &adev->sdma.instance[i].ring;
683 	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
684 	u32 wb_offset;
685 	u32 doorbell;
686 	u32 doorbell_offset;
687 	u64 wptr_gpu_addr;
688 	u64 rwptr;
689 
690 	wb_offset = (ring->rptr_offs * 4);
691 
692 	rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
693 	rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
694 	WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
695 
696 	/* set the wb address whether it's enabled or not */
697 	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
698 	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
699 	WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_LO,
700 	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
701 
702 	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL,
703 				RPTR_WRITEBACK_ENABLE, 1);
704 
705 	WREG32_SDMA(i, regSDMA_GFX_RB_BASE, ring->gpu_addr >> 8);
706 	WREG32_SDMA(i, regSDMA_GFX_RB_BASE_HI, ring->gpu_addr >> 40);
707 
708 	if (!restore)
709 		ring->wptr = 0;
710 
711 	/* before programming wptr to a smaller value, set minor_ptr_update first */
712 	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
713 
714 	/* For the guilty queue, set RPTR to the current wptr to skip bad commands;
715 	 * for a non-guilty queue, restore the cached rptr and continue execution.
716 	 */
717 	if (adev->sdma.instance[i].gfx_guilty)
718 		rwptr = ring->wptr;
719 	else
720 		rwptr = ring->cached_rptr;
721 
722 	/* Initialize the ring buffer's read and write pointers */
723 	if (restore) {
724 		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, lower_32_bits(rwptr << 2));
725 		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, upper_32_bits(rwptr << 2));
726 		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, lower_32_bits(rwptr << 2));
727 		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, upper_32_bits(rwptr << 2));
728 	} else {
729 		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
730 		WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
731 		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
732 		WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
733 	}
734 
735 	doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
736 	doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
737 
738 	doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE,
739 				 ring->use_doorbell);
740 	doorbell_offset = REG_SET_FIELD(doorbell_offset,
741 					SDMA_GFX_DOORBELL_OFFSET,
742 					OFFSET, ring->doorbell_index);
743 	WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
744 	WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
745 
746 	sdma_v4_4_2_ring_set_wptr(ring);
747 
748 	/* set minor_ptr_update to 0 after wptr is programmed */
749 	WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 0);
750 
751 	/* setup the wptr shadow polling */
752 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
753 	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_LO,
754 		    lower_32_bits(wptr_gpu_addr));
755 	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_ADDR_HI,
756 		    upper_32_bits(wptr_gpu_addr));
757 	wptr_poll_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL);
758 	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
759 				       SDMA_GFX_RB_WPTR_POLL_CNTL,
760 				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
761 	WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
762 
763 	/* enable DMA RB */
764 	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 1);
765 	WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
766 
767 	ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
768 	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 1);
769 #ifdef __BIG_ENDIAN
770 	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
771 #endif
772 	/* enable DMA IBs */
773 	WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
774 }
775 
776 /**
777  * sdma_v4_4_2_page_resume - setup and start the async dma engines
778  *
779  * @adev: amdgpu_device pointer
780  * @i: instance to resume
781  * @restore: whether to restore the cached rptr/wptr after a queue reset
782  *
783  * Set up the page DMA ring buffers and enable them.
785  */
786 static void sdma_v4_4_2_page_resume(struct amdgpu_device *adev, unsigned int i, bool restore)
787 {
788 	struct amdgpu_ring *ring = &adev->sdma.instance[i].page;
789 	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
790 	u32 wb_offset;
791 	u32 doorbell;
792 	u32 doorbell_offset;
793 	u64 wptr_gpu_addr;
794 	u64 rwptr;
795 
796 	wb_offset = (ring->rptr_offs * 4);
797 
798 	rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
799 	rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
800 	WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
801 
802 	/* For the guilty queue, set RPTR to the current wptr to skip bad commands;
803 	 * for a non-guilty queue, restore the cached rptr and continue execution.
804 	 */
805 	if (adev->sdma.instance[i].page_guilty)
806 		rwptr = ring->wptr;
807 	else
808 		rwptr = ring->cached_rptr;
809 
810 	/* Initialize the ring buffer's read and write pointers */
811 	if (restore) {
812 		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, lower_32_bits(rwptr << 2));
813 		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, upper_32_bits(rwptr << 2));
814 		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, lower_32_bits(rwptr << 2));
815 		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, upper_32_bits(rwptr << 2));
816 	} else {
817 		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR, 0);
818 		WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_HI, 0);
819 		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR, 0);
820 		WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_HI, 0);
821 	}
822 
823 	/* set the wb address whether it's enabled or not */
824 	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_HI,
825 	       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
826 	WREG32_SDMA(i, regSDMA_PAGE_RB_RPTR_ADDR_LO,
827 	       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
828 
829 	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
830 				RPTR_WRITEBACK_ENABLE, 1);
831 
832 	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE, ring->gpu_addr >> 8);
833 	WREG32_SDMA(i, regSDMA_PAGE_RB_BASE_HI, ring->gpu_addr >> 40);
834 
835 	if (!restore)
836 		ring->wptr = 0;
837 
838 	/* before programming wptr to a smaller value, set minor_ptr_update first */
839 	WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 1);
840 
841 	doorbell = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL);
842 	doorbell_offset = RREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET);
843 
844 	doorbell = REG_SET_FIELD(doorbell, SDMA_PAGE_DOORBELL, ENABLE,
845 				 ring->use_doorbell);
846 	doorbell_offset = REG_SET_FIELD(doorbell_offset,
847 					SDMA_PAGE_DOORBELL_OFFSET,
848 					OFFSET, ring->doorbell_index);
849 	WREG32_SDMA(i, regSDMA_PAGE_DOORBELL, doorbell);
850 	WREG32_SDMA(i, regSDMA_PAGE_DOORBELL_OFFSET, doorbell_offset);
851 
852 	/* paging queue doorbell range is set up in sdma_v4_4_2_gfx_resume */
853 	sdma_v4_4_2_page_ring_set_wptr(ring);
854 
855 	/* set minor_ptr_update to 0 after wptr is programmed */
856 	WREG32_SDMA(i, regSDMA_PAGE_MINOR_PTR_UPDATE, 0);
857 
858 	/* setup the wptr shadow polling */
859 	wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
860 	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_LO,
861 		    lower_32_bits(wptr_gpu_addr));
862 	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_ADDR_HI,
863 		    upper_32_bits(wptr_gpu_addr));
864 	wptr_poll_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL);
865 	wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
866 				       SDMA_PAGE_RB_WPTR_POLL_CNTL,
867 				       F32_POLL_ENABLE, amdgpu_sriov_vf(adev)? 1 : 0);
868 	WREG32_SDMA(i, regSDMA_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
869 
870 	/* enable DMA RB */
871 	rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL, RB_ENABLE, 1);
872 	WREG32_SDMA(i, regSDMA_PAGE_RB_CNTL, rb_cntl);
873 
874 	ib_cntl = RREG32_SDMA(i, regSDMA_PAGE_IB_CNTL);
875 	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_ENABLE, 1);
876 #ifdef __BIG_ENDIAN
877 	ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_PAGE_IB_CNTL, IB_SWAP_ENABLE, 1);
878 #endif
879 	/* enable DMA IBs */
880 	WREG32_SDMA(i, regSDMA_PAGE_IB_CNTL, ib_cntl);
881 }
882 
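/* Currently empty: no SDMA power-gating programming is done here. */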
883 static void sdma_v4_4_2_init_pg(struct amdgpu_device *adev)
884 {
885 
886 }
887 
888 /**
889  * sdma_v4_4_2_inst_rlc_resume - setup and start the async dma engines
890  *
891  * @adev: amdgpu_device pointer
892  * @inst_mask: mask of dma engine instances to be enabled
893  *
894  * Set up the compute DMA queues and enable them.
895  * Returns 0 for success, error for failure.
896  */
897 static int sdma_v4_4_2_inst_rlc_resume(struct amdgpu_device *adev,
898 				       uint32_t inst_mask)
899 {
900 	sdma_v4_4_2_init_pg(adev);
901 
902 	return 0;
903 }
904 
905 /**
906  * sdma_v4_4_2_inst_load_microcode - load the sDMA ME ucode
907  *
908  * @adev: amdgpu_device pointer
909  * @inst_mask: mask of dma engine instances to be enabled
910  *
911  * Loads the sDMA ucode for the requested instances.
912  * Returns 0 for success, -EINVAL if the ucode is not available.
913  */
914 static int sdma_v4_4_2_inst_load_microcode(struct amdgpu_device *adev,
915 					   uint32_t inst_mask)
916 {
917 	const struct sdma_firmware_header_v1_0 *hdr;
918 	const __le32 *fw_data;
919 	u32 fw_size;
920 	int i, j;
921 
922 	/* halt the MEs */
923 	sdma_v4_4_2_inst_enable(adev, false, inst_mask);
924 
925 	for_each_inst(i, inst_mask) {
926 		if (!adev->sdma.instance[i].fw)
927 			return -EINVAL;
928 
929 		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
930 		amdgpu_ucode_print_sdma_hdr(&hdr->header);
931 		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
932 
933 		fw_data = (const __le32 *)
934 			(adev->sdma.instance[i].fw->data +
935 				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
936 
937 		WREG32_SDMA(i, regSDMA_UCODE_ADDR, 0);
938 
939 		for (j = 0; j < fw_size; j++)
940 			WREG32_SDMA(i, regSDMA_UCODE_DATA,
941 				    le32_to_cpup(fw_data++));
942 
943 		WREG32_SDMA(i, regSDMA_UCODE_ADDR,
944 			    adev->sdma.instance[i].fw_version);
945 	}
946 
947 	return 0;
948 }
949 
950 /**
951  * sdma_v4_4_2_inst_start - setup and start the async dma engines
952  *
953  * @adev: amdgpu_device pointer
954  * @inst_mask: mask of dma engine instances to be enabled
955  * @restore: whether to restore the cached ring state after a queue reset
956  *
957  * Set up the DMA engines and enable them.
958  * Returns 0 for success, error for failure.
959  */
960 static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
961 				  uint32_t inst_mask, bool restore)
962 {
963 	struct amdgpu_ring *ring;
964 	uint32_t tmp_mask;
965 	int i, r = 0;
966 
967 	if (amdgpu_sriov_vf(adev)) {
968 		sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
969 		sdma_v4_4_2_inst_enable(adev, false, inst_mask);
970 	} else {
971 		/* bypass sdma microcode loading on Gopher */
972 		if (!restore && adev->firmware.load_type != AMDGPU_FW_LOAD_PSP &&
973 		    adev->sdma.instance[0].fw) {
974 			r = sdma_v4_4_2_inst_load_microcode(adev, inst_mask);
975 			if (r)
976 				return r;
977 		}
978 
979 		/* unhalt the MEs */
980 		sdma_v4_4_2_inst_enable(adev, true, inst_mask);
981 		/* enable sdma ring preemption */
982 		sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
983 	}
984 
985 	/* start the gfx rings and rlc compute queues */
986 	tmp_mask = inst_mask;
987 	for_each_inst(i, tmp_mask) {
988 		uint32_t temp;
989 
990 		WREG32_SDMA(i, regSDMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
991 		sdma_v4_4_2_gfx_resume(adev, i, restore);
992 		if (adev->sdma.has_page_queue)
993 			sdma_v4_4_2_page_resume(adev, i, restore);
994 
995 		/* set utc l1 enable flag always to 1 */
996 		temp = RREG32_SDMA(i, regSDMA_CNTL);
997 		temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1);
998 
999 		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) {
1000 			/* enable context empty interrupt during initialization */
1001 			temp = REG_SET_FIELD(temp, SDMA_CNTL, CTXEMPTY_INT_ENABLE, 1);
1002 			WREG32_SDMA(i, regSDMA_CNTL, temp);
1003 		}
1004 		if (!amdgpu_sriov_vf(adev)) {
1005 			if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1006 				/* unhalt engine */
1007 				temp = RREG32_SDMA(i, regSDMA_F32_CNTL);
1008 				temp = REG_SET_FIELD(temp, SDMA_F32_CNTL, HALT, 0);
1009 				WREG32_SDMA(i, regSDMA_F32_CNTL, temp);
1010 			}
1011 		}
1012 	}
1013 
1014 	if (amdgpu_sriov_vf(adev)) {
1015 		sdma_v4_4_2_inst_ctx_switch_enable(adev, true, inst_mask);
1016 		sdma_v4_4_2_inst_enable(adev, true, inst_mask);
1017 	} else {
1018 		r = sdma_v4_4_2_inst_rlc_resume(adev, inst_mask);
1019 		if (r)
1020 			return r;
1021 	}
1022 
1023 	tmp_mask = inst_mask;
1024 	for_each_inst(i, tmp_mask) {
1025 		ring = &adev->sdma.instance[i].ring;
1026 
1027 		r = amdgpu_ring_test_helper(ring);
1028 		if (r)
1029 			return r;
1030 
1031 		if (adev->sdma.has_page_queue) {
1032 			struct amdgpu_ring *page = &adev->sdma.instance[i].page;
1033 
1034 			r = amdgpu_ring_test_helper(page);
1035 			if (r)
1036 				return r;
1037 		}
1038 	}
1039 
1040 	return r;
1041 }
1042 
1043 /**
1044  * sdma_v4_4_2_ring_test_ring - simple async dma engine test
1045  *
1046  * @ring: amdgpu_ring structure holding ring information
1047  *
1048  * Test the DMA engine by using it to write a
1049  * value to memory.
1050  * Returns 0 for success, error for failure.
1051  */
1052 static int sdma_v4_4_2_ring_test_ring(struct amdgpu_ring *ring)
1053 {
1054 	struct amdgpu_device *adev = ring->adev;
1055 	unsigned i;
1056 	unsigned index;
1057 	int r;
1058 	u32 tmp;
1059 	u64 gpu_addr;
1060 
1061 	r = amdgpu_device_wb_get(adev, &index);
1062 	if (r)
1063 		return r;
1064 
1065 	gpu_addr = adev->wb.gpu_addr + (index * 4);
1066 	tmp = 0xCAFEDEAD;
1067 	adev->wb.wb[index] = cpu_to_le32(tmp);
1068 
1069 	r = amdgpu_ring_alloc(ring, 5);
1070 	if (r)
1071 		goto error_free_wb;
1072 
1073 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1074 			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
1075 	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
1076 	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
1077 	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0));
1078 	amdgpu_ring_write(ring, 0xDEADBEEF);
1079 	amdgpu_ring_commit(ring);
1080 
1081 	for (i = 0; i < adev->usec_timeout; i++) {
1082 		tmp = le32_to_cpu(adev->wb.wb[index]);
1083 		if (tmp == 0xDEADBEEF)
1084 			break;
1085 		udelay(1);
1086 	}
1087 
1088 	if (i >= adev->usec_timeout)
1089 		r = -ETIMEDOUT;
1090 
1091 error_free_wb:
1092 	amdgpu_device_wb_free(adev, index);
1093 	return r;
1094 }
1095 
1096 /**
1097  * sdma_v4_4_2_ring_test_ib - test an IB on the DMA engine
1098  *
1099  * @ring: amdgpu_ring structure holding ring information
1100  * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
1101  *
1102  * Test a simple IB in the DMA ring.
1103  * Returns 0 on success, error on failure.
1104  */
1105 static int sdma_v4_4_2_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1106 {
1107 	struct amdgpu_device *adev = ring->adev;
1108 	struct amdgpu_ib ib;
1109 	struct dma_fence *f = NULL;
1110 	unsigned index;
1111 	long r;
1112 	u32 tmp = 0;
1113 	u64 gpu_addr;
1114 
1115 	r = amdgpu_device_wb_get(adev, &index);
1116 	if (r)
1117 		return r;
1118 
1119 	gpu_addr = adev->wb.gpu_addr + (index * 4);
1120 	tmp = 0xCAFEDEAD;
1121 	adev->wb.wb[index] = cpu_to_le32(tmp);
1122 	memset(&ib, 0, sizeof(ib));
1123 	r = amdgpu_ib_get(adev, NULL, 256,
1124 					AMDGPU_IB_POOL_DIRECT, &ib);
1125 	if (r)
1126 		goto err0;
1127 
1128 	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1129 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1130 	ib.ptr[1] = lower_32_bits(gpu_addr);
1131 	ib.ptr[2] = upper_32_bits(gpu_addr);
1132 	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(0);
1133 	ib.ptr[4] = 0xDEADBEEF;
1134 	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1135 	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1136 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
1137 	ib.length_dw = 8;
1138 
1139 	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
1140 	if (r)
1141 		goto err1;
1142 
1143 	r = dma_fence_wait_timeout(f, false, timeout);
1144 	if (r == 0) {
1145 		r = -ETIMEDOUT;
1146 		goto err1;
1147 	} else if (r < 0) {
1148 		goto err1;
1149 	}
1150 	tmp = le32_to_cpu(adev->wb.wb[index]);
1151 	if (tmp == 0xDEADBEEF)
1152 		r = 0;
1153 	else
1154 		r = -EINVAL;
1155 
1156 err1:
1157 	amdgpu_ib_free(&ib, NULL);
1158 	dma_fence_put(f);
1159 err0:
1160 	amdgpu_device_wb_free(adev, index);
1161 	return r;
1162 }
1163 
1164 
1165 /**
1166  * sdma_v4_4_2_vm_copy_pte - update PTEs by copying them from the GART
1167  *
1168  * @ib: indirect buffer to fill with commands
1169  * @pe: addr of the page entry
1170  * @src: src addr to copy from
1171  * @count: number of page entries to update
1172  *
1173  * Update PTEs by copying them from the GART using sDMA.
1174  */
1175 static void sdma_v4_4_2_vm_copy_pte(struct amdgpu_ib *ib,
1176 				  uint64_t pe, uint64_t src,
1177 				  unsigned count)
1178 {
1179 	unsigned bytes = count * 8;
1180 
1181 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
1182 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
1183 	ib->ptr[ib->length_dw++] = bytes - 1;
1184 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
1185 	ib->ptr[ib->length_dw++] = lower_32_bits(src);
1186 	ib->ptr[ib->length_dw++] = upper_32_bits(src);
1187 	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1188 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1189 
1190 }
1191 
1192 /**
1193  * sdma_v4_4_2_vm_write_pte - update PTEs by writing them manually
1194  *
1195  * @ib: indirect buffer to fill with commands
1196  * @pe: addr of the page entry
1197  * @value: dst addr to write into pe
1198  * @count: number of page entries to update
1199  * @incr: increase next addr by incr bytes
1200  *
1201  * Update PTEs by writing them manually using sDMA.
1202  */
1203 static void sdma_v4_4_2_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
1204 				   uint64_t value, unsigned count,
1205 				   uint32_t incr)
1206 {
1207 	unsigned ndw = count * 2;
1208 
1209 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
1210 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
1211 	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
1212 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1213 	ib->ptr[ib->length_dw++] = ndw - 1;
1214 	for (; ndw > 0; ndw -= 2) {
1215 		ib->ptr[ib->length_dw++] = lower_32_bits(value);
1216 		ib->ptr[ib->length_dw++] = upper_32_bits(value);
1217 		value += incr;
1218 	}
1219 }
1220 
1221 /**
1222  * sdma_v4_4_2_vm_set_pte_pde - update the page tables using sDMA
1223  *
1224  * @ib: indirect buffer to fill with commands
1225  * @pe: addr of the page entry
1226  * @addr: dst addr to write into pe
1227  * @count: number of page entries to update
1228  * @incr: increase next addr by incr bytes
1229  * @flags: access flags
1230  *
1231  * Update the page tables using sDMA.
1232  */
1233 static void sdma_v4_4_2_vm_set_pte_pde(struct amdgpu_ib *ib,
1234 				     uint64_t pe,
1235 				     uint64_t addr, unsigned count,
1236 				     uint32_t incr, uint64_t flags)
1237 {
1238 	/* for physically contiguous pages (vram) */
1239 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_PTEPDE);
1240 	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
1241 	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
1242 	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
1243 	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
1244 	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
1245 	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
1246 	ib->ptr[ib->length_dw++] = incr; /* increment size */
1247 	ib->ptr[ib->length_dw++] = 0;
1248 	ib->ptr[ib->length_dw++] = count - 1; /* number of entries */
1249 }
1250 
1251 /**
1252  * sdma_v4_4_2_ring_pad_ib - pad the IB to the required number of dw
1253  *
1254  * @ring: amdgpu_ring structure holding ring information
1255  * @ib: indirect buffer to fill with padding
1256  */
1257 static void sdma_v4_4_2_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
1258 {
1259 	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
1260 	u32 pad_count;
1261 	int i;
1262 
1263 	pad_count = (-ib->length_dw) & 7;
1264 	for (i = 0; i < pad_count; i++)
1265 		if (sdma && sdma->burst_nop && (i == 0))
1266 			ib->ptr[ib->length_dw++] =
1267 				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
1268 				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
1269 		else
1270 			ib->ptr[ib->length_dw++] =
1271 				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
1272 }
1273 
1274 
1275 /**
1276  * sdma_v4_4_2_ring_emit_pipeline_sync - sync the pipeline
1277  *
1278  * @ring: amdgpu_ring pointer
1279  *
1280  * Make sure all previous operations are completed.
1281  */
1282 static void sdma_v4_4_2_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1283 {
1284 	uint32_t seq = ring->fence_drv.sync_seq;
1285 	uint64_t addr = ring->fence_drv.gpu_addr;
1286 
1287 	/* wait for idle */
1288 	sdma_v4_4_2_wait_reg_mem(ring, 1, 0,
1289 			       addr & 0xfffffffc,
1290 			       upper_32_bits(addr) & 0xffffffff,
1291 			       seq, 0xffffffff, 4);
1292 }
1293 
1294 
1295 /**
1296  * sdma_v4_4_2_ring_emit_vm_flush - vm flush using sDMA
1297  *
1298  * @ring: amdgpu_ring pointer
1299  * @vmid: vmid number to use
1300  * @pd_addr: page directory base address
1301  *
1302  * Update the page table base and flush the VM TLB
1303  * using sDMA.
1304  */
1305 static void sdma_v4_4_2_ring_emit_vm_flush(struct amdgpu_ring *ring,
1306 					 unsigned vmid, uint64_t pd_addr)
1307 {
1308 	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);
1309 }
1310 
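/**
 * sdma_v4_4_2_ring_emit_wreg - emit a register write packet
 *
 * @ring: amdgpu ring pointer
 * @reg: register offset to write
 * @val: value to write
 *
 * Emit an SRBM_WRITE packet that writes @val to @reg from the ring.
 */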
1311 static void sdma_v4_4_2_ring_emit_wreg(struct amdgpu_ring *ring,
1312 				     uint32_t reg, uint32_t val)
1313 {
1314 	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
1315 			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
1316 	amdgpu_ring_write(ring, reg);
1317 	amdgpu_ring_write(ring, val);
1318 }
1319 
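/**
 * sdma_v4_4_2_ring_emit_reg_wait - emit a register wait packet
 *
 * @ring: amdgpu ring pointer
 * @reg: register offset to poll
 * @val: expected value
 * @mask: mask applied to the register value before the comparison
 *
 * Emit a POLL_REGMEM packet that waits until (@reg & @mask) == @val.
 */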
1320 static void sdma_v4_4_2_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
1321 					 uint32_t val, uint32_t mask)
1322 {
1323 	sdma_v4_4_2_wait_reg_mem(ring, 0, 0, reg, 0, val, mask, 10);
1324 }
1325 
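/* Report whether the SDMA firmware supports a separate paging queue;
 * currently false for all handled IP versions.
 */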
1326 static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev)
1327 {
1328 	switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1329 	case IP_VERSION(4, 4, 2):
1330 	case IP_VERSION(4, 4, 5):
1331 		return false;
1332 	default:
1333 		return false;
1334 	}
1335 }
1336 
1337 static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = {
1338 	.stop_kernel_queue = &sdma_v4_4_2_stop_queue,
1339 	.start_kernel_queue = &sdma_v4_4_2_restore_queue,
1340 };
1341 
1342 static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block)
1343 {
1344 	struct amdgpu_device *adev = ip_block->adev;
1345 	int r;
1346 
1347 	r = sdma_v4_4_2_init_microcode(adev);
1348 	if (r)
1349 		return r;
1350 
1351 	/* TODO: Page queue breaks driver reload under SRIOV */
1352 	if (sdma_v4_4_2_fw_support_paging_queue(adev))
1353 		adev->sdma.has_page_queue = true;
1354 
1355 	sdma_v4_4_2_set_ring_funcs(adev);
1356 	sdma_v4_4_2_set_buffer_funcs(adev);
1357 	sdma_v4_4_2_set_vm_pte_funcs(adev);
1358 	sdma_v4_4_2_set_irq_funcs(adev);
1359 	sdma_v4_4_2_set_ras_funcs(adev);
1360 	return 0;
1361 }
1362 
1363 #if 0
1364 static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
1365 		void *err_data,
1366 		struct amdgpu_iv_entry *entry);
1367 #endif
1368 
1369 static int sdma_v4_4_2_late_init(struct amdgpu_ip_block *ip_block)
1370 {
1371 	struct amdgpu_device *adev = ip_block->adev;
1372 #if 0
1373 	struct ras_ih_if ih_info = {
1374 		.cb = sdma_v4_4_2_process_ras_data_cb,
1375 	};
1376 #endif
1377 	if (!amdgpu_persistent_edc_harvesting_supported(adev))
1378 		amdgpu_ras_reset_error_count(adev, AMDGPU_RAS_BLOCK__SDMA);
1379 
1380 	/* The initialization is done in the late_init stage to ensure that the SMU
1381 	 * initialization and capability setup are completed before we check the SDMA
1382 	 * reset capability
1383 	 */
1384 	sdma_v4_4_2_update_reset_mask(adev);
1385 
1386 	return 0;
1387 }
1388 
1389 static int sdma_v4_4_2_sw_init(struct amdgpu_ip_block *ip_block)
1390 {
1391 	struct amdgpu_ring *ring;
1392 	int r, i;
1393 	struct amdgpu_device *adev = ip_block->adev;
1394 	u32 aid_id;
1395 	uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
1396 	uint32_t *ptr;
1397 
1398 	/* SDMA trap event */
1399 	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1400 		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1401 				      SDMA0_4_0__SRCID__SDMA_TRAP,
1402 				      &adev->sdma.trap_irq);
1403 		if (r)
1404 			return r;
1405 	}
1406 
1407 	/* SDMA SRAM ECC event */
1408 	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1409 		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1410 				      SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
1411 				      &adev->sdma.ecc_irq);
1412 		if (r)
1413 			return r;
1414 	}
1415 
1416 	/* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event */
1417 	for (i = 0; i < adev->sdma.num_inst_per_aid; i++) {
1418 		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1419 				      SDMA0_4_0__SRCID__SDMA_VM_HOLE,
1420 				      &adev->sdma.vm_hole_irq);
1421 		if (r)
1422 			return r;
1423 
1424 		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1425 				      SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
1426 				      &adev->sdma.doorbell_invalid_irq);
1427 		if (r)
1428 			return r;
1429 
1430 		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1431 				      SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
1432 				      &adev->sdma.pool_timeout_irq);
1433 		if (r)
1434 			return r;
1435 
1436 		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1437 				      SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
1438 				      &adev->sdma.srbm_write_irq);
1439 		if (r)
1440 			return r;
1441 
1442 		r = amdgpu_irq_add_id(adev, sdma_v4_4_2_seq_to_irq_id(i),
1443 				      SDMA0_4_0__SRCID__SDMA_CTXEMPTY,
1444 				      &adev->sdma.ctxt_empty_irq);
1445 		if (r)
1446 			return r;
1447 	}
1448 
1449 	for (i = 0; i < adev->sdma.num_instances; i++) {
1450 		mutex_init(&adev->sdma.instance[i].engine_reset_mutex);
1451 		/* Initialize guilty flags for GFX and PAGE queues */
1452 		adev->sdma.instance[i].gfx_guilty = false;
1453 		adev->sdma.instance[i].page_guilty = false;
1454 		adev->sdma.instance[i].funcs = &sdma_v4_4_2_sdma_funcs;
1455 
1456 		ring = &adev->sdma.instance[i].ring;
1457 		ring->ring_obj = NULL;
1458 		ring->use_doorbell = true;
1459 		aid_id = adev->sdma.instance[i].aid_id;
1460 
1461 		DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
1462 				ring->use_doorbell?"true":"false");
1463 
1464 		/* doorbell size is 2 dwords, get DWORD offset */
1465 		ring->doorbell_index = adev->doorbell_index.sdma_engine[i] << 1;
1466 		ring->vm_hub = AMDGPU_MMHUB0(aid_id);
1467 
1468 		sprintf(ring->name, "sdma%d.%d", aid_id,
1469 				i % adev->sdma.num_inst_per_aid);
1470 		r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
1471 				     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1472 				     AMDGPU_RING_PRIO_DEFAULT, NULL);
1473 		if (r)
1474 			return r;
1475 
1476 		if (adev->sdma.has_page_queue) {
1477 			ring = &adev->sdma.instance[i].page;
1478 			ring->ring_obj = NULL;
1479 			ring->use_doorbell = true;
1480 
1481 			/* doorbell index of page queue is assigned right after
1482 			 * gfx queue on the same instance
1483 			 */
1484 			ring->doorbell_index =
1485 				(adev->doorbell_index.sdma_engine[i] + 1) << 1;
1486 			ring->vm_hub = AMDGPU_MMHUB0(aid_id);
1487 
1488 			sprintf(ring->name, "page%d.%d", aid_id,
1489 					i % adev->sdma.num_inst_per_aid);
1490 			r = amdgpu_ring_init(adev, ring, 1024,
1491 					     &adev->sdma.trap_irq,
1492 					     AMDGPU_SDMA_IRQ_INSTANCE0 + i,
1493 					     AMDGPU_RING_PRIO_DEFAULT, NULL);
1494 			if (r)
1495 				return r;
1496 		}
1497 	}
1498 
1499 	adev->sdma.supported_reset =
1500 		amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring);
1501 
1502 	if (amdgpu_sdma_ras_sw_init(adev)) {
1503 		dev_err(adev->dev, "fail to initialize sdma ras block\n");
1504 		return -EINVAL;
1505 	}
1506 
1507 	/* Allocate memory for SDMA IP Dump buffer */
1508 	ptr = kcalloc(adev->sdma.num_instances * reg_count, sizeof(uint32_t), GFP_KERNEL);
1509 	if (ptr)
1510 		adev->sdma.ip_dump = ptr;
1511 	else
1512 		DRM_ERROR("Failed to allocate memory for SDMA IP Dump\n");
1513 
1514 	r = amdgpu_sdma_sysfs_reset_mask_init(adev);
1515 	if (r)
1516 		return r;
1517 
1518 	return r;
1519 }
1520 
1521 static int sdma_v4_4_2_sw_fini(struct amdgpu_ip_block *ip_block)
1522 {
1523 	struct amdgpu_device *adev = ip_block->adev;
1524 	int i;
1525 
1526 	for (i = 0; i < adev->sdma.num_instances; i++) {
1527 		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
1528 		if (adev->sdma.has_page_queue)
1529 			amdgpu_ring_fini(&adev->sdma.instance[i].page);
1530 	}
1531 
1532 	amdgpu_sdma_sysfs_reset_mask_fini(adev);
1533 	if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2) ||
1534 	    amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 4) ||
1535 	    amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5))
1536 		amdgpu_sdma_destroy_inst_ctx(adev, true);
1537 	else
1538 		amdgpu_sdma_destroy_inst_ctx(adev, false);
1539 
1540 	kfree(adev->sdma.ip_dump);
1541 
1542 	return 0;
1543 }
1544 
1545 static int sdma_v4_4_2_hw_init(struct amdgpu_ip_block *ip_block)
1546 {
1547 	int r;
1548 	struct amdgpu_device *adev = ip_block->adev;
1549 	uint32_t inst_mask;
1550 
1551 	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
1552 	if (!amdgpu_sriov_vf(adev))
1553 		sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
1554 
1555 	r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
1556 
1557 	return r;
1558 }
1559 
1560 static int sdma_v4_4_2_hw_fini(struct amdgpu_ip_block *ip_block)
1561 {
1562 	struct amdgpu_device *adev = ip_block->adev;
1563 	uint32_t inst_mask;
1564 	int i;
1565 
1566 	if (amdgpu_sriov_vf(adev))
1567 		return 0;
1568 
1569 	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
1570 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
1571 		for (i = 0; i < adev->sdma.num_instances; i++) {
1572 			amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
1573 				       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
1574 		}
1575 	}
1576 
1577 	sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
1578 	sdma_v4_4_2_inst_enable(adev, false, inst_mask);
1579 
1580 	return 0;
1581 }
1582 
1583 static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
1584 					     enum amd_clockgating_state state);
1585 
1586 static int sdma_v4_4_2_suspend(struct amdgpu_ip_block *ip_block)
1587 {
1588 	struct amdgpu_device *adev = ip_block->adev;
1589 
1590 	if (amdgpu_in_reset(adev))
1591 		sdma_v4_4_2_set_clockgating_state(ip_block, AMD_CG_STATE_UNGATE);
1592 
1593 	return sdma_v4_4_2_hw_fini(ip_block);
1594 }
1595 
1596 static int sdma_v4_4_2_resume(struct amdgpu_ip_block *ip_block)
1597 {
1598 	return sdma_v4_4_2_hw_init(ip_block);
1599 }
1600 
1601 static bool sdma_v4_4_2_is_idle(struct amdgpu_ip_block *ip_block)
1602 {
1603 	struct amdgpu_device *adev = ip_block->adev;
1604 	u32 i;
1605 
1606 	for (i = 0; i < adev->sdma.num_instances; i++) {
1607 		u32 tmp = RREG32_SDMA(i, regSDMA_STATUS_REG);
1608 
1609 		if (!(tmp & SDMA_STATUS_REG__IDLE_MASK))
1610 			return false;
1611 	}
1612 
1613 	return true;
1614 }
1615 
1616 static int sdma_v4_4_2_wait_for_idle(struct amdgpu_ip_block *ip_block)
1617 {
1618 	unsigned i, j;
1619 	u32 sdma[AMDGPU_MAX_SDMA_INSTANCES];
1620 	struct amdgpu_device *adev = ip_block->adev;
1621 
1622 	for (i = 0; i < adev->usec_timeout; i++) {
1623 		for (j = 0; j < adev->sdma.num_instances; j++) {
1624 			sdma[j] = RREG32_SDMA(j, regSDMA_STATUS_REG);
1625 			if (!(sdma[j] & SDMA_STATUS_REG__IDLE_MASK))
1626 				break;
1627 		}
1628 		if (j == adev->sdma.num_instances)
1629 			return 0;
1630 		udelay(1);
1631 	}
1632 	return -ETIMEDOUT;
1633 }
1634 
1635 static int sdma_v4_4_2_soft_reset(struct amdgpu_ip_block *ip_block)
1636 {
1637 	/* todo */
1638 
1639 	return 0;
1640 }
1641 
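/**
 * sdma_v4_4_2_is_queue_selected - check whether a queue is flagged by the HW
 *
 * @adev: amdgpu_device pointer
 * @instance_id: SDMA instance to check
 * @is_page_queue: true to check the page queue, false for the gfx queue
 *
 * Read the queue's context status register and return true if its
 * SELECTED bit is set.
 */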
1642 static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t instance_id, bool is_page_queue)
1643 {
1644 	uint32_t reg_offset = is_page_queue ? regSDMA_PAGE_CONTEXT_STATUS : regSDMA_GFX_CONTEXT_STATUS;
1645 	uint32_t context_status = RREG32(sdma_v4_4_2_get_reg_offset(adev, instance_id, reg_offset));
1646 
1647 	/* Check if the SELECTED bit is set */
1648 	return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0;
1649 }
1650 
1651 static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring)
1652 {
1653 	struct amdgpu_device *adev = ring->adev;
1654 	uint32_t instance_id = ring->me;
1655 
1656 	return sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
1657 }
1658 
1659 static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring)
1660 {
1661 	struct amdgpu_device *adev = ring->adev;
1662 	uint32_t instance_id = ring->me;
1663 
1664 	if (!adev->sdma.has_page_queue)
1665 		return false;
1666 
1667 	return sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
1668 }
1669 
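/**
 * sdma_v4_4_2_reset_queue - reset the SDMA engine backing a ring
 *
 * @ring: amdgpu ring pointer
 * @vmid: vmid (unused)
 *
 * Suspend KFD, reset the SDMA engine that owns @ring and resume KFD.
 * Returns -EOPNOTSUPP when per-queue reset is not supported.
 */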
1670 static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid)
1671 {
1672 	struct amdgpu_device *adev = ring->adev;
1673 	u32 id = GET_INST(SDMA0, ring->me);
1674 	int r;
1675 
1676 	if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
1677 		return -EOPNOTSUPP;
1678 
1679 	amdgpu_amdkfd_suspend(adev, false);
1680 	r = amdgpu_sdma_reset_engine(adev, id);
1681 	amdgpu_amdkfd_resume(adev, false);
1682 
1683 	return r;
1684 }
1685 
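/**
 * sdma_v4_4_2_stop_queue - stop an SDMA instance ahead of an engine reset
 *
 * @ring: ring belonging to the instance that will be reset
 *
 * Record whether the gfx and/or page queue of the instance is the selected
 * (guilty) one, cache the current read pointers so they survive the reset,
 * then stop the gfx and page queues. Returns -EINVAL under SRIOV.
 */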
1686 static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring)
1687 {
1688 	struct amdgpu_device *adev = ring->adev;
1689 	u32 instance_id = GET_INST(SDMA0, ring->me);
1690 	u32 inst_mask;
1691 	uint64_t rptr;
1692 
1693 	if (amdgpu_sriov_vf(adev))
1694 		return -EINVAL;
1695 
1696 	/* Check if this queue is the guilty one */
1697 	adev->sdma.instance[instance_id].gfx_guilty =
1698 		sdma_v4_4_2_is_queue_selected(adev, instance_id, false);
1699 	if (adev->sdma.has_page_queue)
1700 		adev->sdma.instance[instance_id].page_guilty =
1701 			sdma_v4_4_2_is_queue_selected(adev, instance_id, true);
1702 
1703 	/* Cache the rptr before reset; after the reset,
1704 	 * all of the registers will be reset to 0.
1705 	 */
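/**
 * sdma_v4_4_2_restore_queue - restart an SDMA instance after an engine reset
 *
 * @ring: ring belonging to the instance that was reset
 *
 * Wait for the F32 engine of the instance to leave the halted state, then
 * restart the instance through sdma_v4_4_2_inst_start(). Returns -ETIMEDOUT
 * if the engine does not unhalt within adev->usec_timeout.
 */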
1706 	rptr = amdgpu_ring_get_rptr(ring);
1707 	ring->cached_rptr = rptr;
1708 	/* Cache the rptr for the page queue if it exists */
1709 	if (adev->sdma.has_page_queue) {
1710 		struct amdgpu_ring *page_ring = &adev->sdma.instance[instance_id].page;
1711 		rptr = amdgpu_ring_get_rptr(page_ring);
1712 		page_ring->cached_rptr = rptr;
1713 	}
1714 
1715 	/* stop queue */
1716 	inst_mask = 1 << ring->me;
1717 	sdma_v4_4_2_inst_gfx_stop(adev, inst_mask);
1718 	if (adev->sdma.has_page_queue)
1719 		sdma_v4_4_2_inst_page_stop(adev, inst_mask);
1720 
1721 	return 0;
1722 }
1723 
1724 static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring)
1725 {
1726 	struct amdgpu_device *adev = ring->adev;
1727 	u32 inst_mask;
1728 	int i;
1729 
1730 	inst_mask = 1 << ring->me;
1731 	udelay(50);
1732 
1733 	for (i = 0; i < adev->usec_timeout; i++) {
1734 		if (!REG_GET_FIELD(RREG32_SDMA(ring->me, regSDMA_F32_CNTL), SDMA_F32_CNTL, HALT))
1735 			break;
1736 		udelay(1);
1737 	}
1738 
1739 	if (i == adev->usec_timeout) {
1740 		dev_err(adev->dev, "timed out waiting for SDMA%d unhalt after reset\n",
1741 			ring->me);
1742 		return -ETIMEDOUT;
1743 	}
1744 
1745 	return sdma_v4_4_2_inst_start(adev, inst_mask, true);
1746 }
1747 
1748 static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev,
1749 					struct amdgpu_irq_src *source,
1750 					unsigned type,
1751 					enum amdgpu_interrupt_state state)
1752 {
1753 	u32 sdma_cntl;
1754 
1755 	sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1756 	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, TRAP_ENABLE,
1757 		       state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1758 	WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1759 
1760 	return 0;
1761 }
1762 
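/**
 * sdma_v4_4_2_process_trap_irq - handle an SDMA trap interrupt
 *
 * @adev: Pointer to the AMDGPU device structure
 * @source: the interrupt source
 * @entry: the interrupt vector entry
 *
 * Resolve the SDMA instance from the client id and node id of the IV entry,
 * then process fences on its gfx or page ring depending on the ring id.
 */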
1763 static int sdma_v4_4_2_process_trap_irq(struct amdgpu_device *adev,
1764 				      struct amdgpu_irq_src *source,
1765 				      struct amdgpu_iv_entry *entry)
1766 {
1767 	uint32_t instance, i;
1768 
1769 	DRM_DEBUG("IH: SDMA trap\n");
1770 	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1771 
1772 	/* The client id gives the SDMA instance within an AID. To find the exact
1773 	 * SDMA instance, match the node id from the interrupt entry, which
1774 	 * corresponds to the AID instance, with the AID id of each SDMA instance. */
1775 	for (i = instance; i < adev->sdma.num_instances;
1776 	     i += adev->sdma.num_inst_per_aid) {
1777 		if (adev->sdma.instance[i].aid_id ==
1778 		    node_id_to_phys_map[entry->node_id])
1779 			break;
1780 	}
1781 
1782 	if (i >= adev->sdma.num_instances) {
1783 		dev_WARN_ONCE(
1784 			adev->dev, 1,
1785 			"Couldn't find the right sdma instance in trap handler");
1786 		return 0;
1787 	}
1788 
1789 	switch (entry->ring_id) {
1790 	case 0:
1791 		amdgpu_fence_process(&adev->sdma.instance[i].ring);
1792 		break;
1793 	case 1:
1794 		amdgpu_fence_process(&adev->sdma.instance[i].page);
1795 		break;
1796 	default:
1797 		break;
1798 	}
1799 	return 0;
1800 }
1801 
1802 #if 0
1803 static int sdma_v4_4_2_process_ras_data_cb(struct amdgpu_device *adev,
1804 		void *err_data,
1805 		struct amdgpu_iv_entry *entry)
1806 {
1807 	int instance;
1808 
1809 	/* When "Full RAS" is enabled, the per-IP interrupt sources should
1810 	 * be disabled and the driver should only look for the aggregated
1811 	 * interrupt via sync flood
1812 	 */
1813 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA))
1814 		goto out;
1815 
1816 	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1817 	if (instance < 0)
1818 		goto out;
1819 
1820 	amdgpu_sdma_process_ras_data_cb(adev, err_data, entry);
1821 
1822 out:
1823 	return AMDGPU_RAS_SUCCESS;
1824 }
1825 #endif
1826 
1827 static int sdma_v4_4_2_process_illegal_inst_irq(struct amdgpu_device *adev,
1828 					      struct amdgpu_irq_src *source,
1829 					      struct amdgpu_iv_entry *entry)
1830 {
1831 	int instance;
1832 
1833 	DRM_ERROR("Illegal instruction in SDMA command stream\n");
1834 
1835 	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1836 	if (instance < 0)
1837 		return 0;
1838 
1839 	switch (entry->ring_id) {
1840 	case 0:
1841 		drm_sched_fault(&adev->sdma.instance[instance].ring.sched);
1842 		break;
1843 	}
1844 	return 0;
1845 }
1846 
1847 static int sdma_v4_4_2_set_ecc_irq_state(struct amdgpu_device *adev,
1848 					struct amdgpu_irq_src *source,
1849 					unsigned type,
1850 					enum amdgpu_interrupt_state state)
1851 {
1852 	u32 sdma_cntl;
1853 
1854 	sdma_cntl = RREG32_SDMA(type, regSDMA_CNTL);
1855 	sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, DRAM_ECC_INT_ENABLE,
1856 					state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
1857 	WREG32_SDMA(type, regSDMA_CNTL, sdma_cntl);
1858 
1859 	return 0;
1860 }
1861 
1862 static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
1863 					      struct amdgpu_iv_entry *entry)
1864 {
1865 	int instance;
1866 	struct amdgpu_task_info *task_info;
1867 	u64 addr;
1868 
1869 	instance = sdma_v4_4_2_irq_id_to_seq(adev, entry->client_id);
1870 	if (instance < 0 || instance >= adev->sdma.num_instances) {
1871 		dev_err(adev->dev, "sdma instance invalid %d\n", instance);
1872 		return -EINVAL;
1873 	}
1874 
1875 	addr = (u64)entry->src_data[0] << 12;
1876 	addr |= ((u64)entry->src_data[1] & 0xf) << 44;
1877 
1878 	dev_dbg_ratelimited(adev->dev,
1879 			    "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u pasid:%u\n",
1880 			    instance, addr, entry->src_id, entry->ring_id, entry->vmid,
1881 			    entry->pasid);
1882 
1883 	task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
1884 	if (task_info) {
1885 		dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
1886 				    task_info->process_name, task_info->tgid,
1887 				    task_info->task_name, task_info->pid);
1888 		amdgpu_vm_put_task_info(task_info);
1889 	}
1890 
1891 	return 0;
1892 }
1893 
1894 static int sdma_v4_4_2_process_vm_hole_irq(struct amdgpu_device *adev,
1895 					      struct amdgpu_irq_src *source,
1896 					      struct amdgpu_iv_entry *entry)
1897 {
1898 	dev_dbg_ratelimited(adev->dev, "MC or SEM address in VM hole\n");
1899 	sdma_v4_4_2_print_iv_entry(adev, entry);
1900 	return 0;
1901 }
1902 
1903 static int sdma_v4_4_2_process_doorbell_invalid_irq(struct amdgpu_device *adev,
1904 					      struct amdgpu_irq_src *source,
1905 					      struct amdgpu_iv_entry *entry)
1906 {
1907 
1908 	dev_dbg_ratelimited(adev->dev, "SDMA received a doorbell from BIF with byte_enable != 0xff\n");
1909 	sdma_v4_4_2_print_iv_entry(adev, entry);
1910 	return 0;
1911 }
1912 
1913 static int sdma_v4_4_2_process_pool_timeout_irq(struct amdgpu_device *adev,
1914 					      struct amdgpu_irq_src *source,
1915 					      struct amdgpu_iv_entry *entry)
1916 {
1917 	dev_dbg_ratelimited(adev->dev,
1918 		"Polling register/memory timeout executing POLL_REG/MEM with finite timer\n");
1919 	sdma_v4_4_2_print_iv_entry(adev, entry);
1920 	return 0;
1921 }
1922 
1923 static int sdma_v4_4_2_process_srbm_write_irq(struct amdgpu_device *adev,
1924 					      struct amdgpu_irq_src *source,
1925 					      struct amdgpu_iv_entry *entry)
1926 {
1927 	dev_dbg_ratelimited(adev->dev,
1928 		"SDMA received a register write SRBM_WRITE command in a non-privileged command buffer\n");
1929 	sdma_v4_4_2_print_iv_entry(adev, entry);
1930 	return 0;
1931 }
1932 
1933 static int sdma_v4_4_2_process_ctxt_empty_irq(struct amdgpu_device *adev,
1934 					      struct amdgpu_irq_src *source,
1935 					      struct amdgpu_iv_entry *entry)
1936 {
1937 	/* There is nothing useful to be done here, only kept for debug */
1938 	dev_dbg_ratelimited(adev->dev, "SDMA context empty interrupt\n");
1939 	sdma_v4_4_2_print_iv_entry(adev, entry);
1940 	return 0;
1941 }
1942 
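/**
 * sdma_v4_4_2_inst_update_medium_grain_light_sleep - toggle SDMA memory light sleep
 *
 * @adev: Pointer to the AMDGPU device structure
 * @enable: true to enable memory light sleep, false to disable it
 * @inst_mask: mask of SDMA instances to update
 *
 * Set or clear MEM_POWER_OVERRIDE in SDMA_POWER_CNTL for every instance in
 * the mask. Left at the default when light sleep is not driver controlled.
 */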
1943 static void sdma_v4_4_2_inst_update_medium_grain_light_sleep(
1944 	struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
1945 {
1946 	uint32_t data, def;
1947 	int i;
1948 
1949 	/* leave as default if it is not driver controlled */
1950 	if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS))
1951 		return;
1952 
1953 	if (enable) {
1954 		for_each_inst(i, inst_mask) {
1955 			/* 1 - not override: enable sdma mem light sleep */
1956 			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
1957 			data |= SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1958 			if (def != data)
1959 				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
1960 		}
1961 	} else {
1962 		for_each_inst(i, inst_mask) {
1963 			/* 0 - override: disable sdma mem light sleep */
1964 			def = data = RREG32_SDMA(i, regSDMA_POWER_CNTL);
1965 			data &= ~SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
1966 			if (def != data)
1967 				WREG32_SDMA(i, regSDMA_POWER_CNTL, data);
1968 		}
1969 	}
1970 }
1971 
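/**
 * sdma_v4_4_2_inst_update_medium_grain_clock_gating - toggle SDMA medium grain clock gating
 *
 * @adev: Pointer to the AMDGPU device structure
 * @enable: true to enable clock gating, false to disable it
 * @inst_mask: mask of SDMA instances to update
 *
 * Clear the SOFT_OVERRIDE bits in SDMA_CLK_CTRL to enable clock gating or
 * set them to disable it, for every instance in the mask. Left at the
 * default when MGCG is not driver controlled.
 */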
1972 static void sdma_v4_4_2_inst_update_medium_grain_clock_gating(
1973 	struct amdgpu_device *adev, bool enable, uint32_t inst_mask)
1974 {
1975 	uint32_t data, def;
1976 	int i;
1977 
1978 	/* leave as default if it is not driver controlled */
1979 	if (!(adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG))
1980 		return;
1981 
1982 	if (enable) {
1983 		for_each_inst(i, inst_mask) {
1984 			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
1985 			data &= ~(SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1986 				  SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1987 				  SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
1988 				  SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
1989 				  SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
1990 				  SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
1991 			if (def != data)
1992 				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
1993 		}
1994 	} else {
1995 		for_each_inst(i, inst_mask) {
1996 			def = data = RREG32_SDMA(i, regSDMA_CLK_CTRL);
1997 			data |= (SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK |
1998 				 SDMA_CLK_CTRL__SOFT_OVERRIDE4_MASK |
1999 				 SDMA_CLK_CTRL__SOFT_OVERRIDE3_MASK |
2000 				 SDMA_CLK_CTRL__SOFT_OVERRIDE2_MASK |
2001 				 SDMA_CLK_CTRL__SOFT_OVERRIDE1_MASK |
2002 				 SDMA_CLK_CTRL__SOFT_OVERRIDE0_MASK);
2003 			if (def != data)
2004 				WREG32_SDMA(i, regSDMA_CLK_CTRL, data);
2005 		}
2006 	}
2007 }
2008 
2009 static int sdma_v4_4_2_set_clockgating_state(struct amdgpu_ip_block *ip_block,
2010 					  enum amd_clockgating_state state)
2011 {
2012 	struct amdgpu_device *adev = ip_block->adev;
2013 	uint32_t inst_mask;
2014 
2015 	if (amdgpu_sriov_vf(adev))
2016 		return 0;
2017 
2018 	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2019 
2020 	sdma_v4_4_2_inst_update_medium_grain_clock_gating(
2021 		adev, state == AMD_CG_STATE_GATE, inst_mask);
2022 	sdma_v4_4_2_inst_update_medium_grain_light_sleep(
2023 		adev, state == AMD_CG_STATE_GATE, inst_mask);
2024 	return 0;
2025 }
2026 
2027 static int sdma_v4_4_2_set_powergating_state(struct amdgpu_ip_block *ip_block,
2028 					  enum amd_powergating_state state)
2029 {
2030 	return 0;
2031 }
2032 
2033 static void sdma_v4_4_2_get_clockgating_state(struct amdgpu_ip_block *ip_block, u64 *flags)
2034 {
2035 	struct amdgpu_device *adev = ip_block->adev;
2036 	int data;
2037 
2038 	if (amdgpu_sriov_vf(adev))
2039 		*flags = 0;
2040 
2041 	/* AMD_CG_SUPPORT_SDMA_MGCG */
2042 	data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_CLK_CTRL));
2043 	if (!(data & SDMA_CLK_CTRL__SOFT_OVERRIDE5_MASK))
2044 		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;
2045 
2046 	/* AMD_CG_SUPPORT_SDMA_LS */
2047 	data = RREG32(SOC15_REG_OFFSET(SDMA0, GET_INST(SDMA0, 0), regSDMA_POWER_CNTL));
2048 	if (data & SDMA_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
2049 		*flags |= AMD_CG_SUPPORT_SDMA_LS;
2050 }
2051 
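/**
 * sdma_v4_4_2_print_ip_state - print the captured SDMA register state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance
 * @p: drm printer to write the dump to
 *
 * Print, per SDMA instance, the register values previously captured by
 * sdma_v4_4_2_dump_ip_state(). Does nothing if no dump buffer is allocated.
 */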
2052 static void sdma_v4_4_2_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p)
2053 {
2054 	struct amdgpu_device *adev = ip_block->adev;
2055 	int i, j;
2056 	uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
2057 	uint32_t instance_offset;
2058 
2059 	if (!adev->sdma.ip_dump)
2060 		return;
2061 
2062 	drm_printf(p, "num_instances:%d\n", adev->sdma.num_instances);
2063 	for (i = 0; i < adev->sdma.num_instances; i++) {
2064 		instance_offset = i * reg_count;
2065 		drm_printf(p, "\nInstance:%d\n", i);
2066 
2067 		for (j = 0; j < reg_count; j++)
2068 			drm_printf(p, "%-50s \t 0x%08x\n", sdma_reg_list_4_4_2[j].reg_name,
2069 				   adev->sdma.ip_dump[instance_offset + j]);
2070 	}
2071 }
2072 
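/**
 * sdma_v4_4_2_dump_ip_state - capture the current SDMA register state
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance
 *
 * Read every register in sdma_reg_list_4_4_2 for each SDMA instance into
 * the adev->sdma.ip_dump buffer. Does nothing if the buffer is not allocated.
 */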
2073 static void sdma_v4_4_2_dump_ip_state(struct amdgpu_ip_block *ip_block)
2074 {
2075 	struct amdgpu_device *adev = ip_block->adev;
2076 	int i, j;
2077 	uint32_t instance_offset;
2078 	uint32_t reg_count = ARRAY_SIZE(sdma_reg_list_4_4_2);
2079 
2080 	if (!adev->sdma.ip_dump)
2081 		return;
2082 
2083 	for (i = 0; i < adev->sdma.num_instances; i++) {
2084 		instance_offset = i * reg_count;
2085 		for (j = 0; j < reg_count; j++)
2086 			adev->sdma.ip_dump[instance_offset + j] =
2087 				RREG32(sdma_v4_4_2_get_reg_offset(adev, i,
2088 				       sdma_reg_list_4_4_2[j].reg_offset));
2089 	}
2090 }
2091 
2092 const struct amd_ip_funcs sdma_v4_4_2_ip_funcs = {
2093 	.name = "sdma_v4_4_2",
2094 	.early_init = sdma_v4_4_2_early_init,
2095 	.late_init = sdma_v4_4_2_late_init,
2096 	.sw_init = sdma_v4_4_2_sw_init,
2097 	.sw_fini = sdma_v4_4_2_sw_fini,
2098 	.hw_init = sdma_v4_4_2_hw_init,
2099 	.hw_fini = sdma_v4_4_2_hw_fini,
2100 	.suspend = sdma_v4_4_2_suspend,
2101 	.resume = sdma_v4_4_2_resume,
2102 	.is_idle = sdma_v4_4_2_is_idle,
2103 	.wait_for_idle = sdma_v4_4_2_wait_for_idle,
2104 	.soft_reset = sdma_v4_4_2_soft_reset,
2105 	.set_clockgating_state = sdma_v4_4_2_set_clockgating_state,
2106 	.set_powergating_state = sdma_v4_4_2_set_powergating_state,
2107 	.get_clockgating_state = sdma_v4_4_2_get_clockgating_state,
2108 	.dump_ip_state = sdma_v4_4_2_dump_ip_state,
2109 	.print_ip_state = sdma_v4_4_2_print_ip_state,
2110 };
2111 
2112 static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = {
2113 	.type = AMDGPU_RING_TYPE_SDMA,
2114 	.align_mask = 0xff,
2115 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2116 	.support_64bit_ptrs = true,
2117 	.get_rptr = sdma_v4_4_2_ring_get_rptr,
2118 	.get_wptr = sdma_v4_4_2_ring_get_wptr,
2119 	.set_wptr = sdma_v4_4_2_ring_set_wptr,
2120 	.emit_frame_size =
2121 		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
2122 		3 + /* hdp invalidate */
2123 		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
2124 		/* sdma_v4_4_2_ring_emit_vm_flush */
2125 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2126 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2127 		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
2128 	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
2129 	.emit_ib = sdma_v4_4_2_ring_emit_ib,
2130 	.emit_fence = sdma_v4_4_2_ring_emit_fence,
2131 	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
2132 	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
2133 	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
2134 	.test_ring = sdma_v4_4_2_ring_test_ring,
2135 	.test_ib = sdma_v4_4_2_ring_test_ib,
2136 	.insert_nop = sdma_v4_4_2_ring_insert_nop,
2137 	.pad_ib = sdma_v4_4_2_ring_pad_ib,
2138 	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
2139 	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
2140 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2141 	.reset = sdma_v4_4_2_reset_queue,
2142 	.is_guilty = sdma_v4_4_2_ring_is_guilty,
2143 };
2144 
2145 static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = {
2146 	.type = AMDGPU_RING_TYPE_SDMA,
2147 	.align_mask = 0xff,
2148 	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
2149 	.support_64bit_ptrs = true,
2150 	.get_rptr = sdma_v4_4_2_ring_get_rptr,
2151 	.get_wptr = sdma_v4_4_2_page_ring_get_wptr,
2152 	.set_wptr = sdma_v4_4_2_page_ring_set_wptr,
2153 	.emit_frame_size =
2154 		6 + /* sdma_v4_4_2_ring_emit_hdp_flush */
2155 		3 + /* hdp invalidate */
2156 		6 + /* sdma_v4_4_2_ring_emit_pipeline_sync */
2157 		/* sdma_v4_4_2_ring_emit_vm_flush */
2158 		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
2159 		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 +
2160 		10 + 10 + 10, /* sdma_v4_4_2_ring_emit_fence x3 for user fence, vm fence */
2161 	.emit_ib_size = 7 + 6, /* sdma_v4_4_2_ring_emit_ib */
2162 	.emit_ib = sdma_v4_4_2_ring_emit_ib,
2163 	.emit_fence = sdma_v4_4_2_ring_emit_fence,
2164 	.emit_pipeline_sync = sdma_v4_4_2_ring_emit_pipeline_sync,
2165 	.emit_vm_flush = sdma_v4_4_2_ring_emit_vm_flush,
2166 	.emit_hdp_flush = sdma_v4_4_2_ring_emit_hdp_flush,
2167 	.test_ring = sdma_v4_4_2_ring_test_ring,
2168 	.test_ib = sdma_v4_4_2_ring_test_ib,
2169 	.insert_nop = sdma_v4_4_2_ring_insert_nop,
2170 	.pad_ib = sdma_v4_4_2_ring_pad_ib,
2171 	.emit_wreg = sdma_v4_4_2_ring_emit_wreg,
2172 	.emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait,
2173 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
2174 	.reset = sdma_v4_4_2_reset_queue,
2175 	.is_guilty = sdma_v4_4_2_page_ring_is_guilty,
2176 };
2177 
2178 static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev)
2179 {
2180 	int i, dev_inst;
2181 
2182 	for (i = 0; i < adev->sdma.num_instances; i++) {
2183 		adev->sdma.instance[i].ring.funcs = &sdma_v4_4_2_ring_funcs;
2184 		adev->sdma.instance[i].ring.me = i;
2185 		if (adev->sdma.has_page_queue) {
2186 			adev->sdma.instance[i].page.funcs =
2187 				&sdma_v4_4_2_page_ring_funcs;
2188 			adev->sdma.instance[i].page.me = i;
2189 		}
2190 
2191 		dev_inst = GET_INST(SDMA0, i);
2192 		/* AID to which SDMA belongs depends on physical instance */
2193 		adev->sdma.instance[i].aid_id =
2194 			dev_inst / adev->sdma.num_inst_per_aid;
2195 	}
2196 }
2197 
2198 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_trap_irq_funcs = {
2199 	.set = sdma_v4_4_2_set_trap_irq_state,
2200 	.process = sdma_v4_4_2_process_trap_irq,
2201 };
2202 
2203 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_illegal_inst_irq_funcs = {
2204 	.process = sdma_v4_4_2_process_illegal_inst_irq,
2205 };
2206 
2207 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ecc_irq_funcs = {
2208 	.set = sdma_v4_4_2_set_ecc_irq_state,
2209 	.process = amdgpu_sdma_process_ecc_irq,
2210 };
2211 
2212 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_vm_hole_irq_funcs = {
2213 	.process = sdma_v4_4_2_process_vm_hole_irq,
2214 };
2215 
2216 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_doorbell_invalid_irq_funcs = {
2217 	.process = sdma_v4_4_2_process_doorbell_invalid_irq,
2218 };
2219 
2220 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_pool_timeout_irq_funcs = {
2221 	.process = sdma_v4_4_2_process_pool_timeout_irq,
2222 };
2223 
2224 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_srbm_write_irq_funcs = {
2225 	.process = sdma_v4_4_2_process_srbm_write_irq,
2226 };
2227 
2228 static const struct amdgpu_irq_src_funcs sdma_v4_4_2_ctxt_empty_irq_funcs = {
2229 	.process = sdma_v4_4_2_process_ctxt_empty_irq,
2230 };
2231 
2232 static void sdma_v4_4_2_set_irq_funcs(struct amdgpu_device *adev)
2233 {
2234 	adev->sdma.trap_irq.num_types = adev->sdma.num_instances;
2235 	adev->sdma.ecc_irq.num_types = adev->sdma.num_instances;
2236 	adev->sdma.vm_hole_irq.num_types = adev->sdma.num_instances;
2237 	adev->sdma.doorbell_invalid_irq.num_types = adev->sdma.num_instances;
2238 	adev->sdma.pool_timeout_irq.num_types = adev->sdma.num_instances;
2239 	adev->sdma.srbm_write_irq.num_types = adev->sdma.num_instances;
2240 	adev->sdma.ctxt_empty_irq.num_types = adev->sdma.num_instances;
2241 
2242 	adev->sdma.trap_irq.funcs = &sdma_v4_4_2_trap_irq_funcs;
2243 	adev->sdma.illegal_inst_irq.funcs = &sdma_v4_4_2_illegal_inst_irq_funcs;
2244 	adev->sdma.ecc_irq.funcs = &sdma_v4_4_2_ecc_irq_funcs;
2245 	adev->sdma.vm_hole_irq.funcs = &sdma_v4_4_2_vm_hole_irq_funcs;
2246 	adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_4_2_doorbell_invalid_irq_funcs;
2247 	adev->sdma.pool_timeout_irq.funcs = &sdma_v4_4_2_pool_timeout_irq_funcs;
2248 	adev->sdma.srbm_write_irq.funcs = &sdma_v4_4_2_srbm_write_irq_funcs;
2249 	adev->sdma.ctxt_empty_irq.funcs = &sdma_v4_4_2_ctxt_empty_irq_funcs;
2250 }
2251 
2252 /**
2253  * sdma_v4_4_2_emit_copy_buffer - copy buffer using the sDMA engine
2254  *
2255  * @ib: indirect buffer to copy to
2256  * @src_offset: src GPU address
2257  * @dst_offset: dst GPU address
2258  * @byte_count: number of bytes to xfer
2259  * @copy_flags: copy flags for the buffers
2260  *
2261  * Copy GPU buffers using the DMA engine.
2262  * Used by the amdgpu ttm implementation to move pages if
2263  * registered as the asic copy callback.
2264  */
2265 static void sdma_v4_4_2_emit_copy_buffer(struct amdgpu_ib *ib,
2266 				       uint64_t src_offset,
2267 				       uint64_t dst_offset,
2268 				       uint32_t byte_count,
2269 				       uint32_t copy_flags)
2270 {
2271 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
2272 		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
2273 		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0);
2274 	ib->ptr[ib->length_dw++] = byte_count - 1;
2275 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
2276 	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
2277 	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
2278 	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2279 	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2280 }
2281 
2282 /**
2283  * sdma_v4_4_2_emit_fill_buffer - fill buffer using the sDMA engine
2284  *
2285  * @ib: indirect buffer to copy to
2286  * @src_data: value to write to buffer
2287  * @dst_offset: dst GPU address
2288  * @byte_count: number of bytes to xfer
2289  *
2290  * Fill GPU buffers using the DMA engine.
2291  */
2292 static void sdma_v4_4_2_emit_fill_buffer(struct amdgpu_ib *ib,
2293 				       uint32_t src_data,
2294 				       uint64_t dst_offset,
2295 				       uint32_t byte_count)
2296 {
2297 	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
2298 	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
2299 	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
2300 	ib->ptr[ib->length_dw++] = src_data;
2301 	ib->ptr[ib->length_dw++] = byte_count - 1;
2302 }
2303 
2304 static const struct amdgpu_buffer_funcs sdma_v4_4_2_buffer_funcs = {
2305 	.copy_max_bytes = 0x400000,
2306 	.copy_num_dw = 7,
2307 	.emit_copy_buffer = sdma_v4_4_2_emit_copy_buffer,
2308 
2309 	.fill_max_bytes = 0x400000,
2310 	.fill_num_dw = 5,
2311 	.emit_fill_buffer = sdma_v4_4_2_emit_fill_buffer,
2312 };
2313 
2314 static void sdma_v4_4_2_set_buffer_funcs(struct amdgpu_device *adev)
2315 {
2316 	adev->mman.buffer_funcs = &sdma_v4_4_2_buffer_funcs;
2317 	if (adev->sdma.has_page_queue)
2318 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
2319 	else
2320 		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
2321 }
2322 
2323 static const struct amdgpu_vm_pte_funcs sdma_v4_4_2_vm_pte_funcs = {
2324 	.copy_pte_num_dw = 7,
2325 	.copy_pte = sdma_v4_4_2_vm_copy_pte,
2326 
2327 	.write_pte = sdma_v4_4_2_vm_write_pte,
2328 	.set_pte_pde = sdma_v4_4_2_vm_set_pte_pde,
2329 };
2330 
2331 static void sdma_v4_4_2_set_vm_pte_funcs(struct amdgpu_device *adev)
2332 {
2333 	struct drm_gpu_scheduler *sched;
2334 	unsigned i;
2335 
2336 	adev->vm_manager.vm_pte_funcs = &sdma_v4_4_2_vm_pte_funcs;
2337 	for (i = 0; i < adev->sdma.num_instances; i++) {
2338 		if (adev->sdma.has_page_queue)
2339 			sched = &adev->sdma.instance[i].page.sched;
2340 		else
2341 			sched = &adev->sdma.instance[i].ring.sched;
2342 		adev->vm_manager.vm_pte_scheds[i] = sched;
2343 	}
2344 	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
2345 }
2346 
2347 /**
2348  * sdma_v4_4_2_update_reset_mask - update  reset mask for SDMA
2349  * sdma_v4_4_2_update_reset_mask - update the reset mask for SDMA
2350  * @adev: Pointer to the AMDGPU device structure
2351  *
2352  * This function updates the reset mask for SDMA and sets the supported
2353  *
2354  */
2355 static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev)
2356 {
2357 	/* per queue reset not supported for SRIOV */
2358 	if (amdgpu_sriov_vf(adev))
2359 		return;
2360 
2361 	/*
2362 	 * The user queue relies on the MEC fw and pmfw when the sdma queue does a reset.
2363 	 * Both of their versions need to be checked here to skip old MEC and PMFW.
2364 	 */
2365 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
2366 	case IP_VERSION(9, 4, 3):
2367 	case IP_VERSION(9, 4, 4):
2368 		if ((adev->gfx.mec_fw_version >= 0xb0) && amdgpu_dpm_reset_sdma_is_supported(adev))
2369 			adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2370 		break;
2371 	case IP_VERSION(9, 5, 0):
2372 		if ((adev->gfx.mec_fw_version >= 0xf) && amdgpu_dpm_reset_sdma_is_supported(adev))
2373 			adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
2374 		break;
2375 	default:
2376 		break;
2377 	}
2378 
2379 }
2380 
2381 const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
2382 	.type = AMD_IP_BLOCK_TYPE_SDMA,
2383 	.major = 4,
2384 	.minor = 4,
2385 	.rev = 2,
2386 	.funcs = &sdma_v4_4_2_ip_funcs,
2387 };
2388 
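/**
 * sdma_v4_4_2_xcp_resume - resume the SDMA instances of a partition
 *
 * @handle: amdgpu_device pointer
 * @inst_mask: mask of SDMA instances belonging to the partition
 *
 * Initialize the golden registers (bare metal only) and start the requested
 * SDMA instances.
 */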
2389 static int sdma_v4_4_2_xcp_resume(void *handle, uint32_t inst_mask)
2390 {
2391 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2392 	int r;
2393 
2394 	if (!amdgpu_sriov_vf(adev))
2395 		sdma_v4_4_2_inst_init_golden_registers(adev, inst_mask);
2396 
2397 	r = sdma_v4_4_2_inst_start(adev, inst_mask, false);
2398 
2399 	return r;
2400 }
2401 
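/**
 * sdma_v4_4_2_xcp_suspend - suspend the SDMA instances of a partition
 *
 * @handle: amdgpu_device pointer
 * @inst_mask: mask of SDMA instances belonging to the partition
 *
 * Release the ECC interrupts when RAS is supported, then disable context
 * switching and halt the requested SDMA instances.
 */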
2402 static int sdma_v4_4_2_xcp_suspend(void *handle, uint32_t inst_mask)
2403 {
2404 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2405 	uint32_t tmp_mask = inst_mask;
2406 	int i;
2407 
2408 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2409 		for_each_inst(i, tmp_mask) {
2410 			amdgpu_irq_put(adev, &adev->sdma.ecc_irq,
2411 				       AMDGPU_SDMA_IRQ_INSTANCE0 + i);
2412 		}
2413 	}
2414 
2415 	sdma_v4_4_2_inst_ctx_switch_enable(adev, false, inst_mask);
2416 	sdma_v4_4_2_inst_enable(adev, false, inst_mask);
2417 
2418 	return 0;
2419 }
2420 
2421 struct amdgpu_xcp_ip_funcs sdma_v4_4_2_xcp_funcs = {
2422 	.suspend = &sdma_v4_4_2_xcp_suspend,
2423 	.resume = &sdma_v4_4_2_xcp_resume
2424 };
2425 
2426 static const struct amdgpu_ras_err_status_reg_entry sdma_v4_2_2_ue_reg_list[] = {
2427 	{AMDGPU_RAS_REG_ENTRY(SDMA0, 0, regSDMA_UE_ERR_STATUS_LO, regSDMA_UE_ERR_STATUS_HI),
2428 	1, (AMDGPU_RAS_ERR_INFO_VALID | AMDGPU_RAS_ERR_STATUS_VALID), "SDMA"},
2429 };
2430 
2431 static const struct amdgpu_ras_memory_id_entry sdma_v4_4_2_ras_memory_list[] = {
2432 	{AMDGPU_SDMA_MBANK_DATA_BUF0, "SDMA_MBANK_DATA_BUF0"},
2433 	{AMDGPU_SDMA_MBANK_DATA_BUF1, "SDMA_MBANK_DATA_BUF1"},
2434 	{AMDGPU_SDMA_MBANK_DATA_BUF2, "SDMA_MBANK_DATA_BUF2"},
2435 	{AMDGPU_SDMA_MBANK_DATA_BUF3, "SDMA_MBANK_DATA_BUF3"},
2436 	{AMDGPU_SDMA_MBANK_DATA_BUF4, "SDMA_MBANK_DATA_BUF4"},
2437 	{AMDGPU_SDMA_MBANK_DATA_BUF5, "SDMA_MBANK_DATA_BUF5"},
2438 	{AMDGPU_SDMA_MBANK_DATA_BUF6, "SDMA_MBANK_DATA_BUF6"},
2439 	{AMDGPU_SDMA_MBANK_DATA_BUF7, "SDMA_MBANK_DATA_BUF7"},
2440 	{AMDGPU_SDMA_MBANK_DATA_BUF8, "SDMA_MBANK_DATA_BUF8"},
2441 	{AMDGPU_SDMA_MBANK_DATA_BUF9, "SDMA_MBANK_DATA_BUF9"},
2442 	{AMDGPU_SDMA_MBANK_DATA_BUF10, "SDMA_MBANK_DATA_BUF10"},
2443 	{AMDGPU_SDMA_MBANK_DATA_BUF11, "SDMA_MBANK_DATA_BUF11"},
2444 	{AMDGPU_SDMA_MBANK_DATA_BUF12, "SDMA_MBANK_DATA_BUF12"},
2445 	{AMDGPU_SDMA_MBANK_DATA_BUF13, "SDMA_MBANK_DATA_BUF13"},
2446 	{AMDGPU_SDMA_MBANK_DATA_BUF14, "SDMA_MBANK_DATA_BUF14"},
2447 	{AMDGPU_SDMA_MBANK_DATA_BUF15, "SDMA_MBANK_DATA_BUF15"},
2448 	{AMDGPU_SDMA_UCODE_BUF, "SDMA_UCODE_BUF"},
2449 	{AMDGPU_SDMA_RB_CMD_BUF, "SDMA_RB_CMD_BUF"},
2450 	{AMDGPU_SDMA_IB_CMD_BUF, "SDMA_IB_CMD_BUF"},
2451 	{AMDGPU_SDMA_UTCL1_RD_FIFO, "SDMA_UTCL1_RD_FIFO"},
2452 	{AMDGPU_SDMA_UTCL1_RDBST_FIFO, "SDMA_UTCL1_RDBST_FIFO"},
2453 	{AMDGPU_SDMA_UTCL1_WR_FIFO, "SDMA_UTCL1_WR_FIFO"},
2454 	{AMDGPU_SDMA_DATA_LUT_FIFO, "SDMA_DATA_LUT_FIFO"},
2455 	{AMDGPU_SDMA_SPLIT_DAT_BUF, "SDMA_SPLIT_DAT_BUF"},
2456 };
2457 
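/**
 * sdma_v4_4_2_inst_query_ras_error_count - query the RAS error count of one instance
 *
 * @adev: Pointer to the AMDGPU device structure
 * @sdma_inst: logical SDMA instance to query
 * @ras_err_status: ras_err_data structure the counts are added to
 *
 * Query the uncorrectable error count of a single SDMA instance from its
 * UE status registers and add it to the per-die error statistics.
 */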
2458 static void sdma_v4_4_2_inst_query_ras_error_count(struct amdgpu_device *adev,
2459 						   uint32_t sdma_inst,
2460 						   void *ras_err_status)
2461 {
2462 	struct ras_err_data *err_data = (struct ras_err_data *)ras_err_status;
2463 	uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
2464 	unsigned long ue_count = 0;
2465 	struct amdgpu_smuio_mcm_config_info mcm_info = {
2466 		.socket_id = adev->smuio.funcs->get_socket_id(adev),
2467 		.die_id = adev->sdma.instance[sdma_inst].aid_id,
2468 	};
2469 
2470 	/* sdma v4_4_2 doesn't support querying ce counts */
2471 	amdgpu_ras_inst_query_ras_error_count(adev,
2472 					sdma_v4_2_2_ue_reg_list,
2473 					ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
2474 					sdma_v4_4_2_ras_memory_list,
2475 					ARRAY_SIZE(sdma_v4_4_2_ras_memory_list),
2476 					sdma_dev_inst,
2477 					AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
2478 					&ue_count);
2479 
2480 	amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, ue_count);
2481 }
2482 
2483 static void sdma_v4_4_2_query_ras_error_count(struct amdgpu_device *adev,
2484 					      void *ras_err_status)
2485 {
2486 	uint32_t inst_mask;
2487 	int i = 0;
2488 
2489 	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2490 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2491 		for_each_inst(i, inst_mask)
2492 			sdma_v4_4_2_inst_query_ras_error_count(adev, i, ras_err_status);
2493 	} else {
2494 		dev_warn(adev->dev, "SDMA RAS is not supported\n");
2495 	}
2496 }
2497 
2498 static void sdma_v4_4_2_inst_reset_ras_error_count(struct amdgpu_device *adev,
2499 						   uint32_t sdma_inst)
2500 {
2501 	uint32_t sdma_dev_inst = GET_INST(SDMA0, sdma_inst);
2502 
2503 	amdgpu_ras_inst_reset_ras_error_count(adev,
2504 					sdma_v4_2_2_ue_reg_list,
2505 					ARRAY_SIZE(sdma_v4_2_2_ue_reg_list),
2506 					sdma_dev_inst);
2507 }
2508 
2509 static void sdma_v4_4_2_reset_ras_error_count(struct amdgpu_device *adev)
2510 {
2511 	uint32_t inst_mask;
2512 	int i = 0;
2513 
2514 	inst_mask = GENMASK(adev->sdma.num_instances - 1, 0);
2515 	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
2516 		for_each_inst(i, inst_mask)
2517 			sdma_v4_4_2_inst_reset_ras_error_count(adev, i);
2518 	} else {
2519 		dev_warn(adev->dev, "SDMA RAS is not supported\n");
2520 	}
2521 }
2522 
2523 static const struct amdgpu_ras_block_hw_ops sdma_v4_4_2_ras_hw_ops = {
2524 	.query_ras_error_count = sdma_v4_4_2_query_ras_error_count,
2525 	.reset_ras_error_count = sdma_v4_4_2_reset_ras_error_count,
2526 };
2527 
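/**
 * sdma_v4_4_2_aca_bank_parser - parse an ACA bank reported for SDMA
 *
 * @handle: ACA handle the bank was reported on
 * @bank: ACA bank registers from the SMU
 * @type: SMU error type (UE or CE)
 * @data: callback data (unused)
 *
 * Decode the bank info and log a single uncorrectable error, or the MISC0
 * error count for correctable errors, into the ACA error cache.
 */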
2528 static int sdma_v4_4_2_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank,
2529 				       enum aca_smu_type type, void *data)
2530 {
2531 	struct aca_bank_info info;
2532 	u64 misc0;
2533 	int ret;
2534 
2535 	ret = aca_bank_info_decode(bank, &info);
2536 	if (ret)
2537 		return ret;
2538 
2539 	misc0 = bank->regs[ACA_REG_IDX_MISC0];
2540 	switch (type) {
2541 	case ACA_SMU_TYPE_UE:
2542 		bank->aca_err_type = ACA_ERROR_TYPE_UE;
2543 		ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE,
2544 						     1ULL);
2545 		break;
2546 	case ACA_SMU_TYPE_CE:
2547 		bank->aca_err_type = ACA_ERROR_TYPE_CE;
2548 		ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type,
2549 						     ACA_REG__MISC0__ERRCNT(misc0));
2550 		break;
2551 	default:
2552 		return -EINVAL;
2553 	}
2554 
2555 	return ret;
2556 }
2557 
2558 /* CODE_SDMA0 - CODE_SDMA4, refer to the smu driver i/f header file */
2559 static int sdma_v4_4_2_err_codes[] = { 33, 34, 35, 36 };
2560 
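/**
 * sdma_v4_4_2_aca_bank_is_valid - check whether an ACA bank belongs to SDMA
 *
 * @handle: ACA handle the bank was reported on
 * @bank: ACA bank registers from the SMU
 * @type: SMU error type (unused)
 * @data: callback data (unused)
 *
 * Match the low instance id in the bank's IPID register against the AID SMU
 * instance and check the error code against the known SDMA error codes.
 */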
2561 static bool sdma_v4_4_2_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank,
2562 					  enum aca_smu_type type, void *data)
2563 {
2564 	u32 instlo;
2565 
2566 	instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]);
2567 	instlo &= GENMASK(31, 1);
2568 
2569 	if (instlo != mmSMNAID_AID0_MCA_SMU)
2570 		return false;
2571 
2572 	if (aca_bank_check_error_codes(handle->adev, bank,
2573 				       sdma_v4_4_2_err_codes,
2574 				       ARRAY_SIZE(sdma_v4_4_2_err_codes)))
2575 		return false;
2576 
2577 	return true;
2578 }
2579 
2580 static const struct aca_bank_ops sdma_v4_4_2_aca_bank_ops = {
2581 	.aca_bank_parser = sdma_v4_4_2_aca_bank_parser,
2582 	.aca_bank_is_valid = sdma_v4_4_2_aca_bank_is_valid,
2583 };
2584 
2585 static const struct aca_info sdma_v4_4_2_aca_info = {
2586 	.hwip = ACA_HWIP_TYPE_SMU,
2587 	.mask = ACA_ERROR_UE_MASK,
2588 	.bank_ops = &sdma_v4_4_2_aca_bank_ops,
2589 };
2590 
2591 static int sdma_v4_4_2_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
2592 {
2593 	int r;
2594 
2595 	r = amdgpu_sdma_ras_late_init(adev, ras_block);
2596 	if (r)
2597 		return r;
2598 
2599 	return amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__SDMA,
2600 				   &sdma_v4_4_2_aca_info, NULL);
2601 }
2602 
2603 static struct amdgpu_sdma_ras sdma_v4_4_2_ras = {
2604 	.ras_block = {
2605 		.hw_ops = &sdma_v4_4_2_ras_hw_ops,
2606 		.ras_late_init = sdma_v4_4_2_ras_late_init,
2607 	},
2608 };
2609 
2610 static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev)
2611 {
2612 	adev->sdma.ras = &sdma_v4_4_2_ras;
2613 }
2614