/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"

#include "vcn/vcn_3_0_0_offset.h"
#include "vcn/vcn_3_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET	0x401f

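/*
 * Registers captured by amdgpu_jpeg_reg_dump_init() and reported through the
 * dump_ip_state/print_ip_state callbacks for post-mortem debugging.
 */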
static const struct amdgpu_hwip_reg_entry jpeg_reg_list_3_0[] = {
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_INT_STAT),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_CNTL),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_RB_SIZE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_ADDR_MODE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_Y_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmJPEG_DEC_UV_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, mmUVD_JPEG_UV_PITCH),
};

static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
				enum amd_powergating_state state);

/**
 * jpeg_v3_0_early_init - set function pointers
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set ring and irq function pointers
 */
static int jpeg_v3_0_early_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	u32 harvest;

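	/*
	 * JPEG 3.1.1/3.1.2 have no harvesting fuse to check; on other
	 * revisions, bail out early if the UVD/JPEG instance is fused off.
	 */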
	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
	case IP_VERSION(3, 1, 1):
	case IP_VERSION(3, 1, 2):
		break;
	default:
		harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING);
		if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
			return -ENOENT;
		break;
	}

	adev->jpeg.num_jpeg_inst = 1;
	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v3_0_set_dec_ring_funcs(adev);
	jpeg_v3_0_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v3_0_sw_init - sw init for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Load firmware and do software initialization
 */
static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring;
	int r;

	/* JPEG TRAP */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
		VCN_2_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	ring = adev->jpeg.inst->ring_dec;
	ring->use_doorbell = true;
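	/*
	 * The VCN doorbell index is assigned in 64-bit units; shift to get
	 * the 32-bit doorbell slot and use the slot following the VCN
	 * decode ring for the JPEG decode ring.
	 */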
	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
	ring->vm_hub = AMDGPU_MMHUB0(0);
	sprintf(ring->name, "jpeg_dec");
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
			     AMDGPU_RING_PRIO_DEFAULT, NULL);
	if (r)
		return r;

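	/*
	 * The pitch register is tracked twice: the internal offset is used
	 * when the register is written through ring/IB packets, the external
	 * SOC15 offset when it is accessed via MMIO (e.g. by the ring test).
	 */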
	adev->jpeg.internal.jpeg_pitch[0] = mmUVD_JPEG_PITCH_INTERNAL_OFFSET;
	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_PITCH);

	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_3_0, ARRAY_SIZE(jpeg_reg_list_3_0));
	if (r)
		return r;

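	/*
	 * Advertise the reset types this queue supports; per-queue reset is
	 * only exposed on bare metal, not under SR-IOV.
	 */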
	adev->jpeg.supported_reset =
		amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
	if (!amdgpu_sriov_vf(adev))
		adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);

	return r;
}

/**
 * jpeg_v3_0_sw_fini - sw fini for JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Suspend JPEG and free up software allocations
 */
static int jpeg_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	amdgpu_jpeg_sysfs_reset_mask_fini(adev);

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v3_0_hw_init - start and test JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Set up the doorbell and test the decode ring
 */
static int jpeg_v3_0_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;

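	/* Route the VCN/JPEG doorbell range through NBIO before testing the ring. */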
	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
		(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);

	return amdgpu_ring_test_helper(ring);
}

/**
 * jpeg_v3_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block and mark the ring as no longer ready
 */
static int jpeg_v3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
	    RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
		jpeg_v3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);

	return 0;
}

/**
 * jpeg_v3_0_suspend - suspend JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v3_0_suspend(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = jpeg_v3_0_hw_fini(ip_block);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(ip_block->adev);

	return r;
}

/**
 * jpeg_v3_0_resume - resume JPEG block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v3_0_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_jpeg_resume(ip_block->adev);
	if (r)
		return r;

	r = jpeg_v3_0_hw_init(ip_block);

	return r;
}

static void jpeg_v3_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL);
	data &= ~(JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JPEG2_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JMCIF_MODE_MASK
		| JPEG_CGC_CTRL__JRBBM_MODE_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_CTRL, data);
}

static void jpeg_v3_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JPEG_ENC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, mmJPEG_CGC_GATE, data);
}

static int jpeg_v3_0_disable_static_power_gating(struct amdgpu_device *adev)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

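		/*
		 * Request power-up of the UVDJ power tile through the PGFSM
		 * and wait for it to report the powered-on state.
		 */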
		data = 1 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0,
			mmUVD_PGFSM_STATUS, UVD_PGFSM_STATUS_UVDJ_PWR_ON,
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_ERROR("amdgpu: JPEG disable power gating failed\n");
			return r;
		}
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

static int jpeg_v3_0_enable_static_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		uint32_t data = 0;
		int r = 0;

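		/*
		 * Request power-down of the UVDJ power tile through the PGFSM
		 * and wait for the matching power-gated status.
		 */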
		data = 2 << UVD_PGFSM_CONFIG__UVDJ_PWR_CONFIG__SHIFT;
		WREG32(SOC15_REG_OFFSET(JPEG, 0, mmUVD_PGFSM_CONFIG), data);

		r = SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_PGFSM_STATUS,
			(2 << UVD_PGFSM_STATUS__UVDJ_PWR_STATUS__SHIFT),
			UVD_PGFSM_STATUS__UVDJ_PWR_STATUS_MASK);

		if (r) {
			DRM_ERROR("amdgpu: JPEG enable power gating failed\n");
			return r;
		}
	}

	return 0;
}

/**
 * jpeg_v3_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* disable power gating */
	r = jpeg_v3_0_disable_static_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v3_0_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, mmJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);
	WREG32_SOC15(JPEG, 0, mmJPEG_ENC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC_MASK,
		~JPEG_SYS_INT_EN__DJRBC_MASK);

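	/*
	 * Program the JRBC ring buffer: stall fetches and enable rptr writes
	 * (RB_CNTL = 0x3) while the base address and pointers are set up,
	 * then clear the no-fetch bit (RB_CNTL = 0x2) so the engine starts
	 * fetching from the ring.
	 */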
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, mmUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);

	return 0;
}

/**
 * jpeg_v3_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v3_0_stop(struct amdgpu_device *adev)
{
	int r;

	/* reset JMI */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, mmUVD_JMI_CNTL),
		UVD_JMI_CNTL__SOFT_RESET_MASK,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	jpeg_v3_0_enable_clock_gating(adev);

	/* enable power gating */
	r = jpeg_v3_0_enable_static_power_gating(adev);
	if (r)
		return r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

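	/* Doorbell rings mirror the write pointer in a CPU-visible buffer. */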
	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, mmUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static bool jpeg_v3_0_is_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return ((RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS) &
		 UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return SOC15_WAIT_ON_RREG(JPEG, 0, mmUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}

static int jpeg_v3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	bool enable = state == AMD_CG_STATE_GATE;

	if (enable) {
		if (!jpeg_v3_0_is_idle(ip_block))
			return -EBUSY;
		jpeg_v3_0_enable_clock_gating(adev);
	} else {
		jpeg_v3_0_disable_clock_gating(adev);
	}

	return 0;
}

static int jpeg_v3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = ip_block->adev;
	int ret;

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v3_0_stop(adev);
	else
		ret = jpeg_v3_0_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

static int jpeg_v3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
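	/*
	 * Nothing to program here: the JRBC system interrupt is enabled
	 * unconditionally in jpeg_v3_0_start().
	 */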
	return 0;
}

static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring,
				unsigned int vmid,
				struct amdgpu_fence *timedout_fence)
{
	int r;

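	/*
	 * Per-queue reset: tear down and restart the whole JPEG block; the
	 * ring reset helpers handle fence bookkeeping around the restart.
	 */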
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
	r = jpeg_v3_0_stop(ring->adev);
	if (r)
		return r;
	r = jpeg_v3_0_start(ring->adev);
	if (r)
		return r;
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}

static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = {
	.name = "jpeg_v3_0",
	.early_init = jpeg_v3_0_early_init,
	.sw_init = jpeg_v3_0_sw_init,
	.sw_fini = jpeg_v3_0_sw_fini,
	.hw_init = jpeg_v3_0_hw_init,
	.hw_fini = jpeg_v3_0_hw_fini,
	.suspend = jpeg_v3_0_suspend,
	.resume = jpeg_v3_0_resume,
	.is_idle = jpeg_v3_0_is_idle,
	.wait_for_idle = jpeg_v3_0_wait_for_idle,
	.set_clockgating_state = jpeg_v3_0_set_clockgating_state,
	.set_powergating_state = jpeg_v3_0_set_powergating_state,
	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
	.print_ip_state = amdgpu_jpeg_print_ip_state,
};

static const struct amdgpu_ring_funcs jpeg_v3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v3_0_dec_ring_get_rptr,
	.get_wptr = jpeg_v3_0_dec_ring_get_wptr,
	.set_wptr = jpeg_v3_0_dec_ring_set_wptr,
	.parse_cs = jpeg_v2_dec_ring_parse_cs,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v3_0_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v3_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v3_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = jpeg_v3_0_ring_reset,
};

static void jpeg_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v3_0_dec_ring_vm_funcs;
}

static const struct amdgpu_irq_src_funcs jpeg_v3_0_irq_funcs = {
	.set = jpeg_v3_0_set_interrupt_state,
	.process = jpeg_v3_0_process_interrupt,
};

static void jpeg_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v3_0_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &jpeg_v3_0_ip_funcs,
};