xref: /linux/drivers/gpu/drm/amd/amdgpu/jpeg_v5_3_0.c (revision eee8227dd18868bb16dbf72e2ab11d1a9008b874)
1 /*
2  * Copyright 2025 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include "amdgpu.h"
25 #include "amdgpu_jpeg.h"
26 #include "amdgpu_pm.h"
27 #include "soc15.h"
28 #include "soc15d.h"
29 #include "jpeg_v2_0.h"
30 #include "jpeg_v4_0_3.h"
31 
32 #include "vcn/vcn_5_3_0_offset.h"
33 #include "vcn/vcn_5_3_0_sh_mask.h"
34 #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
35 #include "jpeg_v5_3_0.h"
36 
37 static void jpeg_v5_3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
38 static void jpeg_v5_3_0_set_irq_funcs(struct amdgpu_device *adev);
39 static int jpeg_v5_3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
40 				enum amd_powergating_state state);
41 
42 
43 /**
44  * jpeg_v5_3_0_early_init - set function pointers
45  *
46  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
47  *
48  * Set ring and irq function pointers
49  */
50 static int jpeg_v5_3_0_early_init(struct amdgpu_ip_block *ip_block)
51 {
52 	struct amdgpu_device *adev = ip_block->adev;
53 
54 	adev->jpeg.num_jpeg_inst = 1;
55 	adev->jpeg.num_jpeg_rings = 1;
56 
57 	jpeg_v5_3_0_set_dec_ring_funcs(adev);
58 	jpeg_v5_3_0_set_irq_funcs(adev);
59 
60 	return 0;
61 }
62 
63 /**
64  * jpeg_v5_3_0_sw_init - sw init for JPEG block
65  *
66  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
67  *
68  * Load firmware and sw initialization
69  */
70 static int jpeg_v5_3_0_sw_init(struct amdgpu_ip_block *ip_block)
71 {
72 	struct amdgpu_device *adev = ip_block->adev;
73 	struct amdgpu_ring *ring;
74 	int r;
75 
76 	/* JPEG TRAP */
77 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
78 		VCN_5_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
79 	if (r)
80 		return r;
81 
82 	r = amdgpu_jpeg_sw_init(adev);
83 	if (r)
84 		return r;
85 
86 	r = amdgpu_jpeg_resume(adev);
87 	if (r)
88 		return r;
89 
90 	ring = adev->jpeg.inst->ring_dec;
91 	ring->use_doorbell = true;
92 	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
93 	ring->vm_hub = AMDGPU_MMHUB0(0);
94 
95 	sprintf(ring->name, "jpeg_dec");
96 	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
97 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
98 	if (r)
99 		return r;
100 
101 	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
102 	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
103 
104 	/* TODO: Add queue reset mask when FW fully supports it */
105 	adev->jpeg.supported_reset =
106 		amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
107 	if (!amdgpu_sriov_vf(adev))
108 		adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
109 	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
110 	if (r)
111 		return r;
112 	return 0;
113 }
114 
115 /**
116  * jpeg_v5_3_0_sw_fini - sw fini for JPEG block
117  *
118  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
119  *
120  * JPEG suspend and free up sw allocation
121  */
122 static int jpeg_v5_3_0_sw_fini(struct amdgpu_ip_block *ip_block)
123 {
124 	struct amdgpu_device *adev = ip_block->adev;
125 	int r;
126 
127 	r = amdgpu_jpeg_suspend(adev);
128 	if (r)
129 		return r;
130 
131 	amdgpu_jpeg_sysfs_reset_mask_fini(adev);
132 	r = amdgpu_jpeg_sw_fini(adev);
133 
134 	return r;
135 }
136 
137 /**
138  * jpeg_v5_3_0_hw_init - start and test JPEG block
139  *
140  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
141  *
142  */
143 static int jpeg_v5_3_0_hw_init(struct amdgpu_ip_block *ip_block)
144 {
145 	struct amdgpu_device *adev = ip_block->adev;
146 	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
147 	int r;
148 
149 	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
150 			(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
151 
152 	/* Skip ring test because pause DPG is not implemented. */
153 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG)
154 		return 0;
155 
156 	r = amdgpu_ring_test_helper(ring);
157 	if (r)
158 		return r;
159 
160 	return 0;
161 }
162 
/**
 * jpeg_v5_3_0_hw_fini - stop the hardware block
 *
 * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
 *
 * Stop the JPEG block, mark ring as not ready any more
 */
static int jpeg_v5_3_0_hw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	/* Ensure the deferred idle handler cannot race with teardown. */
	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	/* Gate power only if we are not already gated and the JRBC engine
	 * still reports a non-zero status.
	 */
	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
	      RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS))
		jpeg_v5_3_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);

	return 0;
}
182 
183 /**
184  * jpeg_v5_3_0_suspend - suspend JPEG block
185  *
186  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
187  *
188  * HW fini and suspend JPEG block
189  */
190 static int jpeg_v5_3_0_suspend(struct amdgpu_ip_block *ip_block)
191 {
192 	int r;
193 
194 	r = jpeg_v5_3_0_hw_fini(ip_block);
195 	if (r)
196 		return r;
197 
198 	r = amdgpu_jpeg_suspend(ip_block->adev);
199 
200 	return r;
201 }
202 
203 /**
204  * jpeg_v5_3_0_resume - resume JPEG block
205  *
206  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
207  *
208  * Resume firmware and hw init JPEG block
209  */
210 static int jpeg_v5_3_0_resume(struct amdgpu_ip_block *ip_block)
211 {
212 	int r;
213 
214 	r = amdgpu_jpeg_resume(ip_block->adev);
215 	if (r)
216 		return r;
217 
218 	r = jpeg_v5_3_0_hw_init(ip_block);
219 
220 	return r;
221 }
222 
/* Open all JPEG clock gates and force decode/encode clocks on. */
static void jpeg_v5_3_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* Writing 0 to CGC_GATE un-gates every JPEG sub-block clock. */
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);

	/* Clear the dynamic-gating mode bits for the decode and encode
	 * engines so their clocks stay on.
	 */
	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	data &= ~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
}
234 
/* Enable clock gating for the JPEG decode engine and gate sub-block clocks. */
static void jpeg_v5_3_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* Turn on dynamic-gating mode for the decode engine. */
	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);

	data |= 1 << JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	/* Gate the decode, encode, memory-interface and JRBBM clocks. */
	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK
		|JPEG_CGC_GATE__JPEG_ENC_MASK
		|JPEG_CGC_GATE__JMCIF_MASK
		|JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
251 
/* Power up the JPEG ONO1 power island and disable the anti-hang gate.
 * Always returns 0.
 */
static int jpeg_v5_3_0_disable_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* Request power-up (config value 1), then poll DLDO status until
	 * the ONO1 power-status field reads back 0 (powered on).
	 */
	data = 1 << UVD_IPX_DLDO_CONFIG_ONO1__ONO1_PWR_CONFIG__SHIFT;
	WREG32_SOC15(JPEG, 0, regUVD_IPX_DLDO_CONFIG_ONO1, data);
	SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	return 0;
}
267 
/* Engage the anti-hang gate and, when JPEG PG is supported, power down
 * the ONO1 island. Always returns 0.
 */
static int jpeg_v5_3_0_enable_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		/* Request power-down (config value 2) and wait for the
		 * ONO1 power-status field to report 1 (powered off).
		 */
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG_ONO1),
			2 << UVD_IPX_DLDO_CONFIG_ONO1__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	return 0;
}
285 
/* Program JPEG clock-gating mode while in DPG: either queue the writes
 * into the DPG SRAM (indirect) or write the registers directly.
 */
static void jpeg_engine_5_0_0_dpg_clock_gating_mode(struct amdgpu_device *adev,
	       int inst_idx, uint8_t indirect)
{
	uint32_t data = 0;

	// JPEG disable CGC
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;

	/* Gating delay timer and clock-off delay are fixed at 1 and 4. */
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;

	if (indirect) {
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect);

		// Turn on All JPEG clocks
		data = 0;
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_GATE, data, indirect);
	} else {
		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect);

		// Turn on All JPEG clocks
		data = 0;
		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_GATE, data, indirect);
	}
}
314 
/**
 * jpeg_v5_3_0_start_dpg_mode - Jpeg start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start JPEG block with dpg mode
 */
static int jpeg_v5_3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
	uint32_t reg_data = 0;

	jpeg_v5_3_0_enable_power_gating(adev);

	// enable dynamic power gating mode
	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
	reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);

	/* Rewind the DPG SRAM write cursor before queueing register writes. */
	if (indirect)
		adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
			(uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;

	jpeg_engine_5_0_0_dpg_clock_gating_mode(adev, inst_idx, indirect);

	/* MJPEG global tiling registers */
	/* NOTE(review): the direct-write branches below pass a literal 1 as
	 * the last argument of WREG32_SOC24_JPEG_DPG_MODE even though
	 * indirect is false here — confirm this matches the macro's contract.
	 */
	if (indirect)
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, indirect);
	else
		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config, 1);

	/* enable System Interrupt for JRBC */
	if (indirect)
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_SYS_INT_EN,
			JPEG_SYS_INT_EN__DJRBC0_MASK, indirect);
	else
		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_SYS_INT_EN,
			JPEG_SYS_INT_EN__DJRBC0_MASK, 1);

	if (indirect) {
		/* add nop to workaround PSP size check */
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipUVD_NO_OP, 0, indirect);

		amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);
	}

	/* Point the doorbell at this ring and enable it. */
	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
		VCN_JPEG_DB_CTRL__EN_MASK);

	/* Program the JRBC ring buffer: VMID 0, base address, pointer reset,
	 * then size; finally cache the hardware write pointer.
	 */
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC0_UVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC0_UVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC0_UVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC0_UVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC0_UVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC0_UVD_JRBC_RB_WPTR);

	return 0;
}
383 
384 /**
385  * jpeg_v5_3_0_stop_dpg_mode - Jpeg stop with dpg mode
386  *
387  * @adev: amdgpu_device pointer
388  * @inst_idx: instance number index
389  *
390  * Stop JPEG block with dpg mode
391  */
392 static void jpeg_v5_3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
393 {
394 	uint32_t reg_data = 0;
395 
396 	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
397 	reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
398 	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
399 }
400 
/**
 * jpeg_v5_3_0_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v5_3_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
	int r;

	/* Ask the SMU to power up the JPEG block first. */
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	/* DPG path takes over the whole bring-up sequence. */
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
		r = jpeg_v5_3_0_start_dpg_mode(adev, 0, adev->jpeg.indirect_sram);
		return r;
	}

	/* disable power gating */
	r = jpeg_v5_3_0_disable_power_gating(adev);
	if (r)
		return r;

	/* JPEG disable CGC */
	jpeg_v5_3_0_disable_clock_gating(adev);

	/* MJPEG global tiling registers */
	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
		adev->gfx.config.gb_addr_config);

	/* enable JMI channel (release its soft reset) */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
		~UVD_JMI_CNTL__SOFT_RESET_MASK);

	/* enable System Interrupt for JRBC */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
		JPEG_SYS_INT_EN__DJRBC0_MASK,
		~JPEG_SYS_INT_EN__DJRBC0_MASK);

	/* Point the doorbell at this ring and enable it. */
	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
		VCN_JPEG_DB_CTRL__EN_MASK);

	/* Program the JRBC ring buffer: VMID 0, base address, pointer reset,
	 * then size; finally cache the hardware write pointer.
	 */
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR);

	return 0;
}
460 
/**
 * jpeg_v5_3_0_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the JPEG block
 */
static int jpeg_v5_3_0_stop(struct amdgpu_device *adev)
{
	int r;

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
		jpeg_v5_3_0_stop_dpg_mode(adev, 0);
	} else {

		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v5_3_0_enable_clock_gating(adev);

		/* enable power gating */
		r = jpeg_v5_3_0_enable_power_gating(adev);
		if (r)
			return r;
	}

	/* Let the SMU power the block down again. */
	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}
494 
/**
 * jpeg_v5_3_0_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v5_3_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* adev is consumed implicitly by the RREG32_SOC15 macro. */
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_RPTR);
}
508 
/**
 * jpeg_v5_3_0_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v5_3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	/* adev is consumed implicitly by the RREG32_SOC15 macro. */
	struct amdgpu_device *adev = ring->adev;

	/* With doorbells the CPU-visible shadow holds the latest wptr;
	 * otherwise read it back from the register.
	 */
	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR);
}
525 
/**
 * jpeg_v5_3_0_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v5_3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	/* adev is consumed implicitly by the WREG32_SOC15 macro. */
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* Update the shadow first, then kick the doorbell. */
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}
544 
545 static bool jpeg_v5_3_0_is_idle(struct amdgpu_ip_block *ip_block)
546 {
547 	struct amdgpu_device *adev = ip_block->adev;
548 	int ret = 1;
549 
550 	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS) &
551 		UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
552 		UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
553 
554 	return ret;
555 }
556 
/* Poll the JRBC status register until RB_JOB_DONE is set; returns the
 * wait macro's result (0 on success, negative on timeout).
 */
static int jpeg_v5_3_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	/* adev is consumed implicitly by the SOC15 wait macro. */
	struct amdgpu_device *adev = ip_block->adev;

	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC0_UVD_JRBC_STATUS,
		UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC0_UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
565 
566 static int jpeg_v5_3_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
567 					  enum amd_clockgating_state state)
568 {
569 	struct amdgpu_device *adev = ip_block->adev;
570 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
571 
572 	if (enable) {
573 		if (!jpeg_v5_3_0_is_idle(ip_block))
574 			return -EBUSY;
575 		jpeg_v5_3_0_enable_clock_gating(adev);
576 	} else {
577 		jpeg_v5_3_0_disable_clock_gating(adev);
578 	}
579 
580 	return 0;
581 }
582 
583 static int jpeg_v5_3_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
584 					  enum amd_powergating_state state)
585 {
586 	struct amdgpu_device *adev = ip_block->adev;
587 	int ret;
588 
589 	if (state == adev->jpeg.cur_state)
590 		return 0;
591 
592 	if (state == AMD_PG_STATE_GATE)
593 		ret = jpeg_v5_3_0_stop(adev);
594 	else
595 		ret = jpeg_v5_3_0_start(adev);
596 
597 	if (!ret)
598 		adev->jpeg.cur_state = state;
599 
600 	return ret;
601 }
602 
/* Interrupt state hook: no per-source enable/disable programming is done
 * here; the trap is handled unconditionally in process_interrupt.
 */
static int jpeg_v5_3_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
610 
611 static int jpeg_v5_3_0_process_interrupt(struct amdgpu_device *adev,
612 				      struct amdgpu_irq_src *source,
613 				      struct amdgpu_iv_entry *entry)
614 {
615 	DRM_DEBUG("IH: JPEG TRAP\n");
616 
617 	switch (entry->src_id) {
618 	case VCN_5_0__SRCID__JPEG_DECODE:
619 		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
620 		break;
621 	default:
622 		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
623 			  entry->src_id, entry->src_data[0]);
624 		break;
625 	}
626 
627 	return 0;
628 }
629 
630 static int jpeg_v5_3_0_ring_reset(struct amdgpu_ring *ring,
631 				  unsigned int vmid,
632 				  struct amdgpu_fence *timedout_fence)
633 {
634 	int r;
635 
636 	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
637 	r = jpeg_v5_3_0_stop(ring->adev);
638 	if (r)
639 		return r;
640 	r = jpeg_v5_3_0_start(ring->adev);
641 	if (r)
642 		return r;
643 	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
644 }
645 
/* IP-block lifecycle hooks registered with the amdgpu core. */
static const struct amd_ip_funcs jpeg_v5_3_0_ip_funcs = {
	.name = "jpeg_v5_3_0",
	.early_init = jpeg_v5_3_0_early_init,
	.sw_init = jpeg_v5_3_0_sw_init,
	.sw_fini = jpeg_v5_3_0_sw_fini,
	.hw_init = jpeg_v5_3_0_hw_init,
	.hw_fini = jpeg_v5_3_0_hw_fini,
	.suspend = jpeg_v5_3_0_suspend,
	.resume = jpeg_v5_3_0_resume,
	.is_idle = jpeg_v5_3_0_is_idle,
	.wait_for_idle = jpeg_v5_3_0_wait_for_idle,
	.set_clockgating_state = jpeg_v5_3_0_set_clockgating_state,
	.set_powergating_state = jpeg_v5_3_0_set_powergating_state,
};
660 
/* Decode-ring callbacks. Packet emission is shared with JPEG 4.0.3;
 * pointer handling and reset are version-specific.
 */
static const struct amdgpu_ring_funcs jpeg_v5_3_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v5_3_0_dec_ring_get_rptr,
	.get_wptr = jpeg_v5_3_0_dec_ring_get_wptr,
	.set_wptr = jpeg_v5_3_0_dec_ring_set_wptr,
	.parse_cs = amdgpu_jpeg_dec_parse_cs,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v5_3_0_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v5_3_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v5_3_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
	.reset = jpeg_v5_3_0_ring_reset,
};
691 
/* Attach the version-specific callbacks to the single decode ring. */
static void jpeg_v5_3_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v5_3_0_dec_ring_vm_funcs;
}
696 
/* Interrupt source callbacks (state programming is a no-op, see above). */
static const struct amdgpu_irq_src_funcs jpeg_v5_3_0_irq_funcs = {
	.set = jpeg_v5_3_0_set_interrupt_state,
	.process = jpeg_v5_3_0_process_interrupt,
};
701 
/* Register the single JPEG interrupt source with its callbacks. */
static void jpeg_v5_3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v5_3_0_irq_funcs;
}
707 
/* Exported IP-block descriptor for JPEG 5.3.0, consumed by the SoC
 * discovery code when assembling the device's IP-block list.
 */
const struct amdgpu_ip_block_version jpeg_v5_3_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 5,
	.minor = 3,
	.rev = 0,
	.funcs = &jpeg_v5_3_0_ip_funcs,
};
714 };
715