xref: /linux/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c (revision 7dc340540363a008cee1e160e8f2a4f034f196d4)
1 /*
2  * Copyright 2023 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 
24 #include "amdgpu.h"
25 #include "amdgpu_jpeg.h"
26 #include "amdgpu_pm.h"
27 #include "soc15.h"
28 #include "soc15d.h"
29 #include "jpeg_v2_0.h"
30 #include "jpeg_v4_0_3.h"
31 
32 #include "vcn/vcn_5_0_0_offset.h"
33 #include "vcn/vcn_5_0_0_sh_mask.h"
34 #include "ivsrcid/vcn/irqsrcs_vcn_5_0.h"
35 #include "jpeg_v5_0_0.h"
36 
/* Register set captured for IP state dumps (debugfs/devcoredump) via
 * amdgpu_jpeg_reg_dump_init(); all entries refer to JPEG instance 0.
 */
static const struct amdgpu_hwip_reg_entry jpeg_reg_list_5_0[] = {
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_POWER_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_INT_STAT),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_RPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_WPTR),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_CNTL),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_RB_SIZE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JRBC_STATUS),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_ADDR_MODE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_Y_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regJPEG_DEC_UV_GFX10_TILING_SURFACE),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_PITCH),
	SOC15_REG_ENTRY_STR(JPEG, 0, regUVD_JPEG_UV_PITCH),
};
52 
53 static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev);
54 static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev);
55 static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
56 				enum amd_powergating_state state);
57 
58 /**
59  * jpeg_v5_0_0_early_init - set function pointers
60  *
61  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
62  *
63  * Set ring and irq function pointers
64  */
65 static int jpeg_v5_0_0_early_init(struct amdgpu_ip_block *ip_block)
66 {
67 	struct amdgpu_device *adev = ip_block->adev;
68 
69 	adev->jpeg.num_jpeg_inst = 1;
70 	adev->jpeg.num_jpeg_rings = 1;
71 
72 	jpeg_v5_0_0_set_dec_ring_funcs(adev);
73 	jpeg_v5_0_0_set_irq_funcs(adev);
74 
75 	return 0;
76 }
77 
78 /**
79  * jpeg_v5_0_0_sw_init - sw init for JPEG block
80  *
81  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
82  *
83  * Load firmware and sw initialization
84  */
85 static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block)
86 {
87 	struct amdgpu_device *adev = ip_block->adev;
88 	struct amdgpu_ring *ring;
89 	int r;
90 
91 	/* JPEG TRAP */
92 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
93 		VCN_5_0__SRCID__JPEG_DECODE, &adev->jpeg.inst->irq);
94 	if (r)
95 		return r;
96 
97 	r = amdgpu_jpeg_sw_init(adev);
98 	if (r)
99 		return r;
100 
101 	r = amdgpu_jpeg_resume(adev);
102 	if (r)
103 		return r;
104 
105 	ring = adev->jpeg.inst->ring_dec;
106 	ring->use_doorbell = true;
107 	ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
108 	ring->vm_hub = AMDGPU_MMHUB0(0);
109 
110 	sprintf(ring->name, "jpeg_dec");
111 	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0,
112 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
113 	if (r)
114 		return r;
115 
116 	adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
117 	adev->jpeg.inst->external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_PITCH);
118 
119 	r = amdgpu_jpeg_reg_dump_init(adev, jpeg_reg_list_5_0, ARRAY_SIZE(jpeg_reg_list_5_0));
120 	if (r)
121 		return r;
122 
123 	/* TODO: Add queue reset mask when FW fully supports it */
124 	adev->jpeg.supported_reset =
125 		amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]);
126 	r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
127 	if (r)
128 		return r;
129 	return 0;
130 }
131 
132 /**
133  * jpeg_v5_0_0_sw_fini - sw fini for JPEG block
134  *
135  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
136  *
137  * JPEG suspend and free up sw allocation
138  */
139 static int jpeg_v5_0_0_sw_fini(struct amdgpu_ip_block *ip_block)
140 {
141 	struct amdgpu_device *adev = ip_block->adev;
142 	int r;
143 
144 	r = amdgpu_jpeg_suspend(adev);
145 	if (r)
146 		return r;
147 
148 	amdgpu_jpeg_sysfs_reset_mask_fini(adev);
149 	r = amdgpu_jpeg_sw_fini(adev);
150 
151 	return r;
152 }
153 
154 /**
155  * jpeg_v5_0_0_hw_init - start and test JPEG block
156  *
157  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
158  *
159  */
160 static int jpeg_v5_0_0_hw_init(struct amdgpu_ip_block *ip_block)
161 {
162 	struct amdgpu_device *adev = ip_block->adev;
163 	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
164 	int r;
165 
166 	adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
167 			(adev->doorbell_index.vcn.vcn_ring0_1 << 1), 0);
168 
169 	/* Skip ring test because pause DPG is not implemented. */
170 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG)
171 		return 0;
172 
173 	r = amdgpu_ring_test_helper(ring);
174 	if (r)
175 		return r;
176 
177 	return 0;
178 }
179 
180 /**
181  * jpeg_v5_0_0_hw_fini - stop the hardware block
182  *
183  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
184  *
185  * Stop the JPEG block, mark ring as not ready any more
186  */
187 static int jpeg_v5_0_0_hw_fini(struct amdgpu_ip_block *ip_block)
188 {
189 	struct amdgpu_device *adev = ip_block->adev;
190 
191 	cancel_delayed_work_sync(&adev->jpeg.idle_work);
192 
193 	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
194 	      RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
195 		jpeg_v5_0_0_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
196 
197 	return 0;
198 }
199 
200 /**
201  * jpeg_v5_0_0_suspend - suspend JPEG block
202  *
203  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
204  *
205  * HW fini and suspend JPEG block
206  */
207 static int jpeg_v5_0_0_suspend(struct amdgpu_ip_block *ip_block)
208 {
209 	int r;
210 
211 	r = jpeg_v5_0_0_hw_fini(ip_block);
212 	if (r)
213 		return r;
214 
215 	r = amdgpu_jpeg_suspend(ip_block->adev);
216 
217 	return r;
218 }
219 
220 /**
221  * jpeg_v5_0_0_resume - resume JPEG block
222  *
223  * @ip_block: Pointer to the amdgpu_ip_block for this hw instance.
224  *
225  * Resume firmware and hw init JPEG block
226  */
227 static int jpeg_v5_0_0_resume(struct amdgpu_ip_block *ip_block)
228 {
229 	int r;
230 
231 	r = amdgpu_jpeg_resume(ip_block->adev);
232 	if (r)
233 		return r;
234 
235 	r = jpeg_v5_0_0_hw_init(ip_block);
236 
237 	return r;
238 }
239 
/* Ungate all JPEG clocks and take the decode/encode paths out of
 * dynamic clock-gating mode.
 */
static void jpeg_v5_0_0_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* writing 0 to CGC_GATE ungates every JPEG clock domain */
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);

	/* clear the dec/enc dynamic-gating mode bits in CGC_CTRL */
	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);
	data &= ~(JPEG_CGC_CTRL__JPEG0_DEC_MODE_MASK
		| JPEG_CGC_CTRL__JPEG_ENC_MODE_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);
}
251 
/* Re-enable JPEG clock gating: put the decoder in dynamic-gating mode
 * and gate the dec/enc/JMCIF/JRBBM clock domains.
 */
static void jpeg_v5_0_0_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL);

	/* set decoder dynamic clock-gating mode */
	data |= 1 << JPEG_CGC_CTRL__JPEG0_DEC_MODE__SHIFT;
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_CTRL, data);

	/* gate the individual JPEG clock domains */
	data = RREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG0_DEC_MASK
		|JPEG_CGC_GATE__JPEG_ENC_MASK
		|JPEG_CGC_GATE__JMCIF_MASK
		|JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, 0, regJPEG_CGC_GATE, data);
}
268 
/* Power the JPEG (ONO1) island up and disable the anti-hang mechanism.
 * Always returns 0; kept int for symmetry with the enable path.
 */
static int jpeg_v5_0_0_disable_power_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;

	/* DLDO config 1 requests the ONO1 island powered up; wait until the
	 * status bit reads back 0 (powered)
	 */
	data = 1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT;
	WREG32_SOC15(JPEG, 0, regUVD_IPX_DLDO_CONFIG, data);
	SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS, 0,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	return 0;
}
284 
/* Enable JPEG power gating: arm the anti-hang mechanism and, when PG is
 * supported, power the ONO1 island down. Always returns 0.
 */
static int jpeg_v5_0_0_enable_power_gating(struct amdgpu_device *adev)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		/* DLDO config 2 requests power-down; wait for status bit 1 */
		WREG32(SOC15_REG_OFFSET(JPEG, 0, regUVD_IPX_DLDO_CONFIG),
			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	return 0;
}
302 
/* Program JPEG clock-gating mode while in DPG mode, either directly or by
 * staging the writes into the DPG SRAM image (@indirect) for the PSP to
 * apply. NOTE(review): @indirect is uint8_t here but callers pass bool —
 * harmless, but the types could be unified.
 */
static void jpeg_engine_5_0_0_dpg_clock_gating_mode(struct amdgpu_device *adev,
	       int inst_idx, uint8_t indirect)
{
	uint32_t data = 0;

	// JPEG disable CGC
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data = 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data = 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;

	/* gating delay/off-delay timers */
	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;

	if (indirect) {
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect);

		// Turn on All JPEG clocks
		data = 0;
		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_CGC_GATE, data, indirect);
	} else {
		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_CTRL, data, indirect);

		// Turn on All JPEG clocks
		data = 0;
		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_CGC_GATE, data, indirect);
	}
}
331 
332 /**
333  * jpeg_v5_0_0_start_dpg_mode - Jpeg start with dpg mode
334  *
335  * @adev: amdgpu_device pointer
336  * @inst_idx: instance number index
337  * @indirect: indirectly write sram
338  *
339  * Start JPEG block with dpg mode
340  */
341 static int jpeg_v5_0_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
342 {
343 	struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
344 	uint32_t reg_data = 0;
345 
346 	jpeg_v5_0_0_enable_power_gating(adev);
347 
348 	// enable dynamic power gating mode
349 	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
350 	reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
351 	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
352 
353 	if (indirect)
354 		adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
355 			(uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;
356 
357 	jpeg_engine_5_0_0_dpg_clock_gating_mode(adev, inst_idx, indirect);
358 
359 	/* MJPEG global tiling registers */
360 	if (indirect)
361 		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG,
362 			adev->gfx.config.gb_addr_config, indirect);
363 	else
364 		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_DEC_GFX10_ADDR_CONFIG,
365 			adev->gfx.config.gb_addr_config, 1);
366 
367 	/* enable System Interrupt for JRBC */
368 	if (indirect)
369 		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipJPEG_SYS_INT_EN,
370 			JPEG_SYS_INT_EN__DJRBC0_MASK, indirect);
371 	else
372 		WREG32_SOC24_JPEG_DPG_MODE(inst_idx, vcnipJPEG_SYS_INT_EN,
373 			JPEG_SYS_INT_EN__DJRBC0_MASK, 1);
374 
375 	if (indirect) {
376 		/* add nop to workaround PSP size check */
377 		ADD_SOC24_JPEG_TO_DPG_SRAM(inst_idx, vcnipUVD_NO_OP, 0, indirect);
378 
379 		amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);
380 	}
381 
382 	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
383 		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
384 		VCN_JPEG_DB_CTRL__EN_MASK);
385 
386 	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
387 	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
388 	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
389 		lower_32_bits(ring->gpu_addr));
390 	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
391 		upper_32_bits(ring->gpu_addr));
392 	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0);
393 	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0);
394 	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L);
395 	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
396 	ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR);
397 
398 	return 0;
399 }
400 
401 /**
402  * jpeg_v5_0_0_stop_dpg_mode - Jpeg stop with dpg mode
403  *
404  * @adev: amdgpu_device pointer
405  * @inst_idx: instance number index
406  *
407  * Stop JPEG block with dpg mode
408  */
409 static void jpeg_v5_0_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
410 {
411 	uint32_t reg_data = 0;
412 
413 	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
414 	reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
415 	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
416 }
417 
418 /**
419  * jpeg_v5_0_0_start - start JPEG block
420  *
421  * @adev: amdgpu_device pointer
422  *
423  * Setup and start the JPEG block
424  */
425 static int jpeg_v5_0_0_start(struct amdgpu_device *adev)
426 {
427 	struct amdgpu_ring *ring = adev->jpeg.inst->ring_dec;
428 	int r;
429 
430 	if (adev->pm.dpm_enabled)
431 		amdgpu_dpm_enable_jpeg(adev, true);
432 
433 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
434 		r = jpeg_v5_0_0_start_dpg_mode(adev, 0, adev->jpeg.indirect_sram);
435 		return r;
436 	}
437 
438 	/* disable power gating */
439 	r = jpeg_v5_0_0_disable_power_gating(adev);
440 	if (r)
441 		return r;
442 
443 	/* JPEG disable CGC */
444 	jpeg_v5_0_0_disable_clock_gating(adev);
445 
446 	/* MJPEG global tiling registers */
447 	WREG32_SOC15(JPEG, 0, regJPEG_DEC_GFX10_ADDR_CONFIG,
448 		adev->gfx.config.gb_addr_config);
449 
450 	/* enable JMI channel */
451 	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL), 0,
452 		~UVD_JMI_CNTL__SOFT_RESET_MASK);
453 
454 	/* enable System Interrupt for JRBC */
455 	WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regJPEG_SYS_INT_EN),
456 		JPEG_SYS_INT_EN__DJRBC0_MASK,
457 		~JPEG_SYS_INT_EN__DJRBC0_MASK);
458 
459 	WREG32_SOC15(VCN, 0, regVCN_JPEG_DB_CTRL,
460 		ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
461 		VCN_JPEG_DB_CTRL__EN_MASK);
462 
463 	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_VMID, 0);
464 	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
465 	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
466 		lower_32_bits(ring->gpu_addr));
467 	WREG32_SOC15(JPEG, 0, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
468 		upper_32_bits(ring->gpu_addr));
469 	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR, 0);
470 	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, 0);
471 	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_CNTL, 0x00000002L);
472 	WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
473 	ring->wptr = RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
474 
475 	return 0;
476 }
477 
478 /**
479  * jpeg_v5_0_0_stop - stop JPEG block
480  *
481  * @adev: amdgpu_device pointer
482  *
483  * stop the JPEG block
484  */
485 static int jpeg_v5_0_0_stop(struct amdgpu_device *adev)
486 {
487 	int r;
488 
489 	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
490 		jpeg_v5_0_0_stop_dpg_mode(adev, 0);
491 	} else {
492 
493 		/* reset JMI */
494 		WREG32_P(SOC15_REG_OFFSET(JPEG, 0, regUVD_JMI_CNTL),
495 			UVD_JMI_CNTL__SOFT_RESET_MASK,
496 			~UVD_JMI_CNTL__SOFT_RESET_MASK);
497 
498 		jpeg_v5_0_0_enable_clock_gating(adev);
499 
500 		/* enable power gating */
501 		r = jpeg_v5_0_0_enable_power_gating(adev);
502 		if (r)
503 			return r;
504 	}
505 
506 	if (adev->pm.dpm_enabled)
507 		amdgpu_dpm_enable_jpeg(adev, false);
508 
509 	return 0;
510 }
511 
512 /**
513  * jpeg_v5_0_0_dec_ring_get_rptr - get read pointer
514  *
515  * @ring: amdgpu_ring pointer
516  *
517  * Returns the current hardware read pointer
518  */
519 static uint64_t jpeg_v5_0_0_dec_ring_get_rptr(struct amdgpu_ring *ring)
520 {
521 	struct amdgpu_device *adev = ring->adev;
522 
523 	return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_RPTR);
524 }
525 
526 /**
527  * jpeg_v5_0_0_dec_ring_get_wptr - get write pointer
528  *
529  * @ring: amdgpu_ring pointer
530  *
531  * Returns the current hardware write pointer
532  */
533 static uint64_t jpeg_v5_0_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
534 {
535 	struct amdgpu_device *adev = ring->adev;
536 
537 	if (ring->use_doorbell)
538 		return *ring->wptr_cpu_addr;
539 	else
540 		return RREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR);
541 }
542 
543 /**
544  * jpeg_v5_0_0_dec_ring_set_wptr - set write pointer
545  *
546  * @ring: amdgpu_ring pointer
547  *
548  * Commits the write pointer to the hardware
549  */
550 static void jpeg_v5_0_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
551 {
552 	struct amdgpu_device *adev = ring->adev;
553 
554 	if (ring->use_doorbell) {
555 		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
556 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
557 	} else {
558 		WREG32_SOC15(JPEG, 0, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
559 	}
560 }
561 
562 static bool jpeg_v5_0_0_is_idle(struct amdgpu_ip_block *ip_block)
563 {
564 	struct amdgpu_device *adev = ip_block->adev;
565 	int ret = 1;
566 
567 	ret &= (((RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS) &
568 		UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
569 		UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
570 
571 	return ret;
572 }
573 
/* Poll until the JRBC job-done bit is set; returns 0 or a timeout error
 * from SOC15_WAIT_ON_RREG().
 */
static int jpeg_v5_0_0_wait_for_idle(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	return SOC15_WAIT_ON_RREG(JPEG, 0, regUVD_JRBC_STATUS,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
		UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
}
582 
583 static int jpeg_v5_0_0_set_clockgating_state(struct amdgpu_ip_block *ip_block,
584 					  enum amd_clockgating_state state)
585 {
586 	struct amdgpu_device *adev = ip_block->adev;
587 	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
588 
589 	if (enable) {
590 		if (!jpeg_v5_0_0_is_idle(ip_block))
591 			return -EBUSY;
592 		jpeg_v5_0_0_enable_clock_gating(adev);
593 	} else {
594 		jpeg_v5_0_0_disable_clock_gating(adev);
595 	}
596 
597 	return 0;
598 }
599 
600 static int jpeg_v5_0_0_set_powergating_state(struct amdgpu_ip_block *ip_block,
601 					  enum amd_powergating_state state)
602 {
603 	struct amdgpu_device *adev = ip_block->adev;
604 	int ret;
605 
606 	if (state == adev->jpeg.cur_state)
607 		return 0;
608 
609 	if (state == AMD_PG_STATE_GATE)
610 		ret = jpeg_v5_0_0_stop(adev);
611 	else
612 		ret = jpeg_v5_0_0_start(adev);
613 
614 	if (!ret)
615 		adev->jpeg.cur_state = state;
616 
617 	return ret;
618 }
619 
/* No per-source interrupt masking is implemented for this IP; always
 * report success so the IRQ framework can (un)register sources.
 */
static int jpeg_v5_0_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}
627 
/* IH ring handler for JPEG: a decode completion signals the decode ring's
 * fences; anything else is logged as unhandled.
 */
static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->src_id) {
	case VCN_5_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst->ring_dec);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}
646 
/* amd_ip_funcs hooks wiring this file's implementations into the common
 * IP block lifecycle (init/fini, suspend/resume, CG/PG control, dumps).
 */
static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = {
	.name = "jpeg_v5_0_0",
	.early_init = jpeg_v5_0_0_early_init,
	.sw_init = jpeg_v5_0_0_sw_init,
	.sw_fini = jpeg_v5_0_0_sw_fini,
	.hw_init = jpeg_v5_0_0_hw_init,
	.hw_fini = jpeg_v5_0_0_hw_fini,
	.suspend = jpeg_v5_0_0_suspend,
	.resume = jpeg_v5_0_0_resume,
	.is_idle = jpeg_v5_0_0_is_idle,
	.wait_for_idle = jpeg_v5_0_0_wait_for_idle,
	.set_clockgating_state = jpeg_v5_0_0_set_clockgating_state,
	.set_powergating_state = jpeg_v5_0_0_set_powergating_state,
	.dump_ip_state = amdgpu_jpeg_dump_ip_state,
	.print_ip_state = amdgpu_jpeg_print_ip_state,
};
663 
/* Decode ring ops; packet emission is shared with JPEG v4.0.3 since the
 * JRBC command format is unchanged, only the rptr/wptr plumbing differs.
 */
static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v5_0_0_dec_ring_get_rptr,
	.get_wptr = jpeg_v5_0_0_dec_ring_get_wptr,
	.set_wptr = jpeg_v5_0_0_dec_ring_set_wptr,
	.parse_cs = jpeg_v2_dec_ring_parse_cs,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v5_0_0_dec_ring_emit_vm_flush */
		22 + 22 + /* jpeg_v5_0_0_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v5_0_0_dec_ring_emit_ib */
	.emit_ib = jpeg_v4_0_3_dec_ring_emit_ib,
	.emit_fence = jpeg_v4_0_3_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v4_0_3_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v4_0_3_dec_ring_nop,
	.insert_start = jpeg_v4_0_3_dec_ring_insert_start,
	.insert_end = jpeg_v4_0_3_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
693 
/* Attach the v5.0.0 decode ring ops to the single JPEG instance. */
static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->ring_dec->funcs = &jpeg_v5_0_0_dec_ring_vm_funcs;
}
698 
/* IRQ source ops: state setter is a no-op, processing fans out by src_id. */
static const struct amdgpu_irq_src_funcs jpeg_v5_0_0_irq_funcs = {
	.set = jpeg_v5_0_0_set_interrupt_state,
	.process = jpeg_v5_0_0_process_interrupt,
};
703 
/* Register the IRQ source ops; one interrupt type (decode trap). */
static void jpeg_v5_0_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->jpeg.inst->irq.num_types = 1;
	adev->jpeg.inst->irq.funcs = &jpeg_v5_0_0_irq_funcs;
}
709 
/* Exported IP block descriptor consumed by the SoC discovery/init code. */
const struct amdgpu_ip_block_version jpeg_v5_0_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 5,
	.minor = 0,
	.rev = 0,
	.funcs = &jpeg_v5_0_0_ip_funcs,
};
717