xref: /linux/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_jpeg.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "jpeg_v2_0.h"
#include "jpeg_v4_0_5.h"
#include "mmsch_v4_0.h"

#include "vcn/vcn_4_0_5_offset.h"
#include "vcn/vcn_4_0_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_4_0.h"

#define mmUVD_DPG_LMA_CTL						regUVD_DPG_LMA_CTL
#define mmUVD_DPG_LMA_CTL_BASE_IDX					regUVD_DPG_LMA_CTL_BASE_IDX
#define mmUVD_DPG_LMA_DATA						regUVD_DPG_LMA_DATA
#define mmUVD_DPG_LMA_DATA_BASE_IDX					regUVD_DPG_LMA_DATA_BASE_IDX

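/*
 * UVD internal register offsets, used when a register has to be programmed
 * through the DPG indirect-access path (WREG32_SOC15_JPEG_DPG_MODE) rather
 * than by a direct MMIO write.
 */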
#define regUVD_JPEG_PITCH_INTERNAL_OFFSET		0x401f
#define regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET	0x4026
#define regJPEG_SYS_INT_EN_INTERNAL_OFFSET		0x4141
#define regJPEG_CGC_CTRL_INTERNAL_OFFSET		0x4161
#define regJPEG_CGC_GATE_INTERNAL_OFFSET		0x4160
#define regUVD_NO_OP_INTERNAL_OFFSET			0x0029

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v4_0_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);

static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring);

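/* IH client ID to use for each JPEG instance; instance 1 routes through VCN1 */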
static int amdgpu_ih_clientid_jpeg[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * jpeg_v4_0_5_early_init - set function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 */
static int jpeg_v4_0_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
	case IP_VERSION(4, 0, 5):
		adev->jpeg.num_jpeg_inst = 1;
		break;
	case IP_VERSION(4, 0, 6):
		adev->jpeg.num_jpeg_inst = 2;
		break;
	default:
		DRM_DEV_ERROR(adev->dev,
			"Failed to init jpeg ip block(UVD_HWIP:0x%x)\n",
			amdgpu_ip_version(adev, UVD_HWIP, 0));
		return -EINVAL;
	}

	adev->jpeg.num_jpeg_rings = 1;

	jpeg_v4_0_5_set_dec_ring_funcs(adev);
	jpeg_v4_0_5_set_irq_funcs(adev);

	return 0;
}

/**
 * jpeg_v4_0_5_sw_init - sw init for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and do software initialization
 */
static int jpeg_v4_0_5_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* JPEG TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
				VCN_4_0__SRCID__JPEG_DECODE, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG DJPEG POISON EVENT */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
			VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;

		/* JPEG EJPEG POISON EVENT */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
			VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
		if (r)
			return r;
	}

	r = amdgpu_jpeg_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = adev->jpeg.inst[i].ring_dec;
		ring->use_doorbell = true;
		ring->vm_hub = AMDGPU_MMHUB0(0);
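		/*
		 * Doorbell layout (as used across amdgpu VCN/JPEG code):
		 * vcn_ring0_1 is a 64-bit doorbell index, so shift left by
		 * one for the 32-bit index; each instance then reserves 8
		 * slots, with JPEG decode on slot 1.
		 */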
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
		sprintf(ring->name, "jpeg_dec_%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		adev->jpeg.internal.jpeg_pitch[0] = regUVD_JPEG_PITCH_INTERNAL_OFFSET;
		adev->jpeg.inst[i].external.jpeg_pitch[0] = SOC15_REG_OFFSET(JPEG, i, regUVD_JPEG_PITCH);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_sw_fini - sw fini for JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * JPEG suspend and free up sw allocation
 */
static int jpeg_v4_0_5_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_suspend(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_sw_fini(adev);

	return r;
}

/**
 * jpeg_v4_0_5_hw_init - start and test JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Start the JPEG block and run a decode ring test
 */
static int jpeg_v4_0_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int r, i;

	/* TODO: Enable ring test with DPG support */
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
		DRM_DEV_INFO(adev->dev, "JPEG decode initialized successfully under DPG Mode\n");
		return 0;
	}

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = adev->jpeg.inst[i].ring_dec;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;
	}

	/* any ring test failure has already returned above */
	DRM_INFO("JPEG decode initialized successfully under SPG Mode\n");

	return 0;
}

/**
 * jpeg_v4_0_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the JPEG block, mark ring as not ready anymore
 */
static int jpeg_v4_0_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->jpeg.idle_work);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (!amdgpu_sriov_vf(adev)) {
			if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
			    RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS))
				jpeg_v4_0_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
		}
	}
	return 0;
}

/**
 * jpeg_v4_0_5_suspend - suspend JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend JPEG block
 */
static int jpeg_v4_0_5_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = jpeg_v4_0_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_jpeg_suspend(adev);

	return r;
}

/**
 * jpeg_v4_0_5_resume - resume JPEG block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init JPEG block
 */
static int jpeg_v4_0_5_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_jpeg_resume(adev);
	if (r)
		return r;

	r = jpeg_v4_0_5_hw_init(adev);

	return r;
}
static void jpeg_v4_0_5_disable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data &= (~JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK);
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
	data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}

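/**
 * jpeg_v4_0_5_enable_clock_gating - enable JPEG clock gating
 *
 * @adev: amdgpu_device pointer
 * @inst: JPEG instance index
 *
 * Counterpart of jpeg_v4_0_5_disable_clock_gating(): reprogram
 * JPEG_CGC_CTRL and set the JPEG_CGC_GATE bits to gate the decoder,
 * JMCIF and JRBBM clocks again.
 */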
static void jpeg_v4_0_5_enable_clock_gating(struct amdgpu_device *adev, int inst)
{
	uint32_t data = 0;

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL);
	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG) {
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= JPEG_CGC_CTRL__JPEG_DEC_MODE_MASK;
	} else {
		data &= ~JPEG_CGC_CTRL__DYN_CLOCK_MODE_MASK;
	}

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_CTRL, data);

	data = RREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE);
	data |= (JPEG_CGC_GATE__JPEG_DEC_MASK
		| JPEG_CGC_GATE__JPEG2_DEC_MASK
		| JPEG_CGC_GATE__JMCIF_MASK
		| JPEG_CGC_GATE__JRBBM_MASK);
	WREG32_SOC15(JPEG, inst, regJPEG_CGC_GATE, data);
}

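/*
 * DPG-mode variant of the clock-gating setup: the same JPEG_CGC_CTRL
 * programming as above, but written through the indirect DPG path and
 * with all JPEG_CGC_GATE bits cleared.
 */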
static void jpeg_engine_4_0_5_dpg_clock_gating_mode(struct amdgpu_device *adev,
			int inst_idx, uint8_t indirect)
{
	uint32_t data = 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_JPEG_MGCG)
		data |= 1 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		data |= 0 << JPEG_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;

	data |= 1 << JPEG_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	data |= 4 << JPEG_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_CTRL_INTERNAL_OFFSET, data, indirect);

	data = 0;
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_CGC_GATE_INTERNAL_OFFSET,
				data, indirect);
}

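/*
 * jpeg_v4_0_5_disable_static_power_gating - power the JPEG island up
 *
 * When JPEG power gating is supported, request the ONO1 power island on
 * and wait for UVD_IPX_DLDO_STATUS to reflect it, then clear the
 * anti-hang bit and leave the block in static PG mode.
 */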
static int jpeg_v4_0_5_disable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
			1 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
			0, UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	/* disable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	/* keep the JPEG in static PG mode */
	WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS), 0,
		~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK);

	return 0;
}

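/*
 * jpeg_v4_0_5_enable_static_power_gating - power the JPEG island down
 *
 * Reverse of the sequence above: set the anti-hang bit first, then
 * request the ONO1 power island off and wait for the DLDO status to
 * confirm it.
 */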
static int jpeg_v4_0_5_enable_static_power_gating(struct amdgpu_device *adev, int inst)
{
	/* enable anti hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(JPEG, inst, regUVD_JPEG_POWER_STATUS),
		UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK,
		~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, inst, regUVD_IPX_DLDO_CONFIG),
			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, inst, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_start_dpg_mode - JPEG start with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 * @indirect: indirectly write sram
 *
 * Start JPEG block with dpg mode
 */
static void jpeg_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	struct amdgpu_ring *ring = adev->jpeg.inst[inst_idx].ring_dec;
	uint32_t reg_data = 0;

	/* enable anti hang mechanism */
	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
	reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_POWER_STATUS_MASK;
	reg_data |= 0x1;
	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);

	if (adev->pg_flags & AMD_PG_SUPPORT_JPEG) {
		WREG32(SOC15_REG_OFFSET(JPEG, inst_idx, regUVD_IPX_DLDO_CONFIG),
			2 << UVD_IPX_DLDO_CONFIG__ONO1_PWR_CONFIG__SHIFT);
		SOC15_WAIT_ON_RREG(JPEG, inst_idx, regUVD_IPX_DLDO_STATUS,
			1 << UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS__SHIFT,
			UVD_IPX_DLDO_STATUS__ONO1_PWR_STATUS_MASK);
	}

	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
	reg_data |= UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);

	if (indirect)
		adev->jpeg.inst[inst_idx].dpg_sram_curr_addr =
					(uint32_t *)adev->jpeg.inst[inst_idx].dpg_sram_cpu_addr;

	jpeg_engine_4_0_5_dpg_clock_gating_mode(adev, inst_idx, indirect);

	/* MJPEG global tiling registers */
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_DEC_GFX10_ADDR_CONFIG_INTERNAL_OFFSET,
				   adev->gfx.config.gb_addr_config, indirect);
	/* enable System Interrupt for JRBC */
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regJPEG_SYS_INT_EN_INTERNAL_OFFSET,
				   JPEG_SYS_INT_EN__DJRBC_MASK, indirect);

	/* add nop to workaround PSP size check */
	WREG32_SOC15_JPEG_DPG_MODE(inst_idx, regUVD_NO_OP_INTERNAL_OFFSET, 0, indirect);

	if (indirect)
		amdgpu_jpeg_psp_update_sram(adev, inst_idx, 0);

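	/*
	 * Program the JRBC ring buffer: VMID 0, base address, zeroed
	 * read/write pointers and the ring size in dwords. The literal
	 * 0x00000001L | 0x00000002L corresponds to the RB_NO_FETCH and
	 * RB_RPTR_WR_EN control bits used by earlier JPEG IP versions;
	 * fetch is held off until RB_CNTL is rewritten with only 0x2 set.
	 */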
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_VMID, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, inst_idx, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_RPTR, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR, 0);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_CNTL, 0x00000002L);
	WREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
	ring->wptr = RREG32_SOC15(JPEG, inst_idx, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_5_stop_dpg_mode - JPEG stop with dpg mode
 *
 * @adev: amdgpu_device pointer
 * @inst_idx: instance number index
 *
 * Stop JPEG block with dpg mode
 */
static void jpeg_v4_0_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t reg_data = 0;

	reg_data = RREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS);
	reg_data &= ~UVD_JPEG_POWER_STATUS__JPEG_PG_MODE_MASK;
	WREG32_SOC15(JPEG, inst_idx, regUVD_JPEG_POWER_STATUS, reg_data);
}

/**
 * jpeg_v4_0_5_start - start JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the JPEG block
 */
static int jpeg_v4_0_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r, i;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, true);

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ring = adev->jpeg.inst[i].ring_dec;
		/* doorbell programming is done for every playback */
		adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
				(adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8 * i, i);

		WREG32_SOC15(VCN, i, regVCN_JPEG_DB_CTRL,
			ring->doorbell_index << VCN_JPEG_DB_CTRL__OFFSET__SHIFT |
			VCN_JPEG_DB_CTRL__EN_MASK);

		if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
			jpeg_v4_0_5_start_dpg_mode(adev, i, adev->jpeg.indirect_sram);
			continue;
		}

		/* disable power gating */
		r = jpeg_v4_0_5_disable_static_power_gating(adev, i);
		if (r)
			return r;

		/* JPEG disable CGC */
		jpeg_v4_0_5_disable_clock_gating(adev, i);

		/* MJPEG global tiling registers */
		WREG32_SOC15(JPEG, i, regJPEG_DEC_GFX10_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable JMI channel */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL), 0,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		/* enable System Interrupt for JRBC */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, regJPEG_SYS_INT_EN),
			JPEG_SYS_INT_EN__DJRBC_MASK,
			~JPEG_SYS_INT_EN__DJRBC_MASK);

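		/* program the JRBC ring buffer, same sequence as in DPG mode above */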
		WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_VMID, 0);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, (0x00000001L | 0x00000002L));
		WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, regUVD_LMI_JRBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_RPTR, 0);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR, 0);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_CNTL, 0x00000002L);
		WREG32_SOC15(JPEG, i, regUVD_JRBC_RB_SIZE, ring->ring_size / 4);
		ring->wptr = RREG32_SOC15(JPEG, i, regUVD_JRBC_RB_WPTR);
	}

	return 0;
}

/**
 * jpeg_v4_0_5_stop - stop JPEG block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the JPEG block
 */
static int jpeg_v4_0_5_stop(struct amdgpu_device *adev)
{
	int r, i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (adev->pg_flags & AMD_PG_SUPPORT_JPEG_DPG) {
			jpeg_v4_0_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* reset JMI */
		WREG32_P(SOC15_REG_OFFSET(JPEG, i, regUVD_JMI_CNTL),
			UVD_JMI_CNTL__SOFT_RESET_MASK,
			~UVD_JMI_CNTL__SOFT_RESET_MASK);

		jpeg_v4_0_5_enable_clock_gating(adev, i);

		/* enable power gating */
		r = jpeg_v4_0_5_enable_static_power_gating(adev, i);
		if (r)
			return r;
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_jpeg(adev, false);

	return 0;
}

/**
 * jpeg_v4_0_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_RPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t jpeg_v4_0_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR);
}

/**
 * jpeg_v4_0_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void jpeg_v4_0_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(JPEG, ring->me, regUVD_JRBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

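/* idle when every non-harvested instance reports RB_JOB_DONE in JRBC_STATUS */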
static bool jpeg_v4_0_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		ret &= (((RREG32_SOC15(JPEG, i, regUVD_JRBC_STATUS) &
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK) ==
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK));
	}
	return ret;
}

static int jpeg_v4_0_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r, i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		/* wait on each instance in turn instead of returning after the first */
		r = SOC15_WAIT_ON_RREG(JPEG, i, regUVD_JRBC_STATUS,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK,
			UVD_JRBC_STATUS__RB_JOB_DONE_MASK);
		if (r)
			return r;
	}

	return 0;
}

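/* clock gating may only be engaged once every JPEG instance is idle */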
static int jpeg_v4_0_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = state == AMD_CG_STATE_GATE;
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (enable) {
			if (!jpeg_v4_0_5_is_idle(handle))
				return -EBUSY;

			jpeg_v4_0_5_enable_clock_gating(adev, i);
		} else {
			jpeg_v4_0_5_disable_clock_gating(adev, i);
		}
	}

	return 0;
}

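/*
 * Power transitions funnel through jpeg_v4_0_5_start()/stop(). Under
 * SR-IOV the host manages power gating, so only the cached state is
 * updated.
 */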
static int jpeg_v4_0_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev)) {
		adev->jpeg.cur_state = AMD_PG_STATE_UNGATE;
		return 0;
	}

	if (state == adev->jpeg.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = jpeg_v4_0_5_stop(adev);
	else
		ret = jpeg_v4_0_5_start(adev);

	if (!ret)
		adev->jpeg.cur_state = state;

	return ret;
}

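/*
 * Interrupt handler: map the IH client id back to a JPEG instance, then
 * dispatch decode-trap fences and DJPEG/EJPEG poison events.
 */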
static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	DRM_DEBUG("IH: JPEG TRAP\n");

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	switch (entry->src_id) {
	case VCN_4_0__SRCID__JPEG_DECODE:
		amdgpu_fence_process(adev->jpeg.inst[ip_instance].ring_dec);
		break;
	case VCN_4_0__SRCID_DJPEG0_POISON:
	case VCN_4_0__SRCID_EJPEG0_POISON:
		amdgpu_jpeg_process_poison_irq(adev, source, entry);
		break;
	default:
		DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = {
	.name = "jpeg_v4_0_5",
	.early_init = jpeg_v4_0_5_early_init,
	.late_init = NULL,
	.sw_init = jpeg_v4_0_5_sw_init,
	.sw_fini = jpeg_v4_0_5_sw_fini,
	.hw_init = jpeg_v4_0_5_hw_init,
	.hw_fini = jpeg_v4_0_5_hw_fini,
	.suspend = jpeg_v4_0_5_suspend,
	.resume = jpeg_v4_0_5_resume,
	.is_idle = jpeg_v4_0_5_is_idle,
	.wait_for_idle = jpeg_v4_0_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = jpeg_v4_0_5_set_clockgating_state,
	.set_powergating_state = jpeg_v4_0_5_set_powergating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

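/* ring packet emission is unchanged from JPEG v2.0, so the v2.0 helpers are reused */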
static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_JPEG,
	.align_mask = 0xf,
	.get_rptr = jpeg_v4_0_5_dec_ring_get_rptr,
	.get_wptr = jpeg_v4_0_5_dec_ring_get_wptr,
	.set_wptr = jpeg_v4_0_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* jpeg_v4_0_5_dec_ring_emit_vm_flush */
		18 + 18 + /* jpeg_v4_0_5_dec_ring_emit_fence x2 vm fence */
		8 + 16,
	.emit_ib_size = 22, /* jpeg_v4_0_5_dec_ring_emit_ib */
	.emit_ib = jpeg_v2_0_dec_ring_emit_ib,
	.emit_fence = jpeg_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
	.test_ring = amdgpu_jpeg_dec_ring_test_ring,
	.test_ib = amdgpu_jpeg_dec_ring_test_ib,
	.insert_nop = jpeg_v2_0_dec_ring_nop,
	.insert_start = jpeg_v2_0_dec_ring_insert_start,
	.insert_end = jpeg_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_jpeg_ring_begin_use,
	.end_use = amdgpu_jpeg_ring_end_use,
	.emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		adev->jpeg.inst[i].ring_dec->funcs = &jpeg_v4_0_5_dec_ring_vm_funcs;
		adev->jpeg.inst[i].ring_dec->me = i;
		DRM_DEV_INFO(adev->dev, "JPEG%d decode is enabled in VM mode\n", i);
	}
}

static const struct amdgpu_irq_src_funcs jpeg_v4_0_5_irq_funcs = {
	.process = jpeg_v4_0_5_process_interrupt,
};

static void jpeg_v4_0_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

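		/* the decode trap and both poison src_ids share this one irq source */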
		adev->jpeg.inst[i].irq.num_types = 1;
		adev->jpeg.inst[i].irq.funcs = &jpeg_v4_0_5_irq_funcs;
	}
}

const struct amdgpu_ip_block_version jpeg_v4_0_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_JPEG,
	.major = 4,
	.minor = 0,
	.rev = 5,
	.funcs = &jpeg_v4_0_5_ip_funcs,
};