/*
 * Copyright 2016-2024 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_VCN_H__
#define __AMDGPU_VCN_H__

#include "amdgpu_ras.h"

#define AMDGPU_VCN_STACK_SIZE		(128*1024)
#define AMDGPU_VCN_CONTEXT_SIZE		(512*1024)

#define AMDGPU_VCN_FIRMWARE_OFFSET	256
#define AMDGPU_VCN_MAX_ENC_RINGS	3

#define AMDGPU_MAX_VCN_INSTANCES	4
#define AMDGPU_MAX_VCN_ENC_RINGS  (AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES)

#define AMDGPU_VCN_HARVEST_VCN0 (1 << 0)
#define AMDGPU_VCN_HARVEST_VCN1 (1 << 1)
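
/*
 * Illustrative use (a sketch, not taken from this file): IP discovery fills
 * harvest_config in struct amdgpu_vcn below, and per-instance init paths are
 * expected to skip harvested engines, e.g.:
 *
 *	if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)
 *		continue; // instance 0 is fused off
 */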

#define VCN_DEC_KMD_CMD			0x80000000
#define VCN_DEC_CMD_FENCE		0x00000000
#define VCN_DEC_CMD_TRAP		0x00000001
#define VCN_DEC_CMD_WRITE_REG		0x00000004
#define VCN_DEC_CMD_REG_READ_COND_WAIT	0x00000006
#define VCN_DEC_CMD_PACKET_START	0x0000000a
#define VCN_DEC_CMD_PACKET_END		0x0000000b

#define VCN_DEC_SW_CMD_NO_OP		0x00000000
#define VCN_DEC_SW_CMD_END		0x00000001
#define VCN_DEC_SW_CMD_IB		0x00000002
#define VCN_DEC_SW_CMD_FENCE		0x00000003
#define VCN_DEC_SW_CMD_TRAP		0x00000004
#define VCN_DEC_SW_CMD_IB_AUTO		0x00000005
#define VCN_DEC_SW_CMD_SEMAPHORE	0x00000006
#define VCN_DEC_SW_CMD_PREEMPT_FENCE	0x00000009
#define VCN_DEC_SW_CMD_REG_WRITE	0x0000000b
#define VCN_DEC_SW_CMD_REG_WAIT		0x0000000c

#define VCN_ENC_CMD_NO_OP		0x00000000
#define VCN_ENC_CMD_END			0x00000001
#define VCN_ENC_CMD_IB			0x00000002
#define VCN_ENC_CMD_FENCE		0x00000003
#define VCN_ENC_CMD_TRAP		0x00000004
#define VCN_ENC_CMD_REG_WRITE		0x0000000b
#define VCN_ENC_CMD_REG_WAIT		0x0000000c

#define VCN_AON_SOC_ADDRESS_2_0		0x1f800
#define VCN_VID_IP_ADDRESS_2_0		0x0
#define VCN_AON_IP_ADDRESS_2_0		0x30000

#define mmUVD_RBC_XX_IB_REG_CHECK					0x026b
#define mmUVD_RBC_XX_IB_REG_CHECK_BASE_IDX				1
#define mmUVD_REG_XX_MASK						0x026c
#define mmUVD_REG_XX_MASK_BASE_IDX					1

/* 1 second timeout */
#define VCN_IDLE_TIMEOUT	msecs_to_jiffies(1000)
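
/*
 * Usage sketch (assumed, matching the idle_work member declared below):
 * end-of-use paths re-arm the per-instance idle handler so the engine can
 * be powered down after one second without submissions, e.g.:
 *
 *	schedule_delayed_work(&vcn_inst->idle_work, VCN_IDLE_TIMEOUT);
 */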

#define RREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, mask, sram_sel)			\
	({	WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,				\
			UVD_DPG_LMA_CTL__MASK_EN_MASK |					\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg)	\
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |			\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));		\
		RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA);				\
	})

#define WREG32_SOC15_DPG_MODE_1_0(ip, inst_idx, reg, value, mask, sram_sel)		\
	do {										\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value);			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask);			\
		WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL,				\
			UVD_DPG_LMA_CTL__READ_WRITE_MASK |				\
			((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg)	\
			<< UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) |			\
			(sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT));		\
	} while (0)
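
/*
 * Illustrative VCN 1.0 DPG access (hedged; the register, mask, and sram_sel
 * choices here are assumptions for illustration, actual callers live in the
 * vcn_v1_0 code): both macros tunnel register traffic through the
 * UVD_DPG_LMA_* mailbox while the engine is power gated, e.g.:
 *
 *	data = RREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL,
 *					 0xFFFFFFFF, sram_sel);
 *	WREG32_SOC15_DPG_MODE_1_0(UVD, 0, mmUVD_CGC_CTRL, data,
 *				  0xFFFFFFFF, sram_sel);
 */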

#define SOC15_DPG_MODE_OFFSET(ip, inst_idx, reg)					\
	({										\
		uint32_t internal_reg_offset, addr;					\
		bool video_range, video1_range, aon_range, aon1_range;			\
											\
		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg);	\
		addr <<= 2;								\
		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) &&	\
				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600)))));	\
		video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS_3_0)) &&	\
				((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS_3_0 + 0x2600)))));	\
		aon_range   = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS_2_0)) &&	\
				((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS_2_0 + 0x600)))));	\
		aon1_range  = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS_3_0)) &&	\
				((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS_3_0 + 0x600)))));	\
		if (video_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS_2_0) +	\
				(VCN_VID_IP_ADDRESS_2_0));				\
		else if (aon_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS_2_0) +	\
				(VCN_AON_IP_ADDRESS_2_0));				\
		else if (video1_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS_3_0) +	\
				(VCN_VID_IP_ADDRESS_2_0));				\
		else if (aon1_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS_3_0) +	\
				(VCN_AON_IP_ADDRESS_2_0));				\
		else									\
			internal_reg_offset = (0xFFFFF & addr);				\
											\
		internal_reg_offset >>= 2;						\
	})

#define RREG32_SOC15_DPG_MODE(inst_idx, offset, mask_en)				\
	({										\
		WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL,				\
			(0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |			\
			mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |			\
			offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));		\
		RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_DATA);			\
	})

#define WREG32_SOC15_DPG_MODE(inst_idx, offset, value, mask_en, indirect)		\
	do {										\
		if (!indirect) {							\
			WREG32_SOC15(VCN, GET_INST(VCN, inst_idx),			\
				     mmUVD_DPG_LMA_DATA, value);			\
			WREG32_SOC15(							\
				VCN, GET_INST(VCN, inst_idx),				\
				mmUVD_DPG_LMA_CTL,					\
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |		\
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |		\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));	\
		} else {								\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ =		\
				offset;							\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ =		\
				value;							\
		}									\
	} while (0)
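
/*
 * Pairing sketch (assumptions: the register and flag values are illustrative
 * only; real callers are the vcn_v2_0+ DPG start sequences): translate a SoC
 * register to its internal offset, then move data through the LMA window.
 * With indirect == 0 the write happens immediately; with indirect != 0 the
 * (offset, value) pair is queued at dpg_sram_curr_addr instead.
 *
 *	WREG32_SOC15_DPG_MODE(inst_idx,
 *		SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_CNTL),
 *		tmp, 0, indirect);
 */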

#define SOC24_DPG_MODE_OFFSET(ip, inst_idx, reg)					\
	({										\
		uint32_t internal_reg_offset, addr;					\
		bool video_range, video1_range, aon_range, aon1_range;			\
											\
		addr = (adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg);	\
		addr <<= 2;								\
		video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS)) &&		\
				((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS + 0x2600)))));	\
		video1_range = ((((0xFFFFF & addr) >= (VCN1_VID_SOC_ADDRESS)) &&	\
				((0xFFFFF & addr) < ((VCN1_VID_SOC_ADDRESS + 0x2600)))));	\
		aon_range   = ((((0xFFFFF & addr) >= (VCN_AON_SOC_ADDRESS)) &&		\
				((0xFFFFF & addr) < ((VCN_AON_SOC_ADDRESS + 0x600)))));	\
		aon1_range  = ((((0xFFFFF & addr) >= (VCN1_AON_SOC_ADDRESS)) &&	\
				((0xFFFFF & addr) < ((VCN1_AON_SOC_ADDRESS + 0x600)))));	\
		if (video_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_VID_SOC_ADDRESS) +	\
				(VCN_VID_IP_ADDRESS));					\
		else if (aon_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN_AON_SOC_ADDRESS) +	\
				(VCN_AON_IP_ADDRESS));					\
		else if (video1_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_VID_SOC_ADDRESS) +	\
				(VCN_VID_IP_ADDRESS));					\
		else if (aon1_range)							\
			internal_reg_offset = ((0xFFFFF & addr) - (VCN1_AON_SOC_ADDRESS) +	\
				(VCN_AON_IP_ADDRESS));					\
		else									\
			internal_reg_offset = (0xFFFFF & addr);				\
											\
		internal_reg_offset >>= 2;						\
	})

#define WREG32_SOC24_DPG_MODE(inst_idx, offset, value, mask_en, indirect)		\
	do {										\
		if (!indirect) {							\
			WREG32_SOC15(VCN, GET_INST(VCN, inst_idx),			\
				     regUVD_DPG_LMA_DATA, value);			\
			WREG32_SOC15(							\
				VCN, GET_INST(VCN, inst_idx),				\
				regUVD_DPG_LMA_CTL,					\
				(0x1 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT |		\
				 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT |		\
				 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT));	\
		} else {								\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ =		\
				offset;							\
			*adev->vcn.inst[inst_idx].dpg_sram_curr_addr++ =		\
				value;							\
		}									\
	} while (0)
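
/*
 * Indirect-mode lifecycle sketch (an assumption pieced together from the
 * dpg_sram_* members and amdgpu_vcn_psp_update_sram() declared in this
 * header): reset the cursor, queue the writes, then hand the buffer to PSP.
 *
 *	adev->vcn.inst[inst_idx].dpg_sram_curr_addr =
 *		(uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
 *	// ... WREG32_SOC15_DPG_MODE()/WREG32_SOC24_DPG_MODE() calls with
 *	// indirect != 0 append (offset, value) pairs here ...
 *	amdgpu_vcn_psp_update_sram(adev, inst_idx, ucode_id);
 */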

#define AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE (1 << 2)
#define AMDGPU_FW_SHARED_FLAG_0_DRM_KEY_INJECT (1 << 4)
#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB	(1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG	(1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG		(1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG	(1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
#define AMDGPU_VCN_SMU_DPM_INTERFACE_FLAG (1 << 11)
#define AMDGPU_VCN_VF_RB_SETUP_FLAG (1 << 14)
#define AMDGPU_VCN_VF_RB_DECOUPLE_FLAG (1 << 15)

#define MAX_NUM_VCN_RB_SETUP 4

#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER	0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER		0x00000001

#define VCN_CODEC_DISABLE_MASK_AV1  (1 << 0)
#define VCN_CODEC_DISABLE_MASK_VP9  (1 << 1)
#define VCN_CODEC_DISABLE_MASK_HEVC (1 << 2)
#define VCN_CODEC_DISABLE_MASK_H264 (1 << 3)

#define AMDGPU_VCN_SMU_DPM_INTERFACE_DGPU (0)
#define AMDGPU_VCN_SMU_DPM_INTERFACE_APU (1)

#define AMDGPU_DRM_KEY_INJECT_WORKAROUND_VCNFW_ASD_HANDSHAKING 2

struct amdgpu_hwip_reg_entry;

enum amdgpu_vcn_caps {
	AMDGPU_VCN_RRMT_ENABLED,
};

#define AMDGPU_VCN_CAPS(caps) BIT(AMDGPU_VCN_##caps)
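
/*
 * Expands to a bit for the caps field of struct amdgpu_vcn below; an
 * illustrative check (a sketch, not taken from this file):
 *
 *	if (adev->vcn.caps & AMDGPU_VCN_CAPS(RRMT_ENABLED))
 *		...
 */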

enum fw_queue_mode {
	FW_QUEUE_RING_RESET = 1,
	FW_QUEUE_DPG_HOLD_OFF = 2,
};
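
/*
 * Hedged note: these appear to be the values ORed into the *_queue_mode
 * bytes of struct amdgpu_fw_shared_multi_queue (declared later in this
 * file) to tell firmware a ring is being reset or DPG must be held off,
 * along the lines of:
 *
 *	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
 *	// ... reprogram the decode ring ...
 *	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
 */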

enum engine_status_constants {
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
	UVD_PGFSM_STATUS__UVDM_UVDU_UVDLM_PWR_ON_3_0 = 0x2A2A8AA0,
	UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON = 0x00000002,
	UVD_STATUS__UVD_BUSY = 0x00000004,
	GB_ADDR_CONFIG_DEFAULT = 0x26010011,
	UVD_STATUS__IDLE = 0x2,
	UVD_STATUS__BUSY = 0x5,
	UVD_POWER_STATUS__UVD_POWER_STATUS_TILES_OFF = 0x1,
	UVD_STATUS__RBC_BUSY = 0x1,
	UVD_PGFSM_STATUS_UVDJ_PWR_ON = 0,
};

enum internal_dpg_state {
	VCN_DPG_STATE__UNPAUSE = 0,
	VCN_DPG_STATE__PAUSE,
};

struct dpg_pause_state {
	enum internal_dpg_state fw_based;
	enum internal_dpg_state jpeg;
};
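
/*
 * Minimal pause sketch (assumed flow; real transitions go through the
 * per-instance pause_dpg_mode callback declared below):
 *
 *	struct dpg_pause_state new_state = {
 *		.fw_based = VCN_DPG_STATE__PAUSE, // pause before encode work
 *	};
 *
 *	vinst->pause_dpg_mode(vinst, &new_state);
 */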

struct amdgpu_vcn_reg {
	unsigned	data0;
	unsigned	data1;
	unsigned	cmd;
	unsigned	nop;
	unsigned	context_id;
	unsigned	ib_vmid;
	unsigned	ib_bar_low;
	unsigned	ib_bar_high;
	unsigned	ib_size;
	unsigned	gp_scratch8;
	unsigned	scratch9;
};

struct amdgpu_vcn_fw_shared {
	void        *cpu_addr;
	uint64_t    gpu_addr;
	uint32_t    mem_size;
	uint32_t    log_offset;
};

struct amdgpu_vcn_inst {
	struct amdgpu_device	*adev;
	int			inst;
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	void			*saved_bo;
	struct amdgpu_ring	ring_dec;
	struct amdgpu_ring	ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
	atomic_t		sched_score;
	struct amdgpu_irq_src	irq;
	struct amdgpu_irq_src	ras_poison_irq;
	struct amdgpu_vcn_reg	external;
	struct amdgpu_bo	*dpg_sram_bo;
	struct dpg_pause_state	pause_state;
	void			*dpg_sram_cpu_addr;
	uint64_t		dpg_sram_gpu_addr;
	uint32_t		*dpg_sram_curr_addr;
	atomic_t		dpg_enc_submission_cnt;
	struct amdgpu_vcn_fw_shared fw_shared;
	uint8_t			aid_id;
	const struct firmware	*fw; /* VCN firmware */
	uint8_t			vcn_config;
	uint32_t		vcn_codec_disable_mask;
	atomic_t		total_submission_cnt;
	struct mutex		vcn_pg_lock;
	enum amd_powergating_state cur_state;
	struct delayed_work	idle_work;
	unsigned		fw_version;
	unsigned		num_enc_rings;
	bool			indirect_sram;
	struct amdgpu_vcn_reg	internal;
	struct mutex		vcn1_jpeg1_workaround;
	int (*pause_dpg_mode)(struct amdgpu_vcn_inst *vinst,
			      struct dpg_pause_state *new_state);
	int (*set_pg_state)(struct amdgpu_vcn_inst *vinst,
			    enum amd_powergating_state state);
	int (*reset)(struct amdgpu_vcn_inst *vinst);
	bool using_unified_queue;
	struct mutex		engine_reset_mutex;
};

struct amdgpu_vcn_ras {
	struct amdgpu_ras_block_object ras_block;
};

struct amdgpu_vcn {
	uint8_t	num_vcn_inst;
	struct amdgpu_vcn_inst	inst[AMDGPU_MAX_VCN_INSTANCES];

	unsigned	harvest_config;

	struct ras_common_if    *ras_if;
	struct amdgpu_vcn_ras   *ras;

	uint16_t inst_mask;
	uint8_t	num_inst_per_aid;

	/* IP reg dump */
	uint32_t		*ip_dump;

	uint32_t		supported_reset;
	uint32_t		caps;

	bool			per_inst_fw;
	unsigned		fw_version;

	bool			workload_profile_active;
	struct mutex            workload_profile_mutex;
	u32 reg_count;
	const struct amdgpu_hwip_reg_entry *reg_list;
};
struct amdgpu_fw_shared_rb_ptrs_struct {
	/* to work around DPG ring read/write pointer issues */
	uint32_t  rptr;
	uint32_t  wptr;
};

struct amdgpu_fw_shared_multi_queue {
	uint8_t decode_queue_mode;
	uint8_t encode_generalpurpose_queue_mode;
	uint8_t encode_lowlatency_queue_mode;
	uint8_t encode_realtime_queue_mode;
	uint8_t padding[4];
};

struct amdgpu_fw_shared_sw_ring {
	uint8_t is_enabled;
	uint8_t padding[3];
};

struct amdgpu_fw_shared_unified_queue_struct {
	uint8_t is_enabled;
	uint8_t queue_mode;
	uint8_t queue_status;
	uint8_t padding[5];
};

struct amdgpu_fw_shared_fw_logging {
	uint8_t is_enabled;
	uint32_t addr_lo;
	uint32_t addr_hi;
	uint32_t size;
};

struct amdgpu_fw_shared_smu_interface_info {
	uint8_t smu_interface_type;
	uint8_t padding[3];
};

struct amdgpu_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[44];
	struct amdgpu_fw_shared_rb_ptrs_struct rb;
	uint8_t pad1[1];
	struct amdgpu_fw_shared_multi_queue multi_queue;
	struct amdgpu_fw_shared_sw_ring sw_ring;
	struct amdgpu_fw_shared_fw_logging fw_log;
	struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};

struct amdgpu_vcn_rb_setup_info {
	uint32_t  rb_addr_lo;
	uint32_t  rb_addr_hi;
	uint32_t  rb_size;
};

struct amdgpu_fw_shared_rb_setup {
	uint32_t is_rb_enabled_flags;

	union {
		struct {
			uint32_t  rb_addr_lo;
			uint32_t  rb_addr_hi;
			uint32_t  rb_size;
			uint32_t  rb4_addr_lo;
			uint32_t  rb4_addr_hi;
			uint32_t  rb4_size;
			uint32_t  reserved[6];
		};

		struct {
			struct amdgpu_vcn_rb_setup_info rb_info[MAX_NUM_VCN_RB_SETUP];
		};
	};
};
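
/*
 * Layout note (derived from the declarations above): both anonymous structs
 * span the same 12 dwords, so the legacy rb/rb4 fields and the
 * rb_info[MAX_NUM_VCN_RB_SETUP] array are two views of one block;
 * is_rb_enabled_flags presumably marks which of those rings are live.
 */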

struct amdgpu_fw_shared_drm_key_wa {
	uint8_t  method;
	uint8_t  reserved[3];
};

struct amdgpu_fw_shared_queue_decouple {
	uint8_t  is_enabled;
	uint8_t  reserved[7];
};

struct amdgpu_vcn4_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[12];
	struct amdgpu_fw_shared_unified_queue_struct sq;
	uint8_t pad1[8];
	struct amdgpu_fw_shared_fw_logging fw_log;
	uint8_t pad2[20];
	struct amdgpu_fw_shared_rb_setup rb_setup;
	struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
	struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
	uint8_t pad3[9];
	struct amdgpu_fw_shared_queue_decouple decouple;
};

struct amdgpu_vcn_fwlog {
	uint32_t rptr;
	uint32_t wptr;
	uint32_t buffer_size;
	uint32_t header_size;
	uint8_t wrapped;
};

struct amdgpu_vcn_decode_buffer {
	uint32_t valid_buf_flag;
	uint32_t msg_buffer_address_hi;
	uint32_t msg_buffer_address_lo;
	uint32_t pad[30];
};

struct amdgpu_vcn_rb_metadata {
	uint32_t size;
	uint32_t present_flag_0;

	uint8_t version;
	uint8_t ring_id;
	uint8_t pad[26];
};

struct amdgpu_vcn5_fw_shared {
	uint32_t present_flag_0;
	uint8_t pad[12];
	struct amdgpu_fw_shared_unified_queue_struct sq;
	uint8_t pad1[8];
	struct amdgpu_fw_shared_fw_logging fw_log;
	uint8_t pad2[20];
	struct amdgpu_fw_shared_rb_setup rb_setup;
	struct amdgpu_fw_shared_smu_interface_info smu_dpm_interface;
	struct amdgpu_fw_shared_drm_key_wa drm_key_wa;
	uint8_t pad3[404];
};

#define VCN_BLOCK_ENCODE_DISABLE_MASK 0x80
#define VCN_BLOCK_DECODE_DISABLE_MASK 0x40
#define VCN_BLOCK_QUEUE_DISABLE_MASK 0xC0
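
/*
 * These bits are tested against the per-instance vcn_config byte (struct
 * amdgpu_vcn_inst above); note 0xC0 is simply the encode and decode bits
 * ORed together. A sketch of the expected check (see also
 * amdgpu_vcn_is_disabled_vcn() below):
 *
 *	if (adev->vcn.inst[inst].vcn_config & VCN_BLOCK_DECODE_DISABLE_MASK)
 *		... // decode queue fused off on this instance
 */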

enum vcn_ring_type {
	VCN_ENCODE_RING,
	VCN_DECODE_RING,
	VCN_UNIFIED_RING,
};

int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i);
void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i);
int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i);
int amdgpu_vcn_resume(struct amdgpu_device *adev, int i);
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring);

bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev,
				enum vcn_ring_type type, uint32_t vcn_instance);

int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_unified_ring_test_ib(struct amdgpu_ring *ring, long timeout);

int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);

enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);

void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev, int i);

void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn);
void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
				   uint8_t i, struct amdgpu_vcn_inst *vcn);

int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
			struct amdgpu_irq_src *source,
			struct amdgpu_iv_entry *entry);
int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
			struct ras_common_if *ras_block);
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);

int amdgpu_vcn_psp_update_sram(struct amdgpu_device *adev, int inst_idx,
			       enum AMDGPU_UCODE_ID ucode_id);
int amdgpu_vcn_save_vcpu_bo(struct amdgpu_device *adev);
int amdgpu_vcn_sysfs_reset_mask_init(struct amdgpu_device *adev);
void amdgpu_vcn_sysfs_reset_mask_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev);

int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block,
			      enum amd_powergating_state state);
int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring,
			  unsigned int vmid,
			  struct amdgpu_fence *guilty_fence);
int amdgpu_vcn_reg_dump_init(struct amdgpu_device *adev,
			     const struct amdgpu_hwip_reg_entry *reg, u32 count);
void amdgpu_vcn_dump_ip_state(struct amdgpu_ip_block *ip_block);
void amdgpu_vcn_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p);
void amdgpu_vcn_get_profile(struct amdgpu_device *adev);
void amdgpu_vcn_put_profile(struct amdgpu_device *adev);

#endif