/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_ROGUE_FWIF_SHARED_H
#define PVR_ROGUE_FWIF_SHARED_H

#include <linux/compiler.h>
#include <linux/types.h>

#define ROGUE_FWIF_NUM_RTDATAS 2U
#define ROGUE_FWIF_NUM_GEOMDATAS 1U
#define ROGUE_FWIF_NUM_RTDATA_FREELISTS 2U
#define ROGUE_NUM_GEOM_CORES 1U

#define ROGUE_NUM_GEOM_CORES_SIZE 2U
/*
 * Maximum number of UFOs in a CCB command.
 * The number is based on having 32 sync prims (as originally), plus 32 sync
 * checkpoints.
 * Once the use of sync prims is no longer supported, the same total (64)
 * will be retained, because the number of sync checkpoints backing a fence
 * is not visible to the client driver and has to allow for the number of
 * different timelines involved in fence merges.
 */
#define ROGUE_FWIF_CCB_CMD_MAX_UFOS (32U + 32U)

/*
 * This is a generic limit imposed on any DM (GEOMETRY, FRAGMENT, CDM, TDM,
 * 2D, TRANSFER) command passed through the bridge.
 * Just across the bridge, in the server, the size of any incoming kick
 * command is checked against this maximum limit.
 * If the incoming command is larger than the specified limit, the bridge
 * call is rejected with an error.
 */
#define ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)

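/*
 * Illustrative sketch only, not part of the firmware interface: the
 * server-side bounds check described above could be expressed as follows.
 * The helper name and parameter are hypothetical.
 */
static inline bool pvr_kick_cmd_size_is_valid(u32 cmd_size)
{
	/* Any DM kick command larger than the bridge limit is rejected. */
	return cmd_size <= ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE;
}
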
#define ROGUE_FWIF_PRBUFFER_START (0)
#define ROGUE_FWIF_PRBUFFER_ZSBUFFER (0)
#define ROGUE_FWIF_PRBUFFER_MSAABUFFER (1)
#define ROGUE_FWIF_PRBUFFER_MAXSUPPORTED (2)

struct rogue_fwif_dma_addr {
	aligned_u64 dev_addr;
	u32 fw_addr;
	u32 padding;
} __aligned(8);

struct rogue_fwif_ufo {
	u32 addr;
	u32 value;
};

#define ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT (1)

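/*
 * Illustrative sketch only: assuming ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT
 * is carried in the low bit of a UFO address (as its value suggests), a
 * hypothetical helper to test for it could look like this.
 */
static inline bool pvr_ufo_addr_is_sync_checkpoint(const struct rogue_fwif_ufo *ufo)
{
	/* Bit 0 of the FW address marks a sync checkpoint UFO. */
	return (ufo->addr & ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT) != 0;
}
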
struct rogue_fwif_sync_checkpoint {
	u32 state;
	u32 fw_ref_count;
};

struct rogue_fwif_cleanup_ctl {
	/* Number of commands received by the FW */
	u32 submitted_commands;
	/* Number of commands executed by the FW */
	u32 executed_commands;
} __aligned(8);

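/*
 * Illustrative sketch only: cleanup of an object is typically safe once the
 * FW has executed everything it has received, i.e. the two counters above
 * match. The helper name is hypothetical.
 */
static inline bool pvr_cleanup_ctl_is_idle(const struct rogue_fwif_cleanup_ctl *ctl)
{
	return ctl->submitted_commands == ctl->executed_commands;
}
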
/*
 * Used to share frame numbers across UM-KM-FW.
 * The frame number is set in UM and is required both in KM (for HTB) and in
 * FW (for the FW trace).
 *
 * May be used to house kick flags in the future.
 */
struct rogue_fwif_cmd_common {
	/* Associated frame number */
	u32 frame_num;
};

/*
 * Geometry and fragment commands require a set of firmware addresses that are stored in the
 * Kernel. The Client has handle(s) to Kernel containers storing these addresses, instead of raw
 * addresses. We have to patch/write these addresses in KM to prevent UM from controlling FW
 * addresses directly. Typedefs for geometry and fragment commands are shared between Client and
 * Firmware (both single-BVNC). The Kernel is implemented in a multi-BVNC manner, so it can't use
 * geometry|fragment CMD type definitions directly. Therefore we have a SHARED block that is
 * shared between UM-KM-FW across all BVNC configurations.
 */
struct rogue_fwif_cmd_geom_frag_shared {
	/* Common command attributes */
	struct rogue_fwif_cmd_common cmn;

	/*
	 * RTData associated with this command. This is used for context
	 * selection and for storing out the HW context when the TA is
	 * switched out, so it can be resumed later.
	 */
	u32 hwrt_data_fw_addr;

	/* Supported PR buffers like Z/S/MSAA scratch */
	u32 pr_buffer_fw_addr[ROGUE_FWIF_PRBUFFER_MAXSUPPORTED];
};

/*
 * Client Circular Command Buffer (CCCB) control structure.
 * This is shared between the Server and the Firmware and holds byte offsets
 * into the CCCB as well as the wrapping mask to aid wrap around. A given
 * snapshot of this queue with Cmd 1 running on the GPU might be:
 *
 *          Roff                           Doff                 Woff
 * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
 *            <      runnable commands       ><   !ready to run   >
 *
 * Cmd 1    : Currently executing on the GPU data master.
 * Cmd 2,3,4: Fence dependencies met, commands runnable.
 * Cmd 5... : Fence dependency not met yet.
 */
struct rogue_fwif_cccb_ctl {
	/* Host write offset into the CCB. This must be aligned to 16 bytes. */
	u32 write_offset;
	/*
	 * Firmware read offset into the CCB. Points to the command that is
	 * runnable on the GPU, if R != W.
	 */
	u32 read_offset;
	/*
	 * Firmware fence dependency offset. Points to commands that are not
	 * ready, i.e. their fence dependencies are not met.
	 */
	u32 dep_offset;
	/* Offset wrapping mask (total capacity of the CCB in bytes, minus 1) */
	u32 wrap_mask;

	/* Only used if SUPPORT_AGP is present. */
	u32 read_offset2;

	/* Only used if SUPPORT_AGP4 is present. */
	u32 read_offset3;
	/* Only used if SUPPORT_AGP4 is present. */
	u32 read_offset4;

	u32 padding;
} __aligned(8);

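/*
 * Illustrative sketch only: since wrap_mask is the CCB capacity in bytes
 * minus one (and, assuming the capacity is a power of two as the mask form
 * implies), offsets wrap around with a masked add. The helper name is
 * hypothetical.
 */
static inline u32 pvr_cccb_advance_offset(const struct rogue_fwif_cccb_ctl *ctl,
					  u32 offset, u32 nbytes)
{
	/* Advance a byte offset and wrap it back into the CCB. */
	return (offset + nbytes) & ctl->wrap_mask;
}
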
#define ROGUE_FW_LOCAL_FREELIST (0)
#define ROGUE_FW_GLOBAL_FREELIST (1)
#define ROGUE_FW_FREELIST_TYPE_LAST ROGUE_FW_GLOBAL_FREELIST
#define ROGUE_FW_MAX_FREELISTS (ROGUE_FW_FREELIST_TYPE_LAST + 1U)

struct rogue_fwif_geom_registers_caswitch {
	u64 geom_reg_vdm_context_state_base_addr;
	u64 geom_reg_vdm_context_state_resume_addr;
	u64 geom_reg_ta_context_state_base_addr;

	struct {
		u64 geom_reg_vdm_context_store_task0;
		u64 geom_reg_vdm_context_store_task1;
		u64 geom_reg_vdm_context_store_task2;

		/* VDM resume state update controls */
		u64 geom_reg_vdm_context_resume_task0;
		u64 geom_reg_vdm_context_resume_task1;
		u64 geom_reg_vdm_context_resume_task2;

		u64 geom_reg_vdm_context_store_task3;
		u64 geom_reg_vdm_context_store_task4;

		u64 geom_reg_vdm_context_resume_task3;
		u64 geom_reg_vdm_context_resume_task4;
	} geom_state[2];
};

#define ROGUE_FWIF_GEOM_REGISTERS_CSWITCH_SIZE \
	sizeof(struct rogue_fwif_geom_registers_caswitch)

struct rogue_fwif_cdm_registers_cswitch {
	u64 cdmreg_cdm_context_pds0;
	u64 cdmreg_cdm_context_pds1;
	u64 cdmreg_cdm_terminate_pds;
	u64 cdmreg_cdm_terminate_pds1;

	/* CDM resume controls */
	u64 cdmreg_cdm_resume_pds0;
	u64 cdmreg_cdm_context_pds0_b;
	u64 cdmreg_cdm_resume_pds0_b;
};

struct rogue_fwif_static_rendercontext_state {
	/* Geom registers for ctx switch */
	struct rogue_fwif_geom_registers_caswitch ctxswitch_regs[ROGUE_NUM_GEOM_CORES_SIZE]
		__aligned(8);
};

#define ROGUE_FWIF_STATIC_RENDERCONTEXT_SIZE \
	sizeof(struct rogue_fwif_static_rendercontext_state)

struct rogue_fwif_static_computecontext_state {
	/* CDM registers for ctx switch */
	struct rogue_fwif_cdm_registers_cswitch ctxswitch_regs __aligned(8);
};

#define ROGUE_FWIF_STATIC_COMPUTECONTEXT_SIZE \
	sizeof(struct rogue_fwif_static_computecontext_state)

enum rogue_fwif_prbuffer_state {
	ROGUE_FWIF_PRBUFFER_UNBACKED = 0,
	ROGUE_FWIF_PRBUFFER_BACKED,
	ROGUE_FWIF_PRBUFFER_BACKING_PENDING,
	ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING,
};

struct rogue_fwif_prbuffer {
	/* Buffer ID */
	u32 buffer_id;
	/* Needs on-demand Z/S/MSAA buffer allocation */
	bool on_demand __aligned(4);
	/* Z/S/MSAA buffer state */
	enum rogue_fwif_prbuffer_state state;
	/* Cleanup state */
	struct rogue_fwif_cleanup_ctl cleanup_sate;
	/* Compatibility and other flags */
	u32 prbuffer_flags;
} __aligned(8);

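/*
 * Illustrative sketch only: a hypothetical helper reporting whether an
 * on-demand backing or unbacking operation is still in flight for a PR
 * buffer, based on the state enum above.
 */
static inline bool pvr_prbuffer_backing_pending(const struct rogue_fwif_prbuffer *buf)
{
	return buf->state == ROGUE_FWIF_PRBUFFER_BACKING_PENDING ||
	       buf->state == ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING;
}
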
/* Last reset reason for a context. */
enum rogue_context_reset_reason {
	/* No reset reason recorded */
	ROGUE_CONTEXT_RESET_REASON_NONE = 0,
	/* Caused a reset due to locking up */
	ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1,
	/* Affected by another context locking up */
	ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2,
	/* Overran the global deadline */
	ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3,
	/* Affected by another context overrunning */
	ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4,
	/* Forced reset to ensure scheduling requirements */
	ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5,
	/* FW safety watchdog triggered */
	ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG = 12,
	/* FW page fault (no HWR) */
	ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13,
	/* FW execution error (GPU reset requested) */
	ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14,
	/* Host watchdog detected FW error */
	ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15,
	/* Geometry DM OOM event is not allowed */
	ROGUE_CONTEXT_GEOM_OOM_DISABLED = 16,
};

struct rogue_context_reset_reason_data {
	enum rogue_context_reset_reason reset_reason;
	u32 reset_ext_job_ref;
};

#include "pvr_rogue_fwif_shared_check.h"

#endif /* PVR_ROGUE_FWIF_SHARED_H */