/* xref: /linux/drivers/gpu/drm/imagination/pvr_rogue_fwif_shared.h (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d) */
1 /* SPDX-License-Identifier: GPL-2.0-only OR MIT */
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #ifndef PVR_ROGUE_FWIF_SHARED_H
5 #define PVR_ROGUE_FWIF_SHARED_H
6 
7 #include <linux/compiler.h>
8 #include <linux/types.h>
9 
/* Number of RT data structures (presumably per HWRT dataset — confirm in pvr_rogue_fwif.h). */
#define ROGUE_FWIF_NUM_RTDATAS 2U
/* Number of geometry data structures. */
#define ROGUE_FWIF_NUM_GEOMDATAS 1U
/* Number of freelists referenced per RT data. */
#define ROGUE_FWIF_NUM_RTDATA_FREELISTS 2U
/* Number of geometry cores supported. */
#define ROGUE_NUM_GEOM_CORES 1U

/*
 * Size of the per-geometry-core context-switch register array in
 * struct rogue_fwif_static_rendercontext_state (kept larger than
 * ROGUE_NUM_GEOM_CORES; presumably to fix the shared layout across
 * configurations — confirm).
 */
#define ROGUE_NUM_GEOM_CORES_SIZE 2U
16 
/*
 * Maximum number of UFOs in a CCB command.
 * The number is based on having 32 sync prims (as originally), plus 32 sync
 * checkpoints.
 * Once the use of sync prims is no longer supported, we will retain
 * the same total (64), as the number of sync checkpoints which may be
 * supporting a fence is not visible to the client driver and has to
 * allow for the number of different timelines involved in fence merges.
 */
#define ROGUE_FWIF_CCB_CMD_MAX_UFOS (32U + 32U)
27 
/*
 * This is a generic limit imposed on any DM (GEOMETRY, FRAGMENT, CDM, TDM,
 * 2D, TRANSFER) command passed through the bridge.
 * Just across the bridge in the server, any incoming kick command size is
 * checked against this maximum limit.
 * In case the incoming command size is larger than the specified limit,
 * the bridge call is rejected with an error.
 */
#define ROGUE_FWIF_DM_INDEPENDENT_KICK_CMD_SIZE (1024U)
37 
/*
 * Indices into rogue_fwif_cmd_geom_frag_shared::pr_buffer_fw_addr[], the
 * partial-render (PR) scratch buffers (Z/S and MSAA).
 */
/* First PR buffer index. */
#define ROGUE_FWIF_PRBUFFER_START (0)
/* Z/S scratch buffer slot (same index as START). */
#define ROGUE_FWIF_PRBUFFER_ZSBUFFER (0)
/* MSAA scratch buffer slot. */
#define ROGUE_FWIF_PRBUFFER_MSAABUFFER (1)
/* Number of supported PR buffers; sizes pr_buffer_fw_addr[]. */
#define ROGUE_FWIF_PRBUFFER_MAXSUPPORTED (2)
42 
/*
 * Device-virtual plus firmware-virtual address pair for one allocation.
 * Explicitly padded and 8-byte aligned so the layout is identical on the
 * host and firmware sides of the interface.
 */
struct rogue_fwif_dma_addr {
	/* 64-bit device-virtual address. */
	aligned_u64 dev_addr;
	/* 32-bit firmware-virtual address (presumably of the same allocation — confirm). */
	u32 fw_addr;
	/* Explicit padding; keeps sizeof() a multiple of 8. */
	u32 padding;
} __aligned(8);
48 
/*
 * UFO (update/fence object): a firmware-address/value pair used for
 * synchronisation in CCB commands (see ROGUE_FWIF_CCB_CMD_MAX_UFOS).
 */
struct rogue_fwif_ufo {
	/* Firmware address of the sync object. */
	u32 addr;
	/* Value to wait for or to write (NOTE(review): direction depends on usage — confirm at call sites). */
	u32 value;
};
53 
/*
 * Flag in rogue_fwif_ufo::addr marking the address as a sync checkpoint
 * rather than a sync prim (NOTE(review): inferred from the macro name —
 * confirm against the UFO handling code).
 */
#define ROGUE_FWIF_UFO_ADDR_IS_SYNC_CHECKPOINT (1)
55 
/* Firmware-visible sync checkpoint object. */
struct rogue_fwif_sync_checkpoint {
	/* Current checkpoint state. */
	u32 state;
	/* Number of references the firmware holds on this checkpoint. */
	u32 fw_ref_count;
};
60 
/*
 * Per-object command counters shared with the firmware; when
 * executed_commands catches up with submitted_commands the object has no
 * outstanding work (presumably the cleanup-readiness test — confirm).
 */
struct rogue_fwif_cleanup_ctl {
	/* Number of commands received by the FW */
	u32 submitted_commands;
	/* Number of commands executed by the FW */
	u32 executed_commands;
} __aligned(8);
67 
/*
 * Used to share frame numbers across UM-KM-FW:
 * the frame number is set in UM, and is required both in KM (for HTB) and
 * in the FW (for FW trace).
 *
 * May be used to house Kick flags in the future.
 */
struct rogue_fwif_cmd_common {
	/* Associated frame number. */
	u32 frame_num;
};
79 
/*
 * Geometry and fragment commands require a set of firmware addresses that are stored in the Kernel.
 * Client has handle(s) to Kernel containers storing these addresses, instead of raw addresses. We
 * have to patch/write these addresses in KM to prevent UM from controlling FW addresses directly.
 * Typedefs for geometry and fragment commands are shared between Client and Firmware (both
 * single-BVNC). Kernel is implemented in a multi-BVNC manner, so it can't use geometry|fragment
 * CMD type definitions directly. Therefore we have a SHARED block that is shared between UM-KM-FW
 * across all BVNC configurations.
 */
struct rogue_fwif_cmd_geom_frag_shared {
	/* Common command attributes (frame number). */
	struct rogue_fwif_cmd_common cmn;

	/*
	 * RTData associated with this command; this is used for context
	 * selection and for storing out the HW context when the TA is switched
	 * out for continuing later.
	 */
	u32 hwrt_data_fw_addr;

	/*
	 * Supported PR buffers like Z/S/MSAA scratch; indexed by the
	 * ROGUE_FWIF_PRBUFFER_* defines.
	 */
	u32 pr_buffer_fw_addr[ROGUE_FWIF_PRBUFFER_MAXSUPPORTED];
};
103 
/*
 * Client Circular Command Buffer (CCCB) control structure.
 * This is shared between the Server and the Firmware and holds byte offsets
 * into the CCCB as well as the wrapping mask to aid wrap around. A given
 * snapshot of this queue with Cmd 1 running on the GPU might be:
 *
 *          Roff                           Doff                 Woff
 * [..........|-1----------|=2===|=3===|=4===|~5~~~~|~6~~~~|~7~~~~|..........]
 *            <      runnable commands       ><   !ready to run   >
 *
 * Cmd 1    : Currently executing on the GPU data master.
 * Cmd 2,3,4: Fence dependencies met, commands runnable.
 * Cmd 5... : Fence dependency not met yet.
 */
struct rogue_fwif_cccb_ctl {
	/* Host write offset into CCB. This must be aligned to 16 bytes. */
	u32 write_offset;
	/*
	 * Firmware read offset into CCB. Points to the command that is runnable
	 * on GPU, if R!=W
	 */
	u32 read_offset;
	/*
	 * Firmware fence dependency offset. Points to commands not ready, i.e.
	 * fence dependencies are not met.
	 */
	u32 dep_offset;
	/*
	 * Offset wrapping mask: total capacity in bytes of the CCB, minus 1
	 * (so the CCB size must be a power of two).
	 */
	u32 wrap_mask;

	/* Only used if SUPPORT_AGP is present. */
	u32 read_offset2;

	/* Only used if SUPPORT_AGP4 is present. */
	u32 read_offset3;
	/* Only used if SUPPORT_AGP4 is present. */
	u32 read_offset4;

	/* Explicit padding; keeps sizeof() a multiple of 8. */
	u32 padding;
} __aligned(8);
144 
/* Freelist type indices. */
#define ROGUE_FW_LOCAL_FREELIST (0)
#define ROGUE_FW_GLOBAL_FREELIST (1)
/* Highest-valued freelist type. */
#define ROGUE_FW_FREELIST_TYPE_LAST ROGUE_FW_GLOBAL_FREELIST
/* Number of freelist types; derived from the last type index. */
#define ROGUE_FW_MAX_FREELISTS (ROGUE_FW_FREELIST_TYPE_LAST + 1U)
149 
/*
 * Geometry (VDM/TA) register state saved and restored on a firmware
 * context switch.
 */
struct rogue_fwif_geom_registers_caswitch {
	u64 geom_reg_vdm_context_state_base_addr;
	u64 geom_reg_vdm_context_state_resume_addr;
	u64 geom_reg_ta_context_state_base_addr;

	/*
	 * Two banks of VDM store/resume task state (NOTE(review): the reason
	 * for exactly two banks is not visible here — confirm against the FW
	 * context-switch code).
	 */
	struct {
		/* VDM context store task controls. */
		u64 geom_reg_vdm_context_store_task0;
		u64 geom_reg_vdm_context_store_task1;
		u64 geom_reg_vdm_context_store_task2;

		/* VDM resume state update controls */
		u64 geom_reg_vdm_context_resume_task0;
		u64 geom_reg_vdm_context_resume_task1;
		u64 geom_reg_vdm_context_resume_task2;

		u64 geom_reg_vdm_context_store_task3;
		u64 geom_reg_vdm_context_store_task4;

		u64 geom_reg_vdm_context_resume_task3;
		u64 geom_reg_vdm_context_resume_task4;
	} geom_state[2];
};

/* Byte size of the geometry context-switch register block. */
#define ROGUE_FWIF_GEOM_REGISTERS_CSWITCH_SIZE \
	sizeof(struct rogue_fwif_geom_registers_caswitch)
175 
/*
 * CDM (compute) register state saved and restored on a firmware
 * context switch.
 */
struct rogue_fwif_cdm_registers_cswitch {
	u64 cdmreg_cdm_context_pds0;
	u64 cdmreg_cdm_context_pds1;
	u64 cdmreg_cdm_terminate_pds;
	u64 cdmreg_cdm_terminate_pds1;

	/* CDM resume controls */
	u64 cdmreg_cdm_resume_pds0;
	u64 cdmreg_cdm_context_pds0_b;
	u64 cdmreg_cdm_resume_pds0_b;
};
187 
/* Static (kernel-initialised) part of a render context's FW state. */
struct rogue_fwif_static_rendercontext_state {
	/* Geom registers for ctx switch, one entry per geometry core slot. */
	struct rogue_fwif_geom_registers_caswitch ctxswitch_regs[ROGUE_NUM_GEOM_CORES_SIZE]
		__aligned(8);
};

/* Byte size of the static render-context state block. */
#define ROGUE_FWIF_STATIC_RENDERCONTEXT_SIZE \
	sizeof(struct rogue_fwif_static_rendercontext_state)
196 
/* Static (kernel-initialised) part of a compute context's FW state. */
struct rogue_fwif_static_computecontext_state {
	/* CDM registers for ctx switch */
	struct rogue_fwif_cdm_registers_cswitch ctxswitch_regs __aligned(8);
};

/* Byte size of the static compute-context state block. */
#define ROGUE_FWIF_STATIC_COMPUTECONTEXT_SIZE \
	sizeof(struct rogue_fwif_static_computecontext_state)
204 
/* Backing state of an on-demand partial-render (PR) buffer. */
enum rogue_fwif_prbuffer_state {
	/* No physical backing allocated. */
	ROGUE_FWIF_PRBUFFER_UNBACKED = 0,
	/* Physical backing present. */
	ROGUE_FWIF_PRBUFFER_BACKED,
	/* Backing allocation in progress. */
	ROGUE_FWIF_PRBUFFER_BACKING_PENDING,
	/* Backing release in progress. */
	ROGUE_FWIF_PRBUFFER_UNBACKING_PENDING,
};
211 
/* Firmware-shared descriptor for an on-demand partial-render buffer. */
struct rogue_fwif_prbuffer {
	/* Buffer ID */
	u32 buffer_id;
	/* Needs On-demand Z/S/MSAA Buffer allocation */
	bool on_demand __aligned(4);
	/* Z/S/MSAA buffer state */
	enum rogue_fwif_prbuffer_state state;
	/*
	 * Cleanup state. NOTE(review): "cleanup_sate" is a typo for
	 * "cleanup_state", but the name is part of the shared FW interface —
	 * do not rename without coordinating with all users.
	 */
	struct rogue_fwif_cleanup_ctl cleanup_sate;
	/* Compatibility and other flags */
	u32 prbuffer_flags;
} __aligned(8);
224 
/*
 * Last reset reason for a context.
 * Values are explicit because they are shared with the firmware; 6-11 are
 * not defined here (NOTE(review): presumably reserved/unused in this
 * driver — confirm against the firmware headers).
 */
enum rogue_context_reset_reason {
	/* No reset reason recorded */
	ROGUE_CONTEXT_RESET_REASON_NONE = 0,
	/* Caused a reset due to locking up */
	ROGUE_CONTEXT_RESET_REASON_GUILTY_LOCKUP = 1,
	/* Affected by another context locking up */
	ROGUE_CONTEXT_RESET_REASON_INNOCENT_LOCKUP = 2,
	/* Overran the global deadline */
	ROGUE_CONTEXT_RESET_REASON_GUILTY_OVERRUNING = 3,
	/* Affected by another context overrunning */
	ROGUE_CONTEXT_RESET_REASON_INNOCENT_OVERRUNING = 4,
	/* Forced reset to ensure scheduling requirements */
	ROGUE_CONTEXT_RESET_REASON_HARD_CONTEXT_SWITCH = 5,
	/* FW Safety watchdog triggered */
	ROGUE_CONTEXT_RESET_REASON_FW_WATCHDOG = 12,
	/* FW page fault (no HWR) */
	ROGUE_CONTEXT_RESET_REASON_FW_PAGEFAULT = 13,
	/* FW execution error (GPU reset requested) */
	ROGUE_CONTEXT_RESET_REASON_FW_EXEC_ERR = 14,
	/* Host watchdog detected FW error */
	ROGUE_CONTEXT_RESET_REASON_HOST_WDG_FW_ERR = 15,
	/* Geometry DM OOM event is not allowed */
	ROGUE_CONTEXT_GEOM_OOM_DISABLED = 16,
};
250 
/* Reset reason paired with the job it is attributed to. */
struct rogue_context_reset_reason_data {
	/* Most recent reset reason recorded for the context. */
	enum rogue_context_reset_reason reset_reason;
	/*
	 * External job reference associated with the reset (NOTE(review):
	 * inferred from the name — confirm at the reset-notification call site).
	 */
	u32 reset_ext_job_ref;
};
255 
256 #include "pvr_rogue_fwif_shared_check.h"
257 
258 #endif /* PVR_ROGUE_FWIF_SHARED_H */
259