/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_RTP_
#define _XE_RTP_

#include <linux/types.h>
#include <linux/xarray.h>

#include "xe_rtp_types.h"

/*
 * Register table poke infrastructure
 */

struct xe_hw_engine;
struct xe_gt;
struct xe_reg_sr;

/*
 * Helper macros - not to be used outside this header.
 */
/* This counts up to 12 arguments; any more and the 13th argument is returned. */
#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)

#define __CONCAT(a, b) a ## b
#define CONCATENATE(a, b) __CONCAT(a, b)

#define __CALL_FOR_EACH_1(MACRO_, x, ...) MACRO_(x)
#define __CALL_FOR_EACH_2(MACRO_, x, ...)					\
	MACRO_(x) __CALL_FOR_EACH_1(MACRO_, ##__VA_ARGS__)
#define __CALL_FOR_EACH_3(MACRO_, x, ...)					\
	MACRO_(x) __CALL_FOR_EACH_2(MACRO_, ##__VA_ARGS__)
#define __CALL_FOR_EACH_4(MACRO_, x, ...)					\
	MACRO_(x) __CALL_FOR_EACH_3(MACRO_, ##__VA_ARGS__)

#define _CALL_FOR_EACH(NARGS_, MACRO_, x, ...)					\
	CONCATENATE(__CALL_FOR_EACH_, NARGS_)(MACRO_, x, ##__VA_ARGS__)
#define CALL_FOR_EACH(MACRO_, x, ...)						\
	_CALL_FOR_EACH(COUNT_ARGS(x, ##__VA_ARGS__), MACRO_, x, ##__VA_ARGS__)
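
/*
 * Illustration only, not part of the API: CALL_FOR_EACH() counts its arguments
 * and dispatches to the matching __CALL_FOR_EACH_<n>() helper, which applies
 * MACRO_ to each argument in turn. For a hypothetical macro M, the expansion
 * is roughly:
 *
 *	CALL_FOR_EACH(M, a, b)
 *		-> _CALL_FOR_EACH(2, M, a, b)
 *		-> __CALL_FOR_EACH_2(M, a, b)
 *		-> M(a) M(b)
 */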

#define _XE_RTP_REG(x_)	(x_), XE_RTP_REG_REGULAR
#define _XE_RTP_MCR_REG(x_) (x_), XE_RTP_REG_MCR

/*
 * Helper macros for concatenating prefixes - do not use them directly outside
 * this header
 */
#define __ADD_XE_RTP_ENTRY_FLAG_PREFIX(x) CONCATENATE(XE_RTP_ENTRY_FLAG_, x) |
#define __ADD_XE_RTP_ACTION_FLAG_PREFIX(x) CONCATENATE(XE_RTP_ACTION_FLAG_, x) |
#define __ADD_XE_RTP_RULE_PREFIX(x) CONCATENATE(XE_RTP_RULE_, x) ,
#define __ADD_XE_RTP_ACTION_PREFIX(x) CONCATENATE(XE_RTP_ACTION_, x) ,

/*
 * Macros to encode rules to match against platform, IP version, stepping, etc.
 * Shouldn't be used directly - see XE_RTP_RULES()
 */

#define _XE_RTP_RULE_PLATFORM(plat__)						\
	{ .match_type = XE_RTP_MATCH_PLATFORM, .platform = plat__ }

#define _XE_RTP_RULE_SUBPLATFORM(plat__, sub__)					\
	{ .match_type = XE_RTP_MATCH_SUBPLATFORM,				\
	  .platform = plat__, .subplatform = sub__ }

#define _XE_RTP_RULE_STEP(start__, end__)					\
	{ .match_type = XE_RTP_MATCH_STEP,					\
	  .step_start = start__, .step_end = end__ }

#define _XE_RTP_RULE_ENGINE_CLASS(cls__)					\
	{ .match_type = XE_RTP_MATCH_ENGINE_CLASS,				\
	  .engine_class = (cls__) }

/**
 * XE_RTP_RULE_PLATFORM - Create rule matching platform
 * @plat_: platform to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_PLATFORM(plat_)						\
	_XE_RTP_RULE_PLATFORM(XE_##plat_)

/**
 * XE_RTP_RULE_SUBPLATFORM - Create rule matching platform and sub-platform
 * @plat_: platform to match
 * @sub_: sub-platform to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_SUBPLATFORM(plat_, sub_)					\
	_XE_RTP_RULE_SUBPLATFORM(XE_##plat_, XE_SUBPLATFORM_##plat_##_##sub_)

/**
 * XE_RTP_RULE_STEP - Create rule matching platform stepping
 * @start_: First stepping matching the rule
 * @end_: First stepping that does not match the rule
 *
 * Note that the range matching this rule is [ @start_, @end_ ), i.e. inclusive
 * on the left, exclusive on the right.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_STEP(start_, end_)						\
	_XE_RTP_RULE_STEP(STEP_##start_, STEP_##end_)
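
/*
 * Illustration only (steppings are illustrative): because the range is
 * half-open, a rule like STEP(A0, B0) matches steppings from A0 up to, but
 * not including, B0:
 *
 *	XE_RTP_RULES(PLATFORM(DG2), STEP(A0, B0))
 */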

/**
 * XE_RTP_RULE_ENGINE_CLASS - Create rule matching an engine class
 * @cls_: Engine class to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_ENGINE_CLASS(cls_)						\
	_XE_RTP_RULE_ENGINE_CLASS(XE_ENGINE_CLASS_##cls_)

/**
 * XE_RTP_RULE_FUNC - Create rule using callback function for match
 * @func__: Function to call to decide if rule matches
 *
 * This allows more complex checks to be performed. The ``XE_RTP``
 * infrastructure will simply call @func__ to decide if this rule matches the
 * device.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_FUNC(func__)						\
	{ .match_type = XE_RTP_MATCH_FUNC,					\
	  .match_func = (func__) }
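
/*
 * Illustration only: a match function receives the GT and engine being
 * processed and returns whether the rule applies, mirroring the helpers
 * declared at the end of this header (e.g. xe_rtp_match_even_instance()).
 * The function and entry below are hypothetical:
 *
 *	static bool match_odd_instance(const struct xe_gt *gt,
 *				       const struct xe_hw_engine *hwe)
 *	{
 *		return hwe->instance % 2 == 1;
 *	}
 *
 *	...
 *	{ XE_RTP_NAME("test-func-entry"),
 *	  XE_RTP_RULES(FUNC(match_odd_instance)),
 *	  ...
 *	},
 */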

/**
 * XE_RTP_RULE_GRAPHICS_VERSION - Create rule matching graphics version
 * @ver__: Graphics IP version to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_GRAPHICS_VERSION(ver__)					\
	{ .match_type = XE_RTP_MATCH_GRAPHICS_VERSION,				\
	  .ver_start = ver__, }

/**
 * XE_RTP_RULE_GRAPHICS_VERSION_RANGE - Create rule matching a range of graphics versions
 * @ver_start__: First graphics IP version to match
 * @ver_end__: Last graphics IP version to match
 *
 * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
 * inclusive on both sides.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_GRAPHICS_VERSION_RANGE(ver_start__, ver_end__)		\
	{ .match_type = XE_RTP_MATCH_GRAPHICS_VERSION_RANGE,			\
	  .ver_start = ver_start__, .ver_end = ver_end__, }

/**
 * XE_RTP_RULE_MEDIA_VERSION - Create rule matching media version
 * @ver__: Media IP version to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_MEDIA_VERSION(ver__)					\
	{ .match_type = XE_RTP_MATCH_MEDIA_VERSION,				\
	  .ver_start = ver__, }

/**
 * XE_RTP_RULE_MEDIA_VERSION_RANGE - Create rule matching a range of media versions
 * @ver_start__: First media IP version to match
 * @ver_end__: Last media IP version to match
 *
 * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
 * inclusive on both sides.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_MEDIA_VERSION_RANGE(ver_start__, ver_end__)			\
	{ .match_type = XE_RTP_MATCH_MEDIA_VERSION_RANGE,			\
	  .ver_start = ver_start__, .ver_end = ver_end__, }
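
/*
 * Illustration only: version-based rules combined with other rules in an
 * entry. The version values and engine class below are illustrative, not
 * taken from a real workaround:
 *
 *	{ XE_RTP_NAME("test-version-entry"),
 *	  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210), ENGINE_CLASS(RENDER)),
 *	  ...
 *	},
 */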

/**
 * XE_RTP_RULE_IS_INTEGRATED - Create a rule matching integrated graphics devices
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_IS_INTEGRATED						\
	{ .match_type = XE_RTP_MATCH_INTEGRATED }

/**
 * XE_RTP_RULE_IS_DISCRETE - Create a rule matching discrete graphics devices
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_IS_DISCRETE							\
	{ .match_type = XE_RTP_MATCH_DISCRETE }

/**
 * XE_RTP_ACTION_WR - Helper to write a value to the register, overriding all
 *                    the bits
 * @reg_: Register
 * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
 * @val_: Value to set
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * The corresponding bspec notation is:
 *
 *	REGNAME = VALUE
 */
#define XE_RTP_ACTION_WR(reg_, reg_type_, val_, ...)				\
	{ .reg = (reg_), .reg_type = (reg_type_),				\
	  .clr_bits = ~0u, .set_bits = (val_),					\
	  .read_mask = (~0u), ##__VA_ARGS__ }
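
/*
 * Illustration only: a full-register write as used inside XE_RTP_ACTIONS().
 * SOME_REG is a hypothetical register definition:
 *
 *	XE_RTP_ACTIONS(WR(SOME_REG, XE_RTP_REG_REGULAR, 0xffffffff))
 */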

/**
 * XE_RTP_ACTION_SET - Set bits from @val_ in the register.
 * @reg_: Register
 * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
 * @val_: Bits to set in the register
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * For masked registers this translates to a single write, while for other
 * registers it's an RMW. The corresponding bspec notation is (example for
 * bits 2 and 5, but it could be any bits):
 *
 *	REGNAME[2] = 1
 *	REGNAME[5] = 1
 */
#define XE_RTP_ACTION_SET(reg_, reg_type_, val_, ...)				\
	{ .reg = (reg_), .reg_type = (reg_type_),				\
	  .clr_bits = (val_), .set_bits = (val_),				\
	  .read_mask = (val_), ##__VA_ARGS__ }

/**
 * XE_RTP_ACTION_CLR: Clear bits from @val_ in the register.
 * @reg_: Register
 * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
 * @val_: Bits to clear in the register
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * For masked registers this translates to a single write, while for other
 * registers it's an RMW. The corresponding bspec notation is (example for
 * bits 2 and 5, but it could be any bits):
 *
 *	REGNAME[2] = 0
 *	REGNAME[5] = 0
 */
#define XE_RTP_ACTION_CLR(reg_, reg_type_, val_, ...)				\
	{ .reg = (reg_), .reg_type = (reg_type_),				\
	  .clr_bits = (val_), .set_bits = 0,					\
	  .read_mask = (val_), ##__VA_ARGS__ }
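
/*
 * Illustration only: setting and clearing individual bits from an actions
 * list. SOME_REG, SOME_MCR_REG and the bit positions are hypothetical:
 *
 *	XE_RTP_ACTIONS(SET(SOME_REG, XE_RTP_REG_REGULAR, BIT(5)),
 *		       CLR(SOME_MCR_REG, XE_RTP_REG_MCR, BIT(2)))
 */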

/**
 * XE_RTP_ACTION_FIELD_SET: Set a bit range
 * @reg_: Register
 * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
 * @mask_bits_: Mask of bits to be changed in the register, forming a field
 * @val_: Value to set in the field denoted by @mask_bits_
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * For masked registers this translates to a single write, while for other
 * registers it's an RMW. The corresponding bspec notation is:
 *
 *	REGNAME[<end>:<start>] = VALUE
 */
#define XE_RTP_ACTION_FIELD_SET(reg_, reg_type_, mask_bits_, val_, ...)		\
	{ .reg = (reg_), .reg_type = (reg_type_),				\
	  .clr_bits = (mask_bits_), .set_bits = (val_),				\
	  .read_mask = (mask_bits_), ##__VA_ARGS__ }
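
/*
 * Illustration only: programming a multi-bit field. SOME_REG and the field
 * layout are hypothetical, and the example assumes the REG_GENMASK()/
 * REG_FIELD_PREP() helpers are available for building the mask and value:
 *
 *	XE_RTP_ACTIONS(FIELD_SET(SOME_REG, XE_RTP_REG_REGULAR,
 *				 REG_GENMASK(7, 4),
 *				 REG_FIELD_PREP(REG_GENMASK(7, 4), 0x3)))
 */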

#define XE_RTP_ACTION_FIELD_SET_NO_READ_MASK(reg_, reg_type_, mask_bits_, val_, ...)	\
	{ .reg = (reg_), .reg_type = (reg_type_),				\
	  .clr_bits = (mask_bits_), .set_bits = (val_),				\
	  .read_mask = 0, ##__VA_ARGS__ }

/**
 * XE_RTP_ACTION_WHITELIST - Add register to userspace whitelist
 * @reg_: Register
 * @reg_type_: Register type - automatically expanded by MCR_REG/_MMIO
 * @val_: Whitelist-specific flags to set
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * Add a register to the whitelist, allowing userspace to modify the register
 * with regular user privileges.
 */
#define XE_RTP_ACTION_WHITELIST(reg_, reg_type_, val_, ...)			\
	/* TODO fail build if ((flags) & ~(RING_FORCE_TO_NONPRIV_MASK_VALID)) */\
	{ .reg = (reg_), .reg_type = (reg_type_), .set_bits = (val_),		\
	  .clr_bits = RING_FORCE_TO_NONPRIV_MASK_VALID,				\
	  ##__VA_ARGS__ }

/**
 * XE_RTP_NAME - Helper to set the name in xe_rtp_entry
 * @s_: Name describing this rule, often a HW-specific number
 *
 * TODO: maybe move this behind a debug config?
 */
#define XE_RTP_NAME(s_)	.name = (s_)

/**
 * XE_RTP_ENTRY_FLAG - Helper to add multiple flags to a struct xe_rtp_entry
 * @f1_: Last part of a ``XE_RTP_ENTRY_FLAG_*``
 * @...: Additional flags, defined like @f1_
 *
 * Helper to automatically add a ``XE_RTP_ENTRY_FLAG_`` prefix to @f1_ so it can
 * be easily used to define struct xe_rtp_entry entries. Example:
 *
 * .. code-block:: c
 *
 *	const struct xe_rtp_entry wa_entries[] = {
 *		...
 *		{ XE_RTP_NAME("test-entry"),
 *		  ...
 *		  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
 *		  ...
 *		},
 *		...
 *	};
 */
#define XE_RTP_ENTRY_FLAG(f1_, ...)						\
	.flags = (CALL_FOR_EACH(__ADD_XE_RTP_ENTRY_FLAG_PREFIX, f1_, ##__VA_ARGS__) 0)

/**
 * XE_RTP_ACTION_FLAG - Helper to add multiple flags to a struct xe_rtp_action
 * @f1_: Last part of a ``XE_RTP_ACTION_FLAG_*``
 * @...: Additional flags, defined like @f1_
 *
 * Helper to automatically add a ``XE_RTP_ACTION_FLAG_`` prefix to @f1_ so it
 * can be easily used to define struct xe_rtp_action entries. Example:
 *
 * .. code-block:: c
 *
 *	const struct xe_rtp_entry wa_entries[] = {
 *		...
 *		{ XE_RTP_NAME("test-entry"),
 *		  ...
 *		  XE_RTP_ACTION_SET(..., XE_RTP_ACTION_FLAG(FOREACH_ENGINE)),
 *		  ...
 *		},
 *		...
 *	};
 */
#define XE_RTP_ACTION_FLAG(f1_, ...)						\
	.flags = (CALL_FOR_EACH(__ADD_XE_RTP_ACTION_FLAG_PREFIX, f1_, ##__VA_ARGS__) 0)

/**
 * XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry entry
 * @r1: Last part of a ``XE_RTP_RULE_*`` rule
 * @...: Additional rules, defined like @r1
 *
 * At least one rule is needed and up to 4 are supported. Multiple rules are
 * AND'ed together, i.e. all the rules must evaluate to true for the entry to
 * be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
 *
 * .. code-block:: c
 *
 *	const struct xe_rtp_entry wa_entries[] = {
 *		...
 *		{ XE_RTP_NAME("test-entry"),
 *		  XE_RTP_RULES(SUBPLATFORM(DG2, G10), STEP(A0, B0)),
 *		  ...
 *		},
 *		...
 *	};
 */
#define XE_RTP_RULES(r1, ...)							\
	.n_rules = COUNT_ARGS(r1, ##__VA_ARGS__),				\
	.rules = (struct xe_rtp_rule[]) {					\
		CALL_FOR_EACH(__ADD_XE_RTP_RULE_PREFIX, r1, ##__VA_ARGS__)	\
	}

/**
 * XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry
 * @a1: Action to take. Last part of a ``XE_RTP_ACTION_*`` action
 * @...: Additional actions, defined like @a1
 *
 * At least one action is needed and up to 4 are supported. All the actions
 * are applied when the entry's rules match. See XE_RTP_ACTION_* for the
 * possible actions. Example:
 *
 * .. code-block:: c
 *
 *	const struct xe_rtp_entry wa_entries[] = {
 *		...
 *		{ XE_RTP_NAME("test-entry"),
 *		  XE_RTP_RULES(...),
 *		  XE_RTP_ACTIONS(SET(..), SET(...), CLR(...)),
 *		  ...
 *		},
 *		...
 *	};
 */
#define XE_RTP_ACTIONS(a1, ...)							\
	.n_actions = COUNT_ARGS(a1, ##__VA_ARGS__),				\
	.actions = (struct xe_rtp_action[]) {					\
		CALL_FOR_EACH(__ADD_XE_RTP_ACTION_PREFIX, a1, ##__VA_ARGS__)	\
	}

void xe_rtp_process(const struct xe_rtp_entry *entries, struct xe_reg_sr *sr,
		    struct xe_gt *gt, struct xe_hw_engine *hwe);
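
/*
 * Illustration only: feeding a table of entries through the RTP machinery.
 * The table name and the sr/gt/hwe variables below are placeholders for
 * whatever the caller owns:
 *
 *	static const struct xe_rtp_entry my_entries[] = {
 *		...
 *	};
 *
 *	xe_rtp_process(my_entries, sr, gt, hwe);
 */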

/* Match functions to be used with XE_RTP_MATCH_FUNC */

/**
 * xe_rtp_match_even_instance - Match if engine instance is even
 * @gt: GT structure
 * @hwe: Engine instance
 *
 * Returns: true if engine instance is even, false otherwise
 */
bool xe_rtp_match_even_instance(const struct xe_gt *gt,
				const struct xe_hw_engine *hwe);

/**
 * xe_rtp_match_first_render_or_compute - Match if it's the first render or
 * compute engine in the GT
 * @gt: GT structure
 * @hwe: Engine instance
 *
 * Registers on the render reset domain need to have their values re-applied
 * when any of those engines are reset. Since the engines reset together, the
 * programming can be set on just one of them. For simplicity the first engine
 * of either render or compute class is chosen.
 *
 * Returns: true if engine id is the first to match the render reset domain,
 * false otherwise.
 */
bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
					  const struct xe_hw_engine *hwe);

#endif