/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_RTP_
#define _XE_RTP_

#include <linux/types.h>
#include <linux/xarray.h>

#define _XE_RTP_INCLUDE_PRIVATE_HELPERS

#include "xe_rtp_helpers.h"
#include "xe_rtp_types.h"

#undef _XE_RTP_INCLUDE_PRIVATE_HELPERS

/*
 * Register table poke infrastructure
 */

struct xe_hw_engine;
struct xe_gt;
struct xe_reg_sr;

/*
 * Macros to encode rules to match against platform, IP version, stepping, etc.
 * Shouldn't be used directly - see XE_RTP_RULES()
 */
#define _XE_RTP_RULE_PLATFORM(plat__) \
        { .match_type = XE_RTP_MATCH_PLATFORM, .platform = plat__ }

#define _XE_RTP_RULE_SUBPLATFORM(plat__, sub__) \
        { .match_type = XE_RTP_MATCH_SUBPLATFORM, \
          .platform = plat__, .subplatform = sub__ }

#define _XE_RTP_RULE_GRAPHICS_STEP(start__, end__) \
        { .match_type = XE_RTP_MATCH_GRAPHICS_STEP, \
          .step_start = start__, .step_end = end__ }

#define _XE_RTP_RULE_MEDIA_STEP(start__, end__) \
        { .match_type = XE_RTP_MATCH_MEDIA_STEP, \
          .step_start = start__, .step_end = end__ }

#define _XE_RTP_RULE_ENGINE_CLASS(cls__) \
        { .match_type = XE_RTP_MATCH_ENGINE_CLASS, \
          .engine_class = (cls__) }

/**
 * XE_RTP_RULE_PLATFORM - Create rule matching platform
 * @plat_: platform to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_PLATFORM(plat_) \
        _XE_RTP_RULE_PLATFORM(XE_##plat_)

/**
 * XE_RTP_RULE_SUBPLATFORM - Create rule matching platform and sub-platform
 * @plat_: platform to match
 * @sub_: sub-platform to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_SUBPLATFORM(plat_, sub_) \
        _XE_RTP_RULE_SUBPLATFORM(XE_##plat_, XE_SUBPLATFORM_##plat_##_##sub_)

/**
 * XE_RTP_RULE_GRAPHICS_STEP - Create rule matching graphics stepping
 * @start_: First stepping matching the rule
 * @end_: First stepping that does not match the rule
 *
 * Note that the range matching this rule is [ @start_, @end_ ), i.e. inclusive
 * on the left, exclusive on the right.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_GRAPHICS_STEP(start_, end_) \
        _XE_RTP_RULE_GRAPHICS_STEP(STEP_##start_, STEP_##end_)

/**
 * XE_RTP_RULE_MEDIA_STEP - Create rule matching media stepping
 * @start_: First stepping matching the rule
 * @end_: First stepping that does not match the rule
 *
 * Note that the range matching this rule is [ @start_, @end_ ), i.e. inclusive
 * on the left, exclusive on the right.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_MEDIA_STEP(start_, end_) \
        _XE_RTP_RULE_MEDIA_STEP(STEP_##start_, STEP_##end_)

/**
 * XE_RTP_RULE_ENGINE_CLASS - Create rule matching an engine class
 * @cls_: Engine class to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_ENGINE_CLASS(cls_) \
        _XE_RTP_RULE_ENGINE_CLASS(XE_ENGINE_CLASS_##cls_)

/**
 * XE_RTP_RULE_FUNC - Create rule using callback function for match
 * @func__: Function to call to decide if rule matches
 *
 * This allows more complex checks to be performed. The ``XE_RTP``
 * infrastructure will simply call the function @func__ passed to decide if
 * this rule matches the device.
 *
 * Refer to XE_RTP_RULES() for expected usage.
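 *
 * For instance, a rule can point at one of the match functions declared at
 * the end of this header, or at any other function with the same signature.
 * The sketch below is illustrative only; ``my_match_func`` is a made-up name,
 * not an existing helper:
 *
 * .. code-block:: c
 *
 *        static bool my_match_func(const struct xe_gt *gt,
 *                                  const struct xe_hw_engine *hwe)
 *        {
 *                return hwe->instance == 0;
 *        }
 *
 *        static const struct xe_rtp_entry_sr entries[] = {
 *                ...
 *                { XE_RTP_NAME("test-entry"),
 *                  XE_RTP_RULES(FUNC(my_match_func)),
 *                  ...
 *                },
 *                ...
 *        };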
 */
#define XE_RTP_RULE_FUNC(func__) \
        { .match_type = XE_RTP_MATCH_FUNC, \
          .match_func = (func__) }

/**
 * XE_RTP_RULE_GRAPHICS_VERSION - Create rule matching graphics version
 * @ver__: Graphics IP version to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_GRAPHICS_VERSION(ver__) \
        { .match_type = XE_RTP_MATCH_GRAPHICS_VERSION, \
          .ver_start = ver__, }

/**
 * XE_RTP_RULE_GRAPHICS_VERSION_RANGE - Create rule matching a range of graphics versions
 * @ver_start__: First graphics IP version to match
 * @ver_end__: Last graphics IP version to match
 *
 * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
 * inclusive on both sides.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_GRAPHICS_VERSION_RANGE(ver_start__, ver_end__) \
        { .match_type = XE_RTP_MATCH_GRAPHICS_VERSION_RANGE, \
          .ver_start = ver_start__, .ver_end = ver_end__, }

/**
 * XE_RTP_RULE_GRAPHICS_VERSION_ANY_GT - Create rule matching graphics version on any GT
 * @ver__: Graphics IP version to match
 *
 * Like XE_RTP_RULE_GRAPHICS_VERSION, but it matches even if the current GT
 * being checked is not of the graphics type. It allows adding RTP entries to
 * another GT when the device contains a Graphics IP with that version.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_GRAPHICS_VERSION_ANY_GT(ver__) \
        { .match_type = XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT, \
          .ver_start = ver__, }

/**
 * XE_RTP_RULE_MEDIA_VERSION - Create rule matching media version
 * @ver__: Media IP version to match
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_MEDIA_VERSION(ver__) \
        { .match_type = XE_RTP_MATCH_MEDIA_VERSION, \
          .ver_start = ver__, }

/**
 * XE_RTP_RULE_MEDIA_VERSION_RANGE - Create rule matching a range of media versions
 * @ver_start__: First media IP version to match
 * @ver_end__: Last media IP version to match
 *
 * Note that the range matching this rule is [ @ver_start__, @ver_end__ ], i.e.
 * inclusive on both sides.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_MEDIA_VERSION_RANGE(ver_start__, ver_end__) \
        { .match_type = XE_RTP_MATCH_MEDIA_VERSION_RANGE, \
          .ver_start = ver_start__, .ver_end = ver_end__, }

/**
 * XE_RTP_RULE_MEDIA_VERSION_ANY_GT - Create rule matching media version on any GT
 * @ver__: Media IP version to match
 *
 * Like XE_RTP_RULE_MEDIA_VERSION, but it matches even if the current GT being
 * checked is not of the media type. It allows adding RTP entries to another
 * GT when the device contains a Media IP with that version.
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_MEDIA_VERSION_ANY_GT(ver__) \
        { .match_type = XE_RTP_MATCH_MEDIA_VERSION_ANY_GT, \
          .ver_start = ver__, }

/**
 * XE_RTP_RULE_IS_INTEGRATED - Create a rule matching integrated graphics devices
 *
 * Refer to XE_RTP_RULES() for expected usage.
 */
#define XE_RTP_RULE_IS_INTEGRATED \
        { .match_type = XE_RTP_MATCH_INTEGRATED }

/**
 * XE_RTP_RULE_IS_DISCRETE - Create a rule matching discrete graphics devices
 *
 * Refer to XE_RTP_RULES() for expected usage.
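 *
 * A minimal sketch of how this rule is typically combined with others is
 * shown below; the entry name and the graphics version are placeholders used
 * for illustration, not taken from an actual workaround:
 *
 * .. code-block:: c
 *
 *        static const struct xe_rtp_entry_sr entries[] = {
 *                ...
 *                { XE_RTP_NAME("test-entry"),
 *                  XE_RTP_RULES(IS_DISCRETE, GRAPHICS_VERSION(1200)),
 *                  ...
 *                },
 *                ...
 *        };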
 */
#define XE_RTP_RULE_IS_DISCRETE \
        { .match_type = XE_RTP_MATCH_DISCRETE }

/**
 * XE_RTP_RULE_OR - Create an OR condition for rtp rules
 *
 * RTP rules are AND'ed when evaluated and all of them need to match.
 * XE_RTP_RULE_OR allows creating a set of rules where any of them matching is
 * sufficient for the action to trigger. Example:
 *
 * .. code-block:: c
 *
 *        const struct xe_rtp_entry_sr entries[] = {
 *                ...
 *                { XE_RTP_NAME("test-entry"),
 *                  XE_RTP_RULES(PLATFORM(DG2), OR, PLATFORM(TIGERLAKE)),
 *                  ...
 *                },
 *                ...
 *        };
 */
#define XE_RTP_RULE_OR \
        { .match_type = XE_RTP_MATCH_OR }

/**
 * XE_RTP_ACTION_WR - Helper to write a value to the register, overriding all
 *                    the bits
 * @reg_: Register
 * @val_: Value to set
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * The corresponding bspec notation is:
 *
 *        REGNAME = VALUE
 */
#define XE_RTP_ACTION_WR(reg_, val_, ...) \
        { .reg = XE_RTP_DROP_CAST(reg_), \
          .clr_bits = ~0u, .set_bits = (val_), \
          .read_mask = (~0u), ##__VA_ARGS__ }

/**
 * XE_RTP_ACTION_SET - Set bits from @val_ in the register.
 * @reg_: Register
 * @val_: Bits to set in the register
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * For masked registers this translates to a single write, while for other
 * registers it's an RMW. The corresponding bspec notation is (example for
 * bits 2 and 5, but could be any):
 *
 *        REGNAME[2] = 1
 *        REGNAME[5] = 1
 */
#define XE_RTP_ACTION_SET(reg_, val_, ...) \
        { .reg = XE_RTP_DROP_CAST(reg_), \
          .clr_bits = val_, .set_bits = val_, \
          .read_mask = val_, ##__VA_ARGS__ }

/**
 * XE_RTP_ACTION_CLR - Clear bits from @val_ in the register.
 * @reg_: Register
 * @val_: Bits to clear in the register
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * For masked registers this translates to a single write, while for other
 * registers it's an RMW. The corresponding bspec notation is (example for
 * bits 2 and 5, but could be any):
 *
 *        REGNAME[2] = 0
 *        REGNAME[5] = 0
 */
#define XE_RTP_ACTION_CLR(reg_, val_, ...) \
        { .reg = XE_RTP_DROP_CAST(reg_), \
          .clr_bits = val_, .set_bits = 0, \
          .read_mask = val_, ##__VA_ARGS__ }

/**
 * XE_RTP_ACTION_FIELD_SET - Set a bit range
 * @reg_: Register
 * @mask_bits_: Mask of bits to be changed in the register, forming a field
 * @val_: Value to set in the field denoted by @mask_bits_
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * For masked registers this translates to a single write, while for other
 * registers it's an RMW. The corresponding bspec notation is:
 *
 *        REGNAME[<end>:<start>] = VALUE
 */
#define XE_RTP_ACTION_FIELD_SET(reg_, mask_bits_, val_, ...) \
        { .reg = XE_RTP_DROP_CAST(reg_), \
          .clr_bits = mask_bits_, .set_bits = val_, \
          .read_mask = mask_bits_, ##__VA_ARGS__ }

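/*
 * As an illustration of XE_RTP_ACTION_FIELD_SET (the register and field below
 * are made up, not taken from bspec): a field programming such as
 * REGNAME[7:4] = 0xc would usually be expressed with a field mask and a value
 * prepared for that mask, e.g.
 *
 *        XE_RTP_ACTIONS(FIELD_SET(MY_REG, MY_FIELD_MASK,
 *                                 REG_FIELD_PREP(MY_FIELD_MASK, 0xc)))
 *
 * with MY_FIELD_MASK defined as something like REG_GENMASK(7, 4).
 */
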
#define XE_RTP_ACTION_FIELD_SET_NO_READ_MASK(reg_, mask_bits_, val_, ...) \
        { .reg = XE_RTP_DROP_CAST(reg_), \
          .clr_bits = (mask_bits_), .set_bits = (val_), \
          .read_mask = 0, ##__VA_ARGS__ }

/**
 * XE_RTP_ACTION_WHITELIST - Add register to userspace whitelist
 * @reg_: Register
 * @val_: Whitelist-specific flags to set
 * @...: Additional fields to override in the struct xe_rtp_action entry
 *
 * Add a register to the whitelist, allowing userspace to modify the register
 * with regular user privileges.
 */
#define XE_RTP_ACTION_WHITELIST(reg_, val_, ...) \
        /* TODO fail build if ((flags) & ~(RING_FORCE_TO_NONPRIV_MASK_VALID)) */ \
        { .reg = XE_RTP_DROP_CAST(reg_), \
          .set_bits = val_, \
          .clr_bits = RING_FORCE_TO_NONPRIV_MASK_VALID, \
          ##__VA_ARGS__ }

/**
 * XE_RTP_NAME - Helper to set the name in xe_rtp_entry
 * @s_: Name describing this rule, often a HW-specific number
 *
 * TODO: maybe move this behind a debug config?
 */
#define XE_RTP_NAME(s_) .name = (s_)

/**
 * XE_RTP_ENTRY_FLAG - Helper to add multiple flags to a struct xe_rtp_entry_sr
 * @...: Entry flags, without the ``XE_RTP_ENTRY_FLAG_`` prefix
 *
 * Helper to automatically add a ``XE_RTP_ENTRY_FLAG_`` prefix to the flags
 * when defining struct xe_rtp_entry entries. Example:
 *
 * .. code-block:: c
 *
 *        const struct xe_rtp_entry_sr wa_entries[] = {
 *                ...
 *                { XE_RTP_NAME("test-entry"),
 *                  ...
 *                  XE_RTP_ENTRY_FLAG(FOREACH_ENGINE),
 *                  ...
 *                },
 *                ...
 *        };
 */
#define XE_RTP_ENTRY_FLAG(...) \
        .flags = (XE_RTP_PASTE_FOREACH(ENTRY_FLAG_, BITWISE_OR, (__VA_ARGS__)))

/**
 * XE_RTP_ACTION_FLAG - Helper to add multiple flags to a struct xe_rtp_action
 * @...: Action flags, without the ``XE_RTP_ACTION_FLAG_`` prefix
 *
 * Helper to automatically add a ``XE_RTP_ACTION_FLAG_`` prefix to the flags
 * when defining struct xe_rtp_action entries. Example:
 *
 * .. code-block:: c
 *
 *        const struct xe_rtp_entry_sr wa_entries[] = {
 *                ...
 *                { XE_RTP_NAME("test-entry"),
 *                  ...
 *                  XE_RTP_ACTION_SET(..., XE_RTP_ACTION_FLAG(FOREACH_ENGINE)),
 *                  ...
 *                },
 *                ...
 *        };
 */
#define XE_RTP_ACTION_FLAG(...) \
        .flags = (XE_RTP_PASTE_FOREACH(ACTION_FLAG_, BITWISE_OR, (__VA_ARGS__)))

/**
 * XE_RTP_RULES - Helper to set multiple rules to a struct xe_rtp_entry_sr entry
 * @...: Rules
 *
 * At least one rule is needed and up to 6 are supported. Multiple rules are
 * AND'ed together, i.e. all the rules must evaluate to true for the entry to
 * be processed. See XE_RTP_MATCH_* for the possible match rules. Example:
 *
 * .. code-block:: c
 *
 *        const struct xe_rtp_entry_sr wa_entries[] = {
 *                ...
 *                { XE_RTP_NAME("test-entry"),
 *                  XE_RTP_RULES(SUBPLATFORM(DG2, G10), GRAPHICS_STEP(A0, B0)),
 *                  ...
 *                },
 *                ...
 *        };
 */
#define XE_RTP_RULES(...) \
        .n_rules = COUNT_ARGS(__VA_ARGS__), \
        .rules = (const struct xe_rtp_rule[]) { \
                XE_RTP_PASTE_FOREACH(RULE_, COMMA, (__VA_ARGS__)) \
        }

/**
 * XE_RTP_ACTIONS - Helper to set multiple actions to a struct xe_rtp_entry_sr
 * @...: Actions to be taken
 *
 * At least one action is needed and up to 6 are supported. See XE_RTP_ACTION_*
 * for the possible actions. Example:
 *
 * .. code-block:: c
 *
 *        const struct xe_rtp_entry_sr wa_entries[] = {
 *                ...
 *                { XE_RTP_NAME("test-entry"),
 *                  XE_RTP_RULES(...),
 *                  XE_RTP_ACTIONS(SET(..), SET(...), CLR(...)),
 *                  ...
 *                },
 *                ...
 *        };
 */
#define XE_RTP_ACTIONS(...) \
        .n_actions = COUNT_ARGS(__VA_ARGS__), \
        .actions = (const struct xe_rtp_action[]) { \
                XE_RTP_PASTE_FOREACH(ACTION_, COMMA, (__VA_ARGS__)) \
        }

#define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \
        struct xe_hw_engine * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \
        struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT })

void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
                                               unsigned long *active_entries,
                                               size_t n_entries);

void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
                          const struct xe_rtp_entry_sr *entries,
                          struct xe_reg_sr *sr);

void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
                    const struct xe_rtp_entry *entries);

/* Match functions to be used with XE_RTP_MATCH_FUNC */

/**
 * xe_rtp_match_even_instance - Match if engine instance is even
 * @gt: GT structure
 * @hwe: Engine instance
 *
 * Returns: true if engine instance is even, false otherwise
 */
bool xe_rtp_match_even_instance(const struct xe_gt *gt,
                                const struct xe_hw_engine *hwe);

/*
 * xe_rtp_match_first_render_or_compute - Match if it's the first render or
 * compute engine in the GT
 *
 * @gt: GT structure
 * @hwe: Engine instance
 *
 * Registers on the render reset domain need to have their values re-applied
 * when any of those engines are reset. Since the engines reset together, the
 * programming can be applied to just one of them. For simplicity, the first
 * engine of either render or compute class is chosen.
 *
 * Returns: true if the engine is the first to match the render reset domain,
 * false otherwise.
 */
bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt,
                                          const struct xe_hw_engine *hwe);

/*
 * xe_rtp_match_first_gslice_fused_off - Match when first gslice is fused off
 *
 * @gt: GT structure
 * @hwe: Engine instance
 *
 * Returns: true if first gslice is fused off, false otherwise.
 */
bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt,
                                         const struct xe_hw_engine *hwe);

#endif
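
/*
 * Typical flow (illustrative only, not a verbatim copy of any caller): build
 * a processing context from a GT or a HW engine and feed it, together with an
 * entry table, to one of the process functions declared above. The function
 * name below is made up, and the save-restore table is assumed to be the one
 * embedded in the GT:
 *
 *        static const struct xe_rtp_entry_sr entries[] = {
 *                ...
 *        };
 *
 *        static void apply_gt_entries(struct xe_gt *gt)
 *        {
 *                struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
 *
 *                xe_rtp_process_to_sr(&ctx, entries, &gt->reg_sr);
 *        }
 */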