// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_rtp.h"

#include <kunit/visibility.h>

#include <uapi/drm/xe_drm.h>

#include "xe_configfs.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_macros.h"
#include "xe_reg_sr.h"
#include "xe_sriov.h"

/**
 * DOC: Register Table Processing
 *
 * Internal infrastructure to define how registers should be updated based on
 * rules and actions. This can be used to define tables with multiple entries
 * (one per register) that will be walked over at some point in time to apply
 * the values to the registers that have matching rules.
 */
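/*
 * A minimal sketch of how such a table is typically declared, using the
 * helper macros from xe_rtp.h (XE_RTP_NAME(), XE_RTP_RULES(),
 * XE_RTP_ACTIONS()). The entry name, register and bit below are hypothetical,
 * for illustration only:
 *
 *	static const struct xe_rtp_entry_sr example_entries[] = {
 *		{ XE_RTP_NAME("example-wa"),
 *		  XE_RTP_RULES(GRAPHICS_VERSION_RANGE(1200, 1210)),
 *		  XE_RTP_ACTIONS(SET(EXAMPLE_REG, EXAMPLE_BIT))
 *		},
 *	};
 */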
static bool has_samedia(const struct xe_device *xe)
{
	return xe->info.media_verx100 >= 1300;
}

static bool rule_matches(const struct xe_device *xe,
			 struct xe_gt *gt,
			 struct xe_hw_engine *hwe,
			 const struct xe_rtp_rule *rules,
			 unsigned int n_rules)
{
	const struct xe_rtp_rule *r;
	unsigned int i, rcount = 0;
	bool match;

	for (r = rules, i = 0; i < n_rules; r = &rules[++i]) {
		switch (r->match_type) {
		case XE_RTP_MATCH_OR:
			/*
			 * This is only reached if a complete set of rules
			 * passed or none were evaluated. For both cases,
			 * shortcut the other rules and return the proper value.
			 */
			goto done;
		case XE_RTP_MATCH_PLATFORM:
			match = xe->info.platform == r->platform;
			break;
		case XE_RTP_MATCH_SUBPLATFORM:
			match = xe->info.platform == r->platform &&
				xe->info.subplatform == r->subplatform;
			break;
		case XE_RTP_MATCH_GRAPHICS_VERSION:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.graphics_verx100 == r->ver_start &&
				(!has_samedia(xe) || !xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.graphics_verx100 >= r->ver_start &&
				xe->info.graphics_verx100 <= r->ver_end &&
				(!has_samedia(xe) || !xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.graphics_verx100 == r->ver_start;
			break;
		case XE_RTP_MATCH_GRAPHICS_STEP:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.step.graphics >= r->step_start &&
				xe->info.step.graphics < r->step_end &&
				(!has_samedia(xe) || !xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_VERSION:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.media_verx100 == r->ver_start &&
				(!has_samedia(xe) || xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_VERSION_RANGE:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.media_verx100 >= r->ver_start &&
				xe->info.media_verx100 <= r->ver_end &&
				(!has_samedia(xe) || xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_STEP:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.step.media >= r->step_start &&
				xe->info.step.media < r->step_end &&
				(!has_samedia(xe) || xe_gt_is_media_type(gt));
			break;
		case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT:
			if (drm_WARN_ON(&xe->drm, !gt))
				return false;

			match = xe->info.media_verx100 == r->ver_start;
			break;
		case XE_RTP_MATCH_INTEGRATED:
			match = !xe->info.is_dgfx;
			break;
		case XE_RTP_MATCH_DISCRETE:
			match = xe->info.is_dgfx;
			break;
		case XE_RTP_MATCH_ENGINE_CLASS:
			if (drm_WARN_ON(&xe->drm, !hwe))
				return false;

			match = hwe->class == r->engine_class;
			break;
		case XE_RTP_MATCH_NOT_ENGINE_CLASS:
			if (drm_WARN_ON(&xe->drm, !hwe))
				return false;

			match = hwe->class != r->engine_class;
			break;
		case XE_RTP_MATCH_FUNC:
			match = r->match_func(xe, gt, hwe);
			break;
		default:
			drm_warn(&xe->drm, "Invalid RTP match %u\n",
				 r->match_type);
			match = false;
		}

		if (!match) {
			/*
			 * Advance rules until we find XE_RTP_MATCH_OR, to
			 * check whether there's another set of conditions to
			 * evaluate
			 */
			while (++i < n_rules && rules[i].match_type != XE_RTP_MATCH_OR)
				;

			if (i >= n_rules)
				return false;

			rcount = 0;
		} else {
			rcount++;
		}
	}

done:
	if (drm_WARN_ON(&xe->drm, !rcount))
		return false;

	return true;
}

static void rtp_add_sr_entry(const struct xe_rtp_action *action,
			     struct xe_gt *gt,
			     u32 mmio_base,
			     struct xe_reg_sr *sr)
{
	struct xe_reg_sr_entry sr_entry = {
		.reg = action->reg,
		.clr_bits = action->clr_bits,
		.set_bits = action->set_bits,
		.read_mask = action->read_mask,
	};

	sr_entry.reg.addr += mmio_base;
	xe_reg_sr_add(sr, &sr_entry, gt);
}

static bool rtp_process_one_sr(const struct xe_rtp_entry_sr *entry,
			       struct xe_device *xe, struct xe_gt *gt,
			       struct xe_hw_engine *hwe, struct xe_reg_sr *sr)
{
	const struct xe_rtp_action *action;
	u32 mmio_base;
	unsigned int i;

	if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
		return false;

	for (i = 0, action = &entry->actions[0]; i < entry->n_actions; action++, i++) {
		if ((entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) ||
		    (action->flags & XE_RTP_ACTION_FLAG_ENGINE_BASE))
			mmio_base = hwe->mmio_base;
		else
			mmio_base = 0;

		rtp_add_sr_entry(action, gt, mmio_base, sr);
	}

	return true;
}

static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
			    struct xe_hw_engine **hwe,
			    struct xe_gt **gt,
			    struct xe_device **xe)
{
	switch (ctx->type) {
	case XE_RTP_PROCESS_TYPE_DEVICE:
		*hwe = NULL;
		*gt = NULL;
		*xe = ctx->xe;
		break;
	case XE_RTP_PROCESS_TYPE_GT:
		*hwe = NULL;
		*gt = ctx->gt;
		*xe = gt_to_xe(*gt);
		break;
	case XE_RTP_PROCESS_TYPE_ENGINE:
		*hwe = ctx->hwe;
		*gt = (*hwe)->gt;
		*xe = gt_to_xe(*gt);
		break;
	}
}

/**
 * xe_rtp_process_ctx_enable_active_tracking - Enable tracking of active entries
 *
 * Set additional metadata to track what entries are considered "active", i.e.
 * their rules match the condition. Bits are never cleared: entries with
 * matching rules set the corresponding bit in the bitmap.
 *
 * @ctx: The context for processing the table
 * @active_entries: bitmap to store the active entries
 * @n_entries: number of entries to be processed
 */
void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
					       unsigned long *active_entries,
					       size_t n_entries)
{
	ctx->active_entries = active_entries;
	ctx->n_entries = n_entries;
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking);
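/*
 * Sketch of the expected wiring for active-entry tracking, assuming a table
 * like the hypothetical example_entries above and a GT-level context built
 * with the XE_RTP_PROCESS_CTX_INITIALIZER() helper from xe_rtp.h:
 *
 *	DECLARE_BITMAP(active, ARRAY_SIZE(example_entries));
 *	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
 *
 *	xe_rtp_process_ctx_enable_active_tracking(&ctx, active,
 *						  ARRAY_SIZE(example_entries));
 *
 * After processing, each set bit in the bitmap marks an entry whose rules
 * matched.
 */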
static void rtp_mark_active(struct xe_device *xe,
			    struct xe_rtp_process_ctx *ctx,
			    unsigned int idx)
{
	if (!ctx->active_entries)
		return;

	if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries))
		return;

	bitmap_set(ctx->active_entries, idx, 1);
}

/**
 * xe_rtp_process_to_sr - Process all RTP @entries, adding the matching ones to
 *                        the save-restore argument.
 * @ctx: The context for processing the table, with one of device, gt or hwe
 * @entries: Table with RTP definitions
 * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries)
 * @sr: Save-restore struct where matching rules execute the action. This can be
 *      viewed as the "coalesced view" of multiple tables. The bits for each
 *      register set are expected not to collide with previously added entries
 *
 * Walk the first @n_entries entries of the table pointed to by @entries and
 * add those with matching rules to @sr. If the context provides a hwe, its
 * mmio_base is used to calculate the right register offset
 */
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
			  const struct xe_rtp_entry_sr *entries,
			  size_t n_entries,
			  struct xe_reg_sr *sr)
{
	const struct xe_rtp_entry_sr *entry;
	struct xe_hw_engine *hwe = NULL;
	struct xe_gt *gt = NULL;
	struct xe_device *xe = NULL;

	rtp_get_context(ctx, &hwe, &gt, &xe);

	xe_assert(xe, entries);

	for (entry = entries; entry - entries < n_entries; entry++) {
		bool match = false;

		if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
			struct xe_hw_engine *each_hwe;
			enum xe_hw_engine_id id;

			for_each_hw_engine(each_hwe, gt, id)
				match |= rtp_process_one_sr(entry, xe, gt,
							    each_hwe, sr);
		} else {
			match = rtp_process_one_sr(entry, xe, gt, hwe, sr);
		}

		if (match)
			rtp_mark_active(xe, ctx, entry - entries);
	}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);
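/*
 * Typical call site, sketched under the same assumptions as the examples
 * above (hypothetical example_entries table, GT context, the GT's
 * save-restore table in gt->reg_sr):
 *
 *	xe_rtp_process_to_sr(&ctx, example_entries,
 *			     ARRAY_SIZE(example_entries), &gt->reg_sr);
 */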
/**
 * xe_rtp_process - Process all RTP @entries, without running any action
 * @ctx: The context for processing the table, with one of device, gt or hwe
 * @entries: Table with RTP definitions
 *
 * Walk the table pointed to by @entries (with an empty sentinel), executing
 * the rules. One difference from xe_rtp_process_to_sr(): there is no action
 * associated with each entry since this uses struct xe_rtp_entry. Its main use
 * is for marking active workarounds via
 * xe_rtp_process_ctx_enable_active_tracking().
 */
void xe_rtp_process(struct xe_rtp_process_ctx *ctx,
		    const struct xe_rtp_entry *entries)
{
	const struct xe_rtp_entry *entry;
	struct xe_hw_engine *hwe;
	struct xe_gt *gt;
	struct xe_device *xe;

	rtp_get_context(ctx, &hwe, &gt, &xe);

	for (entry = entries; entry && entry->rules; entry++) {
		if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules))
			continue;

		rtp_mark_active(xe, ctx, entry - entries);
	}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process);

bool xe_rtp_match_even_instance(const struct xe_device *xe,
				const struct xe_gt *gt,
				const struct xe_hw_engine *hwe)
{
	return hwe->instance % 2 == 0;
}

bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe,
					  const struct xe_gt *gt,
					  const struct xe_hw_engine *hwe)
{
	u64 render_compute_mask = gt->info.engine_mask &
		(XE_HW_ENGINE_CCS_MASK | XE_HW_ENGINE_RCS_MASK);

	return render_compute_mask &&
		hwe->engine_id == __ffs(render_compute_mask);
}

bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe,
			       const struct xe_gt *gt,
			       const struct xe_hw_engine *hwe)
{
	return !IS_SRIOV_VF(xe);
}

bool xe_rtp_match_psmi_enabled(const struct xe_device *xe,
			       const struct xe_gt *gt,
			       const struct xe_hw_engine *hwe)
{
	return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev));
}

bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe,
						  const struct xe_gt *gt,
						  const struct xe_hw_engine *hwe)
{
	return xe_gt_has_discontiguous_dss_groups(gt);
}

bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe,
			       const struct xe_gt *gt,
			       const struct xe_hw_engine *hwe)
{
	return xe->info.has_flat_ccs;
}
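/*
 * The matchers above are invoked through the XE_RTP_MATCH_FUNC case in
 * rule_matches(); tables reference them with the FUNC() helper from xe_rtp.h,
 * e.g. in a hypothetical entry:
 *
 *	{ XE_RTP_NAME("example"),
 *	  XE_RTP_RULES(ENGINE_CLASS(VIDEO_DECODE),
 *		       FUNC(xe_rtp_match_even_instance)),
 *	  ...
 *	},
 */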