1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2022 Intel Corporation 4 */ 5 6 #include "xe_rtp.h" 7 8 #include <kunit/visibility.h> 9 10 #include <uapi/drm/xe_drm.h> 11 12 #include "xe_configfs.h" 13 #include "xe_gt.h" 14 #include "xe_gt_topology.h" 15 #include "xe_reg_sr.h" 16 #include "xe_sriov.h" 17 18 /** 19 * DOC: Register Table Processing 20 * 21 * Internal infrastructure to define how registers should be updated based on 22 * rules and actions. This can be used to define tables with multiple entries 23 * (one per register) that will be walked over at some point in time to apply 24 * the values to the registers that have matching rules. 25 */ 26 27 static bool has_samedia(const struct xe_device *xe) 28 { 29 return xe->info.media_verx100 >= 1300; 30 } 31 32 static bool rule_matches(const struct xe_device *xe, 33 struct xe_gt *gt, 34 struct xe_hw_engine *hwe, 35 const struct xe_rtp_rule *rules, 36 unsigned int n_rules) 37 { 38 const struct xe_rtp_rule *r; 39 unsigned int i, rcount = 0; 40 bool match; 41 42 for (r = rules, i = 0; i < n_rules; r = &rules[++i]) { 43 switch (r->match_type) { 44 case XE_RTP_MATCH_OR: 45 /* 46 * This is only reached if a complete set of 47 * rules passed or none were evaluated. For both cases, 48 * shortcut the other rules and return the proper value. 
49 */ 50 goto done; 51 case XE_RTP_MATCH_PLATFORM: 52 match = xe->info.platform == r->platform; 53 break; 54 case XE_RTP_MATCH_SUBPLATFORM: 55 match = xe->info.platform == r->platform && 56 xe->info.subplatform == r->subplatform; 57 break; 58 case XE_RTP_MATCH_GRAPHICS_VERSION: 59 if (drm_WARN_ON(&xe->drm, !gt)) 60 return false; 61 62 match = xe->info.graphics_verx100 == r->ver_start && 63 (!has_samedia(xe) || !xe_gt_is_media_type(gt)); 64 break; 65 case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE: 66 if (drm_WARN_ON(&xe->drm, !gt)) 67 return false; 68 69 match = xe->info.graphics_verx100 >= r->ver_start && 70 xe->info.graphics_verx100 <= r->ver_end && 71 (!has_samedia(xe) || !xe_gt_is_media_type(gt)); 72 break; 73 case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT: 74 if (drm_WARN_ON(&xe->drm, !gt)) 75 return false; 76 77 match = xe->info.graphics_verx100 == r->ver_start; 78 break; 79 case XE_RTP_MATCH_GRAPHICS_STEP: 80 if (drm_WARN_ON(&xe->drm, !gt)) 81 return false; 82 83 match = xe->info.step.graphics >= r->step_start && 84 xe->info.step.graphics < r->step_end && 85 (!has_samedia(xe) || !xe_gt_is_media_type(gt)); 86 break; 87 case XE_RTP_MATCH_MEDIA_VERSION: 88 if (drm_WARN_ON(&xe->drm, !gt)) 89 return false; 90 91 match = xe->info.media_verx100 == r->ver_start && 92 (!has_samedia(xe) || xe_gt_is_media_type(gt)); 93 break; 94 case XE_RTP_MATCH_MEDIA_VERSION_RANGE: 95 if (drm_WARN_ON(&xe->drm, !gt)) 96 return false; 97 98 match = xe->info.media_verx100 >= r->ver_start && 99 xe->info.media_verx100 <= r->ver_end && 100 (!has_samedia(xe) || xe_gt_is_media_type(gt)); 101 break; 102 case XE_RTP_MATCH_MEDIA_STEP: 103 if (drm_WARN_ON(&xe->drm, !gt)) 104 return false; 105 106 match = xe->info.step.media >= r->step_start && 107 xe->info.step.media < r->step_end && 108 (!has_samedia(xe) || xe_gt_is_media_type(gt)); 109 break; 110 case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT: 111 if (drm_WARN_ON(&xe->drm, !gt)) 112 return false; 113 114 match = xe->info.media_verx100 == r->ver_start; 115 break; 
116 case XE_RTP_MATCH_INTEGRATED: 117 match = !xe->info.is_dgfx; 118 break; 119 case XE_RTP_MATCH_DISCRETE: 120 match = xe->info.is_dgfx; 121 break; 122 case XE_RTP_MATCH_ENGINE_CLASS: 123 if (drm_WARN_ON(&xe->drm, !hwe)) 124 return false; 125 126 match = hwe->class == r->engine_class; 127 break; 128 case XE_RTP_MATCH_NOT_ENGINE_CLASS: 129 if (drm_WARN_ON(&xe->drm, !hwe)) 130 return false; 131 132 match = hwe->class != r->engine_class; 133 break; 134 case XE_RTP_MATCH_FUNC: 135 match = r->match_func(xe, gt, hwe); 136 break; 137 default: 138 drm_warn(&xe->drm, "Invalid RTP match %u\n", 139 r->match_type); 140 match = false; 141 } 142 143 if (!match) { 144 /* 145 * Advance rules until we find XE_RTP_MATCH_OR to check 146 * if there's another set of conditions to check 147 */ 148 while (++i < n_rules && rules[i].match_type != XE_RTP_MATCH_OR) 149 ; 150 151 if (i >= n_rules) 152 return false; 153 154 rcount = 0; 155 } else { 156 rcount++; 157 } 158 } 159 160 done: 161 if (drm_WARN_ON(&xe->drm, !rcount)) 162 return false; 163 164 return true; 165 } 166 167 static void rtp_add_sr_entry(const struct xe_rtp_action *action, 168 struct xe_gt *gt, 169 u32 mmio_base, 170 struct xe_reg_sr *sr) 171 { 172 struct xe_reg_sr_entry sr_entry = { 173 .reg = action->reg, 174 .clr_bits = action->clr_bits, 175 .set_bits = action->set_bits, 176 .read_mask = action->read_mask, 177 }; 178 179 sr_entry.reg.addr += mmio_base; 180 xe_reg_sr_add(sr, &sr_entry, gt); 181 } 182 183 static bool rtp_process_one_sr(const struct xe_rtp_entry_sr *entry, 184 struct xe_device *xe, struct xe_gt *gt, 185 struct xe_hw_engine *hwe, struct xe_reg_sr *sr) 186 { 187 const struct xe_rtp_action *action; 188 u32 mmio_base; 189 unsigned int i; 190 191 if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules)) 192 return false; 193 194 for (i = 0, action = &entry->actions[0]; i < entry->n_actions; action++, i++) { 195 if ((entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) || 196 (action->flags & 
		     XE_RTP_ACTION_FLAG_ENGINE_BASE))
			/* engine-relative action: hwe must be non-NULL here — guaranteed by caller's context; TODO confirm */
			mmio_base = hwe->mmio_base;
		else
			mmio_base = 0;

		rtp_add_sr_entry(action, gt, mmio_base, sr);
	}

	return true;
}

/*
 * Resolve the (hwe, gt, xe) triplet from the processing context. Members
 * not implied by the context type are set to NULL so rule_matches() can
 * warn when a table needs more context than was provided.
 */
static void rtp_get_context(struct xe_rtp_process_ctx *ctx,
			    struct xe_hw_engine **hwe,
			    struct xe_gt **gt,
			    struct xe_device **xe)
{
	switch (ctx->type) {
	case XE_RTP_PROCESS_TYPE_DEVICE:
		*hwe = NULL;
		*gt = NULL;
		*xe = ctx->xe;
		break;
	case XE_RTP_PROCESS_TYPE_GT:
		*hwe = NULL;
		*gt = ctx->gt;
		*xe = gt_to_xe(*gt);
		break;
	case XE_RTP_PROCESS_TYPE_ENGINE:
		*hwe = ctx->hwe;
		*gt = (*hwe)->gt;
		*xe = gt_to_xe(*gt);
		break;
	}
}

/**
 * xe_rtp_process_ctx_enable_active_tracking - Enable tracking of active entries
 *
 * Set additional metadata to track what entries are considered "active", i.e.
 * their rules match the condition. Bits are never cleared: entries with
 * matching rules set the corresponding bit in the bitmap.
 *
 * @ctx: The context for processing the table
 * @active_entries: bitmap to store the active entries; must have at least
 *		    @n_entries bits and outlive the table processing
 * @n_entries: number of entries to be processed
 */
void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx,
					       unsigned long *active_entries,
					       size_t n_entries)
{
	ctx->active_entries = active_entries;
	ctx->n_entries = n_entries;
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_ctx_enable_active_tracking);

/*
 * Record entry @idx as active in the context's bitmap. No-op when active
 * tracking was not enabled; warns (and skips) on an out-of-range index.
 */
static void rtp_mark_active(struct xe_device *xe,
			    struct xe_rtp_process_ctx *ctx,
			    unsigned int idx)
{
	if (!ctx->active_entries)
		return;

	if (drm_WARN_ON(&xe->drm, idx >= ctx->n_entries))
		return;

	bitmap_set(ctx->active_entries, idx, 1);
}

/**
 * xe_rtp_process_to_sr - Process all rtp @entries, adding the matching ones to
 * the save-restore argument.
 * @ctx: The context for processing the table, with one of device, gt or hwe
 * @entries: Table with RTP definitions
 * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries)
 * @sr: Save-restore struct where matching rules execute the action. This can be
 *      viewed as the "coalesced view" of multiple the tables. The bits for each
 *      register set are expected not to collide with previously added entries
 * @process_in_vf: Whether this RTP table should get processed for SR-IOV VF
 *                 devices. Should generally only be 'true' for LRC tables.
 *
 * Walk the table pointed by @entries (with an empty sentinel) and add all
 * entries with matching rules to @sr. If @hwe is not NULL, its mmio_base is
 * used to calculate the right register offset
 */
void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx,
			  const struct xe_rtp_entry_sr *entries,
			  size_t n_entries,
			  struct xe_reg_sr *sr,
			  bool process_in_vf)
{
	const struct xe_rtp_entry_sr *entry;
	struct xe_hw_engine *hwe = NULL;
	struct xe_gt *gt = NULL;
	struct xe_device *xe = NULL;

	rtp_get_context(ctx, &hwe, &gt, &xe);

	/* On VFs most registers are owned by the PF; only process when asked */
	if (!process_in_vf && IS_SRIOV_VF(xe))
		return;

	xe_assert(xe, entries);

	for (entry = entries; entry - entries < n_entries; entry++) {
		bool match = false;

		if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) {
			struct xe_hw_engine *each_hwe;
			enum xe_hw_engine_id id;

			/* Entry is active if it matched on ANY engine */
			for_each_hw_engine(each_hwe, gt, id)
				match |= rtp_process_one_sr(entry, xe, gt,
							    each_hwe, sr);
		} else {
			match = rtp_process_one_sr(entry, xe, gt, hwe, sr);
		}

		if (match)
			rtp_mark_active(xe, ctx, entry - entries);
	}
}
EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process_to_sr);

/**
 * xe_rtp_process - Process all rtp @entries, without running any action
 * @ctx: The context for processing the table, with one of device, gt or hwe
 * @entries: Table with RTP
definitions 322 * 323 * Walk the table pointed by @entries (with an empty sentinel), executing the 324 * rules. One difference from xe_rtp_process_to_sr(): there is no action 325 * associated with each entry since this uses struct xe_rtp_entry. Its main use 326 * is for marking active workarounds via 327 * xe_rtp_process_ctx_enable_active_tracking(). 328 */ 329 void xe_rtp_process(struct xe_rtp_process_ctx *ctx, 330 const struct xe_rtp_entry *entries) 331 { 332 const struct xe_rtp_entry *entry; 333 struct xe_hw_engine *hwe; 334 struct xe_gt *gt; 335 struct xe_device *xe; 336 337 rtp_get_context(ctx, &hwe, >, &xe); 338 339 for (entry = entries; entry && entry->rules; entry++) { 340 if (!rule_matches(xe, gt, hwe, entry->rules, entry->n_rules)) 341 continue; 342 343 rtp_mark_active(xe, ctx, entry - entries); 344 } 345 } 346 EXPORT_SYMBOL_IF_KUNIT(xe_rtp_process); 347 348 bool xe_rtp_match_even_instance(const struct xe_device *xe, 349 const struct xe_gt *gt, 350 const struct xe_hw_engine *hwe) 351 { 352 return hwe->instance % 2 == 0; 353 } 354 355 bool xe_rtp_match_first_render_or_compute(const struct xe_device *xe, 356 const struct xe_gt *gt, 357 const struct xe_hw_engine *hwe) 358 { 359 u64 render_compute_mask = gt->info.engine_mask & 360 (XE_HW_ENGINE_CCS_MASK | XE_HW_ENGINE_RCS_MASK); 361 362 return render_compute_mask && 363 hwe->engine_id == __ffs(render_compute_mask); 364 } 365 366 bool xe_rtp_match_not_sriov_vf(const struct xe_device *xe, 367 const struct xe_gt *gt, 368 const struct xe_hw_engine *hwe) 369 { 370 return !IS_SRIOV_VF(xe); 371 } 372 373 bool xe_rtp_match_psmi_enabled(const struct xe_device *xe, 374 const struct xe_gt *gt, 375 const struct xe_hw_engine *hwe) 376 { 377 return xe_configfs_get_psmi_enabled(to_pci_dev(xe->drm.dev)); 378 } 379 380 bool xe_rtp_match_gt_has_discontiguous_dss_groups(const struct xe_device *xe, 381 const struct xe_gt *gt, 382 const struct xe_hw_engine *hwe) 383 { 384 return xe_gt_has_discontiguous_dss_groups(gt); 385 } 386 
387 bool xe_rtp_match_has_flat_ccs(const struct xe_device *xe, 388 const struct xe_gt *gt, 389 const struct xe_hw_engine *hwe) 390 { 391 return xe->info.has_flat_ccs; 392 } 393