// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "i915_timer_util.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

struct kobj_engine {
	struct kobject base;
	struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
	return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static const struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static const struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static const struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static const struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);
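/*
 * The attributes above are registered by intel_engines_add_sysfs() below
 * and appear under the per-engine directory of the DRM device. A sketch of
 * reading them from userspace (card index, engine names and the mmio value
 * depend on the system):
 *
 *   $ cat /sys/class/drm/card0/engine/rcs0/name
 *   rcs0
 *   $ cat /sys/class/drm/card0/engine/rcs0/mmio_base
 *   0x2000
 */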
static const char * const vcs_caps[] = {
	[ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
	[ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
	/* Trim off the trailing space and replace with a newline */
	if (len > PAGE_SIZE)
		len = PAGE_SIZE;
	if (len > 0)
		buf[len - 1] = '\n';

	return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
	    unsigned long caps, char *buf, bool show_unknown)
{
	const char * const *repr;
	int count, n;
	ssize_t len;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		repr = vcs_caps;
		count = ARRAY_SIZE(vcs_caps);
		break;

	case VIDEO_ENHANCEMENT_CLASS:
		repr = vecs_caps;
		count = ARRAY_SIZE(vecs_caps);
		break;

	default:
		repr = NULL;
		count = 0;
		break;
	}
	GEM_BUG_ON(count > BITS_PER_LONG);

	len = 0;
	for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
		if (n >= count || !repr[n]) {
			if (GEM_WARN_ON(show_unknown))
				len += sysfs_emit_at(buf, len, "[%x] ", n);
		} else {
			len += sysfs_emit_at(buf, len, "%s ", repr[n]);
		}
		if (GEM_WARN_ON(len >= PAGE_SIZE))
			break;
	}
	return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static const struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static const struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);
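/*
 * An illustrative read of the two attributes above on a video engine
 * (the reported set depends on the hardware; "capabilities" shows what this
 * engine advertises, "known_capabilities" shows every name known for the
 * class):
 *
 *   $ cat /sys/class/drm/card0/engine/vcs0/capabilities
 *   hevc sfc
 *   $ cat /sys/class/drm/card0/engine/vcs0/known_capabilities
 *   hevc sfc
 */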
static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * When waiting for a request, if it is currently being executed
	 * on the GPU, we busywait for a short while before sleeping. The
	 * premise is that most requests are short, and if it is already
	 * executing then there is a good chance that it will complete
	 * before we can set up the interrupt handler and go to sleep.
	 * We try to offset the cost of going to sleep by first spinning
	 * on the request -- if it completes in less time than it would take
	 * to go to sleep, process the interrupt and return to the client,
	 * then we have saved the client some latency, albeit at the cost
	 * of spinning on an expensive CPU core.
	 *
	 * While we try to avoid waiting at all for a request that is unlikely
	 * to complete, deciding how long it is worth spinning for is an
	 * arbitrary decision: trading off power vs latency.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_max_busywait_duration_ns(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

	return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static const struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);
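/*
 * A sketch of tuning the busywait from userspace (value illustrative; a
 * write outside the range accepted by
 * intel_clamp_max_busywait_duration_ns() is rejected with -EINVAL):
 *
 *   # echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 */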
static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Execlists uses a scheduling quantum (a timeslice) to alternate
	 * execution between ready-to-run contexts of equal priority. This
	 * ensures that all users (though only if they are of equal importance)
	 * have the opportunity to run and prevents livelocks where contexts
	 * may have implicit ordering due to userspace semaphores.
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_timeslice_duration_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

	if (execlists_active(&engine->execlists))
		set_timer_ms(&engine->execlists.timer, duration);

	return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static const struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);
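/*
 * Illustrative use from userspace. Note this attribute is only registered
 * on engines where intel_engine_has_timeslices() is true (see
 * intel_engines_add_sysfs() below), and a new value is applied to an
 * already-armed timeslice timer via set_timer_ms() above:
 *
 *   # echo 5 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 */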
static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
	   const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long duration, clamped;
	int err;

	/*
	 * Allowing ourselves to sleep before a GPU reset after disabling
	 * submission, even for a few milliseconds, gives an innocent context
	 * the opportunity to clear the GPU before the reset occurs. However,
	 * how long to sleep depends on the typical non-preemptible duration
	 * (a similar problem to determining the ideal preempt-reset timeout
	 * or even the heartbeat interval).
	 */

	err = kstrtoull(buf, 0, &duration);
	if (err)
		return err;

	clamped = intel_clamp_stop_timeout_ms(engine, duration);
	if (duration != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.stop_timeout_ms, duration);
	return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static const struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
		      const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long timeout, clamped;
	int err;

	/*
	 * After initialising a preemption request, we give the current
	 * resident a small amount of time to vacate the GPU. The preemption
	 * request is for a higher priority context and should be immediate to
	 * maintain high quality of service (and avoid priority inversion).
	 * However, the preemption granularity of the GPU can be quite coarse
	 * and so we need a compromise.
	 */

	err = kstrtoull(buf, 0, &timeout);
	if (err)
		return err;

	clamped = intel_clamp_preempt_timeout_ms(engine, timeout);
	if (timeout != clamped)
		return -EINVAL;

	WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

	if (READ_ONCE(engine->execlists.pending[0]))
		set_timer_ms(&engine->execlists.preempt, timeout);

	return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
		     char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
			char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static const struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);
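/*
 * Illustrative use from userspace (value illustrative). The attribute is
 * only registered where intel_engine_has_preempt_reset() is true, and a new
 * value is applied even to a preemption already in flight via the
 * set_timer_ms() call above:
 *
 *   # echo 100 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 */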
static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
		const char *buf, size_t count)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);
	unsigned long long delay, clamped;
	int err;

	/*
	 * We monitor the health of the system via periodic heartbeat pulses.
	 * The pulses also provide the opportunity to perform garbage
	 * collection. However, we interpret an incomplete pulse (a missed
	 * heartbeat) as an indication that the system is no longer responsive,
	 * i.e. hung, and perform an engine or full GPU reset. Given that the
	 * preemption granularity can be very coarse on a system, the optimal
	 * value for any workload is unknowable!
	 */

	err = kstrtoull(buf, 0, &delay);
	if (err)
		return err;

	clamped = intel_clamp_heartbeat_interval_ms(engine, delay);
	if (delay != clamped)
		return -EINVAL;

	err = intel_engine_set_heartbeat(engine, delay);
	if (err)
		return err;

	return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
	struct intel_engine_cs *engine = kobj_to_engine(kobj);

	return sysfs_emit(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static const struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

static void kobj_engine_release(struct kobject *kobj)
{
	kfree(kobj);
}

static const struct kobj_type kobj_engine_type = {
	.release = kobj_engine_release,
	.sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return NULL;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = engine;

	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
		kobject_put(&ke->base);
		return NULL;
	}

	/* xfer ownership to sysfs tree */
	return &ke->base;
}

static void add_defaults(struct kobj_engine *parent)
{
	static const struct attribute * const files[] = {
		&max_spin_def.attr,
		&stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_def.attr,
#endif
		NULL
	};
	struct kobj_engine *ke;

	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
	if (!ke)
		return;

	kobject_init(&ke->base, &kobj_engine_type);
	ke->engine = parent->engine;

	if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
		kobject_put(&ke->base);
		return;
	}

	if (sysfs_create_files(&ke->base, files))
		return;

	if (intel_engine_has_timeslices(ke->engine) &&
	    sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
		return;

	if (intel_engine_has_preempt_reset(ke->engine) &&
	    sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
		return;
}

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
	static const struct attribute * const files[] = {
		&name_attr.attr,
		&class_attr.attr,
		&inst_attr.attr,
		&mmio_attr.attr,
		&caps_attr.attr,
		&all_caps_attr.attr,
		&max_spin_attr.attr,
		&stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
		&heartbeat_interval_attr.attr,
#endif
		NULL
	};

	struct device *kdev = i915->drm.primary->kdev;
	struct intel_engine_cs *engine;
	struct kobject *dir;

	dir = kobject_create_and_add("engine", &kdev->kobj);
	if (!dir)
		return;

	for_each_uabi_engine(engine, i915) {
		struct kobject *kobj;

		kobj = kobj_engine(dir, engine);
		if (!kobj)
			goto err_engine;

		if (sysfs_create_files(kobj, files))
			goto err_object;

		if (intel_engine_has_timeslices(engine) &&
		    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
			goto err_engine;

		if (intel_engine_has_preempt_reset(engine) &&
		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
			goto err_engine;

		add_defaults(container_of(kobj, struct kobj_engine, base));

		if (0) {
err_object:
			kobject_put(kobj);
err_engine:
			dev_warn(kdev, "Failed to add sysfs engine '%s'\n",
				 engine->name);
		}
	}
}
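/*
 * The read-only .defaults subdirectory created by add_defaults() mirrors
 * each tunable at its original value, so userspace can restore a parameter
 * after experimenting. A sketch (paths depend on the system):
 *
 *   # cat /sys/class/drm/card0/engine/rcs0/.defaults/stop_timeout_ms \
 *         > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */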