1 /* 2 * SPDX-License-Identifier: MIT 3 * 4 * Copyright © 2017-2018 Intel Corporation 5 */ 6 7 #include <linux/irq.h> 8 #include <linux/pm_runtime.h> 9 #include "i915_pmu.h" 10 #include "intel_ringbuffer.h" 11 #include "i915_drv.h" 12 13 /* Frequency for the sampling timer for events which need it. */ 14 #define FREQUENCY 200 15 #define PERIOD max_t(u64, 10000, NSEC_PER_SEC / FREQUENCY) 16 17 #define ENGINE_SAMPLE_MASK \ 18 (BIT(I915_SAMPLE_BUSY) | \ 19 BIT(I915_SAMPLE_WAIT) | \ 20 BIT(I915_SAMPLE_SEMA)) 21 22 #define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS) 23 24 static cpumask_t i915_pmu_cpumask; 25 26 static u8 engine_config_sample(u64 config) 27 { 28 return config & I915_PMU_SAMPLE_MASK; 29 } 30 31 static u8 engine_event_sample(struct perf_event *event) 32 { 33 return engine_config_sample(event->attr.config); 34 } 35 36 static u8 engine_event_class(struct perf_event *event) 37 { 38 return (event->attr.config >> I915_PMU_CLASS_SHIFT) & 0xff; 39 } 40 41 static u8 engine_event_instance(struct perf_event *event) 42 { 43 return (event->attr.config >> I915_PMU_SAMPLE_BITS) & 0xff; 44 } 45 46 static bool is_engine_config(u64 config) 47 { 48 return config < __I915_PMU_OTHER(0); 49 } 50 51 static unsigned int config_enabled_bit(u64 config) 52 { 53 if (is_engine_config(config)) 54 return engine_config_sample(config); 55 else 56 return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0)); 57 } 58 59 static u64 config_enabled_mask(u64 config) 60 { 61 return BIT_ULL(config_enabled_bit(config)); 62 } 63 64 static bool is_engine_event(struct perf_event *event) 65 { 66 return is_engine_config(event->attr.config); 67 } 68 69 static unsigned int event_enabled_bit(struct perf_event *event) 70 { 71 return config_enabled_bit(event->attr.config); 72 } 73 74 static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active) 75 { 76 u64 enable; 77 78 /* 79 * Only some counters need the sampling timer. 80 * 81 * We start with a bitmask of all currently enabled events. 82 */ 83 enable = i915->pmu.enable; 84 85 /* 86 * Mask out all the ones which do not need the timer, or in 87 * other words keep all the ones that could need the timer. 88 */ 89 enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) | 90 config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) | 91 ENGINE_SAMPLE_MASK; 92 93 /* 94 * When the GPU is idle per-engine counters do not need to be 95 * running so clear those bits out. 96 */ 97 if (!gpu_active) 98 enable &= ~ENGINE_SAMPLE_MASK; 99 /* 100 * Also there is software busyness tracking available we do not 101 * need the timer for I915_SAMPLE_BUSY counter. 102 * 103 * Use RCS as proxy for all engines. 104 */ 105 else if (intel_engine_supports_stats(i915->engine[RCS0])) 106 enable &= ~BIT(I915_SAMPLE_BUSY); 107 108 /* 109 * If some bits remain it means we need the sampling timer running. 110 */ 111 return enable; 112 } 113 114 void i915_pmu_gt_parked(struct drm_i915_private *i915) 115 { 116 if (!i915->pmu.base.event_init) 117 return; 118 119 spin_lock_irq(&i915->pmu.lock); 120 /* 121 * Signal sampling timer to stop if only engine events are enabled and 122 * GPU went idle. 123 */ 124 i915->pmu.timer_enabled = pmu_needs_timer(i915, false); 125 spin_unlock_irq(&i915->pmu.lock); 126 } 127 128 static void __i915_pmu_maybe_start_timer(struct drm_i915_private *i915) 129 { 130 if (!i915->pmu.timer_enabled && pmu_needs_timer(i915, true)) { 131 i915->pmu.timer_enabled = true; 132 i915->pmu.timer_last = ktime_get(); 133 hrtimer_start_range_ns(&i915->pmu.timer, 134 ns_to_ktime(PERIOD), 0, 135 HRTIMER_MODE_REL_PINNED); 136 } 137 } 138 139 void i915_pmu_gt_unparked(struct drm_i915_private *i915) 140 { 141 if (!i915->pmu.base.event_init) 142 return; 143 144 spin_lock_irq(&i915->pmu.lock); 145 /* 146 * Re-enable sampling timer when GPU goes active. 147 */ 148 __i915_pmu_maybe_start_timer(i915); 149 spin_unlock_irq(&i915->pmu.lock); 150 } 151 152 static void 153 add_sample(struct i915_pmu_sample *sample, u32 val) 154 { 155 sample->cur += val; 156 } 157 158 static void 159 engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) 160 { 161 struct intel_engine_cs *engine; 162 enum intel_engine_id id; 163 intel_wakeref_t wakeref; 164 unsigned long flags; 165 166 if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0) 167 return; 168 169 wakeref = 0; 170 if (READ_ONCE(dev_priv->gt.awake)) 171 wakeref = intel_runtime_pm_get_if_in_use(dev_priv); 172 if (!wakeref) 173 return; 174 175 spin_lock_irqsave(&dev_priv->uncore.lock, flags); 176 for_each_engine(engine, dev_priv, id) { 177 struct intel_engine_pmu *pmu = &engine->pmu; 178 bool busy; 179 u32 val; 180 181 val = I915_READ_FW(RING_CTL(engine->mmio_base)); 182 if (val == 0) /* powerwell off => engine idle */ 183 continue; 184 185 if (val & RING_WAIT) 186 add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns); 187 if (val & RING_WAIT_SEMAPHORE) 188 add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); 189 190 /* 191 * While waiting on a semaphore or event, MI_MODE reports the 192 * ring as idle. However, previously using the seqno, and with 193 * execlists sampling, we account for the ring waiting as the 194 * engine being busy. Therefore, we record the sample as being 195 * busy if either waiting or !idle. 196 */ 197 busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT); 198 if (!busy) { 199 val = I915_READ_FW(RING_MI_MODE(engine->mmio_base)); 200 busy = !(val & MODE_IDLE); 201 } 202 if (busy) 203 add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns); 204 } 205 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); 206 207 intel_runtime_pm_put(dev_priv, wakeref); 208 } 209 210 static void 211 add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul) 212 { 213 sample->cur += mul_u32_u32(val, mul); 214 } 215 216 static void 217 frequency_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) 218 { 219 if (dev_priv->pmu.enable & 220 config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { 221 u32 val; 222 223 val = dev_priv->gt_pm.rps.cur_freq; 224 if (dev_priv->gt.awake) { 225 intel_wakeref_t wakeref; 226 227 with_intel_runtime_pm_if_in_use(dev_priv, wakeref) 228 val = intel_get_cagf(dev_priv, 229 I915_READ_NOTRACE(GEN6_RPSTAT1)); 230 } 231 232 add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_ACT], 233 intel_gpu_freq(dev_priv, val), 234 period_ns / 1000); 235 } 236 237 if (dev_priv->pmu.enable & 238 config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { 239 add_sample_mult(&dev_priv->pmu.sample[__I915_SAMPLE_FREQ_REQ], 240 intel_gpu_freq(dev_priv, 241 dev_priv->gt_pm.rps.cur_freq), 242 period_ns / 1000); 243 } 244 } 245 246 static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) 247 { 248 struct drm_i915_private *i915 = 249 container_of(hrtimer, struct drm_i915_private, pmu.timer); 250 unsigned int period_ns; 251 ktime_t now; 252 253 if (!READ_ONCE(i915->pmu.timer_enabled)) 254 return HRTIMER_NORESTART; 255 256 now = ktime_get(); 257 period_ns = ktime_to_ns(ktime_sub(now, i915->pmu.timer_last)); 258 i915->pmu.timer_last = now; 259 260 /* 261 * Strictly speaking the passed in period may not be 100% accurate for 262 * all internal calculation, since some amount of time can be spent on 263 * grabbing the forcewake. However the potential error from timer call- 264 * back delay greatly dominates this so we keep it simple. 265 */ 266 engines_sample(i915, period_ns); 267 frequency_sample(i915, period_ns); 268 269 hrtimer_forward(hrtimer, now, ns_to_ktime(PERIOD)); 270 271 return HRTIMER_RESTART; 272 } 273 274 static u64 count_interrupts(struct drm_i915_private *i915) 275 { 276 /* open-coded kstat_irqs() */ 277 struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq); 278 u64 sum = 0; 279 int cpu; 280 281 if (!desc || !desc->kstat_irqs) 282 return 0; 283 284 for_each_possible_cpu(cpu) 285 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); 286 287 return sum; 288 } 289 290 static void engine_event_destroy(struct perf_event *event) 291 { 292 struct drm_i915_private *i915 = 293 container_of(event->pmu, typeof(*i915), pmu.base); 294 struct intel_engine_cs *engine; 295 296 engine = intel_engine_lookup_user(i915, 297 engine_event_class(event), 298 engine_event_instance(event)); 299 if (WARN_ON_ONCE(!engine)) 300 return; 301 302 if (engine_event_sample(event) == I915_SAMPLE_BUSY && 303 intel_engine_supports_stats(engine)) 304 intel_disable_engine_stats(engine); 305 } 306 307 static void i915_pmu_event_destroy(struct perf_event *event) 308 { 309 WARN_ON(event->parent); 310 311 if (is_engine_event(event)) 312 engine_event_destroy(event); 313 } 314 315 static int 316 engine_event_status(struct intel_engine_cs *engine, 317 enum drm_i915_pmu_engine_sample sample) 318 { 319 switch (sample) { 320 case I915_SAMPLE_BUSY: 321 case I915_SAMPLE_WAIT: 322 break; 323 case I915_SAMPLE_SEMA: 324 if (INTEL_GEN(engine->i915) < 6) 325 return -ENODEV; 326 break; 327 default: 328 return -ENOENT; 329 } 330 331 return 0; 332 } 333 334 static int 335 config_status(struct drm_i915_private *i915, u64 config) 336 { 337 switch (config) { 338 case I915_PMU_ACTUAL_FREQUENCY: 339 if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) 340 /* Requires a mutex for sampling! */ 341 return -ENODEV; 342 /* Fall-through. */ 343 case I915_PMU_REQUESTED_FREQUENCY: 344 if (INTEL_GEN(i915) < 6) 345 return -ENODEV; 346 break; 347 case I915_PMU_INTERRUPTS: 348 break; 349 case I915_PMU_RC6_RESIDENCY: 350 if (!HAS_RC6(i915)) 351 return -ENODEV; 352 break; 353 default: 354 return -ENOENT; 355 } 356 357 return 0; 358 } 359 360 static int engine_event_init(struct perf_event *event) 361 { 362 struct drm_i915_private *i915 = 363 container_of(event->pmu, typeof(*i915), pmu.base); 364 struct intel_engine_cs *engine; 365 u8 sample; 366 int ret; 367 368 engine = intel_engine_lookup_user(i915, engine_event_class(event), 369 engine_event_instance(event)); 370 if (!engine) 371 return -ENODEV; 372 373 sample = engine_event_sample(event); 374 ret = engine_event_status(engine, sample); 375 if (ret) 376 return ret; 377 378 if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine)) 379 ret = intel_enable_engine_stats(engine); 380 381 return ret; 382 } 383 384 static int i915_pmu_event_init(struct perf_event *event) 385 { 386 struct drm_i915_private *i915 = 387 container_of(event->pmu, typeof(*i915), pmu.base); 388 int ret; 389 390 if (event->attr.type != event->pmu->type) 391 return -ENOENT; 392 393 /* unsupported modes and filters */ 394 if (event->attr.sample_period) /* no sampling */ 395 return -EINVAL; 396 397 if (has_branch_stack(event)) 398 return -EOPNOTSUPP; 399 400 if (event->cpu < 0) 401 return -EINVAL; 402 403 /* only allow running on one cpu at a time */ 404 if (!cpumask_test_cpu(event->cpu, &i915_pmu_cpumask)) 405 return -EINVAL; 406 407 if (is_engine_event(event)) 408 ret = engine_event_init(event); 409 else 410 ret = config_status(i915, event->attr.config); 411 if (ret) 412 return ret; 413 414 if (!event->parent) 415 event->destroy = i915_pmu_event_destroy; 416 417 return 0; 418 } 419 420 static u64 __get_rc6(struct drm_i915_private *i915) 421 { 422 u64 val; 423 424 val = intel_rc6_residency_ns(i915, 425 IS_VALLEYVIEW(i915) ? 426 VLV_GT_RENDER_RC6 : 427 GEN6_GT_GFX_RC6); 428 429 if (HAS_RC6p(i915)) 430 val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); 431 432 if (HAS_RC6pp(i915)) 433 val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); 434 435 return val; 436 } 437 438 static u64 get_rc6(struct drm_i915_private *i915) 439 { 440 #if IS_ENABLED(CONFIG_PM) 441 intel_wakeref_t wakeref; 442 unsigned long flags; 443 u64 val; 444 445 wakeref = intel_runtime_pm_get_if_in_use(i915); 446 if (wakeref) { 447 val = __get_rc6(i915); 448 intel_runtime_pm_put(i915, wakeref); 449 450 /* 451 * If we are coming back from being runtime suspended we must 452 * be careful not to report a larger value than returned 453 * previously. 454 */ 455 456 spin_lock_irqsave(&i915->pmu.lock, flags); 457 458 if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { 459 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; 460 i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; 461 } else { 462 val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; 463 } 464 465 spin_unlock_irqrestore(&i915->pmu.lock, flags); 466 } else { 467 struct pci_dev *pdev = i915->drm.pdev; 468 struct device *kdev = &pdev->dev; 469 470 /* 471 * We are runtime suspended. 472 * 473 * Report the delta from when the device was suspended to now, 474 * on top of the last known real value, as the approximated RC6 475 * counter value. 476 */ 477 spin_lock_irqsave(&i915->pmu.lock, flags); 478 479 /* 480 * After the above branch intel_runtime_pm_get_if_in_use failed 481 * to get the runtime PM reference we cannot assume we are in 482 * runtime suspend since we can either: a) race with coming out 483 * of it before we took the power.lock, or b) there are other 484 * states than suspended which can bring us here. 485 * 486 * We need to double-check that we are indeed currently runtime 487 * suspended and if not we cannot do better than report the last 488 * known RC6 value. 489 */ 490 if (pm_runtime_status_suspended(kdev)) { 491 val = pm_runtime_suspended_time(kdev); 492 493 if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) 494 i915->pmu.suspended_time_last = val; 495 496 val -= i915->pmu.suspended_time_last; 497 val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; 498 499 i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; 500 } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { 501 val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; 502 } else { 503 val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; 504 } 505 506 spin_unlock_irqrestore(&i915->pmu.lock, flags); 507 } 508 509 return val; 510 #else 511 return __get_rc6(i915); 512 #endif 513 } 514 515 static u64 __i915_pmu_event_read(struct perf_event *event) 516 { 517 struct drm_i915_private *i915 = 518 container_of(event->pmu, typeof(*i915), pmu.base); 519 u64 val = 0; 520 521 if (is_engine_event(event)) { 522 u8 sample = engine_event_sample(event); 523 struct intel_engine_cs *engine; 524 525 engine = intel_engine_lookup_user(i915, 526 engine_event_class(event), 527 engine_event_instance(event)); 528 529 if (WARN_ON_ONCE(!engine)) { 530 /* Do nothing */ 531 } else if (sample == I915_SAMPLE_BUSY && 532 intel_engine_supports_stats(engine)) { 533 val = ktime_to_ns(intel_engine_get_busy_time(engine)); 534 } else { 535 val = engine->pmu.sample[sample].cur; 536 } 537 } else { 538 switch (event->attr.config) { 539 case I915_PMU_ACTUAL_FREQUENCY: 540 val = 541 div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_ACT].cur, 542 USEC_PER_SEC /* to MHz */); 543 break; 544 case I915_PMU_REQUESTED_FREQUENCY: 545 val = 546 div_u64(i915->pmu.sample[__I915_SAMPLE_FREQ_REQ].cur, 547 USEC_PER_SEC /* to MHz */); 548 break; 549 case I915_PMU_INTERRUPTS: 550 val = count_interrupts(i915); 551 break; 552 case I915_PMU_RC6_RESIDENCY: 553 val = get_rc6(i915); 554 break; 555 } 556 } 557 558 return val; 559 } 560 561 static void i915_pmu_event_read(struct perf_event *event) 562 { 563 struct hw_perf_event *hwc = &event->hw; 564 u64 prev, new; 565 566 again: 567 prev = local64_read(&hwc->prev_count); 568 new = __i915_pmu_event_read(event); 569 570 if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) 571 goto again; 572 573 local64_add(new - prev, &event->count); 574 } 575 576 static void i915_pmu_enable(struct perf_event *event) 577 { 578 struct drm_i915_private *i915 = 579 container_of(event->pmu, typeof(*i915), pmu.base); 580 unsigned int bit = event_enabled_bit(event); 581 unsigned long flags; 582 583 spin_lock_irqsave(&i915->pmu.lock, flags); 584 585 /* 586 * Update the bitmask of enabled events and increment 587 * the event reference counter. 588 */ 589 BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); 590 GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); 591 GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); 592 i915->pmu.enable |= BIT_ULL(bit); 593 i915->pmu.enable_count[bit]++; 594 595 /* 596 * Start the sampling timer if needed and not already enabled. 597 */ 598 __i915_pmu_maybe_start_timer(i915); 599 600 /* 601 * For per-engine events the bitmask and reference counting 602 * is stored per engine. 603 */ 604 if (is_engine_event(event)) { 605 u8 sample = engine_event_sample(event); 606 struct intel_engine_cs *engine; 607 608 engine = intel_engine_lookup_user(i915, 609 engine_event_class(event), 610 engine_event_instance(event)); 611 612 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != 613 I915_ENGINE_SAMPLE_COUNT); 614 BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != 615 I915_ENGINE_SAMPLE_COUNT); 616 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); 617 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); 618 GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); 619 620 engine->pmu.enable |= BIT(sample); 621 engine->pmu.enable_count[sample]++; 622 } 623 624 spin_unlock_irqrestore(&i915->pmu.lock, flags); 625 626 /* 627 * Store the current counter value so we can report the correct delta 628 * for all listeners. Even when the event was already enabled and has 629 * an existing non-zero value. 630 */ 631 local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); 632 } 633 634 static void i915_pmu_disable(struct perf_event *event) 635 { 636 struct drm_i915_private *i915 = 637 container_of(event->pmu, typeof(*i915), pmu.base); 638 unsigned int bit = event_enabled_bit(event); 639 unsigned long flags; 640 641 spin_lock_irqsave(&i915->pmu.lock, flags); 642 643 if (is_engine_event(event)) { 644 u8 sample = engine_event_sample(event); 645 struct intel_engine_cs *engine; 646 647 engine = intel_engine_lookup_user(i915, 648 engine_event_class(event), 649 engine_event_instance(event)); 650 651 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); 652 GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); 653 GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); 654 655 /* 656 * Decrement the reference count and clear the enabled 657 * bitmask when the last listener on an event goes away. 658 */ 659 if (--engine->pmu.enable_count[sample] == 0) 660 engine->pmu.enable &= ~BIT(sample); 661 } 662 663 GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); 664 GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); 665 /* 666 * Decrement the reference count and clear the enabled 667 * bitmask when the last listener on an event goes away. 668 */ 669 if (--i915->pmu.enable_count[bit] == 0) { 670 i915->pmu.enable &= ~BIT_ULL(bit); 671 i915->pmu.timer_enabled &= pmu_needs_timer(i915, true); 672 } 673 674 spin_unlock_irqrestore(&i915->pmu.lock, flags); 675 } 676 677 static void i915_pmu_event_start(struct perf_event *event, int flags) 678 { 679 i915_pmu_enable(event); 680 event->hw.state = 0; 681 } 682 683 static void i915_pmu_event_stop(struct perf_event *event, int flags) 684 { 685 if (flags & PERF_EF_UPDATE) 686 i915_pmu_event_read(event); 687 i915_pmu_disable(event); 688 event->hw.state = PERF_HES_STOPPED; 689 } 690 691 static int i915_pmu_event_add(struct perf_event *event, int flags) 692 { 693 if (flags & PERF_EF_START) 694 i915_pmu_event_start(event, flags); 695 696 return 0; 697 } 698 699 static void i915_pmu_event_del(struct perf_event *event, int flags) 700 { 701 i915_pmu_event_stop(event, PERF_EF_UPDATE); 702 } 703 704 static int i915_pmu_event_event_idx(struct perf_event *event) 705 { 706 return 0; 707 } 708 709 struct i915_str_attribute { 710 struct device_attribute attr; 711 const char *str; 712 }; 713 714 static ssize_t i915_pmu_format_show(struct device *dev, 715 struct device_attribute *attr, char *buf) 716 { 717 struct i915_str_attribute *eattr; 718 719 eattr = container_of(attr, struct i915_str_attribute, attr); 720 return sprintf(buf, "%s\n", eattr->str); 721 } 722 723 #define I915_PMU_FORMAT_ATTR(_name, _config) \ 724 (&((struct i915_str_attribute[]) { \ 725 { .attr = __ATTR(_name, 0444, i915_pmu_format_show, NULL), \ 726 .str = _config, } \ 727 })[0].attr.attr) 728 729 static struct attribute *i915_pmu_format_attrs[] = { 730 I915_PMU_FORMAT_ATTR(i915_eventid, "config:0-20"), 731 NULL, 732 }; 733 734 static const struct attribute_group i915_pmu_format_attr_group = { 735 .name = "format", 736 .attrs = i915_pmu_format_attrs, 737 }; 738 739 struct i915_ext_attribute { 740 struct device_attribute attr; 741 unsigned long val; 742 }; 743 744 static ssize_t i915_pmu_event_show(struct device *dev, 745 struct device_attribute *attr, char *buf) 746 { 747 struct i915_ext_attribute *eattr; 748 749 eattr = container_of(attr, struct i915_ext_attribute, attr); 750 return sprintf(buf, "config=0x%lx\n", eattr->val); 751 } 752 753 static struct attribute_group i915_pmu_events_attr_group = { 754 .name = "events", 755 /* Patch in attrs at runtime. */ 756 }; 757 758 static ssize_t 759 i915_pmu_get_attr_cpumask(struct device *dev, 760 struct device_attribute *attr, 761 char *buf) 762 { 763 return cpumap_print_to_pagebuf(true, buf, &i915_pmu_cpumask); 764 } 765 766 static DEVICE_ATTR(cpumask, 0444, i915_pmu_get_attr_cpumask, NULL); 767 768 static struct attribute *i915_cpumask_attrs[] = { 769 &dev_attr_cpumask.attr, 770 NULL, 771 }; 772 773 static const struct attribute_group i915_pmu_cpumask_attr_group = { 774 .attrs = i915_cpumask_attrs, 775 }; 776 777 static const struct attribute_group *i915_pmu_attr_groups[] = { 778 &i915_pmu_format_attr_group, 779 &i915_pmu_events_attr_group, 780 &i915_pmu_cpumask_attr_group, 781 NULL 782 }; 783 784 #define __event(__config, __name, __unit) \ 785 { \ 786 .config = (__config), \ 787 .name = (__name), \ 788 .unit = (__unit), \ 789 } 790 791 #define __engine_event(__sample, __name) \ 792 { \ 793 .sample = (__sample), \ 794 .name = (__name), \ 795 } 796 797 static struct i915_ext_attribute * 798 add_i915_attr(struct i915_ext_attribute *attr, const char *name, u64 config) 799 { 800 sysfs_attr_init(&attr->attr.attr); 801 attr->attr.attr.name = name; 802 attr->attr.attr.mode = 0444; 803 attr->attr.show = i915_pmu_event_show; 804 attr->val = config; 805 806 return ++attr; 807 } 808 809 static struct perf_pmu_events_attr * 810 add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name, 811 const char *str) 812 { 813 sysfs_attr_init(&attr->attr.attr); 814 attr->attr.attr.name = name; 815 attr->attr.attr.mode = 0444; 816 attr->attr.show = perf_event_sysfs_show; 817 attr->event_str = str; 818 819 return ++attr; 820 } 821 822 static struct attribute ** 823 create_event_attributes(struct drm_i915_private *i915) 824 { 825 static const struct { 826 u64 config; 827 const char *name; 828 const char *unit; 829 } events[] = { 830 __event(I915_PMU_ACTUAL_FREQUENCY, "actual-frequency", "MHz"), 831 __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "MHz"), 832 __event(I915_PMU_INTERRUPTS, "interrupts", NULL), 833 __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), 834 }; 835 static const struct { 836 enum drm_i915_pmu_engine_sample sample; 837 char *name; 838 } engine_events[] = { 839 __engine_event(I915_SAMPLE_BUSY, "busy"), 840 __engine_event(I915_SAMPLE_SEMA, "sema"), 841 __engine_event(I915_SAMPLE_WAIT, "wait"), 842 }; 843 unsigned int count = 0; 844 struct perf_pmu_events_attr *pmu_attr = NULL, *pmu_iter; 845 struct i915_ext_attribute *i915_attr = NULL, *i915_iter; 846 struct attribute **attr = NULL, **attr_iter; 847 struct intel_engine_cs *engine; 848 enum intel_engine_id id; 849 unsigned int i; 850 851 /* Count how many counters we will be exposing. */ 852 for (i = 0; i < ARRAY_SIZE(events); i++) { 853 if (!config_status(i915, events[i].config)) 854 count++; 855 } 856 857 for_each_engine(engine, i915, id) { 858 for (i = 0; i < ARRAY_SIZE(engine_events); i++) { 859 if (!engine_event_status(engine, 860 engine_events[i].sample)) 861 count++; 862 } 863 } 864 865 /* Allocate attribute objects and table. */ 866 i915_attr = kcalloc(count, sizeof(*i915_attr), GFP_KERNEL); 867 if (!i915_attr) 868 goto err_alloc; 869 870 pmu_attr = kcalloc(count, sizeof(*pmu_attr), GFP_KERNEL); 871 if (!pmu_attr) 872 goto err_alloc; 873 874 /* Max one pointer of each attribute type plus a termination entry. */ 875 attr = kcalloc(count * 2 + 1, sizeof(*attr), GFP_KERNEL); 876 if (!attr) 877 goto err_alloc; 878 879 i915_iter = i915_attr; 880 pmu_iter = pmu_attr; 881 attr_iter = attr; 882 883 /* Initialize supported non-engine counters. */ 884 for (i = 0; i < ARRAY_SIZE(events); i++) { 885 char *str; 886 887 if (config_status(i915, events[i].config)) 888 continue; 889 890 str = kstrdup(events[i].name, GFP_KERNEL); 891 if (!str) 892 goto err; 893 894 *attr_iter++ = &i915_iter->attr.attr; 895 i915_iter = add_i915_attr(i915_iter, str, events[i].config); 896 897 if (events[i].unit) { 898 str = kasprintf(GFP_KERNEL, "%s.unit", events[i].name); 899 if (!str) 900 goto err; 901 902 *attr_iter++ = &pmu_iter->attr.attr; 903 pmu_iter = add_pmu_attr(pmu_iter, str, events[i].unit); 904 } 905 } 906 907 /* Initialize supported engine counters. */ 908 for_each_engine(engine, i915, id) { 909 for (i = 0; i < ARRAY_SIZE(engine_events); i++) { 910 char *str; 911 912 if (engine_event_status(engine, 913 engine_events[i].sample)) 914 continue; 915 916 str = kasprintf(GFP_KERNEL, "%s-%s", 917 engine->name, engine_events[i].name); 918 if (!str) 919 goto err; 920 921 *attr_iter++ = &i915_iter->attr.attr; 922 i915_iter = 923 add_i915_attr(i915_iter, str, 924 __I915_PMU_ENGINE(engine->uabi_class, 925 engine->instance, 926 engine_events[i].sample)); 927 928 str = kasprintf(GFP_KERNEL, "%s-%s.unit", 929 engine->name, engine_events[i].name); 930 if (!str) 931 goto err; 932 933 *attr_iter++ = &pmu_iter->attr.attr; 934 pmu_iter = add_pmu_attr(pmu_iter, str, "ns"); 935 } 936 } 937 938 i915->pmu.i915_attr = i915_attr; 939 i915->pmu.pmu_attr = pmu_attr; 940 941 return attr; 942 943 err:; 944 for (attr_iter = attr; *attr_iter; attr_iter++) 945 kfree((*attr_iter)->name); 946 947 err_alloc: 948 kfree(attr); 949 kfree(i915_attr); 950 kfree(pmu_attr); 951 952 return NULL; 953 } 954 955 static void free_event_attributes(struct drm_i915_private *i915) 956 { 957 struct attribute **attr_iter = i915_pmu_events_attr_group.attrs; 958 959 for (; *attr_iter; attr_iter++) 960 kfree((*attr_iter)->name); 961 962 kfree(i915_pmu_events_attr_group.attrs); 963 kfree(i915->pmu.i915_attr); 964 kfree(i915->pmu.pmu_attr); 965 966 i915_pmu_events_attr_group.attrs = NULL; 967 i915->pmu.i915_attr = NULL; 968 i915->pmu.pmu_attr = NULL; 969 } 970 971 static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node) 972 { 973 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node); 974 975 GEM_BUG_ON(!pmu->base.event_init); 976 977 /* Select the first online CPU as a designated reader. */ 978 if (!cpumask_weight(&i915_pmu_cpumask)) 979 cpumask_set_cpu(cpu, &i915_pmu_cpumask); 980 981 return 0; 982 } 983 984 static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node) 985 { 986 struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node); 987 unsigned int target; 988 989 GEM_BUG_ON(!pmu->base.event_init); 990 991 if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) { 992 target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); 993 /* Migrate events if there is a valid target */ 994 if (target < nr_cpu_ids) { 995 cpumask_set_cpu(target, &i915_pmu_cpumask); 996 perf_pmu_migrate_context(&pmu->base, cpu, target); 997 } 998 } 999 1000 return 0; 1001 } 1002 1003 static enum cpuhp_state cpuhp_slot = CPUHP_INVALID; 1004 1005 static int i915_pmu_register_cpuhp_state(struct drm_i915_private *i915) 1006 { 1007 enum cpuhp_state slot; 1008 int ret; 1009 1010 ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, 1011 "perf/x86/intel/i915:online", 1012 i915_pmu_cpu_online, 1013 i915_pmu_cpu_offline); 1014 if (ret < 0) 1015 return ret; 1016 1017 slot = ret; 1018 ret = cpuhp_state_add_instance(slot, &i915->pmu.node); 1019 if (ret) { 1020 cpuhp_remove_multi_state(slot); 1021 return ret; 1022 } 1023 1024 cpuhp_slot = slot; 1025 return 0; 1026 } 1027 1028 static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915) 1029 { 1030 WARN_ON(cpuhp_slot == CPUHP_INVALID); 1031 WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &i915->pmu.node)); 1032 cpuhp_remove_multi_state(cpuhp_slot); 1033 } 1034 1035 void i915_pmu_register(struct drm_i915_private *i915) 1036 { 1037 int ret; 1038 1039 if (INTEL_GEN(i915) <= 2) { 1040 DRM_INFO("PMU not supported for this GPU."); 1041 return; 1042 } 1043 1044 i915_pmu_events_attr_group.attrs = create_event_attributes(i915); 1045 if (!i915_pmu_events_attr_group.attrs) { 1046 ret = -ENOMEM; 1047 goto err; 1048 } 1049 1050 i915->pmu.base.attr_groups = i915_pmu_attr_groups; 1051 i915->pmu.base.task_ctx_nr = perf_invalid_context; 1052 i915->pmu.base.event_init = i915_pmu_event_init; 1053 i915->pmu.base.add = i915_pmu_event_add; 1054 i915->pmu.base.del = i915_pmu_event_del; 1055 i915->pmu.base.start = i915_pmu_event_start; 1056 i915->pmu.base.stop = i915_pmu_event_stop; 1057 i915->pmu.base.read = i915_pmu_event_read; 1058 i915->pmu.base.event_idx = i915_pmu_event_event_idx; 1059 1060 spin_lock_init(&i915->pmu.lock); 1061 hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1062 i915->pmu.timer.function = i915_sample; 1063 1064 ret = perf_pmu_register(&i915->pmu.base, "i915", -1); 1065 if (ret) 1066 goto err; 1067 1068 ret = i915_pmu_register_cpuhp_state(i915); 1069 if (ret) 1070 goto err_unreg; 1071 1072 return; 1073 1074 err_unreg: 1075 perf_pmu_unregister(&i915->pmu.base); 1076 err: 1077 i915->pmu.base.event_init = NULL; 1078 free_event_attributes(i915); 1079 DRM_NOTE("Failed to register PMU! (err=%d)\n", ret); 1080 } 1081 1082 void i915_pmu_unregister(struct drm_i915_private *i915) 1083 { 1084 if (!i915->pmu.base.event_init) 1085 return; 1086 1087 WARN_ON(i915->pmu.enable); 1088 1089 hrtimer_cancel(&i915->pmu.timer); 1090 1091 i915_pmu_unregister_cpuhp_state(i915); 1092 1093 perf_pmu_unregister(&i915->pmu.base); 1094 i915->pmu.base.event_init = NULL; 1095 free_event_attributes(i915); 1096 } 1097