Lines matching "statistical-profiling-extension-v1"
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Perf support for the Statistical Profiling Extension, introduced as
53 if (IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR) && !perf_allow_kernel(&event->attr)) in set_spe_event_has_cx()
54 event->hw.flags |= SPE_PMU_HW_FLAGS_CX; in set_spe_event_has_cx()
59 return !!(event->hw.flags & SPE_PMU_HW_FLAGS_CX); in get_spe_event_has_cx()
98 /* Convert a free-running index from perf into an SPE buffer offset */
99 #define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
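For illustration only, a minimal userspace-style sketch of the same wraparound arithmetic; the page shift and buffer size below are assumptions for the sketch, not values taken from the driver:

#include <stdint.h>
#include <stdio.h>

/* Assumed values for the sketch: 4 KiB pages, an 8-page AUX buffer. */
#define SKETCH_PAGE_SHIFT 12
#define SKETCH_NR_PAGES   8
#define SKETCH_IDX2OFF(idx) \
	((idx) % ((uint64_t)SKETCH_NR_PAGES << SKETCH_PAGE_SHIFT))

int main(void)
{
	/* perf hands the driver a free-running index that never wraps;
	 * the macro folds it back into an offset inside the buffer. */
	uint64_t idx = 3 * ((uint64_t)SKETCH_NR_PAGES << SKETCH_PAGE_SHIFT) + 100;

	printf("index %llu -> buffer offset %llu\n",
	       (unsigned long long)idx,
	       (unsigned long long)SKETCH_IDX2OFF(idx));
	return 0;
}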
127 return !!(spe_pmu->features & arm_spe_pmu_feat_caps[cap]); in arm_spe_pmu_cap_get()
131 return spe_pmu->counter_sz; in arm_spe_pmu_cap_get()
133 return spe_pmu->min_period; in arm_spe_pmu_cap_get()
148 int cap = (long)ea->var; in arm_spe_pmu_cap_show()
241 if (attr == &format_attr_inv_event_filter.attr && !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT)) in arm_spe_pmu_format_attr_is_visible()
244 return attr->mode; in arm_spe_pmu_format_attr_is_visible()
258 return cpumap_print_to_pagebuf(true, buf, &spe_pmu->supported_cpus); in cpumask_show()
281 struct perf_event_attr *attr = &event->attr; in arm_spe_event_to_pmscr()
288 if (!attr->exclude_user) in arm_spe_event_to_pmscr()
291 if (!attr->exclude_kernel) in arm_spe_event_to_pmscr()
302 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_event_sanitise_period()
303 u64 period = event->hw.sample_period; in arm_spe_event_sanitise_period()
306 if (period < spe_pmu->min_period) in arm_spe_event_sanitise_period()
307 period = spe_pmu->min_period; in arm_spe_event_sanitise_period()
313 event->hw.sample_period = period; in arm_spe_event_sanitise_period()
318 struct perf_event_attr *attr = &event->attr; in arm_spe_event_to_pmsirr()
324 reg |= event->hw.sample_period; in arm_spe_event_to_pmsirr()
331 struct perf_event_attr *attr = &event->attr; in arm_spe_event_to_pmsfcr()
355 struct perf_event_attr *attr = &event->attr; in arm_spe_event_to_pmsevfr()
361 struct perf_event_attr *attr = &event->attr; in arm_spe_event_to_pmsnevfr()
367 struct perf_event_attr *attr = &event->attr; in arm_spe_event_to_pmslatfr()
374 u64 head = PERF_IDX2OFF(handle->head, buf); in arm_spe_pmu_pad_buf()
376 memset(buf->base + head, ARM_SPE_BUF_PAD_BYTE, len); in arm_spe_pmu_pad_buf()
377 if (!buf->snapshot) in arm_spe_pmu_pad_buf()
384 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); in arm_spe_pmu_next_snapshot_off()
385 u64 head = PERF_IDX2OFF(handle->head, buf); in arm_spe_pmu_next_snapshot_off()
386 u64 limit = buf->nr_pages * PAGE_SIZE; in arm_spe_pmu_next_snapshot_off()
401 if (limit - head < spe_pmu->max_record_sz) { in arm_spe_pmu_next_snapshot_off()
402 arm_spe_pmu_pad_buf(handle, limit - head); in arm_spe_pmu_next_snapshot_off()
403 handle->head = PERF_IDX2OFF(limit, buf); in arm_spe_pmu_next_snapshot_off()
404 limit = ((buf->nr_pages * PAGE_SIZE) >> 1) + handle->head; in arm_spe_pmu_next_snapshot_off()
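A hedged standalone sketch of the tail-of-buffer handling visible above, using made-up sizes; it mirrors only the fragment shown here, not the driver's full snapshot policy:

#include <stdint.h>
#include <stdio.h>

/* Sketch only: when the room left before 'limit' cannot hold a maximum-sized
 * record, the driver pads that region, advances 'head' past the padding and
 * lets the next run use the following half of the buffer. */
static uint64_t next_snapshot_limit(uint64_t *head, uint64_t limit,
				    uint64_t bufsize, uint64_t max_record_sz)
{
	if (limit - *head < max_record_sz) {
		*head = limit % bufsize;	/* wrap, as PERF_IDX2OFF would */
		limit = (bufsize >> 1) + *head;
	}
	return limit;
}

int main(void)
{
	uint64_t head = 32700, bufsize = 32768;	/* hypothetical 32 KiB buffer */

	printf("new limit %llu, head %llu\n",
	       (unsigned long long)next_snapshot_limit(&head, bufsize, bufsize, 2048),
	       (unsigned long long)head);
	return 0;
}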
412 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); in __arm_spe_pmu_next_off()
414 const u64 bufsize = buf->nr_pages * PAGE_SIZE; in __arm_spe_pmu_next_off()
424 * 2. We used perf_aux_output_skip to consume handle->size bytes in __arm_spe_pmu_next_off()
430 * reduce handle->size to zero and end up reporting truncation. in __arm_spe_pmu_next_off()
432 head = PERF_IDX2OFF(handle->head, buf); in __arm_spe_pmu_next_off()
433 if (!IS_ALIGNED(head, spe_pmu->align)) { in __arm_spe_pmu_next_off()
434 unsigned long delta = roundup(head, spe_pmu->align) - head; in __arm_spe_pmu_next_off()
436 delta = min(delta, handle->size); in __arm_spe_pmu_next_off()
438 head = PERF_IDX2OFF(handle->head, buf); in __arm_spe_pmu_next_off()
442 if (!handle->size) in __arm_spe_pmu_next_off()
446 tail = PERF_IDX2OFF(handle->head + handle->size, buf); in __arm_spe_pmu_next_off()
447 wakeup = PERF_IDX2OFF(handle->wakeup, buf); in __arm_spe_pmu_next_off()
467 if (handle->wakeup < (handle->head + handle->size) && head <= wakeup) in __arm_spe_pmu_next_off()
473 arm_spe_pmu_pad_buf(handle, handle->size); in __arm_spe_pmu_next_off()
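A minimal sketch of the alignment step above, assuming a hypothetical 64-byte alignment requirement; roundup() is open-coded here since this is plain C rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* Round 'head' up to the next multiple of 'align' (a power of two), but never
 * consume more padding than the space the handle still has; if padding eats
 * everything, the caller is left with zero space and reports truncation. */
static uint64_t align_head(uint64_t head, uint64_t align, uint64_t *space)
{
	uint64_t aligned = (head + align - 1) & ~(align - 1);
	uint64_t delta = aligned - head;

	if (delta > *space)
		delta = *space;
	*space -= delta;
	return head + delta;
}

int main(void)
{
	uint64_t space = 4096;
	uint64_t head = align_head(100, 64, &space);	/* hypothetical alignment */

	printf("head %llu, space left %llu\n",
	       (unsigned long long)head, (unsigned long long)space);
	return 0;
}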
483 struct arm_spe_pmu *spe_pmu = to_spe_pmu(handle->event->pmu); in arm_spe_pmu_next_off()
485 u64 head = PERF_IDX2OFF(handle->head, buf); in arm_spe_pmu_next_off()
491 if (limit && (limit - head < spe_pmu->max_record_sz)) { in arm_spe_pmu_next_off()
492 arm_spe_pmu_pad_buf(handle, limit - head); in arm_spe_pmu_next_off()
508 event->hw.state |= PERF_HES_STOPPED; in arm_spe_perf_aux_output_begin()
517 limit = buf->snapshot ? arm_spe_pmu_next_snapshot_off(handle) in arm_spe_perf_aux_output_begin()
522 limit += (u64)buf->base; in arm_spe_perf_aux_output_begin()
523 base = (u64)buf->base + PERF_IDX2OFF(handle->head, buf); in arm_spe_perf_aux_output_begin()
535 offset = read_sysreg_s(SYS_PMBPTR_EL1) - (u64)buf->base; in arm_spe_perf_aux_output_end()
536 size = offset - PERF_IDX2OFF(handle->head, buf); in arm_spe_perf_aux_output_end()
538 if (buf->snapshot) in arm_spe_perf_aux_output_end()
539 handle->head = offset; in arm_spe_perf_aux_output_end()
546 /* Disable profiling at EL0 and EL1 */ in arm_spe_pmu_disable_and_drain_local()
554 /* Disable the profiling buffer */ in arm_spe_pmu_disable_and_drain_local()
568 * Ensure new profiling data is visible to the CPU and any external in arm_spe_pmu_buf_get_fault_act()
583 * If we've lost data, disable profiling and also set the PARTIAL in arm_spe_pmu_buf_get_fault_act()
632 struct perf_event *event = handle->event; in arm_spe_pmu_irq_handler()
644 * profiling buffer in response to a TRUNCATION flag. in arm_spe_pmu_irq_handler()
651 * If a fatal exception occurred then leaving the profiling in arm_spe_pmu_irq_handler()
654 * that the profiling buffer is disabled explicitly before in arm_spe_pmu_irq_handler()
662 * profiling as long as we didn't detect truncation. in arm_spe_pmu_irq_handler()
666 if (!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)) { in arm_spe_pmu_irq_handler()
676 /* The buffer pointers are now sane, so resume profiling. */ in arm_spe_pmu_irq_handler()
699 struct perf_event_attr *attr = &event->attr; in arm_spe_pmu_event_init()
700 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_event_init()
702 /* This is, of course, deeply driver-specific */ in arm_spe_pmu_event_init()
703 if (attr->type != event->pmu->type) in arm_spe_pmu_event_init()
704 return -ENOENT; in arm_spe_pmu_event_init()
706 if (event->cpu >= 0 && in arm_spe_pmu_event_init()
707 !cpumask_test_cpu(event->cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_event_init()
708 return -ENOENT; in arm_spe_pmu_event_init()
710 if (arm_spe_event_to_pmsevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver)) in arm_spe_pmu_event_init()
711 return -EOPNOTSUPP; in arm_spe_pmu_event_init()
713 if (arm_spe_event_to_pmsnevfr(event) & arm_spe_pmsevfr_res0(spe_pmu->pmsver)) in arm_spe_pmu_event_init()
714 return -EOPNOTSUPP; in arm_spe_pmu_event_init()
716 if (attr->exclude_idle) in arm_spe_pmu_event_init()
717 return -EOPNOTSUPP; in arm_spe_pmu_event_init()
720 * Feedback-directed frequency throttling doesn't work when we in arm_spe_pmu_event_init()
726 if (attr->freq) in arm_spe_pmu_event_init()
727 return -EINVAL; in arm_spe_pmu_event_init()
731 !(spe_pmu->features & SPE_PMU_FEAT_FILT_EVT)) in arm_spe_pmu_event_init()
732 return -EOPNOTSUPP; in arm_spe_pmu_event_init()
735 !(spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT)) in arm_spe_pmu_event_init()
736 return -EOPNOTSUPP; in arm_spe_pmu_event_init()
739 !(spe_pmu->features & SPE_PMU_FEAT_FILT_TYP)) in arm_spe_pmu_event_init()
740 return -EOPNOTSUPP; in arm_spe_pmu_event_init()
743 !(spe_pmu->features & SPE_PMU_FEAT_FILT_LAT)) in arm_spe_pmu_event_init()
744 return -EOPNOTSUPP; in arm_spe_pmu_event_init()
749 return perf_allow_kernel(&event->attr); in arm_spe_pmu_event_init()
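A hedged userspace sketch of opening an event against this PMU with a fixed sampling period, since the driver rejects frequency mode (attr.freq) and exclude_idle. The sysfs path and instance name "arm_spe_0" are assumptions about a typical system, the period of 4096 is an arbitrary example that must be at least the PMU's reported minimum, and opening the event usually requires appropriate privileges or a relaxed perf_event_paranoid setting:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int type;
	FILE *f = fopen("/sys/bus/event_source/devices/arm_spe_0/type", "r");

	if (!f || fscanf(f, "%u", &type) != 1) {
		perror("reading SPE PMU type");
		return 1;
	}
	fclose(f);

	struct perf_event_attr attr;
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;		/* dynamic type exposed by the SPE PMU */
	attr.sample_period = 4096;	/* fixed period: attr.freq must stay 0 */
	attr.exclude_kernel = 1;	/* EL0-only profiling */
	/* attr.exclude_idle must stay 0: the driver rejects it outright */

	int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	printf("opened SPE event fd %d (the AUX area still needs to be mmap'd)\n", fd);
	close(fd);
	return 0;
}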
757 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_start()
758 struct hw_perf_event *hwc = &event->hw; in arm_spe_pmu_start()
759 struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); in arm_spe_pmu_start()
761 hwc->state = 0; in arm_spe_pmu_start()
763 if (hwc->state) in arm_spe_pmu_start()
772 if (spe_pmu->features & SPE_PMU_FEAT_INV_FILT_EVT) { in arm_spe_pmu_start()
784 reg = local64_read(&hwc->period_left); in arm_spe_pmu_start()
795 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_stop()
796 struct hw_perf_event *hwc = &event->hw; in arm_spe_pmu_stop()
797 struct perf_output_handle *handle = this_cpu_ptr(spe_pmu->handle); in arm_spe_pmu_stop()
800 if (hwc->state & PERF_HES_STOPPED) in arm_spe_pmu_stop()
809 * to this buffer, since we might be on the context-switch in arm_spe_pmu_stop()
827 local64_set(&hwc->period_left, read_sysreg_s(SYS_PMSICR_EL1)); in arm_spe_pmu_stop()
828 hwc->state |= PERF_HES_UPTODATE; in arm_spe_pmu_stop()
831 hwc->state |= PERF_HES_STOPPED; in arm_spe_pmu_stop()
837 struct arm_spe_pmu *spe_pmu = to_spe_pmu(event->pmu); in arm_spe_pmu_add()
838 struct hw_perf_event *hwc = &event->hw; in arm_spe_pmu_add()
839 int cpu = event->cpu == -1 ? smp_processor_id() : event->cpu; in arm_spe_pmu_add()
841 if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_add()
842 return -ENOENT; in arm_spe_pmu_add()
844 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in arm_spe_pmu_add()
848 if (hwc->state & PERF_HES_STOPPED) in arm_spe_pmu_add()
849 ret = -EINVAL; in arm_spe_pmu_add()
867 int i, cpu = event->cpu; in arm_spe_pmu_setup_aux()
884 if (cpu == -1) in arm_spe_pmu_setup_aux()
898 buf->base = vmap(pglist, nr_pages, VM_MAP, PAGE_KERNEL); in arm_spe_pmu_setup_aux()
899 if (!buf->base) in arm_spe_pmu_setup_aux()
902 buf->nr_pages = nr_pages; in arm_spe_pmu_setup_aux()
903 buf->snapshot = snapshot; in arm_spe_pmu_setup_aux()
919 vunmap(buf->base); in arm_spe_pmu_free_aux()
926 static atomic_t pmu_idx = ATOMIC_INIT(-1); in arm_spe_pmu_perf_init()
930 struct device *dev = &spe_pmu->pdev->dev; in arm_spe_pmu_perf_init()
932 spe_pmu->pmu = (struct pmu) { in arm_spe_pmu_perf_init()
934 .parent = &spe_pmu->pdev->dev, in arm_spe_pmu_perf_init()
939 * we can support per-task profiling (which is not possible in arm_spe_pmu_perf_init()
943 * a subsequent mmap, or creates the profiling event in a in arm_spe_pmu_perf_init()
962 return -ENOMEM; in arm_spe_pmu_perf_init()
965 return perf_pmu_register(&spe_pmu->pmu, name, -1); in arm_spe_pmu_perf_init()
970 perf_pmu_unregister(&spe_pmu->pmu); in arm_spe_pmu_perf_destroy()
978 struct device *dev = &spe_pmu->pdev->dev; in __arm_spe_pmu_dev_probe()
988 spe_pmu->pmsver = (u16)fld; in __arm_spe_pmu_dev_probe()
994 "profiling buffer owned by higher exception level\n"); in __arm_spe_pmu_dev_probe()
998 /* Minimum alignment. If it's out-of-range, then fail the probe */ in __arm_spe_pmu_dev_probe()
1000 spe_pmu->align = 1 << fld; in __arm_spe_pmu_dev_probe()
1001 if (spe_pmu->align > SZ_2K) { in __arm_spe_pmu_dev_probe()
1010 spe_pmu->features |= SPE_PMU_FEAT_FILT_EVT; in __arm_spe_pmu_dev_probe()
1013 spe_pmu->features |= SPE_PMU_FEAT_INV_FILT_EVT; in __arm_spe_pmu_dev_probe()
1016 spe_pmu->features |= SPE_PMU_FEAT_FILT_TYP; in __arm_spe_pmu_dev_probe()
1019 spe_pmu->features |= SPE_PMU_FEAT_FILT_LAT; in __arm_spe_pmu_dev_probe()
1022 spe_pmu->features |= SPE_PMU_FEAT_ARCH_INST; in __arm_spe_pmu_dev_probe()
1025 spe_pmu->features |= SPE_PMU_FEAT_LDS; in __arm_spe_pmu_dev_probe()
1028 spe_pmu->features |= SPE_PMU_FEAT_ERND; in __arm_spe_pmu_dev_probe()
1030 /* This field has a spaced out encoding, so just use a look-up */ in __arm_spe_pmu_dev_probe()
1034 spe_pmu->min_period = 256; in __arm_spe_pmu_dev_probe()
1037 spe_pmu->min_period = 512; in __arm_spe_pmu_dev_probe()
1040 spe_pmu->min_period = 768; in __arm_spe_pmu_dev_probe()
1043 spe_pmu->min_period = 1024; in __arm_spe_pmu_dev_probe()
1046 spe_pmu->min_period = 1536; in __arm_spe_pmu_dev_probe()
1049 spe_pmu->min_period = 2048; in __arm_spe_pmu_dev_probe()
1052 spe_pmu->min_period = 3072; in __arm_spe_pmu_dev_probe()
1059 spe_pmu->min_period = 4096; in __arm_spe_pmu_dev_probe()
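Because the interval field is not a simple linear encoding, the driver resolves it case by case with a safe default. A standalone sketch of that lookup pattern follows; the case labels are placeholders for illustration, not the architectural PMSIDR_EL1.Interval encodings:

#include <stdint.h>
#include <stdio.h>

/* Map a sparsely encoded interval field to a minimum sampling period.
 * The case labels below are hypothetical, not the real register encodings. */
static uint64_t min_period_from_field(unsigned int fld)
{
	switch (fld) {
	case 0: return 256;
	case 2: return 512;
	case 3: return 768;
	case 4: return 1024;
	case 5: return 1536;
	case 6: return 2048;
	case 7: return 3072;
	default: return 4096;	/* unknown encodings fall back to the largest */
	}
}

int main(void)
{
	printf("min period for field 5: %llu\n",
	       (unsigned long long)min_period_from_field(5));
	return 0;
}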
1062 /* Maximum record size. If it's out-of-range, then fail the probe */ in __arm_spe_pmu_dev_probe()
1064 spe_pmu->max_record_sz = 1 << fld; in __arm_spe_pmu_dev_probe()
1065 if (spe_pmu->max_record_sz > SZ_2K || spe_pmu->max_record_sz < 16) { in __arm_spe_pmu_dev_probe()
1078 spe_pmu->counter_sz = 12; in __arm_spe_pmu_dev_probe()
1081 spe_pmu->counter_sz = 16; in __arm_spe_pmu_dev_probe()
1086 spe_pmu->pmsver - 1, cpumask_pr_args(&spe_pmu->supported_cpus), in __arm_spe_pmu_dev_probe()
1087 spe_pmu->max_record_sz, spe_pmu->align, spe_pmu->features); in __arm_spe_pmu_dev_probe()
1089 spe_pmu->features |= SPE_PMU_FEAT_DEV_PROBED; in __arm_spe_pmu_dev_probe()
1114 enable_percpu_irq(spe_pmu->irq, IRQ_TYPE_NONE); in __arm_spe_pmu_setup_one()
1121 disable_percpu_irq(spe_pmu->irq); in __arm_spe_pmu_stop_one()
1130 if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_cpu_startup()
1142 if (!cpumask_test_cpu(cpu, &spe_pmu->supported_cpus)) in arm_spe_pmu_cpu_teardown()
1152 cpumask_t *mask = &spe_pmu->supported_cpus; in arm_spe_pmu_dev_init()
1156 if (ret || !(spe_pmu->features & SPE_PMU_FEAT_DEV_PROBED)) in arm_spe_pmu_dev_init()
1157 return -ENXIO; in arm_spe_pmu_dev_init()
1160 ret = request_percpu_irq(spe_pmu->irq, arm_spe_pmu_irq_handler, DRVNAME, in arm_spe_pmu_dev_init()
1161 spe_pmu->handle); in arm_spe_pmu_dev_init()
1171 &spe_pmu->hotplug_node); in arm_spe_pmu_dev_init()
1173 free_percpu_irq(spe_pmu->irq, spe_pmu->handle); in arm_spe_pmu_dev_init()
1180 cpuhp_state_remove_instance(arm_spe_pmu_online, &spe_pmu->hotplug_node); in arm_spe_pmu_dev_teardown()
1181 free_percpu_irq(spe_pmu->irq, spe_pmu->handle); in arm_spe_pmu_dev_teardown()
1187 struct platform_device *pdev = spe_pmu->pdev; in arm_spe_pmu_irq_probe()
1191 return -ENXIO; in arm_spe_pmu_irq_probe()
1194 dev_err(&pdev->dev, "expected PPI but got SPI (%d)\n", irq); in arm_spe_pmu_irq_probe()
1195 return -EINVAL; in arm_spe_pmu_irq_probe()
1198 if (irq_get_percpu_devid_partition(irq, &spe_pmu->supported_cpus)) { in arm_spe_pmu_irq_probe()
1199 dev_err(&pdev->dev, "failed to get PPI partition (%d)\n", irq); in arm_spe_pmu_irq_probe()
1200 return -EINVAL; in arm_spe_pmu_irq_probe()
1203 spe_pmu->irq = irq; in arm_spe_pmu_irq_probe()
1208 { .compatible = "arm,statistical-profiling-extension-v1", .data = (void *)1 },
1223 struct device *dev = &pdev->dev; in arm_spe_pmu_device_probe()
1230 …dev_warn_once(dev, "profiling buffer inaccessible. Try passing \"kpti=off\" on the kernel command … in arm_spe_pmu_device_probe()
1231 return -EPERM; in arm_spe_pmu_device_probe()
1236 return -ENOMEM; in arm_spe_pmu_device_probe()
1238 spe_pmu->handle = alloc_percpu(typeof(*spe_pmu->handle)); in arm_spe_pmu_device_probe()
1239 if (!spe_pmu->handle) in arm_spe_pmu_device_probe()
1240 return -ENOMEM; in arm_spe_pmu_device_probe()
1242 spe_pmu->pdev = pdev; in arm_spe_pmu_device_probe()
1262 free_percpu(spe_pmu->handle); in arm_spe_pmu_device_probe()
1272 free_percpu(spe_pmu->handle); in arm_spe_pmu_device_remove()
1313 MODULE_DESCRIPTION("Perf driver for the ARMv8.2 Statistical Profiling Extension");