Lines matching full:hybrid
2317 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); in __intel_pmu_enable_all()
3069 unsigned long *cntr_mask = hybrid(cpuc->pmu, cntr_mask); in intel_pmu_reset()
3070 unsigned long *fixed_cntr_mask = hybrid(cpuc->pmu, fixed_cntr_mask); in intel_pmu_reset()
3235 status &= hybrid(cpuc->pmu, intel_ctrl); in handle_pmi_common()
3400 struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs); in intel_alt_er()
3420 struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs); in intel_fixup_er()
3570 struct event_constraint *event_constraints = hybrid(cpuc->pmu, event_constraints); in x86_get_event_constraints()
4105 union perf_capabilities *intel_cap = &hybrid(event->pmu, intel_cap); in intel_pmu_has_cap()
4191 return !!hybrid(pmu, acr_cause_mask64); in intel_pmu_has_acr()
4210 event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64); in intel_pmu_set_acr_cntr_constr()
4219 event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64); in intel_pmu_set_acr_caused_constr()
4418 if (hweight64(cause_mask) > hweight64(hybrid(pmu, acr_cause_mask64)) || in intel_pmu_hw_config()
4419 num > hweight64(hybrid(event->pmu, acr_cntr_mask64))) in intel_pmu_hw_config()
4568 u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl); in intel_guest_get_msrs()
5073 u64 mask = hybrid(dev_get_drvdata(dev), config_mask) & ARCH_PERFMON_EVENTSEL_UMASK2; in umask2_show()
5109 mask = hybrid(dev_get_drvdata(dev), config_mask); in evtsel_ext_is_visible()
5115 union perf_capabilities intel_cap = hybrid(dev_get_drvdata(dev), intel_cap); in evtsel_ext_is_visible()
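Every hit above reads a PMU field through the same hybrid() accessor, defined in arch/x86/events/perf_event.h: given a possibly-NULL pmu pointer and a field name, it yields the per-hybrid-PMU copy of the field when one exists and the global x86_pmu default otherwise. Below is a minimal user-space sketch of that pattern, not the kernel's exact definition; sketch_pmu, pmu_defaults, and is_hybrid_system are invented names for illustration.

/*
 * Sketch only: GNU C (typeof, statement expressions), mirroring the
 * shape of the kernel's hybrid() macro, not its exact definition.
 */
#include <stdio.h>
#include <stdint.h>

struct sketch_constraint;	/* opaque here; filled in by the init sketch below */

struct sketch_pmu {
	uint64_t intel_ctrl;
	uint64_t config_mask;
	const struct sketch_constraint *event_constraints;
};

/* Global defaults, analogous to the kernel's x86_pmu template. */
static struct sketch_pmu pmu_defaults = { .intel_ctrl = 0xff };
static int is_hybrid_system = 1;

/*
 * Pick the per-PMU field when a hybrid PMU is passed, else the global
 * default. The outer dereference makes the expansion usable both as an
 * rvalue (the reads above) and as an lvalue (the writes further down).
 */
#define hybrid(_pmu, _field)						\
(*({									\
	typeof(&pmu_defaults._field) __fp = &pmu_defaults._field;	\
									\
	if (is_hybrid_system && (_pmu))					\
		__fp = &((struct sketch_pmu *)(_pmu))->_field;		\
									\
	__fp;								\
}))

int main(void)
{
	struct sketch_pmu big_core = { .intel_ctrl = 0x0f };
	struct sketch_pmu *pmu = &big_core;

	printf("%#llx\n", (unsigned long long)hybrid(pmu, intel_ctrl));  /* 0xf: per-PMU copy */
	printf("%#llx\n", (unsigned long long)hybrid(NULL, intel_ctrl)); /* 0xff: global default */
	return 0;
}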
5281 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2; in update_pmu_cap()
5283 hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ; in update_pmu_cap()
5288 hybrid(pmu, cntr_mask64) = cntr; in update_pmu_cap()
5289 hybrid(pmu, fixed_cntr_mask64) = fixed_cntr; in update_pmu_cap()
5296 hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED); in update_pmu_cap()
5299 hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED); in update_pmu_cap()
5303 /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration */ in update_pmu_cap()
5304 rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities); in update_pmu_cap()
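The update_pmu_cap() hits above use the same accessor on the left-hand side: because the macro expands to a dereferenced pointer, |= and = write straight into either the per-PMU structure or the global default. Continuing the sketch above; SKETCH_UMASK2 is a stand-in bit, not the real ARCH_PERFMON_EVENTSEL_UMASK2 value.

/* Continuing the sketch: hybrid() as an lvalue, as in update_pmu_cap(). */
#define SKETCH_UMASK2	(1ULL << 35)	/* stand-in, not the real EVTSEL bit */

static void sketch_update_pmu_cap(struct sketch_pmu *pmu)
{
	/* OR an enumerated capability bit into the (per-PMU) config mask. */
	hybrid(pmu, config_mask) |= SKETCH_UMASK2;

	/* Plain stores work the same way, e.g. publishing counter masks. */
	hybrid(pmu, intel_ctrl) = 0xfULL;
}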
5337 * This is running on a CPU model that is known to have hybrid in find_hybrid_pmu_for_cpu()
5338 * configurations. But the CPU told us it is not hybrid, shame in find_hybrid_pmu_for_cpu()
5443 * Turn off the check for a hybrid architecture, because the in intel_pmu_cpu_starting()
5447 * a hybrid platform, e.g., Alder Lake. in intel_pmu_cpu_starting()
6799 hybrid(pmu, event_constraints) = intel_glc_event_constraints; in intel_pmu_init_glc()
6800 hybrid(pmu, pebs_constraints) = intel_glc_pebs_event_constraints; in intel_pmu_init_glc()
6819 hybrid(pmu, event_constraints) = intel_grt_event_constraints; in intel_pmu_init_grt()
6820 hybrid(pmu, pebs_constraints) = intel_grt_pebs_event_constraints; in intel_pmu_init_grt()
6821 hybrid(pmu, extra_regs) = intel_grt_extra_regs; in intel_pmu_init_grt()
6829 hybrid(pmu, event_constraints) = intel_lnc_event_constraints; in intel_pmu_init_lnc()
6830 hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints; in intel_pmu_init_lnc()
6831 hybrid(pmu, extra_regs) = intel_lnc_extra_regs; in intel_pmu_init_lnc()
6837 hybrid(pmu, event_constraints) = intel_skt_event_constraints; in intel_pmu_init_skt()
6838 hybrid(pmu, extra_regs) = intel_cmt_extra_regs; in intel_pmu_init_skt()
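The intel_pmu_init_*() hits show the flip side at init time: each core-type helper points the per-PMU constraint and extra-register tables at its variant, while a NULL pmu (the non-hybrid case) retargets the global defaults through the same macro. A last piece of the sketch; sketch_constraint and the glc-style table are invented for illustration.

/* Completing the sketch: per-core-type init, as in intel_pmu_init_glc(). */
struct sketch_constraint {
	uint64_t code;
	uint64_t cntr_mask;
};

static const struct sketch_constraint sketch_glc_constraints[] = {
	{ .code = 0x01, .cntr_mask = 0x0f },
	{ /* end marker */ },
};

static void sketch_pmu_init_glc(struct sketch_pmu *pmu)
{
	/* With pmu == NULL this assignment lands in pmu_defaults instead. */
	hybrid(pmu, event_constraints) = sketch_glc_constraints;
}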
7564 pr_cont("Alderlake Hybrid events, "); in intel_pmu_init()
7594 pr_cont("Meteorlake Hybrid events, "); in intel_pmu_init()
7599 pr_cont("Pantherlake Hybrid events, "); in intel_pmu_init()
7605 pr_cont("Lunarlake Hybrid events, "); in intel_pmu_init()
7659 pr_cont("ArrowLake-H Hybrid events, "); in intel_pmu_init()
7719 * PMU architectural features with a per-core view. For non-hybrid, in intel_pmu_init()
7721 * update the x86_pmu from the booting CPU. For hybrid, the x86_pmu in intel_pmu_init()