Lines matching full:pmu (Intel uncore PMU driver header, arch/x86/events/intel/uncore.h)

86	struct pmu *pmu; /* for custom pmu ops */	(member)
89 * Uncore PMU would store relevant platform topology configuration here
125	struct pmu pmu;	(member)
154	struct intel_uncore_pmu *pmu;	(member)
223 return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); in dev_to_uncore_pmu()
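
Both dev_to_uncore_pmu() above and uncore_event_to_pmu() further down recover the enclosing intel_uncore_pmu from a pointer to its embedded struct pmu. A minimal, self-contained sketch of that container_of pattern (the demo struct and names are illustrative, not the kernel's):

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins for the kernel types; names are illustrative. */
    struct pmu { int type; };

    struct uncore_pmu_demo {
            char name[32];
            struct pmu pmu;         /* embedded member, as in intel_uncore_pmu */
    };

    /* container_of(): step back from a member pointer to its enclosing struct. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    int main(void)
    {
            struct uncore_pmu_demo u = { .name = "uncore_demo" };
            struct pmu *p = &u.pmu;   /* e.g. what dev_get_drvdata() hands back */
            struct uncore_pmu_demo *back =
                    container_of(p, struct uncore_pmu_demo, pmu);

            printf("%s\n", back->name);     /* prints "uncore_demo" */
            return 0;
    }
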
263 if (offset < box->pmu->type->mmio_map_size) in uncore_mmio_is_valid_offset()
267 offset, box->pmu->type->name); in uncore_mmio_is_valid_offset()
275 return box->pmu->type->box_ctl + in uncore_mmio_box_ctl()
276 box->pmu->type->mmio_offset * box->pmu->pmu_idx; in uncore_mmio_box_ctl()
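
uncore_mmio_box_ctl() computes a per-box control register address as a fixed base plus a per-instance stride (box_ctl + mmio_offset * pmu_idx). A sketch of that arithmetic with made-up register values:

    #include <stdio.h>

    /* Base + stride addressing: each uncore box instance's control
     * register sits mmio_offset bytes after the previous one.
     * The 0x2000/0x1000 values below are invented for illustration. */
    static unsigned int mmio_box_ctl(unsigned int box_ctl,
                                     unsigned int mmio_offset,
                                     unsigned int pmu_idx)
    {
            return box_ctl + mmio_offset * pmu_idx;
    }

    int main(void)
    {
            for (unsigned int idx = 0; idx < 3; idx++)
                    printf("box %u: ctl at 0x%x\n", idx,
                           mmio_box_ctl(0x2000, 0x1000, idx));
            return 0;
    }
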
281 return box->pmu->type->box_ctl; in uncore_pci_box_ctl()
286 return box->pmu->type->fixed_ctl; in uncore_pci_fixed_ctl()
291 return box->pmu->type->fixed_ctr; in uncore_pci_fixed_ctr()
298 return idx * 8 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
300 return idx * 4 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
306 return idx * 8 + box->pmu->type->perf_ctr; in uncore_pci_perf_ctr()
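
The two uncore_pci_event_ctl() hits (idx * 8 vs idx * 4) are the two arms of a per-box choice of register spacing: some boxes lay their control registers out 8 bytes apart, others 4. The selecting condition is not part of the hits, so the boolean below is an assumption:

    #include <stdbool.h>
    #include <stdio.h>

    /* Per-counter config address: base + idx * spacing, where the
     * spacing (4 or 8 bytes) is a property of the box. */
    static unsigned int pci_event_ctl(unsigned int event_ctl_base,
                                      unsigned int idx, bool spaced_by_8)
    {
            return event_ctl_base + idx * (spaced_by_8 ? 8 : 4);
    }

    int main(void)
    {
            printf("ctr 2, 4-byte spacing: 0x%x\n", pci_event_ctl(0xd8, 2, false));
            printf("ctr 2, 8-byte spacing: 0x%x\n", pci_event_ctl(0xd8, 2, true));
            return 0;
    }
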
311	struct intel_uncore_pmu *pmu = box->pmu;	in uncore_msr_box_offset() (local)
312 return pmu->type->msr_offsets ? in uncore_msr_box_offset()
313 pmu->type->msr_offsets[pmu->pmu_idx] : in uncore_msr_box_offset()
314 pmu->type->msr_offset * pmu->pmu_idx; in uncore_msr_box_offset()
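
uncore_msr_box_offset() shows a pattern that recurs through these hits: use an explicit per-instance offset table (msr_offsets) when the type provides one, otherwise fall back to a constant stride (msr_offset * pmu_idx). A standalone sketch with invented offsets:

    #include <stdio.h>

    struct demo_type {
            const unsigned int *msr_offsets; /* optional per-instance table */
            unsigned int msr_offset;         /* fallback linear stride */
    };

    static unsigned int msr_box_offset(const struct demo_type *t,
                                       unsigned int pmu_idx)
    {
            return t->msr_offsets ? t->msr_offsets[pmu_idx]
                                  : t->msr_offset * pmu_idx;
    }

    int main(void)
    {
            static const unsigned int irregular[] = { 0x0, 0xa, 0x1c };
            struct demo_type regular = { .msr_offset = 0x10 };
            struct demo_type tabled  = { .msr_offsets = irregular };

            printf("regular box 2: +0x%x\n", msr_box_offset(&regular, 2)); /* +0x20 */
            printf("tabled box 2:  +0x%x\n", msr_box_offset(&tabled, 2));  /* +0x1c */
            return 0;
    }
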
319 if (!box->pmu->type->box_ctl) in uncore_msr_box_ctl()
321 return box->pmu->type->box_ctl + uncore_msr_box_offset(box); in uncore_msr_box_ctl()
326 if (!box->pmu->type->fixed_ctl) in uncore_msr_fixed_ctl()
328 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); in uncore_msr_fixed_ctl()
333 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); in uncore_msr_fixed_ctr()
383	struct intel_uncore_pmu *pmu = box->pmu;	in uncore_freerunning_counter() (local)
385 return pmu->type->freerunning[type].counter_base + in uncore_freerunning_counter()
386 pmu->type->freerunning[type].counter_offset * idx + in uncore_freerunning_counter()
387 (pmu->type->freerunning[type].box_offsets ? in uncore_freerunning_counter()
388 pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : in uncore_freerunning_counter()
389 pmu->type->freerunning[type].box_offset * pmu->pmu_idx); in uncore_freerunning_counter()
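
uncore_freerunning_counter() composes three terms: a per-type counter base, a per-counter stride (counter_offset * idx), and a per-box term that reuses the table-or-stride pattern from uncore_msr_box_offset(). A sketch with invented numbers:

    #include <stdio.h>

    struct freerunning_demo {
            unsigned int counter_base;
            unsigned int counter_offset;      /* stride between counters */
            const unsigned int *box_offsets;  /* optional per-box table */
            unsigned int box_offset;          /* fallback per-box stride */
    };

    static unsigned int freerunning_counter(const struct freerunning_demo *fr,
                                            unsigned int idx,
                                            unsigned int pmu_idx)
    {
            unsigned int box = fr->box_offsets ? fr->box_offsets[pmu_idx]
                                               : fr->box_offset * pmu_idx;
            return fr->counter_base + fr->counter_offset * idx + box;
    }

    int main(void)
    {
            struct freerunning_demo fr = {
                    .counter_base = 0x700, .counter_offset = 8, .box_offset = 0x100,
            };
            /* counter 1 of box 2: 0x700 + 8*1 + 0x100*2 = 0x908 */
            printf("0x%x\n", freerunning_counter(&fr, 1, 2));
            return 0;
    }
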
397 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_event_ctl()
399 return box->pmu->type->event_ctl + in uncore_msr_event_ctl()
400 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_event_ctl()
410 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_perf_ctr()
412 return box->pmu->type->perf_ctr + in uncore_msr_perf_ctr()
413 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_perf_ctr()
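
In uncore_msr_event_ctl() and uncore_msr_perf_ctr(), pair_ctr_ctl doubles the per-index stride: when a box interleaves control and counter MSRs (ctl0, ctr0, ctl1, ctr1, ...), consecutive registers of the same kind sit two MSRs apart rather than one. A sketch (the base MSR number is invented):

    #include <stdbool.h>
    #include <stdio.h>

    /* Interleaved layout (pair_ctr_ctl): ctl0 ctr0 ctl1 ctr1 ...
     * Flat layout:                       ctl0 ctl1 ctl2 ...      */
    static unsigned int msr_event_ctl(unsigned int event_ctl_base,
                                      unsigned int idx, bool pair_ctr_ctl)
    {
            return event_ctl_base + (pair_ctr_ctl ? 2 * idx : idx);
    }

    int main(void)
    {
            for (unsigned int idx = 0; idx < 3; idx++)
                    printf("idx %u: flat 0x%x, paired 0x%x\n", idx,
                           msr_event_ctl(0xe1, idx, false),
                           msr_event_ctl(0xe1, idx, true));
            return 0;
    }
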
456 return box->pmu->type->perf_ctr_bits; in uncore_perf_ctr_bits()
461 return box->pmu->type->fixed_ctr_bits; in uncore_fixed_ctr_bits()
470 return box->pmu->type->freerunning[type].bits; in uncore_freerunning_bits()
478 return box->pmu->type->freerunning[type].num_counters; in uncore_num_freerunning()
484 return box->pmu->type->num_freerunning_types; in uncore_num_freerunning_types()
499 return box->pmu->type->num_counters; in uncore_num_counters()
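
perf_ctr_bits/fixed_ctr_bits report hardware counter widths. One standard perf idiom that consumes such a width (not shown in these hits, so the usage here is an assumption) is the wrap-safe delta: shift both raw samples up so the counter's top bit lands at bit 63, subtract, then shift back:

    #include <stdio.h>
    #include <stdint.h>

    /* Wrap-safe delta for a 'bits'-wide hardware counter: shifting both
     * samples so the counter's MSB is at bit 63 makes the subtraction
     * come out right even if the raw value wrapped between samples. */
    static uint64_t counter_delta(uint64_t prev, uint64_t now, int bits)
    {
            int shift = 64 - bits;
            int64_t delta = (int64_t)(now << shift) - (int64_t)(prev << shift);
            return (uint64_t)(delta >> shift);
    }

    int main(void)
    {
            /* 48-bit counter wrapped from near-max back to a small value. */
            uint64_t prev = 0xfffffffffff0ULL, now = 0x10ULL;
            printf("delta = %llu\n",
                   (unsigned long long)counter_delta(prev, now, 48)); /* 32 */
            return 0;
    }
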
523 box->pmu->type->ops->disable_event(box, event); in uncore_disable_event()
529 box->pmu->type->ops->enable_event(box, event); in uncore_enable_event()
535 return box->pmu->type->ops->read_counter(box, event); in uncore_read_counter()
541 if (box->pmu->type->ops->init_box) in uncore_box_init()
542 box->pmu->type->ops->init_box(box); in uncore_box_init()
549 if (box->pmu->type->ops->exit_box) in uncore_box_exit()
550 box->pmu->type->ops->exit_box(box); in uncore_box_exit()
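
The uncore_box_init()/uncore_box_exit() hits NULL-check init_box/exit_box before calling, while enable_event, disable_event, and read_counter are invoked unconditionally: optional versus mandatory hooks in a per-type ops table. A compact sketch of that dispatch pattern (types and names are illustrative):

    #include <stdio.h>

    struct demo_box;

    struct demo_ops {
            void (*init_box)(struct demo_box *box);             /* optional */
            void (*enable_event)(struct demo_box *box, int ev); /* mandatory */
    };

    struct demo_box {
            const struct demo_ops *ops;
    };

    static void box_init(struct demo_box *box)
    {
            if (box->ops->init_box)         /* NULL means "nothing to do" */
                    box->ops->init_box(box);
    }

    static void demo_enable(struct demo_box *box, int ev)
    {
            (void)box;
            printf("enable event %d\n", ev);
    }

    int main(void)
    {
            static const struct demo_ops ops = { .enable_event = demo_enable };
            struct demo_box box = { .ops = &ops };

            box_init(&box);                 /* init_box unset: skipped */
            box.ops->enable_event(&box, 7); /* mandatory hook, called directly */
            return 0;
    }
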
561 return container_of(event->pmu, struct intel_uncore_pmu, pmu); in uncore_event_to_pmu()
569 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
586 void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);