Lines matching refs:pmu
All hits below are from the Linux x86 uncore perf header (arch/x86/events/intel/uncore.h). The leading number is the line number in that file; "member" and "local" mark the kind of declaration, and "in foo()" names the enclosing function.
86 struct pmu *pmu; /* for custom pmu ops */ member
125 struct pmu pmu; member
155 struct intel_uncore_pmu *pmu; member
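For orientation, these first three hits are members of three different structures that chain together: an intel_uncore_box points at its intel_uncore_pmu, which embeds a core struct pmu and points back at its shared intel_uncore_type. A trimmed sketch with most fields elided (the neighboring fields shown are recalled from the kernel tree, not from the hits above):

struct intel_uncore_type {
        const char *name;
        struct intel_uncore_ops *ops;
        struct pmu *pmu;                /* line 86: optional custom pmu ops */
};

struct intel_uncore_pmu {
        struct pmu pmu;                 /* line 125: embedded perf-core pmu */
        int pmu_idx;                    /* which instance of this type */
        struct intel_uncore_type *type;
};

struct intel_uncore_box {
        struct intel_uncore_pmu *pmu;   /* line 155: back-pointer to its PMU */
};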
224 return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu); in dev_to_uncore_pmu()
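The dev_to_uncore_pmu() hit is the classic container_of() pattern: the driver core hands back a pointer to the embedded struct pmu via dev_get_drvdata(), and the helper recovers the enclosing intel_uncore_pmu. Reconstructed in full (the signature is inferred from the name and body):

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
        /* dev's drvdata is the embedded struct pmu; walk back out. */
        return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}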
264 if (offset < box->pmu->type->mmio_map_size) in uncore_mmio_is_valid_offset()
268 offset, box->pmu->type->name); in uncore_mmio_is_valid_offset()
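These two hits are the bounds check that guards every MMIO register access against the size of the mapped window. A reconstruction of the whole helper; the pr_warn_once() message text is recalled from the kernel tree, not shown in the hits:

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
                                               unsigned long offset)
{
        /* Offsets inside the ioremap'ed window are fine. */
        if (offset < box->pmu->type->mmio_map_size)
                return true;

        pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
                     offset, box->pmu->type->name);

        return false;
}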
276 return box->pmu->type->box_ctl + in uncore_mmio_box_ctl()
277 box->pmu->type->mmio_offset * box->pmu->pmu_idx; in uncore_mmio_box_ctl()
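uncore_mmio_box_ctl() computes a per-box control-register address: a common base plus a fixed stride times the box's index. Reconstructed:

static inline unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
        /* base control register + per-box MMIO stride * instance index */
        return box->pmu->type->box_ctl +
               box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}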
282 return box->pmu->type->box_ctl; in uncore_pci_box_ctl()
287 return box->pmu->type->fixed_ctl; in uncore_pci_fixed_ctl()
292 return box->pmu->type->fixed_ctr; in uncore_pci_fixed_ctr()
299 return idx * 8 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
301 return idx * 4 + box->pmu->type->event_ctl; in uncore_pci_event_ctl()
307 return idx * 8 + box->pmu->type->perf_ctr; in uncore_pci_perf_ctr()
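The PCI-side accessors matched above are thin getters, except uncore_pci_event_ctl(), where the 8- vs 4-byte stride on lines 299/301 is selected by a per-box flag. A reconstruction; the flag name UNCORE_BOX_FLAG_CTL_OFFS8 comes from the kernel tree, not from the hits:

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
        return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr;
}

static inline unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
        /* Some boxes space their control registers 8 bytes apart, not 4. */
        if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
                return idx * 8 + box->pmu->type->event_ctl;

        return idx * 4 + box->pmu->type->event_ctl;
}

static inline unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
        return idx * 8 + box->pmu->type->perf_ctr;
}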
312 struct intel_uncore_pmu *pmu = box->pmu; in uncore_msr_box_offset() local
313 return pmu->type->msr_offsets ? in uncore_msr_box_offset()
314 pmu->type->msr_offsets[pmu->pmu_idx] : in uncore_msr_box_offset()
315 pmu->type->msr_offset * pmu->pmu_idx; in uncore_msr_box_offset()
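uncore_msr_box_offset() picks between a per-box offset table and a constant stride: if the type supplies msr_offsets[], it is indexed by pmu_idx; otherwise a fixed msr_offset is multiplied by pmu_idx. In full:

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
        struct intel_uncore_pmu *pmu = box->pmu;

        /* Irregularly spaced boxes get a table; regular ones a stride. */
        return pmu->type->msr_offsets ?
               pmu->type->msr_offsets[pmu->pmu_idx] :
               pmu->type->msr_offset * pmu->pmu_idx;
}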
320 if (!box->pmu->type->box_ctl) in uncore_msr_box_ctl()
322 return box->pmu->type->box_ctl + uncore_msr_box_offset(box); in uncore_msr_box_ctl()
327 if (!box->pmu->type->fixed_ctl) in uncore_msr_fixed_ctl()
329 return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); in uncore_msr_fixed_ctl()
334 return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); in uncore_msr_fixed_ctr()
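The MSR-side control/counter helpers add that box offset to a base register, with a box_ctl or fixed_ctl of 0 meaning "this type has no such register". Reconstructed; the early return 0 paths are implied by the guards on lines 320 and 327:

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
        if (!box->pmu->type->box_ctl)
                return 0;       /* type has no box-level control MSR */
        return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
        if (!box->pmu->type->fixed_ctl)
                return 0;       /* type has no fixed counter */
        return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}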
384 struct intel_uncore_pmu *pmu = box->pmu; in uncore_freerunning_counter() local
386 return pmu->type->freerunning[type].counter_base + in uncore_freerunning_counter()
387 pmu->type->freerunning[type].counter_offset * idx + in uncore_freerunning_counter()
388 (pmu->type->freerunning[type].box_offsets ? in uncore_freerunning_counter()
389 pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] : in uncore_freerunning_counter()
390 pmu->type->freerunning[type].box_offset * pmu->pmu_idx); in uncore_freerunning_counter()
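Free-running counters have no control register; their address is a base, plus a per-counter stride times the counter index, plus a per-box term that again prefers an explicit offsets table over stride * pmu_idx. Reconstructed; the extraction of type and idx from event->hw.config via uncore_freerunning_type()/uncore_freerunning_idx() is recalled from the kernel tree, not shown in the hits:

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        unsigned int type = uncore_freerunning_type(event->hw.config);
        unsigned int idx = uncore_freerunning_idx(event->hw.config);
        struct intel_uncore_pmu *pmu = box->pmu;

        return pmu->type->freerunning[type].counter_base +
               pmu->type->freerunning[type].counter_offset * idx +
               (pmu->type->freerunning[type].box_offsets ?
                pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
                pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}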
398 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_event_ctl()
400 return box->pmu->type->event_ctl + in uncore_msr_event_ctl()
401 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_event_ctl()
411 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); in uncore_msr_perf_ctr()
413 return box->pmu->type->perf_ctr + in uncore_msr_perf_ctr()
414 (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + in uncore_msr_perf_ctr()
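In uncore_msr_event_ctl() and uncore_msr_perf_ctr(), pair_ctr_ctl doubles the index because control and counter MSRs are interleaved on such types. The hit on line 398 is the tail of a Coffee Lake special case; the flag and MSR names below (UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, CFL_UNC_CBO_7_PERFEVTSEL0, CFL_UNC_CBO_7_PER_CTR0) are recalled from the kernel tree rather than shown in the hits:

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
        if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
                /* CFL 8-core parts: Cbox 7's MSRs sit at a detached base. */
                return CFL_UNC_CBO_7_PERFEVTSEL0 +
                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
        } else {
                return box->pmu->type->event_ctl +
                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
                       uncore_msr_box_offset(box);
        }
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
        if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
                return CFL_UNC_CBO_7_PER_CTR0 +
                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
        } else {
                return box->pmu->type->perf_ctr +
                       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
                       uncore_msr_box_offset(box);
        }
}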
457 return box->pmu->type->perf_ctr_bits; in uncore_perf_ctr_bits()
462 return box->pmu->type->fixed_ctr_bits; in uncore_fixed_ctr_bits()
471 return box->pmu->type->freerunning[type].bits; in uncore_freerunning_bits()
479 return box->pmu->type->freerunning[type].num_counters; in uncore_num_freerunning()
485 return box->pmu->type->num_freerunning_types; in uncore_num_freerunning_types()
500 return box->pmu->type->num_counters; in uncore_num_counters()
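The hits between lines 457 and 500 are one-line getters that forward to the shared type description. Representative reconstructions (signatures inferred; uncore_freerunning_bits() again decodes the free-running type out of event->hw.config):

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
        return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
        return box->pmu->type->fixed_ctr_bits;
}

static inline unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
                                                   struct perf_event *event)
{
        unsigned int type = uncore_freerunning_type(event->hw.config);

        return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
        return box->pmu->type->num_counters;
}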
524 box->pmu->type->ops->disable_event(box, event); in uncore_disable_event()
530 box->pmu->type->ops->enable_event(box, event); in uncore_enable_event()
536 return box->pmu->type->ops->read_counter(box, event); in uncore_read_counter()
542 if (box->pmu->type->ops->init_box) in uncore_box_init()
543 box->pmu->type->ops->init_box(box); in uncore_box_init()
550 if (box->pmu->type->ops->exit_box) in uncore_box_exit()
551 box->pmu->type->ops->exit_box(box); in uncore_box_exit()
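The last group dispatches through the per-type ops table: enable_event/disable_event/read_counter are called unconditionally, while init_box/exit_box are optional and therefore NULL-checked on lines 542 and 550. Reconstructed; the surrounding UNCORE_BOX_FLAG_INITIATED test_and_set/test_and_clear guards are recalled from the kernel tree:

static inline void uncore_disable_event(struct intel_uncore_box *box,
                                        struct perf_event *event)
{
        box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
                                       struct perf_event *event)
{
        box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
                                      struct perf_event *event)
{
        return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
        /* Run the optional hook only on the first initialization. */
        if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
                if (box->pmu->type->ops->init_box)
                        box->pmu->type->ops->init_box(box);
        }
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
        if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
                if (box->pmu->type->ops->exit_box)
                        box->pmu->type->ops->exit_box(box);
        }
}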
562 return container_of(event->pmu, struct intel_uncore_pmu, pmu); in uncore_event_to_pmu()
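uncore_event_to_pmu() mirrors dev_to_uncore_pmu(): a perf_event carries a pointer to the embedded struct pmu, and container_of() walks back out to the intel_uncore_pmu. Reconstructed:

static inline
struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
        return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}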
570 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
587 void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);
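The two prototypes are implemented in uncore.c. A hedged sketch of how event setup typically strings them together, modeled loosely on uncore_pmu_event_init() in arch/x86/events/intel/uncore.c (the wrapper name example_bind_event_to_box is hypothetical, for illustration only):

/* Hypothetical helper showing the lookup chain, simplified from memory. */
static int example_bind_event_to_box(struct perf_event *event)
{
        struct intel_uncore_pmu *pmu = uncore_event_to_pmu(event);
        struct intel_uncore_box *box = uncore_pmu_to_box(pmu, event->cpu);

        if (!box || box->cpu < 0)
                return -EINVAL;

        /* Uncore counts per package/die; steer the event to the box's CPU. */
        event->cpu = box->cpu;
        return 0;
}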