/linux/Documentation/devicetree/bindings/arm/
  pmu.yaml
       4  $id: http://devicetree.org/schemas/arm/pmu.yaml#
      14  ARM cores often have a PMU for counting cpu and cache events like cache misses
      15  and hits. The interface to the PMU is part of the ARM ARM. The ARM PMU
      22  - apm,potenza-pmu
      23  - apple,avalanche-pmu
      24  - apple,blizzard-pmu
      25  - apple,firestorm-pmu
      26  - apple,icestorm-pmu
      28  - arm,arm1136-pmu
      29  - arm,arm1176-pmu
          [all …]

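The compatible strings excerpted above are what the kernel's CPU PMU platform driver matches against when binding to the devicetree node. For reference, a minimal sketch of such a match table and driver skeleton follows; the driver name, probe body and module boilerplate are illustrative placeholders, not the in-tree ARM PMU driver:

/*
 * Minimal sketch: bind a platform driver to two of the PMU compatibles
 * listed in this binding.  All names here are illustrative.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_pmu_probe(struct platform_device *pdev)
{
        /* A real driver would map registers, parse IRQs and register a struct pmu. */
        dev_info(&pdev->dev, "PMU matched: %pOF\n", pdev->dev.of_node);
        return 0;
}

static const struct of_device_id example_pmu_of_ids[] = {
        { .compatible = "arm,arm1136-pmu" },
        { .compatible = "arm,arm1176-pmu" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_pmu_of_ids);

static struct platform_driver example_pmu_driver = {
        .probe = example_pmu_probe,
        .driver = {
                .name = "example-arm-pmu",
                .of_match_table = example_pmu_of_ids,
        },
};
module_platform_driver(example_pmu_driver);

MODULE_DESCRIPTION("Illustrative ARM PMU binding example");
MODULE_LICENSE("GPL");
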
/linux/Documentation/devicetree/bindings/soc/samsung/
  exynos-pmu.yaml
       4  $id: http://devicetree.org/schemas/soc/samsung/exynos-pmu.yaml#
       7  title: Samsung Exynos SoC series Power Management Unit (PMU)
      18  - google,gs101-pmu
      19  - samsung,exynos3250-pmu
      20  - samsung,exynos4210-pmu
      21  - samsung,exynos4212-pmu
      22  - samsung,exynos4412-pmu
      23  - samsung,exynos5250-pmu
      24  - samsung,exynos5260-pmu
      25  - samsung,exynos5410-pmu
          [all …]

/linux/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
  base.c
      32  struct nvkm_pmu *pmu = device->pmu;  in nvkm_pmu_fan_controlled() local
      34  /* Internal PMU FW does not currently control fans in any way,  in nvkm_pmu_fan_controlled()
      37  if (pmu && pmu->func->code.size)  in nvkm_pmu_fan_controlled()
      40  /* Default (board-loaded, or VBIOS PMU/PREOS) PMU FW on Fermi  in nvkm_pmu_fan_controlled()
      48  nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)  in nvkm_pmu_pgob() argument
      50  if (pmu && pmu->func->pgob)  in nvkm_pmu_pgob()
      51  pmu->func->pgob(pmu, enable);  in nvkm_pmu_pgob()
      57  struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);  in nvkm_pmu_recv() local
      58  return pmu->func->recv(pmu);  in nvkm_pmu_recv()
      62  nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],  in nvkm_pmu_send() argument
          [all …]

  gt215.c
      30  gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],  in gt215_pmu_send() argument
      33  struct nvkm_subdev *subdev = &pmu->subdev;  in gt215_pmu_send()
      37  mutex_lock(&pmu->send.mutex);  in gt215_pmu_send()
      45  mutex_unlock(&pmu->send.mutex);  in gt215_pmu_send()
      50  * on a synchronous reply, take the PMU mutex and tell the  in gt215_pmu_send()
      54  pmu->recv.message = message;  in gt215_pmu_send()
      55  pmu->recv.process = process;  in gt215_pmu_send()
      65  pmu->send.base));  in gt215_pmu_send()
      77  wait_event(pmu->recv.wait, (pmu->recv.process == 0));  in gt215_pmu_send()
      78  reply[0] = pmu->recv.data[0];  in gt215_pmu_send()
          [all …]

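The gt215 send path above is a synchronous mailbox handshake: the sender takes pmu->send.mutex, records which process/message it expects in pmu->recv, kicks the firmware, then sleeps in wait_event() until the receive worker clears recv.process and fills recv.data. A condensed, generic sketch of that pattern, with illustrative names rather than the nvkm API:

/*
 * Generic request/reply handshake in the style of gt215_pmu_send(): the
 * sender parks on a waitqueue; whatever context receives the firmware's
 * answer fills 'data', clears 'pending' and wakes it.  Names are made up.
 */
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/wait.h>

struct fw_mbox {
        struct mutex lock;              /* serialises senders */
        wait_queue_head_t wait;         /* sender sleeps here */
        u32 pending;                    /* non-zero while a reply is outstanding */
        u32 data[2];                    /* reply payload */
};

static int fw_mbox_send_sync(struct fw_mbox *mbox, u32 msg, u32 reply[2])
{
        mutex_lock(&mbox->lock);

        /* Tell the receive path what we are waiting for. */
        mbox->pending = msg;

        /* ... write 'msg' into the firmware's command registers here ... */

        /* The receive handler clears ->pending, fills ->data and wakes us. */
        wait_event(mbox->wait, mbox->pending == 0);
        reply[0] = mbox->data[0];
        reply[1] = mbox->data[1];

        mutex_unlock(&mbox->lock);
        return 0;
}

/* Called from the interrupt/work context that parses the firmware reply. */
static void fw_mbox_complete(struct fw_mbox *mbox, u32 d0, u32 d1)
{
        mbox->data[0] = d0;
        mbox->data[1] = d1;
        mbox->pending = 0;
        wake_up(&mbox->wait);
}
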
  gm20b.c
      28  #include <nvfw/pmu.h>
      42  struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);  in gm20b_pmu_acr_bootstrap_falcon() local
      52  ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,  in gm20b_pmu_acr_bootstrap_falcon()
      54  &pmu->subdev, msecs_to_jiffies(1000));  in gm20b_pmu_acr_bootstrap_falcon()
     129  struct nvkm_pmu *pmu = priv;  in gm20b_pmu_acr_init_wpr_callback() local
     130  struct nvkm_subdev *subdev = &pmu->subdev;  in gm20b_pmu_acr_init_wpr_callback()
     139  complete_all(&pmu->wpr_ready);  in gm20b_pmu_acr_init_wpr_callback()
     144  gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)  in gm20b_pmu_acr_init_wpr() argument
     154  return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,  in gm20b_pmu_acr_init_wpr()
     155  gm20b_pmu_acr_init_wpr_callback, pmu, 0);  in gm20b_pmu_acr_init_wpr()
          [all …]

  gk20a.c
      51  gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)  in gk20a_pmu_dvfs_target() argument
      53  struct nvkm_clk *clk = pmu->base.subdev.device->clk;  in gk20a_pmu_dvfs_target()
      59  gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)  in gk20a_pmu_dvfs_get_cur_state() argument
      61  struct nvkm_clk *clk = pmu->base.subdev.device->clk;  in gk20a_pmu_dvfs_get_cur_state()
      67  gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,  in gk20a_pmu_dvfs_get_target_state() argument
      70  struct gk20a_pmu_dvfs_data *data = pmu->data;  in gk20a_pmu_dvfs_get_target_state()
      71  struct nvkm_clk *clk = pmu->base.subdev.device->clk;  in gk20a_pmu_dvfs_get_target_state()
      86  nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",  in gk20a_pmu_dvfs_get_target_state()
      95  gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,  in gk20a_pmu_dvfs_get_dev_status() argument
      98  struct nvkm_falcon *falcon = &pmu->base.falcon;  in gk20a_pmu_dvfs_get_dev_status()
          [all …]

  Kbuild
       2  nvkm-y += nvkm/subdev/pmu/base.o
       3  nvkm-y += nvkm/subdev/pmu/memx.o
       4  nvkm-y += nvkm/subdev/pmu/gt215.o
       5  nvkm-y += nvkm/subdev/pmu/gf100.o
       6  nvkm-y += nvkm/subdev/pmu/gf119.o
       7  nvkm-y += nvkm/subdev/pmu/gk104.o
       8  nvkm-y += nvkm/subdev/pmu/gk110.o
       9  nvkm-y += nvkm/subdev/pmu/gk208.o
      10  nvkm-y += nvkm/subdev/pmu/gk20a.o
      11  nvkm-y += nvkm/subdev/pmu/gm107.o
          [all …]

/linux/drivers/soc/dove/
  pmu.c
       3  * Marvell Dove PMU support
      17  #include <linux/soc/dove/pmu.h>
      42  * The PMU contains a register to reset various subsystems within the
      50  struct pmu_data *pmu = rcdev_to_pmu(rc);  in pmu_reset_reset() local
      54  spin_lock_irqsave(&pmu->lock, flags);  in pmu_reset_reset()
      55  val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);  in pmu_reset_reset()
      56  writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);  in pmu_reset_reset()
      57  writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);  in pmu_reset_reset()
      58  spin_unlock_irqrestore(&pmu->lock, flags);  in pmu_reset_reset()
      65  struct pmu_data *pmu = rcdev_to_pmu(rc);  in pmu_reset_assert() local
          [all …]

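pmu_reset_reset() above is the provider side of a reset controller: it clears and then re-sets the subsystem's bit in PMC_SW_RST under the PMU spinlock. A consumer driver reaches that toggle through the generic reset API; a hedged sketch of the consumer side, with a hypothetical "vpu" reset name:

/*
 * Consumer-side sketch: pulse a reset line provided by a controller such as
 * the Dove PMU.  The "vpu" reset name is a placeholder.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int example_consumer_init(struct device *dev)
{
        struct reset_control *rst;

        rst = devm_reset_control_get_exclusive(dev, "vpu");
        if (IS_ERR(rst))
                return PTR_ERR(rst);

        /* Ends up in the provider's .reset op, e.g. pmu_reset_reset(). */
        return reset_control_reset(rst);
}
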
/linux/drivers/gpu/drm/i915/
  i915_pmu.c
      33  return container_of(event->pmu, struct i915_pmu, base);  in event_to_pmu()
      36  static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)  in pmu_to_i915() argument
      38  return container_of(pmu, struct drm_i915_private, pmu);  in pmu_to_i915()
     149  static bool pmu_needs_timer(struct i915_pmu *pmu)  in pmu_needs_timer() argument
     151  struct drm_i915_private *i915 = pmu_to_i915(pmu);  in pmu_needs_timer()
     159  enable = pmu->enable;  in pmu_needs_timer()
     201  static u64 read_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample)  in read_sample() argument
     203  return pmu->sample[gt_id][sample].cur;  in read_sample()
     207  store_sample(struct i915_pmu *pmu, unsigned int gt_id, int sample, u64 val)  in store_sample() argument
     209  pmu->sample[gt_id][sample].cur = val;  in store_sample()
          [all …]

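event_to_pmu() and pmu_to_i915() rely on a single idiom: embed the core struct pmu inside the driver's private structure and recover the outer object with container_of(). A stripped-down sketch of the same pattern, with illustrative structure names:

/*
 * Embed the generic object, recover the private one with container_of().
 * i915 does exactly this with struct i915_pmu / struct drm_i915_private.
 */
#include <linux/container_of.h>
#include <linux/perf_event.h>

struct example_pmu {
        struct pmu base;                /* handed to the perf core */
        void __iomem *regs;             /* driver-private state */
        u64 sample[4];
};

static struct example_pmu *event_to_example_pmu(struct perf_event *event)
{
        /* event->pmu points at 'base'; step back out to the wrapper. */
        return container_of(event->pmu, struct example_pmu, base);
}
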
/linux/drivers/pmdomain/starfive/
  jh71xx-pmu.c
       3  * StarFive JH71XX PMU (Power Management Unit) Controller Driver
      15  #include <dt-bindings/power/starfive,jh7110-pmu.h>
      26  /* aon pmu register offset */
      36  /* pmu int status */
      64  struct jh71xx_pmu *pmu);
      76  spinlock_t lock; /* protects pmu reg */
      81  struct jh71xx_pmu *pmu;  member
      87  struct jh71xx_pmu *pmu = pmd->pmu;  in jh71xx_pmu_get_state() local
      92  *is_on = readl(pmu->base + pmu->match_data->pmu_status) & mask;  in jh71xx_pmu_get_state()
      99  struct jh71xx_pmu *pmu = pmd->pmu;  in jh7110_pmu_set_state() local
          [all …]

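jh71xx_pmu_get_state() reads a status register and masks out the domain's bit; a set_state path typically follows the power-on/off request with a poll until the hardware reports the new state. A hedged sketch of such a poll using the generic readl_poll_timeout() helper — the register offset and timeout values are invented, not JH71xx facts:

/*
 * Sketch of a power-domain state poll; the offset and the 10ms timeout are
 * illustrative, not the JH71xx values.
 */
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_PMU_STATUS      0x80    /* hypothetical status register */

static int example_pmu_wait_on(void __iomem *base, u32 mask, bool on)
{
        u32 val;

        /* Poll every 10us until the domain bit matches, give up after 10ms. */
        return readl_poll_timeout(base + EXAMPLE_PMU_STATUS, val,
                                  !!(val & mask) == on, 10, 10000);
}
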
/linux/arch/x86/kvm/vmx/
  pmu_intel.c
       3  * KVM PMU support for Intel CPUs
      22  #include "pmu.h"
      57  static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)  in reprogram_fixed_counters() argument
      60  u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;  in reprogram_fixed_counters()
      63  pmu->fixed_ctr_ctrl = data;  in reprogram_fixed_counters()
      64  for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {  in reprogram_fixed_counters()
      71  pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);  in reprogram_fixed_counters()
      73  __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);  in reprogram_fixed_counters()
      82  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);  in intel_rdpmc_ecx_to_pmc() local
      94  * Yell and reject attempts to read PMCs for a non-architectural PMU,  in intel_rdpmc_ecx_to_pmc()
          [all …]

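reprogram_fixed_counters() walks the guest's IA32_FIXED_CTR_CTRL image; architecturally that MSR packs one 4-bit control field per fixed counter (ring-0/ring-3 enables, any-thread, PMI). Pulling those fields apart is essentially what the loop body does before reprogramming each PMC; the helpers below are an illustrative sketch, not the KVM code:

/*
 * IA32_FIXED_CTR_CTRL packs 4 control bits per fixed counter:
 * bit 0: count in ring 0, bit 1: count in ring 3,
 * bit 2: any-thread (where supported), bit 3: PMI on overflow.
 */
#include <linux/types.h>

static inline u8 fixed_ctrl_field(u64 fixed_ctr_ctrl, int counter)
{
        return (fixed_ctr_ctrl >> (counter * 4)) & 0xf;
}

static inline bool fixed_counter_enabled(u64 fixed_ctr_ctrl, int counter)
{
        /* Enabled if it counts in either CPL0 or CPL3. */
        return fixed_ctrl_field(fixed_ctr_ctrl, counter) & 0x3;
}
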
/linux/Documentation/devicetree/bindings/arm/rockchip/
  pmu.yaml
       4  $id: http://devicetree.org/schemas/arm/rockchip/pmu.yaml#
       7  title: Rockchip Power Management Unit (PMU)
      14  The PMU is used to turn on and off different power domains of the SoCs.
      22  - rockchip,px30-pmu
      23  - rockchip,rk3066-pmu
      24  - rockchip,rk3128-pmu
      25  - rockchip,rk3288-pmu
      26  - rockchip,rk3368-pmu
      27  - rockchip,rk3399-pmu
      28  - rockchip,rk3562-pmu
          [all …]

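The binding above models the Rockchip PMU as a register block shared by several drivers (power domains, suspend code), so consumers usually reach it as a syscon regmap rather than mapping it themselves. A hedged sketch of such a lookup — the compatible comes from the list above, but the register offset and bit are invented:

/*
 * Sketch: look the PMU block up as a syscon regmap and flip one bit in it.
 * EXAMPLE_PMU_PWRDN_CON and BIT(3) are invented, not RK3399 facts.
 */
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

#define EXAMPLE_PMU_PWRDN_CON   0x0c    /* hypothetical power-down control */

static int example_power_domain_off(void)
{
        struct regmap *pmu;

        pmu = syscon_regmap_lookup_by_compatible("rockchip,rk3399-pmu");
        if (IS_ERR(pmu))
                return PTR_ERR(pmu);

        /* Request power-down of one (hypothetical) domain. */
        return regmap_update_bits(pmu, EXAMPLE_PMU_PWRDN_CON, BIT(3), BIT(3));
}
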
/linux/Documentation/devicetree/bindings/pinctrl/
  marvell,dove-pinctrl.txt
       9  - reg: register specifiers of MPP, MPP4, and PMU MPP registers
      14  Note: pmu* also allows for Power Management functions listed below
      18  mpp0 0 gpio, pmu, uart2(rts), sdio0(cd), lcd0(pwm), pmu*
      19  mpp1 1 gpio, pmu, uart2(cts), sdio0(wp), lcd1(pwm), pmu*
      20  mpp2 2 gpio, pmu, uart2(txd), sdio0(buspwr), sata(prsnt),
      21  uart1(rts), pmu*
      22  mpp3 3 gpio, pmu, uart2(rxd), sdio0(ledctrl), sata(act),
      23  uart1(cts), lcd-spi(cs1), pmu*
      24  mpp4 4 gpio, pmu, uart3(rts), sdio1(cd), spi1(miso), pmu*
      25  mpp5 5 gpio, pmu, uart3(cts), sdio1(wp), spi1(cs), pmu*
          [all …]

/linux/drivers/perf/
  arm_pmu_platform.c
      25  static int probe_current_pmu(struct arm_pmu *pmu,  in probe_current_pmu() argument
      32  pr_info("probing PMU on CPU %d\n", cpu);  in probe_current_pmu()
      37  ret = info->init(pmu);  in probe_current_pmu()
      45  static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)  in pmu_parse_percpu_irq() argument
      48  struct pmu_hw_events __percpu *hw_events = pmu->hw_events;  in pmu_parse_percpu_irq()
      50  ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);  in pmu_parse_percpu_irq()
      54  for_each_cpu(cpu, &pmu->supported_cpus)  in pmu_parse_percpu_irq()
      95  static int pmu_parse_irqs(struct arm_pmu *pmu)  in pmu_parse_irqs() argument
      98  struct platform_device *pdev = pmu->plat_device;  in pmu_parse_irqs()
      99  struct pmu_hw_events __percpu *hw_events = pmu->hw_events;  in pmu_parse_irqs()
          [all …]

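pmu_parse_percpu_irq() covers the case where the PMU interrupt is a PPI shared by a partition of CPUs, which has to be requested with the per-CPU variant of request_irq(). A condensed sketch of that request, with an illustrative handler and per-CPU cookie:

/*
 * Per-CPU PMU IRQ request in the style of the arm_pmu core.  The handler
 * and the per-CPU cookie are placeholders.
 */
#include <linux/interrupt.h>
#include <linux/irqdesc.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(void *, example_pmu_cookie);

static irqreturn_t example_pmu_handler(int irq, void *dev_id)
{
        /* Read and clear the counter overflow status here. */
        return IRQ_HANDLED;
}

static int example_pmu_request_irq(int irq)
{
        if (irq_is_percpu_devid(irq))
                /* One request covers every CPU in the PPI's partition. */
                return request_percpu_irq(irq, example_pmu_handler,
                                          "example-pmu", &example_pmu_cookie);

        /* SPI case: a normal request_irq(), typically one IRQ per CPU. */
        return request_irq(irq, example_pmu_handler, IRQF_NOBALANCING,
                           "example-pmu", NULL);
}
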
  arm_pmu_acpi.c
      32  * Per the ACPI spec, the MADT cannot describe a PMU that doesn't  in arm_pmu_acpi_register_irq()
     149  * For lack of a better place, hook the normal PMU MADT walk
     208  pr_warn("Unable to parse ACPI PMU IRQ for CPU%d: %d\n",  in arm_pmu_acpi_parse_irqs()
     212  pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);  in arm_pmu_acpi_parse_irqs()
     252  struct arm_pmu *pmu;  in arm_pmu_acpi_find_pmu() local
     256  pmu = per_cpu(probed_pmus, cpu);  in arm_pmu_acpi_find_pmu()
     257  if (!pmu || pmu->acpi_cpuid != cpuid)  in arm_pmu_acpi_find_pmu()
     260  return pmu;  in arm_pmu_acpi_find_pmu()
     268  * the PMU (e.g. we don't have mismatched PPIs).
     270  static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)  in pmu_irq_matches() argument
          [all …]

/linux/tools/perf/arch/arm/util/
  pmu.c
       8  #include <linux/coresight-pmu.h>
      15  #include "../../../util/pmu.h"
      19  void perf_pmu__arch_init(struct perf_pmu *pmu)  in perf_pmu__arch_init() argument
      24  if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {  in perf_pmu__arch_init()
      26  pmu->auxtrace = true;  in perf_pmu__arch_init()
      27  pmu->selectable = true;  in perf_pmu__arch_init()
      28  pmu->perf_event_attr_init_default = cs_etm_get_default_config;  in perf_pmu__arch_init()
      30  } else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {  in perf_pmu__arch_init()
      31  pmu->auxtrace = true;  in perf_pmu__arch_init()
      32  pmu  in perf_pmu__arch_init()
          [all …]

/linux/Documentation/devicetree/bindings/perf/
  apm-xgene-pmu.txt
       1  * APM X-Gene SoC PMU bindings
       3  This is APM X-Gene SoC PMU (Performance Monitoring Unit) module.
       4  The following PMU devices are supported:
      11  The following section describes the SoC PMU DT node binding.
      14  - compatible : Shall be "apm,xgene-pmu" for revision 1 or
      15  "apm,xgene-pmu-v2" for revision 2.
      19  - reg : First resource shall be the CPU bus PMU resource.
      20  - interrupts : Interrupt-specifier for PMU IRQ.
      23  - compatible : Shall be "apm,xgene-pmu-l3c".
      24  - reg : First resource shall be the L3C PMU resource.
          [all …]

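The required properties above (compatible, reg, interrupts) map directly onto the usual platform-device probe calls. A minimal hedged sketch of a probe() consuming them; the names are placeholders and this is not the in-tree xgene_pmu driver:

/* Sketch of a probe() consuming the reg/interrupts properties this binding requires. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_xgene_pmu_probe(struct platform_device *pdev)
{
        void __iomem *csr;
        int irq;

        /* "reg": the first resource is the PMU register window. */
        csr = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(csr))
                return PTR_ERR(csr);

        /* "interrupts": the PMU overflow IRQ. */
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        /* ... register the counters with the perf core here ... */
        return 0;
}
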
/linux/tools/perf/pmu-events/
  Build
       1  pmu-events-y += pmu-events.o
       2  JDIR = pmu-events/arch/$(SRCARCH)
       5  JDIR_TEST = pmu-events/arch/test
       8  JEVENTS_PY = pmu-events/jevents.py
       9  METRIC_PY = pmu-events/metric.py
      10  METRIC_TEST_PY = pmu-events/metric_test.py
      11  EMPTY_PMU_EVENTS_C = pmu-events/empty-pmu-events.c
      12  PMU_EVENTS_C = $(OUTPUT)pmu
          [all …]

/linux/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/
  gm200.c
      28  #include <subdev/bios/pmu.h>
      29  #include <subdev/pmu.h>
      33  pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)  in pmu_code() argument
      39  nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);  in pmu_code()
      42  nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);  in pmu_code()
      53  pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)  in pmu_data() argument
      59  nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);  in pmu_data()
      88  struct nvbios_pmuR pmu;  in pmu_load() local
      91  if (!nvbios_pmuRm(bios, type, &pmu))  in pmu_load()
      94  if (!post || !subdev->device->pmu)  in pmu_load()
          [all …]

/linux/arch/x86/kvm/
  pmu.h
       9  #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
      10  #define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu))  argument
      11  #define pmc_to_pmu(pmc) (&(pmc)->vcpu->arch.pmu)
      52  static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)  in kvm_pmu_has_perf_global_ctrl() argument
      56  * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is  in kvm_pmu_has_perf_global_ctrl()
      58  * to/for the guest if the guest PMU supports at least "Architectural  in kvm_pmu_has_perf_global_ctrl()
      63  return pmu->version > 1;  in kvm_pmu_has_perf_global_ctrl()
      80  static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)  in kvm_pmc_idx_to_pmc() argument
      82  if (idx < pmu->nr_arch_gp_counters)  in kvm_pmc_idx_to_pmc()
      83  return &pmu->gp_counters[idx];  in kvm_pmc_idx_to_pmc()
          [all …]

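The comment in kvm_pmu_has_perf_global_ctrl() refers to the architectural PMU version reported in CPUID leaf 0AH: EAX[7:0] carries the version, and PERF_GLOBAL_CTRL/PERF_GLOBAL_STATUS only become architectural at version 2, hence the pmu->version > 1 test. A small userspace check of that leaf, for illustration:

/* Read CPUID.0AH:EAX and decode the architectural PMU version and the
 * number of general-purpose counters (EAX[15:8]). */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

        if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
                puts("CPUID leaf 0xA not supported");
                return 1;
        }

        unsigned int version = eax & 0xff;              /* EAX[7:0]  */
        unsigned int gp_counters = (eax >> 8) & 0xff;   /* EAX[15:8] */

        printf("arch PMU version %u, %u GP counters, global ctrl %s\n",
               version, gp_counters, version > 1 ? "available" : "absent");
        return 0;
}
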
/linux/tools/perf/bench/
  pmu-scan.c
       3  * Benchmark scanning sysfs files for PMU information.
      10  #include "util/pmu.h"
      35  "perf bench internals pmu-scan <options>",
      44  struct perf_pmu *pmu = NULL;  in save_result() local
      48  while ((pmu = perf_pmus__scan(pmu)) != NULL) {  in save_result()
      56  r->name = strdup(pmu->name);  in save_result()
      57  r->is_core = pmu->is_core;  in save_result()
      58  r->nr_caps = pmu->nr_caps;  in save_result()
      60  r->nr_aliases = perf_pmu__num_events(pmu);  in save_result()
      63  list_for_each(list, &pmu->format)  in save_result()
          [all …]

/linux/arch/powerpc/perf/
  imc-pmu.c
      13  #include <asm/imc-pmu.h>
      22  * Used to avoid races in counting the nest-pmu units during hotplug
      61  return container_of(event->pmu, struct imc_pmu, pmu);  in imc_event_to_pmu()
     105  struct pmu *pmu = dev_get_drvdata(dev);  in imc_pmu_cpumask_get_attr() local
     106  struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);  in imc_pmu_cpumask_get_attr()
     219  * and assign the attr_group to the pmu "pmu".
     221  static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)  in update_events_in_group() argument
     260  pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);  in update_events_in_group()
     261  if (!pmu->events) {  in update_events_in_group()
     269  ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);  in update_events_in_group()
          [all …]

/linux/Documentation/admin-guide/perf/
  starfive_starlink_pmu.rst
       2  StarFive StarLink Performance Monitor Unit (PMU)
       5  StarFive StarLink Performance Monitor Unit (PMU) exists within the
       9  The uncore PMU supports overflow interrupt, up to 16 programmable 64bit
      11  The PMU can only be accessed via Memory Mapped I/O and are common to the
      12  cores connected to the same PMU.
      14  Driver exposes supported PMU events in sysfs "events" directory under::
      18  Driver exposes cpu used to handle PMU events in sysfs "cpumask" directory
      32  starfive_starlink_pmu/cycles/ [Kernel PMU event]
      33  starfive_starlink_pmu/read_hit/ [Kernel PMU event]
      34  starfive_starlink_pmu/read_miss/ [Kernel PMU event]
          [all …]

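The event listing above comes from a driver that registered an uncore PMU with the perf core; the sysfs "events" and "cpumask" entries are attribute groups attached to that registration. A bare-bones hedged sketch of such a registration — every callback is a stub, the attribute groups are omitted, and the name is illustrative:

/*
 * Bare-bones uncore PMU registration sketch.  Real drivers fill in counter
 * programming in the callbacks and attach events/format/cpumask attributes.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/perf_event.h>

static int example_event_init(struct perf_event *event)
{
        if (event->attr.type != event->pmu->type)
                return -ENOENT;
        return 0;
}

static void example_event_start(struct perf_event *event, int flags) { }
static void example_event_stop(struct perf_event *event, int flags) { }
static void example_event_read(struct perf_event *event) { }

static int example_event_add(struct perf_event *event, int flags)
{
        if (flags & PERF_EF_START)
                example_event_start(event, flags);
        return 0;
}

static void example_event_del(struct perf_event *event, int flags)
{
        example_event_stop(event, PERF_EF_UPDATE);
}

static struct pmu example_pmu = {
        .task_ctx_nr    = perf_invalid_context, /* uncore: no per-task events */
        .event_init     = example_event_init,
        .add            = example_event_add,
        .del            = example_event_del,
        .start          = example_event_start,
        .stop           = example_event_stop,
        .read           = example_event_read,
};

static int __init example_pmu_init(void)
{
        /* -1: let the core assign a dynamic PMU type number. */
        return perf_pmu_register(&example_pmu, "example_uncore_pmu", -1);
}
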
/linux/drivers/cxl/core/
  pmu.c
       8  #include <pmu.h>
      14  struct cxl_pmu *pmu = to_cxl_pmu(dev);  in cxl_pmu_release() local
      16  kfree(pmu);  in cxl_pmu_release()
      32  struct cxl_pmu *pmu;  in devm_cxl_pmu_add() local
      36  pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);  in devm_cxl_pmu_add()
      37  if (!pmu)  in devm_cxl_pmu_add()
      40  pmu->assoc_id = assoc_id;  in devm_cxl_pmu_add()
      41  pmu->index = index;  in devm_cxl_pmu_add()
      42  pmu->type = type;  in devm_cxl_pmu_add()
      43  pmu->base = regs->pmu;  in devm_cxl_pmu_add()
          [all …]

/linux/drivers/nvdimm/
  nd_perf.c
     121  struct pmu *pmu = dev_get_drvdata(dev);  in nvdimm_pmu_cpumask_show() local
     124  nd_pmu = container_of(pmu, struct nvdimm_pmu, pmu);  in nvdimm_pmu_cpumask_show()
     162  /* Migrate nvdimm pmu events to the new target cpu if valid */  in nvdimm_pmu_cpu_offline()
     164  perf_pmu_migrate_context(&nd_pmu->pmu, cpu, target);  in nvdimm_pmu_cpu_offline()
     213  nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR] = nvdimm_pmu_cpumask_group;  in create_cpumask_attr_group()
     245  /* Register the pmu instance for cpu hotplug */  in nvdimm_pmu_cpu_hotplug_init()
     268  if (nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR])  in nvdimm_pmu_free_hotplug_memory()
     269  kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]->attrs);  in nvdimm_pmu_free_hotplug_memory()
     270  kfree(nd_pmu->pmu.attr_groups[NVDIMM_PMU_CPUMASK_ATTR]);  in nvdimm_pmu_free_hotplug_memory()
     280  /* event functions like add/del/read/event_init and pmu name should not be NULL */  in register_nvdimm_pmu()
          [all …]

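nvdimm_pmu_cpu_offline() shows the standard pattern for a system-wide PMU: when the CPU currently servicing events goes offline, pick another online CPU and move the perf context over with perf_pmu_migrate_context(). A condensed sketch of that offline callback, with illustrative state and naming:

/*
 * Offline callback sketch for a system-wide PMU bound to one "home" CPU.
 * 'example_pmu' and 'example_pmu_cpu' stand in for the driver's own state.
 */
#include <linux/cpumask.h>
#include <linux/perf_event.h>

static struct pmu example_pmu;          /* registered elsewhere */
static unsigned int example_pmu_cpu;    /* CPU currently servicing events */

static int example_pmu_offline_cpu(unsigned int cpu)
{
        unsigned int target;

        if (cpu != example_pmu_cpu)
                return 0;

        /* Pick any other online CPU to take over. */
        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;       /* last CPU going down, nothing to migrate to */

        /* Move the active events so counting continues on the new CPU. */
        perf_pmu_migrate_context(&example_pmu, cpu, target);
        example_pmu_cpu = target;

        return 0;
}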