/linux/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/

base.c

    in nvkm_pmu_fan_controlled():
        32  struct nvkm_pmu *pmu = device->pmu;
        37  if (pmu && pmu->func->code.size)
    in nvkm_pmu_pgob():
        48  nvkm_pmu_pgob(struct nvkm_pmu *pmu, bool enable)
        50  if (pmu && pmu->func->pgob)
        51          pmu->func->pgob(pmu, enable);
    in nvkm_pmu_recv():
        57  struct nvkm_pmu *pmu = container_of(work, typeof(*pmu), recv.work);
        58  return pmu->func->recv(pmu);
    in nvkm_pmu_send():
        62  nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
        65  if (!pmu || !pmu->func->send)
        67  return pmu->func->send(pmu, reply, process, message, data0, data1);
    [all …]
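
All of the base.c hits above go through one pattern: nvkm_pmu's per-chip ops table (pmu->func) is NULL-checked before every dispatch, so chips without a PMU, or without a given method, fail gracefully. A minimal userspace sketch of that pattern — the struct names and error value here are invented, not nouveau's:

    #include <stdio.h>

    /* Hypothetical ops table mirroring the shape of nvkm_pmu_func. */
    struct pmu_ops {
        int (*send)(void *pmu, unsigned reply[2], unsigned msg);
    };

    struct pmu {
        const struct pmu_ops *ops;   /* may be NULL, or have NULL slots */
    };

    /* Guarded dispatch: missing hardware or a missing method degrades
     * to an error return instead of a NULL-pointer call. */
    static int pmu_send(struct pmu *pmu, unsigned reply[2], unsigned msg)
    {
        if (!pmu || !pmu->ops || !pmu->ops->send)
            return -1;               /* kernel code would return -ENODEV */
        return pmu->ops->send(pmu, reply, msg);
    }

    static int fake_send(void *pmu, unsigned reply[2], unsigned msg)
    {
        (void)pmu;
        reply[0] = msg + 1;          /* pretend the firmware replied */
        reply[1] = 0;
        return 0;
    }

    int main(void)
    {
        const struct pmu_ops ops = { .send = fake_send };
        struct pmu with_ops = { .ops = &ops };
        struct pmu without_ops = { .ops = NULL };
        unsigned reply[2] = { 0, 0 };

        printf("with ops:    %d (reply %u)\n", pmu_send(&with_ops, reply, 41), reply[0]);
        printf("without ops: %d\n", pmu_send(&without_ops, reply, 41));
        return 0;
    }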

gt215.c

    in gt215_pmu_send():
        30  gt215_pmu_send(struct nvkm_pmu *pmu, u32 reply[2],
        33  struct nvkm_subdev *subdev = &pmu->subdev;
        37  mutex_lock(&pmu->send.mutex);
        45  mutex_unlock(&pmu->send.mutex);
        54  pmu->recv.message = message;
        55  pmu->recv.process = process;
        65          pmu->send.base));
        77  wait_event(pmu->recv.wait, (pmu->recv.process == 0));
        78  reply[0] = pmu->recv.data[0];
        79  reply[1] = pmu->recv.data[1];
    [all …]
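
gt215_pmu_send() is a classic single-outstanding-request mailbox: a mutex serializes senders, the request is posted to the PMU falcon, and the caller sleeps in wait_event() until the reply handler clears recv.process and fills recv.data. A hedged userspace analogue of that handshake, with a pthread condvar standing in for wait_event() — the mailbox struct and reply values are invented:

    #include <pthread.h>
    #include <stdio.h>

    /* Invented mailbox standing in for pmu->send / pmu->recv state. */
    struct mailbox {
        pthread_mutex_t send_lock;  /* like pmu->send.mutex: one sender at a time */
        pthread_mutex_t lock;
        pthread_cond_t  wait;       /* plays the role of pmu->recv.wait */
        int             busy;       /* like pmu->recv.process != 0 */
        unsigned        data[2];
    };

    /* Post a request, then sleep until the receiver clears 'busy'. */
    static void send_and_wait(struct mailbox *mb, unsigned reply[2])
    {
        pthread_mutex_lock(&mb->send_lock);
        pthread_mutex_lock(&mb->lock);

        mb->busy = 1;                        /* the hardware write would go here */
        pthread_cond_broadcast(&mb->wait);   /* wake the receiver side */
        while (mb->busy)                     /* wait_event(recv.wait, process == 0) */
            pthread_cond_wait(&mb->wait, &mb->lock);

        reply[0] = mb->data[0];              /* like pmu->recv.data[0..1] */
        reply[1] = mb->data[1];

        pthread_mutex_unlock(&mb->lock);
        pthread_mutex_unlock(&mb->send_lock);
    }

    /* Receiver thread: the driver does this work from its IRQ handler. */
    static void *receiver(void *arg)
    {
        struct mailbox *mb = arg;

        pthread_mutex_lock(&mb->lock);
        while (!mb->busy)
            pthread_cond_wait(&mb->wait, &mb->lock);
        mb->data[0] = 0xcafe;
        mb->data[1] = 0xf00d;
        mb->busy = 0;                        /* lets the sender's wait finish */
        pthread_cond_broadcast(&mb->wait);
        pthread_mutex_unlock(&mb->lock);
        return NULL;
    }

    int main(void)
    {
        struct mailbox mb = {
            .send_lock = PTHREAD_MUTEX_INITIALIZER,
            .lock      = PTHREAD_MUTEX_INITIALIZER,
            .wait      = PTHREAD_COND_INITIALIZER,
        };
        unsigned reply[2];
        pthread_t t;

        pthread_create(&t, NULL, receiver, &mb);
        send_and_wait(&mb, reply);
        pthread_join(t, NULL);
        printf("reply: 0x%x 0x%x\n", reply[0], reply[1]);
        return 0;
    }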

gk20a.c

    in gk20a_pmu_dvfs_target():
        51  gk20a_pmu_dvfs_target(struct gk20a_pmu *pmu, int *state)
        53  struct nvkm_clk *clk = pmu->base.subdev.device->clk;
    in gk20a_pmu_dvfs_get_cur_state():
        59  gk20a_pmu_dvfs_get_cur_state(struct gk20a_pmu *pmu, int *state)
        61  struct nvkm_clk *clk = pmu->base.subdev.device->clk;
    in gk20a_pmu_dvfs_get_target_state():
        67  gk20a_pmu_dvfs_get_target_state(struct gk20a_pmu *pmu,
        70  struct gk20a_pmu_dvfs_data *data = pmu->data;
        71  struct nvkm_clk *clk = pmu->base.subdev.device->clk;
        86  nvkm_trace(&pmu->base.subdev, "cur level = %d, new level = %d\n",
    in gk20a_pmu_dvfs_get_dev_status():
        95  gk20a_pmu_dvfs_get_dev_status(struct gk20a_pmu *pmu,
        98  struct nvkm_falcon *falcon = &pmu->base.falcon;
    [all …]

gm20b.c

    in gm20b_pmu_acr_bootstrap_falcon():
        42  struct nvkm_pmu *pmu = container_of(falcon, typeof(*pmu), falcon);
        52  ret = nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
        54                              &pmu->subdev, msecs_to_jiffies(1000));
    in gm20b_pmu_acr_init_wpr_callback():
        129  struct nvkm_pmu *pmu = priv;
        130  struct nvkm_subdev *subdev = &pmu->subdev;
        139  complete_all(&pmu->wpr_ready);
    in gm20b_pmu_acr_init_wpr():
        144  gm20b_pmu_acr_init_wpr(struct nvkm_pmu *pmu)
        154  return nvkm_falcon_cmdq_send(pmu->hpq, &cmd.cmd.hdr,
        155                              gm20b_pmu_acr_init_wpr_callback, pmu, 0);
    in gm20b_pmu_initmsg():
        159  gm20b_pmu_initmsg(struct nvkm_pmu *pmu)
    [all …]

/linux/tools/perf/util/

pmus.c

    in perf_pmus__destroy():
        113  struct perf_pmu *pmu, *tmp;
        115  list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
        116          list_del(&pmu->list);
        118          perf_pmu__delete(pmu);
        120  list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
        121          list_del(&pmu->list);
        123          perf_pmu__delete(pmu);
    in pmu_find():
        130  struct perf_pmu *pmu;
        132  list_for_each_entry(pmu, &core_pmus, list) {
        133          if (!strcmp(pmu->name, name) ||
    [all …]
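
perf_pmus__destroy() relies on list_for_each_entry_safe(), whose extra 'tmp' cursor caches the successor before the current node is unlinked and freed, so the walk never dereferences freed memory. The same idea reduced to a plain singly linked list — self-contained, all names invented:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int value;
        struct node *next;
    };

    /* Analogue of list_for_each_entry_safe(): save the successor in
     * 'tmp' before freeing 'cur', so the loop step never touches
     * memory that was just released. */
    static void destroy_all(struct node **head)
    {
        struct node *cur, *tmp;

        for (cur = *head; cur; cur = tmp) {
            tmp = cur->next;     /* cache the successor first */
            free(cur);           /* now safe to release the current node */
        }
        *head = NULL;
    }

    int main(void)
    {
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->value = i;
            n->next = head;
            head = n;
        }
        destroy_all(&head);
        printf("list destroyed, head = %p\n", (void *)head);
        return 0;
    }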

pmu.c

    146  static int pmu_aliases_parse(struct perf_pmu *pmu);
    in perf_pmu_format__load():
        189  static void perf_pmu_format__load(const struct perf_pmu *pmu, struct perf_pmu_format *format)
        197  if (!perf_pmu__pathname_scnprintf(path, sizeof(path), pmu->name, "format"))
    in perf_pmu__format_parse():
        215  static int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load)
        230  format = perf_pmu__new_format(&pmu->format, name);
    in pmu_format():
        263  static int pmu_format(struct perf_pmu *pmu, int dirfd, const char *name, bool eager_load)
        272  if (perf_pmu__format_parse(pmu, fd, eager_load))
    in perf_pmu__parse_scale():
        320  static int perf_pmu__parse_scale(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
        332  scnprintf(path + len, sizeof(path) - len, "%s/events/%s.scale", pmu->name, alias->name);
    in perf_pmu__parse_unit():
        356  static int perf_pmu__parse_unit(struct perf_pmu *pmu, struct perf_pmu_alias *alias)
    [all …]

pmu.h

    126  void (*perf_event_attr_init_default)(const struct perf_pmu *pmu,
    222  const struct perf_pmu *pmu;
    240  void pmu_add_sys_aliases(struct perf_pmu *pmu);
    241  int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
    245  int perf_pmu__config_terms(const struct perf_pmu *pmu,
    250  __u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name);
    251  int perf_pmu__format_type(struct perf_pmu *pmu, const char *name);
    252  int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms,
    255  int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback c…
    258  bool perf_pmu__has_format(const struct perf_pmu *pmu, const char *name);
    [all …]

/linux/drivers/soc/dove/

pmu.c

    in pmu_reset_reset():
        50  struct pmu_data *pmu = rcdev_to_pmu(rc);
        54  spin_lock_irqsave(&pmu->lock, flags);
        55  val = readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        56  writel_relaxed(val & ~BIT(id), pmu->pmc_base + PMC_SW_RST);
        57  writel_relaxed(val | BIT(id), pmu->pmc_base + PMC_SW_RST);
        58  spin_unlock_irqrestore(&pmu->lock, flags);
    in pmu_reset_assert():
        65  struct pmu_data *pmu = rcdev_to_pmu(rc);
        69  spin_lock_irqsave(&pmu->lock, flags);
        70  val &= readl_relaxed(pmu->pmc_base + PMC_SW_RST);
        71  writel_relaxed(val, pmu->pmc_base + PMC_SW_RST);
    [all …]
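
pmu_reset_reset() pulses one bit of the shared PMC_SW_RST register low and then high again, keeping the whole read-modify-write sequence under pmu->lock so concurrent users cannot interleave their updates. A userspace sketch of the same discipline, with a mutex standing in for spin_lock_irqsave() and a plain variable for the MMIO register — register layout and values are invented:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* Fake "register" standing in for pmu->pmc_base + PMC_SW_RST. */
    static uint32_t pmc_sw_rst = 0xffffffffu;
    static pthread_mutex_t pmu_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Pulse one reset line low then high, as pmu_reset_reset() does.
     * The lock makes the read-modify-write atomic with respect to any
     * other user of the same register. */
    static void pmu_reset_pulse(int id)
    {
        pthread_mutex_lock(&pmu_lock);
        uint32_t val = pmc_sw_rst;          /* readl_relaxed()            */
        pmc_sw_rst = val & ~BIT(id);        /* assert: drive the bit low  */
        pmc_sw_rst = val | BIT(id);         /* deassert: restore the bit  */
        pthread_mutex_unlock(&pmu_lock);
    }

    int main(void)
    {
        pmu_reset_pulse(3);
        printf("PMC_SW_RST = 0x%08x\n", pmc_sw_rst);
        return 0;
    }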

/linux/drivers/pmdomain/starfive/

jh71xx-pmu.c

    64   struct jh71xx_pmu *pmu);
    81   struct jh71xx_pmu *pmu;
    in jh71xx_pmu_get_state():
        87   struct jh71xx_pmu *pmu = pmd->pmu;
        92   *is_on = readl(pmu->base + pmu->match_data->pmu_status) & mask;
    in jh7110_pmu_set_state():
        99   struct jh71xx_pmu *pmu = pmd->pmu;
        107  spin_lock_irqsave(&pmu->lock, flags);
        129  writel(mask, pmu->base + mode);
        139  writel(JH71XX_PMU_SW_ENCOURAGE_ON, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
        140  writel(encourage_lo, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
        141  writel(encourage_hi, pmu->base + JH71XX_PMU_SW_ENCOURAGE);
    [all …]

/linux/drivers/perf/

fsl_imx9_ddr_perf.c

    59   #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu)
    84   struct pmu pmu;
    in axi_filter_v1():
        117  static inline bool axi_filter_v1(struct ddr_pmu *pmu)
        119  return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V1;
    in axi_filter_v2():
        122  static inline bool axi_filter_v2(struct ddr_pmu *pmu)
        124  return pmu->devtype_data->filter_ver == DDR_PERF_AXI_FILTER_V2;
    in ddr_perf_identifier_show():
        140  struct ddr_pmu *pmu = dev_get_drvdata(dev);
        142  return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
    in ddr_perf_cpumask_show():
        160  struct ddr_pmu *pmu = dev_get_drvdata(dev);
        162  return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
    [all …]

/linux/arch/x86/kvm/svm/

pmu.c

    in amd_pmu_get_pmc():
        28  static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
        30  unsigned int num_counters = pmu->nr_arch_gp_counters;
        35  return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
    in get_gp_pmc_amd():
        38  static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
        41  struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
        44  if (!pmu->version)
        73  return amd_pmu_get_pmc(pmu, idx);
    in amd_check_rdpmc_early():
        78  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        80  if (idx >= pmu->nr_arch_gp_counters)
    in amd_msr_idx_to_pmc():
        95  struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
    [all …]
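
amd_pmu_get_pmc() sanitizes the guest-controlled index with array_index_nospec() before using it, so even a mispredicted bounds check cannot speculatively index past gp_counters[]. A simplified userspace rendering of the kernel's generic branchless-mask fallback — it assumes index and size fit in a signed long and that right-shifting a negative long is arithmetic, as the kernel's version does:

    #include <stdio.h>

    /* Branchless mask: all-ones when 0 <= idx < size, else 0.
     * Mirrors the kernel's generic array_index_mask_nospec() fallback. */
    static unsigned long index_mask_nospec(unsigned long idx, unsigned long size)
    {
        return ~(long)(idx | (size - 1 - idx)) >> (sizeof(long) * 8 - 1);
    }

    /* Clamp idx to 0 when out of range, without a data-dependent branch. */
    static unsigned long index_nospec(unsigned long idx, unsigned long size)
    {
        return idx & index_mask_nospec(idx, size);
    }

    int main(void)
    {
        unsigned long counters = 6;   /* e.g. nr_arch_gp_counters */

        printf("idx 3 -> %lu\n", index_nospec(3, counters));   /* prints 3 */
        printf("idx 9 -> %lu\n", index_nospec(9, counters));   /* prints 0 */
        return 0;
    }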

/linux/arch/x86/kvm/vmx/

pmu_intel.c

    in reprogram_fixed_counters():
        57  static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
        60  u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl;
        63  pmu->fixed_ctr_ctrl = data;
        64  for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
        71          pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
        73          __set_bit(KVM_FIXED_PMC_BASE_IDX + i, pmu->pmc_in_use);
    in intel_rdpmc_ecx_to_pmc():
        82   struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        97   if (WARN_ON_ONCE(!pmu->version))
        110  counters = pmu->fixed_counters;
        111  num_counters = pmu->nr_arch_fixed_counters;
    [all …]
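
reprogram_fixed_counters() interprets IA32_FIXED_CTR_CTRL, where each fixed counter owns a 4-bit control field (ring-0 enable, ring-3 enable, PMI on overflow). A small sketch of decoding those fields — the extraction mirrors what KVM's fixed_ctrl_field() helper does, while the surrounding program is invented:

    #include <stdint.h>
    #include <stdio.h>

    /* IA32_FIXED_CTR_CTRL packs one 4-bit control nibble per fixed counter:
     * bit 0 = count in ring 0, bit 1 = count in ring 3, bit 3 = PMI enable. */
    static unsigned fixed_ctrl_field(uint64_t ctrl, int idx)
    {
        return (ctrl >> (idx * 4)) & 0xf;
    }

    int main(void)
    {
        uint64_t ctrl = 0x0b0;    /* counter 1: ring0 + ring3 + PMI enabled */

        for (int i = 0; i < 3; i++)
            printf("fixed counter %d: ctrl = 0x%x%s\n",
                   i, fixed_ctrl_field(ctrl, i),
                   fixed_ctrl_field(ctrl, i) ? " (enabled)" : "");
        return 0;
    }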

/linux/tools/perf/arch/x86/util/

pmu.c

    in uncore_cha_snc():
        107  static int uncore_cha_snc(struct perf_pmu *pmu)
        124  if (sscanf(pmu->name, "uncore_cha_%u", &cha_num) != 1) {
        125          pr_warning("Unexpected: unable to compute CHA number '%s'\n", pmu->name);
    in uncore_imc_snc():
        135  static int uncore_imc_snc(struct perf_pmu *pmu)
        160  if (sscanf(pmu->name, "uncore_imc_%u", &imc_num) != 1) {
        161          pr_warning("Unexpected: unable to compute IMC number '%s'\n", pmu->name);
    in gnr_uncore_cha_imc_adjust_cpumask_for_snc():
        208  static void gnr_uncore_cha_imc_adjust_cpumask_for_snc(struct perf_pmu *pmu, bool cha)
        233  if (perf_cpu_map__cpu(pmu->cpus, 0).cpu != 0) {
        234          pr_debug("Ignoring cpumask adjust for %s as unexpected first CPU\n", pmu->name);
        238  pmu_snc = cha ? uncore_cha_snc(pmu) : uncore_imc_snc(pmu);
    [all …]
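
uncore_cha_snc() recovers the unit number from a PMU name like "uncore_cha_12" with a single sscanf() pattern, treating any non-match as an error: sscanf() returns the number of fields it converted, so anything other than 1 means the name did not fit. The parse in isolation — the wrapper function is invented:

    #include <stdio.h>

    /* Extract the trailing unit number from names like "uncore_cha_12".
     * A return of anything but 1 from sscanf() means the name did not
     * match the expected pattern. */
    static int parse_cha_num(const char *name, unsigned *out)
    {
        if (sscanf(name, "uncore_cha_%u", out) != 1) {
            fprintf(stderr, "unable to compute CHA number '%s'\n", name);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        unsigned n;

        if (parse_cha_num("uncore_cha_12", &n) == 0)
            printf("CHA unit: %u\n", n);              /* prints 12 */
        parse_cha_num("uncore_imc_free_running", &n); /* fails cleanly */
        return 0;
    }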

/linux/arch/x86/events/amd/

uncore.c

    55   struct pmu pmu;
    in event_to_amd_uncore_pmu():
        96   return container_of(event->pmu, struct amd_uncore_pmu, pmu);
    in amd_uncore_hrtimer():
        112  event->pmu->read(event);
    in amd_uncore_start():
        165  struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
        166  struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
    in amd_uncore_stop():
        183  struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
        184  struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
        191  event->pmu->read(event);
    in amd_uncore_add():
        204  struct amd_uncore_pmu *pmu = event_to_amd_uncore_pmu(event);
        205  struct amd_uncore_ctx *ctx = *per_cpu_ptr(pmu->ctx, event->cpu);
    [all …]

/linux/tools/perf/arch/arm/util/

pmu.c

    in perf_pmu__arch_init():
        19  void perf_pmu__arch_init(struct perf_pmu *pmu)
        23  if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
        25          pmu->auxtrace = true;
        26          pmu->selectable = true;
        27          pmu->perf_event_attr_init_default = cs_etm_get_default_config;
        29  } else if (strstarts(pmu->name, ARM_SPE_PMU_NAME)) {
        30          pmu->auxtrace = true;
        31          pmu->selectable = true;
        32          pmu->is_uncore = false;
        33          pmu->perf_event_attr_init_default = arm_spe_pmu_default_config;
    [all …]

/linux/arch/x86/kvm/

pmu.h

    9    #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
    10   #define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
    11   #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
    in kvm_pmu_has_perf_global_ctrl():
        47  static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
        58  return pmu->version > 1;
    in kvm_pmc_idx_to_pmc():
        75  static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
        77  if (idx < pmu->nr_arch_gp_counters)
        78          return &pmu->gp_counters[idx];
        81  if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
        82          return &pmu->fixed_counters[idx];
    [all …]
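
vcpu_to_pmu() and pmu_to_vcpu() are inverses: one takes the address of a struct member embedded in kvm_vcpu, the other subtracts that member's offset back out via container_of(). A self-contained round trip with container_of() reimplemented from offsetof() — the toy structs are stand-ins, not KVM's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Toy stand-ins for struct kvm_vcpu / struct kvm_pmu. */
    struct pmu { int version; };
    struct vcpu {
        int id;
        struct pmu pmu;   /* embedded, like vcpu->arch.pmu */
    };

    #define vcpu_to_pmu(v)  (&(v)->pmu)
    #define pmu_to_vcpu(p)  container_of((p), struct vcpu, pmu)

    int main(void)
    {
        struct vcpu v = { .id = 7, .pmu = { .version = 2 } };
        struct pmu *p = vcpu_to_pmu(&v);

        /* The round trip recovers the containing vcpu. */
        printf("vcpu id via pmu_to_vcpu: %d\n", pmu_to_vcpu(p)->id);
        return 0;
    }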

pmu.c

    in __kvm_perf_overflow():
        158  struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        173          (unsigned long *)&pmu->global_status);
        176  __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
    in pmc_reprogram_counter():
        237  struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        249  bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable);
    in reprogram_counter():
        503  struct kvm_pmu *pmu = pmc_to_pmu(pmc);
        522  fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
        541          (eventsel & pmu->raw_event_mask),
    in kvm_pmu_recalc_pmc_emulation():
        566  void kvm_pmu_recalc_pmc_emulation(struct kvm_pmu *pmu, struct kvm_pmc *pmc)
        568  bitmap_clear(pmu->pmc_counting_instructions, pmc->idx, 1);
    [all …]

/linux/tools/perf/tests/

pmu-events.c

    45   .pmu = "default_core",
    55   .pmu = "default_core",
    65   .pmu = "default_core",
    75   .pmu = "default_core",
    85   .pmu = "default_core",
    95   .pmu = "default_core",
    121  .pmu = "hisi_sccl,ddrc",
    132  .pmu = "uncore_cbox",
    143  .pmu = "uncore_cbox",
    154  .pmu = "uncore_cbox",
    [all …]

/linux/arch/x86/events/intel/

uncore.h

    86   struct pmu *pmu;                /* for custom pmu ops */
    125  struct pmu pmu;
    154  struct intel_uncore_pmu *pmu;
    in dev_to_uncore_pmu():
        223  return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
    in uncore_mmio_is_valid_offset():
        263  if (offset < box->pmu->type->mmio_map_size)
        267          offset, box->pmu->type->name);
    in uncore_mmio_box_ctl():
        275  return box->pmu->type->box_ctl +
        276         box->pmu->type->mmio_offset * box->pmu->pmu_idx;
    in uncore_pci_box_ctl():
        281  return box->pmu->type->box_ctl;
    in uncore_pci_fixed_ctl():
        286  return box->pmu->type->fixed_ctl;
    [all …]

uncore.c

    in uncore_pmu_to_box():
        139  struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
        147  return dieid < uncore_max_dies() ? pmu->boxes[dieid] : NULL;
    in is_box_event():
        381  return &box->pmu->pmu == event->pmu;
    in uncore_collect_events():
        391  max_count = box->pmu->type->num_counters;
        392  if (box->pmu->type->fixed_ctl)
    in uncore_get_event_constraint():
        425  struct intel_uncore_type *type = box->pmu->type;
    in uncore_put_event_constraint():
        450  if (box->pmu->type->ops->put_constraint)
        451          box->pmu->type->ops->put_constraint(box, event);
    in uncore_validate_group():
        689  static int uncore_validate_group(struct intel_uncore_pmu *pmu,
        700  fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE);
    [all …]

/linux/arch/powerpc/perf/

imc-pmu.c

    in imc_event_to_pmu():
        61   return container_of(event->pmu, struct imc_pmu, pmu);
    in imc_pmu_cpumask_get_attr():
        105  struct pmu *pmu = dev_get_drvdata(dev);
        106  struct imc_pmu *imc_pmu = container_of(pmu, struct imc_pmu, pmu);
    in update_events_in_group():
        221  static int update_events_in_group(struct device_node *node, struct imc_pmu *pmu)
        260  pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
        261  if (!pmu->events) {
        269  ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
        279  imc_free_events(pmu->events, ct);
        294  imc_free_events(pmu->events, ct);
        301  ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
    [all …]

Makefile

    7   obj64-$(CONFIG_PPC_PERF_CTRS) += ppc970-pmu.o power5-pmu.o \
    8                                    power5+-pmu.o power6-pmu.o power7-pmu.o \
    9                                    isa207-common.o power8-pmu.o power9-pmu.o \
    10                                   generic-compat-pmu.o power10-pmu.o bhrb.o
    11  obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
    13  obj-$(CONFIG_PPC_POWERNV) += imc-pmu.o
    15  obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o e6500-pmu.o
    19  obj-$(CONFIG_VPA_PMU) += vpa-pmu.o
    21  obj-$(CONFIG_KVM_BOOK3S_HV_PMU) += kvm-hv-pmu.o
    23  obj-$(CONFIG_PPC_8xx) += 8xx-pmu.o

/linux/Documentation/devicetree/bindings/pinctrl/

marvell,dove-pinctrl.txt

    14  Note: pmu* also allows for Power Management functions listed below

    18  mpp0   0   gpio, pmu, uart2(rts), sdio0(cd), lcd0(pwm), pmu*
    19  mpp1   1   gpio, pmu, uart2(cts), sdio0(wp), lcd1(pwm), pmu*
    20  mpp2   2   gpio, pmu, uart2(txd), sdio0(buspwr), sata(prsnt),
    21             uart1(rts), pmu*
    22  mpp3   3   gpio, pmu, uart2(rxd), sdio0(ledctrl), sata(act),
    23             uart1(cts), lcd-spi(cs1), pmu*
    24  mpp4   4   gpio, pmu, uart3(rts), sdio1(cd), spi1(miso), pmu*
    25  mpp5   5   gpio, pmu, uart3(cts), sdio1(wp), spi1(cs), pmu*
    26  mpp6   6   gpio, pmu, uart3(txd), sdio1(buspwr), spi1(mosi), pmu*
    [all …]

/linux/tools/perf/pmu-events/

Build

    1   pmu-events-y += pmu-events.o
    2   JSON = $(shell find pmu-events/arch -name '*.json' -o -name '*.csv')
    3   JDIR_TEST = pmu-events/arch/test
    6   JEVENTS_PY = pmu-events/jevents.py
    7   METRIC_PY = pmu-events/metric.py
    8   METRIC_TEST_PY = pmu-events/metric_test.py
    9   EMPTY_PMU_EVENTS_C = pmu-events/empty-pmu-events.c
    10  PMU_EVENTS_C = $(OUTPUT)pmu-events/pmu-events.c
    11  METRIC_TEST_LOG = $(OUTPUT)pmu-events/metric_test.log
    12  TEST_EMPTY_PMU_EVENTS_C = $(OUTPUT)pmu-events/test-empty-pmu-events.c
    [all …]

/linux/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/

gm200.c

    in pmu_code():
        33  pmu_code(struct nv50_devinit *init, u32 pmu, u32 img, u32 len, bool sec)
        39  nvkm_wr32(device, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
        42  nvkm_wr32(device, 0x10a188, (pmu + i) >> 8);
    in pmu_data():
        53  pmu_data(struct nv50_devinit *init, u32 pmu, u32 img, u32 len)
        59  nvkm_wr32(device, 0x10a1c0, 0x01000000 | pmu);
    in pmu_load():
        88   struct nvbios_pmuR pmu;
        91   if (!nvbios_pmuRm(bios, type, &pmu))
        94   if (!post || !subdev->device->pmu)
        97   ret = nvkm_falcon_reset(&subdev->device->pmu->falcon);
        101  pmu_code(init, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
    [all …]