Lines Matching +full:ddr +full:pmu

// SPDX-License-Identifier: GPL-2.0-only

 * Author: Lin Huang <hl@rock-chips.com>

#include <linux/devfreq-event.h>

 * struct dmc_count_channel - structure to hold counter values from the DDR controller
 * ...
 * @clock_cycles: DDR clock cycles

 * The dfi controller can monitor DDR load. It has an upper and lower threshold
 * ...
 * generated to indicate the DDR frequency should be changed.
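/*
 * A minimal sketch (not part of this driver) of how a devfreq governor could
 * consume the load reported by this event device. "example_get_ddr_load" is a
 * hypothetical helper; "edev" is assumed to have been looked up, e.g. via
 * devfreq_event_get_edev_by_phandle(), and error handling is trimmed.
 */
static int example_get_ddr_load(struct devfreq_event_dev *edev,
                                unsigned long *load_percent)
{
        struct devfreq_event_data edata;
        int ret;

        ret = devfreq_event_get_event(edev, &edata);
        if (ret < 0)
                return ret;

        if (!edata.total_count)
                return -EINVAL;

        /* load_count: busy counts; total_count: DDR clock cycles (see below) */
        *load_percent = edata.load_count * 100 / edata.total_count;
        return 0;
}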
        struct pmu pmu;
        bool lp5_ckr;   /* true if in 4:1 command-to-data clock ratio mode */

/* in rockchip_dfi_ddrtype_to_ctrl(): */
        switch (dfi->ddr_type) {
        ddrmon_ver = readl_relaxed(dfi->regs);
                       dfi->lp5_bank_mode);
        dev_err(&dfi->edev->dev,
                "unsupported DDRMON version 0x%04X, please let linux-rockchip know!\n",
        return -EOPNOTSUPP;
        dev_err(&dfi->edev->dev, "unsupported memory type 0x%X\n",
                dfi->ddr_type);
        return -EOPNOTSUPP;

/* in rockchip_dfi_enable(): */
        void __iomem *dfi_regs = dfi->regs;

        mutex_lock(&dfi->mutex);

        dfi->usecount++;
        if (dfi->usecount > 1)

        ret = clk_prepare_enable(dfi->clk);
                dev_err(&dfi->edev->dev, "failed to enable dfi clk: %d\n", ret);

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
                writel_relaxed(ctrl, dfi_regs + i * dfi->ddrmon_stride +
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
                if (dfi->ddrmon_ctrl_single)

        mutex_unlock(&dfi->mutex);
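/*
 * Note (assumption, not from the source): usecount makes enable/disable
 * refcounted, so the devfreq-event side and the perf PMU side can share the
 * same DDRMON hardware without turning it off under each other.
 */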
/* in rockchip_dfi_disable(): */
        void __iomem *dfi_regs = dfi->regs;

        mutex_lock(&dfi->mutex);

        dfi->usecount--;

        WARN_ON_ONCE(dfi->usecount < 0);

        if (dfi->usecount > 0)

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))
                               dfi_regs + i * dfi->ddrmon_stride + DDRMON_CTRL);
                if (dfi->ddrmon_ctrl_single)

        clk_disable_unprepare(dfi->clk);

        mutex_unlock(&dfi->mutex);

/* in rockchip_dfi_read_counters(): */
        void __iomem *dfi_regs = dfi->regs;

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))
                res->c[i].read_access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_RD_NUM + i * dfi->ddrmon_stride);
                res->c[i].write_access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_WR_NUM + i * dfi->ddrmon_stride);
                res->c[i].access = readl_relaxed(dfi_regs +
                                DDRMON_CH0_DFI_ACCESS_NUM + i * dfi->ddrmon_stride);
                res->c[i].clock_cycles = readl_relaxed(dfi_regs +
                                DDRMON_CH0_COUNT_NUM + i * dfi->ddrmon_stride);
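/*
 * Note (not from the source): the reads above imply a fixed per-channel
 * register stride, i.e. channel i's counter lives at
 *
 *         base + DDRMON_CH0_<counter> + i * ddrmon_stride
 *
 * with ddrmon_stride = 0x14 on rk3399 and 0x4000 on rk3588 (see the per-SoC
 * init functions below).
 */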
/* in rockchip_dfi_get_event(): */
        struct dmc_count *last = &dfi->last_event_count;

        for (i = 0; i < dfi->max_channels; i++) {
                if (!(dfi->channel_mask & BIT(i)))
                a = count.c[i].access - last->c[i].access;
                c = count.c[i].clock_cycles - last->c[i].clock_cycles;

        edata->load_count = access * 4;
        edata->total_count = clock_cycles;

        dfi->last_event_count = count;
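/*
 * Note (assumption, not from the source): load_count/total_count is the
 * ratio a governor converts into a utilization percentage (see the sketch
 * near the top); the factor of 4 presumably scales the DFI access count
 * into the DDR clock cycle domain used by total_count.
 */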
/* in rockchip_ddr_perf_counters_add(): */
        const struct dmc_count *last = &dfi->last_perf_count;

        for (i = 0; i < dfi->max_channels; i++) {
                res->c[i].read_access = dfi->total_count.c[i].read_access +
                        (u32)(now->c[i].read_access - last->c[i].read_access);
                res->c[i].write_access = dfi->total_count.c[i].write_access +
                        (u32)(now->c[i].write_access - last->c[i].write_access);
                res->c[i].access = dfi->total_count.c[i].access +
                        (u32)(now->c[i].access - last->c[i].access);
                res->c[i].clock_cycles = dfi->total_count.c[i].clock_cycles +
                        (u32)(now->c[i].clock_cycles - last->c[i].clock_cycles);
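/*
 * Note (not from the source): the (u32) casts make each delta wrap modulo
 * 2^32, so a single hardware counter overflow between two samples is still
 * accounted correctly. For example, now = 0x00000005 and last = 0xfffffffe
 * give a delta of 7, which is then added to the 64-bit software totals.
 */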
/* in ddr_perf_cpumask_show(): */
        struct pmu *pmu = dev_get_drvdata(dev);
        struct rockchip_dfi *dfi = container_of(pmu, struct rockchip_dfi, pmu);

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(dfi->cpu));

        PMU_EVENT_ATTR_STRING(_name.scale, _var##_scale, "9.536743164e-07")

DFI_PMU_EVENT_ATTR(read-bytes0, ddr_pmu_read_bytes0, "event="__stringify(PERF_EVENT_READ_BYTES0));
DFI_PMU_EVENT_ATTR(write-bytes0, ddr_pmu_write_bytes0, "event="__stringify(PERF_EVENT_WRITE_BYTES0));

DFI_PMU_EVENT_ATTR(read-bytes1, ddr_pmu_read_bytes1, "event="__stringify(PERF_EVENT_READ_BYTES1));
DFI_PMU_EVENT_ATTR(write-bytes1, ddr_pmu_write_bytes1, "event="__stringify(PERF_EVENT_WRITE_BYTES1));

DFI_PMU_EVENT_ATTR(read-bytes2, ddr_pmu_read_bytes2, "event="__stringify(PERF_EVENT_READ_BYTES2));
DFI_PMU_EVENT_ATTR(write-bytes2, ddr_pmu_write_bytes2, "event="__stringify(PERF_EVENT_WRITE_BYTES2));

DFI_PMU_EVENT_ATTR(read-bytes3, ddr_pmu_read_bytes3, "event="__stringify(PERF_EVENT_READ_BYTES3));
DFI_PMU_EVENT_ATTR(write-bytes3, ddr_pmu_write_bytes3, "event="__stringify(PERF_EVENT_WRITE_BYTES3));

DFI_PMU_EVENT_ATTR(read-bytes, ddr_pmu_read_bytes, "event="__stringify(PERF_EVENT_READ_BYTES));
DFI_PMU_EVENT_ATTR(write-bytes, ddr_pmu_write_bytes, "event="__stringify(PERF_EVENT_WRITE_BYTES));
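/*
 * Note (not from the source): the scale string above is 2^-20 (1/1048576),
 * so perf reports these byte counts in MiB. A possible invocation, given
 * that the PMU registers below under the name "rockchip_ddr":
 *
 *         perf stat -a -e rockchip_ddr/read-bytes/,rockchip_ddr/write-bytes/ sleep 1
 */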
PMU_FORMAT_ATTR(event, "config:0-7");

/* in rockchip_ddr_perf_event_init(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (event->attach_state & PERF_ATTACH_TASK)
                return -EINVAL;

        if (event->cpu < 0) {
                dev_warn(dfi->dev, "Can't provide per-task data!\n");
                return -EINVAL;

/* in rockchip_ddr_perf_event_get_count(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);
        int blen = dfi->burst_len;

                seq = read_seqbegin(&dfi->count_seqlock);
        } while (read_seqretry(&dfi->count_seqlock, seq));

        switch (event->attr.config) {
                count = total.c[0].clock_cycles * dfi->count_multiplier;
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].read_access * blen * dfi->buswidth[i];
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].write_access * blen * dfi->buswidth[i];
                count = total.c[0].read_access * blen * dfi->buswidth[0];
                count = total.c[0].write_access * blen * dfi->buswidth[0];
                count = total.c[1].read_access * blen * dfi->buswidth[1];
                count = total.c[1].write_access * blen * dfi->buswidth[1];
                count = total.c[2].read_access * blen * dfi->buswidth[2];
                count = total.c[2].write_access * blen * dfi->buswidth[2];
                count = total.c[3].read_access * blen * dfi->buswidth[3];
                count = total.c[3].write_access * blen * dfi->buswidth[3];
                for (i = 0; i < dfi->max_channels; i++)
                        count += total.c[i].access * blen * dfi->buswidth[i];
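/*
 * Note (not from the source): every byte event above follows
 *
 *         bytes = accesses * burst_len * buswidth
 *
 * so with a burst length of 16 and a 4-byte wide channel, for example, one
 * counted access corresponds to 16 * 4 = 64 bytes transferred.
 */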
/* in rockchip_ddr_perf_event_update(): */
        if (event->attr.config >= PERF_ACCESS_TYPE_MAX)

        prev = local64_xchg(&event->hw.prev_count, now);
        local64_add(now - prev, &event->count);

/* in rockchip_ddr_perf_event_start(): */
        local64_set(&event->hw.prev_count, now);

/* in rockchip_ddr_perf_event_add(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        dfi->active_events++;

        if (dfi->active_events == 1) {
                dfi->total_count = (struct dmc_count){};
                rockchip_dfi_read_counters(dfi, &dfi->last_perf_count);
                hrtimer_start(&dfi->timer, ns_to_ktime(NSEC_PER_SEC), HRTIMER_MODE_REL);

/* in rockchip_ddr_perf_event_del(): */
        struct rockchip_dfi *dfi = container_of(event->pmu, struct rockchip_dfi, pmu);

        dfi->active_events--;

        if (dfi->active_events == 0)
                hrtimer_cancel(&dfi->timer);

/* in rockchip_dfi_timer(): */
        write_seqlock(&dfi->count_seqlock);

        dfi->total_count = total;
        dfi->last_perf_count = now;

        write_sequnlock(&dfi->count_seqlock);

        hrtimer_forward_now(&dfi->timer, ns_to_ktime(NSEC_PER_SEC));
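/*
 * Note (assumption, not from the source): the hardware counters are 32 bits
 * wide, so this 1 Hz timer folds the wrap-safe deltas into the 64-bit
 * software totals; the seqlock lets rockchip_ddr_perf_event_get_count()
 * read a consistent {total_count, last_perf_count} snapshot without
 * blocking the timer.
 */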
/* in ddr_perf_offline_cpu(): */
        if (cpu != dfi->cpu)

        perf_pmu_migrate_context(&dfi->pmu, cpu, target);
        dfi->cpu = target;

/* in rockchip_ddr_cpuhp_remove_state(): */
        cpuhp_remove_multi_state(dfi->cpuhp_state);

/* in rockchip_ddr_cpuhp_remove_instance(): */
        cpuhp_state_remove_instance_nocalls(dfi->cpuhp_state, &dfi->node);

/* in rockchip_ddr_perf_remove(): */
        perf_pmu_unregister(&dfi->pmu);

/* in rockchip_ddr_perf_init(): */
        struct pmu *pmu = &dfi->pmu;

        seqlock_init(&dfi->count_seqlock);

        pmu->module = THIS_MODULE;
        pmu->capabilities = PERF_PMU_CAP_NO_EXCLUDE;
        pmu->task_ctx_nr = perf_invalid_context;
        pmu->attr_groups = attr_groups;
        pmu->event_init = rockchip_ddr_perf_event_init;
        pmu->add = rockchip_ddr_perf_event_add;
        pmu->del = rockchip_ddr_perf_event_del;
        pmu->start = rockchip_ddr_perf_event_start;
        pmu->stop = rockchip_ddr_perf_event_stop;
        pmu->read = rockchip_ddr_perf_event_update;

        dfi->cpu = raw_smp_processor_id();

                dev_err(dfi->dev, "cpuhp_setup_state_multi failed: %d\n", ret);

        dfi->cpuhp_state = ret;

        ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_state, dfi);

        ret = cpuhp_state_add_instance_nocalls(dfi->cpuhp_state, &dfi->node);
                dev_err(dfi->dev, "Error %d registering hotplug\n", ret);

        ret = devm_add_action_or_reset(dfi->dev, rockchip_ddr_cpuhp_remove_instance, dfi);

        hrtimer_setup(&dfi->timer, rockchip_dfi_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);

        switch (dfi->ddr_type) {
                dfi->burst_len = 8;
                dfi->burst_len = 16;

        if (!dfi->count_multiplier)
                dfi->count_multiplier = 1;

        ret = perf_pmu_register(pmu, "rockchip_ddr", -1);

        return devm_add_action_or_reset(dfi->dev, rockchip_ddr_perf_remove, dfi);
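/*
 * Note (not from the source): task_ctx_nr = perf_invalid_context marks this
 * as a system-wide (uncore-style) PMU, matching event_init above, which
 * rejects per-task events and events without a CPU; the cpuhp callbacks
 * migrate the events to another online CPU when the bound one goes down.
 */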
/* in rk3399_dfi_init(): */
        struct regmap *regmap_pmu = dfi->regmap_pmu;

        dfi->clk = devm_clk_get(dfi->dev, "pclk_ddr_mon");
        if (IS_ERR(dfi->clk))
                return dev_err_probe(dfi->dev, PTR_ERR(dfi->clk),

        /* get ddr type */
        dfi->ddr_type = FIELD_GET(RK3399_PMUGRF_OS_REG2_DDRTYPE, val);

        dfi->channel_mask = GENMASK(1, 0);
        dfi->max_channels = 2;

        dfi->buswidth[0] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH0, val) == 0 ? 4 : 2;
        dfi->buswidth[1] = FIELD_GET(RK3399_PMUGRF_OS_REG2_BW_CH1, val) == 0 ? 4 : 2;

        dfi->ddrmon_stride = 0x14;
        dfi->ddrmon_ctrl_single = true;

/* in rk3568_dfi_init(): */
        struct regmap *regmap_pmu = dfi->regmap_pmu;

        /* lower 3 bits of the DDR type */
        dfi->ddr_type = FIELD_GET(RK3568_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

        /*
         * For version three and higher the upper two bits of the DDR type are
         * in RK3568_PMUGRF_OS_REG3.
         */
        dfi->ddr_type |= FIELD_GET(RK3568_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

        dfi->channel_mask = BIT(0);
        dfi->max_channels = 1;

        dfi->buswidth[0] = FIELD_GET(RK3568_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;

        dfi->ddrmon_stride = 0x0;       /* not relevant, we only have a single channel on this SoC */
        dfi->ddrmon_ctrl_single = true;

/* in rk3588_dfi_init(): */
        struct regmap *regmap_pmu = dfi->regmap_pmu;

        /* lower 3 bits of the DDR type */
        dfi->ddr_type = FIELD_GET(RK3588_PMUGRF_OS_REG2_DRAMTYPE_INFO, reg2);

        /*
         * For version three and higher the upper two bits of the DDR type are
         * in RK3588_PMUGRF_OS_REG3.
         */
        dfi->ddr_type |= FIELD_GET(RK3588_PMUGRF_OS_REG3_DRAMTYPE_INFO_V3, reg3) << 3;

        dfi->buswidth[0] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg2) == 0 ? 4 : 2;
        dfi->buswidth[1] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg2) == 0 ? 4 : 2;
        dfi->buswidth[2] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH0, reg4) == 0 ? 4 : 2;
        dfi->buswidth[3] = FIELD_GET(RK3588_PMUGRF_OS_REG2_BW_CH1, reg4) == 0 ? 4 : 2;
        dfi->channel_mask = FIELD_GET(RK3588_PMUGRF_OS_REG2_CH_INFO, reg2) |
        dfi->max_channels = 4;

        dfi->ddrmon_stride = 0x4000;
        dfi->count_multiplier = 2;

        if (dfi->ddr_type == ROCKCHIP_DDRTYPE_LPDDR5) {
                dfi->lp5_bank_mode = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_BANK_MODE, reg6) << 7;
                dfi->lp5_ckr = FIELD_GET(RK3588_PMUGRF_OS_REG6_LP5_CKR, reg6);
                if (dfi->lp5_ckr)
                        dfi->count_multiplier *= 2;
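/*
 * Note (assumption, not from the source): count_multiplier scales the raw
 * DDRMON cycle counter into DDR clock cycles for the perf "cycles" event
 * above; it is 2 on rk3588 and doubled again in LPDDR5 4:1 clock ratio
 * mode, presumably because the monitor then ticks at a smaller fraction of
 * the DDR clock.
 */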
811 { .compatible = "rockchip,rk3399-dfi", .data = rk3399_dfi_init },
812 { .compatible = "rockchip,rk3568-dfi", .data = rk3568_dfi_init },
813 { .compatible = "rockchip,rk3588-dfi", .data = rk3588_dfi_init },
/* in rockchip_dfi_probe(): */
        struct device *dev = &pdev->dev;
        struct device_node *np = pdev->dev.of_node, *node;

        soc_init = of_device_get_match_data(&pdev->dev);
                return -EINVAL;

                return -ENOMEM;

        dfi->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(dfi->regs))
                return PTR_ERR(dfi->regs);

        node = of_parse_phandle(np, "rockchip,pmu", 0);
                return dev_err_probe(&pdev->dev, -ENODEV, "Can't find pmu_grf registers\n");

        dfi->regmap_pmu = syscon_node_to_regmap(node);

        if (IS_ERR(dfi->regmap_pmu))
                return PTR_ERR(dfi->regmap_pmu);

        dfi->dev = dev;
        mutex_init(&dfi->mutex);

        desc = &dfi->desc;
        desc->ops = &rockchip_dfi_ops;
        desc->driver_data = dfi;
        desc->name = np->name;

        dfi->edev = devm_devfreq_event_add_edev(&pdev->dev, desc);
        if (IS_ERR(dfi->edev)) {
                dev_err(&pdev->dev,
                        "failed to add devfreq-event device\n");
                return PTR_ERR(dfi->edev);

        .name = "rockchip-dfi",

MODULE_AUTHOR("Lin Huang <hl@rock-chips.com>");