Lines Matching +full:cpu +full:- +full:cfg

1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/io-64-nonatomic-lo-hi.h>
66 /* CXL rev 3.0 Table 13-5 Events under CXL Vendor ID */
120 * - Fixed function counters refer to an Event Capabilities register.
127 void __iomem *base = info->base; in cxl_pmu_parse_caps()
137 return -ENODEV; in cxl_pmu_parse_caps()
140 info->num_counters = FIELD_GET(CXL_PMU_CAP_NUM_COUNTERS_MSK, val) + 1; in cxl_pmu_parse_caps()
141 info->counter_width = FIELD_GET(CXL_PMU_CAP_COUNTER_WIDTH_MSK, val); in cxl_pmu_parse_caps()
142 info->num_event_capabilities = FIELD_GET(CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK, val) + 1; in cxl_pmu_parse_caps()
144 info->filter_hdm = FIELD_GET(CXL_PMU_CAP_FILTERS_SUP_MSK, val) & CXL_PMU_FILTER_HDM; in cxl_pmu_parse_caps()
146 info->irq = FIELD_GET(CXL_PMU_CAP_MSI_N_MSK, val); in cxl_pmu_parse_caps()
148 info->irq = -1; in cxl_pmu_parse_caps()
151 for (i = 0; i < info->num_counters; i++) { in cxl_pmu_parse_caps()
160 set_bit(i, info->conf_counter_bm); in cxl_pmu_parse_caps()
173 return -ENOMEM; in cxl_pmu_parse_caps()
175 pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval); in cxl_pmu_parse_caps()
176 pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval); in cxl_pmu_parse_caps()
177 /* For a fixed purpose counter use the events mask from the counter CFG */ in cxl_pmu_parse_caps()
178 pmu_ev->msk = events_msk; in cxl_pmu_parse_caps()
179 pmu_ev->counter_idx = i; in cxl_pmu_parse_caps()
181 list_add(&pmu_ev->node, &info->event_caps_fixed); in cxl_pmu_parse_caps()
189 if (!bitmap_empty(info->conf_counter_bm, CXL_PMU_MAX_COUNTERS)) { in cxl_pmu_parse_caps()
194 info->num_event_capabilities) { in cxl_pmu_parse_caps()
197 return -ENOMEM; in cxl_pmu_parse_caps()
200 pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval); in cxl_pmu_parse_caps()
201 pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval); in cxl_pmu_parse_caps()
202 pmu_ev->msk = FIELD_GET(CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK, eval); in cxl_pmu_parse_caps()
203 pmu_ev->event_idx = j; in cxl_pmu_parse_caps()
204 list_add(&pmu_ev->node, &info->event_caps_configurable); in cxl_pmu_parse_caps()
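
The cxl_pmu_parse_caps() lines above build two lists of event-capability descriptors: one entry per fixed-function counter, whose event mask comes from that counter's CFG register, and one entry per supported Event Capabilities register for the configurable counters. A minimal sketch of the descriptor implied by those assignments follows; the struct name and field widths are assumptions based only on the accesses visible in this listing (vid, gid, msk, counter_idx/event_idx, node), not the driver's definitive definition.

#include <linux/list.h>
#include <linux/types.h>

/* Illustrative sketch only; mirrors the pmu_ev-> accesses shown above. */
struct cxl_pmu_ev_cap {
	u16 vid;			/* vendor ID owning the event group */
	u16 gid;			/* event group ID within that vendor's space */
	u32 msk;			/* bitmap of events this entry can count */
	union {
		int counter_idx;	/* fixed-function counter backing the entry */
		int event_idx;		/* Event Capabilities register index (configurable) */
	};
	struct list_head node;		/* on event_caps_fixed or event_caps_configurable */
};
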
231 [cxl_pmu_mask_attr] = CXL_PMU_FORMAT_ATTR(mask, "config:0-31"),
232 [cxl_pmu_gid_attr] = CXL_PMU_FORMAT_ATTR(gid, "config:32-47"),
233 [cxl_pmu_vid_attr] = CXL_PMU_FORMAT_ATTR(vid, "config:48-63"),
234 [cxl_pmu_threshold_attr] = CXL_PMU_FORMAT_ATTR(threshold, "config1:0-15"),
238 [cxl_pmu_hdm_attr] = CXL_PMU_FORMAT_ATTR(hdm, "config2:0-15"),
261 if (!info->filter_hdm && in cxl_pmu_format_is_visible()
266 return attr->mode; in cxl_pmu_format_is_visible()
277 return FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, event->attr.config); in cxl_pmu_config_get_mask()
282 return FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, event->attr.config); in cxl_pmu_config_get_gid()
287 return FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, event->attr.config); in cxl_pmu_config_get_vid()
292 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK, event->attr.config1); in cxl_pmu_config1_get_threshold()
297 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_INVERT_MSK, event->attr.config1); in cxl_pmu_config1_get_invert()
302 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_EDGE_MSK, event->attr.config1); in cxl_pmu_config1_get_edge()
314 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK, event->attr.config1); in cxl_pmu_config1_hdm_filter_en()
319 return FIELD_GET(CXL_PMU_ATTR_CONFIG2_HDM_MSK, event->attr.config2); in cxl_pmu_config2_get_hdm_decoder()
328 return sysfs_emit(buf, "config=%#llx\n", pmu_attr->id); in cxl_pmu_event_sysfs_show()
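
The format attributes above describe how an event is selected from userspace: the event mask sits in config bits 0-31, the group ID in bits 32-47 and the vendor ID in bits 48-63, with the threshold in config1 bits 0-15 and the HDM decoder filter in config2 bits 0-15 (only meaningful when the PMU supports HDM filtering). A small self-contained helper showing how a tool might pack the config value is sketched below; the function name is hypothetical and only the bit positions are taken from the listing.

#include <stdint.h>

/*
 * Pack "mask" (config:0-31), "gid" (config:32-47) and "vid" (config:48-63)
 * into a perf_event_attr::config value. Illustrative userspace helper only.
 * The threshold goes in config1:0-15 and the HDM decoder ID in config2:0-15.
 */
static inline uint64_t cxl_pmu_pack_config(uint32_t mask, uint16_t gid,
					   uint16_t vid)
{
	return (uint64_t)mask | ((uint64_t)gid << 32) | ((uint64_t)vid << 48);
}
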
341 /* CXL rev 3.0 Table 3-17 - Device to Host Requests */
357 /* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
365 /* CXL rev 3.0 Table 3-21 - CXL.cache - Mapping of H2D Requests to D2H Responses */
369 /* CXL rev 3.0 Table 3-22 - H2D Response Opcode Encodings */
377 /* CXL rev 3.0 Table 13-5 directly lists these */
380 /* CXL rev 3.1 Table 3-35 M2S Req Memory Opcodes */
391 /* CXL rev 3.0 Table 3-35 M2S RwD Memory Opcodes */
395 /* CXL rev 3.0 Table 3-38 M2S BIRsp Memory Opcodes */
402 /* CXL rev 3.0 Table 3-40 S2M BISnp Opcodes */
409 /* CXL rev 3.1 Table 3-50 S2M NDR Opcodes */
415 /* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
418 /* CXL rev 3.0 Table 13-5 directly lists these */
434 list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) { in cxl_pmu_find_fixed_counter_ev_cap()
435 if (vid != pmu_ev->vid || gid != pmu_ev->gid) in cxl_pmu_find_fixed_counter_ev_cap()
439 if (msk == pmu_ev->msk) in cxl_pmu_find_fixed_counter_ev_cap()
443 return ERR_PTR(-EINVAL); in cxl_pmu_find_fixed_counter_ev_cap()
451 list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) { in cxl_pmu_find_config_counter_ev_cap()
452 if (vid != pmu_ev->vid || gid != pmu_ev->gid) in cxl_pmu_find_config_counter_ev_cap()
456 if (msk & ~pmu_ev->msk) in cxl_pmu_find_config_counter_ev_cap()
462 return ERR_PTR(-EINVAL); in cxl_pmu_find_config_counter_ev_cap()
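
Both lookup helpers above walk their list until the requested vendor and group IDs match; they differ only in how the event mask is compared. A fixed-function counter is matched only on an exact mask, while a configurable counter merely has to advertise a superset of the requested events. Stripped of the list walking, the two checks reduce to the standalone restatement below (not the driver's code).

#include <stdbool.h>
#include <stdint.h>

/* Fixed-function counter: must count exactly the requested events. */
static bool fixed_counter_matches(uint32_t want_msk, uint32_t cap_msk)
{
	return want_msk == cap_msk;
}

/*
 * Configurable counter: any subset of the advertised events may be
 * selected, so only requested bits outside the capability mask rule it out.
 */
static bool config_counter_matches(uint32_t want_msk, uint32_t cap_msk)
{
	return !(want_msk & ~cap_msk);
}
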
472 int vid = FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, pmu_attr->id); in cxl_pmu_event_is_visible()
473 int gid = FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, pmu_attr->id); in cxl_pmu_event_is_visible()
474 int msk = FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, pmu_attr->id); in cxl_pmu_event_is_visible()
477 return attr->mode; in cxl_pmu_event_is_visible()
480 return attr->mode; in cxl_pmu_event_is_visible()
496 return cpumap_print_to_pagebuf(true, buf, cpumask_of(info->on_cpu)); in cpumask_show()
520 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_get_event_idx()
535 if (!test_bit(pmu_ev->counter_idx, info->used_counter_bm)) { in cxl_pmu_get_event_idx()
536 *counter_idx = pmu_ev->counter_idx; in cxl_pmu_get_event_idx()
547 bitmap_andnot(configurable_and_free, info->conf_counter_bm, in cxl_pmu_get_event_idx()
548 info->used_counter_bm, CXL_PMU_MAX_COUNTERS); in cxl_pmu_get_event_idx()
552 return -EINVAL; in cxl_pmu_get_event_idx()
558 return -EINVAL; in cxl_pmu_get_event_idx()
563 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_init()
566 /* Top level type sanity check - is this a Hardware Event being requested */ in cxl_pmu_event_init()
567 if (event->attr.type != event->pmu->type) in cxl_pmu_event_init()
568 return -ENOENT; in cxl_pmu_event_init()
570 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in cxl_pmu_event_init()
571 return -EOPNOTSUPP; in cxl_pmu_event_init()
583 event->cpu = info->on_cpu; in cxl_pmu_event_init()
591 void __iomem *base = info->base; in cxl_pmu_enable()
600 void __iomem *base = info->base; in cxl_pmu_disable()
614 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_start()
615 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_start()
616 void __iomem *base = info->base; in cxl_pmu_event_start()
617 u64 cfg; in cxl_pmu_event_start() local
623 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) in cxl_pmu_event_start()
626 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); in cxl_pmu_event_start()
627 hwc->state = 0; in cxl_pmu_event_start()
633 if (info->filter_hdm) { in cxl_pmu_event_start()
635 cfg = cxl_pmu_config2_get_hdm_decoder(event); in cxl_pmu_event_start()
637 cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */ in cxl_pmu_event_start()
638 writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0)); in cxl_pmu_event_start()
641 cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_start()
642 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1); in cxl_pmu_event_start()
643 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW, 1); in cxl_pmu_event_start()
644 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1); in cxl_pmu_event_start()
645 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EDGE, in cxl_pmu_event_start()
647 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INVERT, in cxl_pmu_event_start()
651 if (test_bit(hwc->idx, info->conf_counter_bm)) { in cxl_pmu_event_start()
652 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK, in cxl_pmu_event_start()
653 hwc->event_base); in cxl_pmu_event_start()
654 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENTS_MSK, in cxl_pmu_event_start()
657 cfg &= ~CXL_PMU_COUNTER_CFG_THRESHOLD_MSK; in cxl_pmu_event_start()
664 * (CXL 3.0 8.2.7.2.1 Counter Configuration - threshold field definition) in cxl_pmu_event_start()
666 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_THRESHOLD_MSK, in cxl_pmu_event_start()
668 writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_start()
670 local64_set(&hwc->prev_count, 0); in cxl_pmu_event_start()
671 writeq(0, base + CXL_PMU_COUNTER_REG(hwc->idx)); in cxl_pmu_event_start()
678 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_read_counter()
679 void __iomem *base = info->base; in cxl_pmu_read_counter()
681 return readq(base + CXL_PMU_COUNTER_REG(event->hw.idx)); in cxl_pmu_read_counter()
686 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in __cxl_pmu_read()
687 struct hw_perf_event *hwc = &event->hw; in __cxl_pmu_read()
691 prev_cnt = local64_read(&hwc->prev_count); in __cxl_pmu_read()
693 } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != prev_cnt); in __cxl_pmu_read()
699 delta = (new_cnt - prev_cnt) & GENMASK_ULL(info->counter_width - 1, 0); in __cxl_pmu_read()
700 if (overflow && delta < GENMASK_ULL(info->counter_width - 1, 0)) in __cxl_pmu_read()
701 delta += (1UL << info->counter_width); in __cxl_pmu_read()
703 local64_add(delta, &event->count); in __cxl_pmu_read()
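
__cxl_pmu_read() accumulates the difference between successive hardware reads modulo the implemented counter width, so counters narrower than 64 bits still yield the correct delta across a wrap; when the overflow interrupt has fired, one extra full counter period is credited unless the masked delta already accounts for it. A small worked sketch with a hypothetical 16-bit counter (the real width comes from info->counter_width) illustrates the arithmetic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define COUNTER_WIDTH	16			/* hypothetical width, for illustration */
#define COUNTER_MASK	((1ULL << COUNTER_WIDTH) - 1)

static uint64_t counter_delta(uint64_t prev, uint64_t now, bool overflow)
{
	/* Subtraction modulo 2^width absorbs a single wrap of the counter. */
	uint64_t delta = (now - prev) & COUNTER_MASK;

	/*
	 * Mirrors the driver's fix-up: credit a full period when an overflow
	 * was signalled but is not already reflected in the masked delta.
	 */
	if (overflow && delta < COUNTER_MASK)
		delta += 1ULL << COUNTER_WIDTH;

	return delta;
}

int main(void)
{
	/* prev = 0xFFF0, the counter wrapped, now reads 0x0010: 32 events. */
	printf("%llu\n", (unsigned long long)counter_delta(0xFFF0, 0x0010, false));
	return 0;
}
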
713 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_stop()
714 void __iomem *base = info->base; in cxl_pmu_event_stop()
715 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_stop()
716 u64 cfg; in cxl_pmu_event_stop() local
719 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); in cxl_pmu_event_stop()
720 hwc->state |= PERF_HES_STOPPED; in cxl_pmu_event_stop()
722 cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_stop()
723 cfg &= ~(FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1) | in cxl_pmu_event_stop()
725 writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_stop()
727 hwc->state |= PERF_HES_UPTODATE; in cxl_pmu_event_stop()
732 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_add()
733 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_add()
737 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in cxl_pmu_event_add()
743 hwc->idx = idx; in cxl_pmu_event_add()
746 hwc->event_base = event_idx; in cxl_pmu_event_add()
747 info->hw_events[idx] = event; in cxl_pmu_event_add()
748 set_bit(idx, info->used_counter_bm); in cxl_pmu_event_add()
758 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_del()
759 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_del()
762 clear_bit(hwc->idx, info->used_counter_bm); in cxl_pmu_event_del()
763 info->hw_events[hwc->idx] = NULL; in cxl_pmu_event_del()
770 void __iomem *base = info->base; in cxl_pmu_irq()
782 for_each_set_bit(i, overflowedbm, info->num_counters) { in cxl_pmu_irq()
783 struct perf_event *event = info->hw_events[i]; in cxl_pmu_irq()
786 dev_dbg(info->pmu.dev, in cxl_pmu_irq()
803 perf_pmu_unregister(&info->pmu); in cxl_pmu_perf_unregister()
810 cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node); in cxl_pmu_cpuhp_remove()
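
cxl_pmu_perf_unregister() and cxl_pmu_cpuhp_remove() each wrap a single teardown call, which is the usual shape of devm-managed cleanup actions: probe registers them so the PMU is unregistered and the hotplug instance removed automatically on driver unbind. The registration itself is not part of this listing; a typical pattern would look like the sketch below (an assumption, not a quote from the driver, and the helper name is hypothetical).

#include <linux/device.h>

static int cxl_pmu_register_teardown(struct device *dev,
				     struct cxl_pmu_info *info)
{
	int rc;

	/* Run after cpuhp_state_add_instance() has succeeded. */
	rc = devm_add_action_or_reset(dev, cxl_pmu_cpuhp_remove, info);
	if (rc)
		return rc;

	/* Run after perf_pmu_register() has succeeded. */
	return devm_add_action_or_reset(dev, cxl_pmu_perf_unregister, info);
}
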
816 struct pci_dev *pdev = to_pci_dev(dev->parent); in cxl_pmu_probe()
824 return -ENOMEM; in cxl_pmu_probe()
827 INIT_LIST_HEAD(&info->event_caps_fixed); in cxl_pmu_probe()
828 INIT_LIST_HEAD(&info->event_caps_configurable); in cxl_pmu_probe()
830 info->base = pmu->base; in cxl_pmu_probe()
832 info->on_cpu = -1; in cxl_pmu_probe()
837 info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events), in cxl_pmu_probe()
838 info->num_counters, GFP_KERNEL); in cxl_pmu_probe()
839 if (!info->hw_events) in cxl_pmu_probe()
840 return -ENOMEM; in cxl_pmu_probe()
842 switch (pmu->type) { in cxl_pmu_probe()
845 pmu->assoc_id, pmu->index); in cxl_pmu_probe()
849 return -ENOMEM; in cxl_pmu_probe()
851 info->pmu = (struct pmu) { in cxl_pmu_probe()
868 if (info->irq <= 0) in cxl_pmu_probe()
869 return -EINVAL; in cxl_pmu_probe()
871 rc = pci_irq_vector(pdev, info->irq); in cxl_pmu_probe()
878 return -ENOMEM; in cxl_pmu_probe()
884 info->irq = irq; in cxl_pmu_probe()
886 rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node); in cxl_pmu_probe()
894 rc = perf_pmu_register(&info->pmu, info->pmu.name, -1); in cxl_pmu_probe()
911 static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) in cxl_pmu_online_cpu() argument
915 if (info->on_cpu != -1) in cxl_pmu_online_cpu()
918 info->on_cpu = cpu; in cxl_pmu_online_cpu()
920 * CPU HP lock is held so we should be guaranteed that the CPU hasn't yet in cxl_pmu_online_cpu()
923 WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu))); in cxl_pmu_online_cpu()
928 static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) in cxl_pmu_offline_cpu() argument
933 if (info->on_cpu != cpu) in cxl_pmu_offline_cpu()
936 info->on_cpu = -1; in cxl_pmu_offline_cpu()
937 target = cpumask_any_but(cpu_online_mask, cpu); in cxl_pmu_offline_cpu()
939 dev_err(info->pmu.dev, "Unable to find a suitable CPU\n"); in cxl_pmu_offline_cpu()
943 perf_pmu_migrate_context(&info->pmu, cpu, target); in cxl_pmu_offline_cpu()
944 info->on_cpu = target; in cxl_pmu_offline_cpu()
946 * CPU HP lock is held so we should be guaranteed that this CPU hasn't yet in cxl_pmu_offline_cpu()
949 WARN_ON(irq_set_affinity(info->irq, cpumask_of(target))); in cxl_pmu_offline_cpu()